diff --git a/tasks/makoveeva_matmul_double_seq/common/include/common.hpp b/tasks/makoveeva_matmul_double_seq/common/include/common.hpp
new file mode 100644
index 000000000..b02a6c4ae
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/common/include/common.hpp
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <cstddef>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "task/include/task.hpp"
+
+namespace makoveeva_matmul_double_seq {
+
+using InType = std::tuple<size_t, std::vector<double>, std::vector<double>>;
+using OutType = std::vector<double>;
+using TestType = std::tuple<int, std::string>;  // NOTE(review): template args lost in extraction -- confirm against framework
+using BaseTask = ppc::task::Task<InType, OutType>;
+
+}  // namespace makoveeva_matmul_double_seq
diff --git a/tasks/makoveeva_matmul_double_seq/info.json b/tasks/makoveeva_matmul_double_seq/info.json
new file mode 100644
index 000000000..fef624f46
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/info.json
@@ -0,0 +1,9 @@
+{
+  "student": {
+    "first_name": "Софья",
+    "group_number": "3823Б1ПР1",
+    "last_name": "Маковеева",
+    "middle_name": "Игоревна",
+    "task_number": "1"
+  }
+}
diff --git a/tasks/makoveeva_matmul_double_seq/seq/include/ops_seq.hpp b/tasks/makoveeva_matmul_double_seq/seq/include/ops_seq.hpp
new file mode 100644
index 000000000..91d7d8c6d
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/seq/include/ops_seq.hpp
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "makoveeva_matmul_double_seq/common/include/common.hpp"
+#include "task/include/task.hpp"
+
+namespace makoveeva_matmul_double_seq {
+
+class MatmulDoubleSeqTask : public BaseTask {  // sequential blocked n x n double matmul
+ public:
+  static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() {
+    return ppc::task::TypeOfTask::kSEQ;
+  }
+
+  explicit MatmulDoubleSeqTask(const InType &in);
+
+  bool ValidationImpl() override;
+  bool PreProcessingImpl() override;
+  bool RunImpl() override;
+  bool PostProcessingImpl() override;
+
+  [[nodiscard]] const std::vector<double> &GetResult() const {
+    return C_;
+  }
+
+ private:
+  size_t n_;               // matrix dimension (matrices are n_ x n_, row-major)
+  std::vector<double> A_;  // left operand
+  std::vector<double> B_;  // right operand
+  std::vector<double> C_;  // result, C = A * B
+};
+
+}  // namespace makoveeva_matmul_double_seq
diff --git a/tasks/makoveeva_matmul_double_seq/seq/src/ops_seq.cpp b/tasks/makoveeva_matmul_double_seq/seq/src/ops_seq.cpp
new file mode 100644
index 000000000..738a9dc3b
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/seq/src/ops_seq.cpp
@@ -0,0 +1,87 @@
+#include "makoveeva_matmul_double_seq/seq/include/ops_seq.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
+#include "makoveeva_matmul_double_seq/common/include/common.hpp"  // for InType
+
+namespace makoveeva_matmul_double_seq {
+namespace {
+
+void ProcessBlock(const std::vector<double> &a, const std::vector<double> &b, std::vector<double> &c, int n,
+                  int i_start, int i_end, int j_start, int j_end, int k_start, int k_end) {
+  for (int i = i_start; i < i_end; ++i) {
+    for (int j = j_start; j < j_end; ++j) {
+      double sum = 0.0;
+      for (int k = k_start; k < k_end; ++k) {
+        sum += a[(i * n) + k] * b[(k * n) + j];
+      }
+      c[(i * n) + j] += sum;  // accumulate one block's contribution to C[i][j]
+    }
+  }
+}
+
+int CalculateBlockSize(int n) {
+  return std::max(1, static_cast<int>(std::sqrt(static_cast<double>(n))));
+}
+
+int CalculateNumBlocks(int n, int block_size) {
+  return (n + block_size - 1) / block_size;  // ceil(n / block_size)
+}
+
+}  // namespace
+
+MatmulDoubleSeqTask::MatmulDoubleSeqTask(const InType &in)
+    : n_(std::get<0>(in)), A_(std::get<1>(in)), B_(std::get<2>(in)), C_(n_ * n_, 0.0) {
+  SetTypeOfTask(GetStaticTypeOfTask());
+  GetOutput() = C_;
+}
+
+bool MatmulDoubleSeqTask::ValidationImpl() {
+  const bool valid_n = n_ > 0;
+  const bool valid_a = A_.size() == n_ * n_;
+  const bool valid_b = B_.size() == n_ * n_;
+  return valid_n && valid_a && valid_b;
+}
+
+bool MatmulDoubleSeqTask::PreProcessingImpl() {
+  return true;
+}
+
+bool MatmulDoubleSeqTask::RunImpl() {
+  if (n_ <= 0) {
+    return false;
+  }
+
+  // Clear C_ before computing (blocks accumulate with +=)
+  C_.assign(C_.size(), 0.0);
+
+  const int n_int = static_cast<int>(n_);
+  const int block_size = CalculateBlockSize(n_int);
+  const int num_blocks = CalculateNumBlocks(n_int, block_size);
+
+  for (int ib = 0; ib < num_blocks; ++ib) {
+    for (int jb = 0; jb < num_blocks; ++jb) {
+      for (int kb = 0; kb < num_blocks; ++kb) {
+        const int i_start = ib * block_size;
+        const int i_end = std::min(i_start + block_size, n_int);
+        const int j_start = jb * block_size;
+        const int j_end = std::min(j_start + block_size, n_int);
+        const int k_start = kb * block_size;
+        const int k_end = std::min(k_start + block_size, n_int);
+
+        ProcessBlock(A_, B_, C_, n_int, i_start, i_end, j_start, j_end, k_start, k_end);
+      }
+    }
+  }
+
+  GetOutput() = C_;
+  return true;
+}
+
+bool MatmulDoubleSeqTask::PostProcessingImpl() {
+  return true;
+}
+
+}  // namespace makoveeva_matmul_double_seq
diff --git a/tasks/makoveeva_matmul_double_seq/settings.json b/tasks/makoveeva_matmul_double_seq/settings.json
new file mode 100644
index 000000000..0be0208fc
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/settings.json
@@ -0,0 +1,10 @@
+{
+  "tasks": {
+    "all": "enabled",
+    "omp": "enabled",
+    "seq": "enabled",
+    "stl": "enabled",
+    "tbb": "enabled"
+  },
+  "tasks_type": "threads"
+}
diff --git a/tasks/makoveeva_matmul_double_seq/tests/functional/main.cpp b/tasks/makoveeva_matmul_double_seq/tests/functional/main.cpp
new file mode 100644
index 000000000..36f893a61
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/tests/functional/main.cpp
@@ -0,0 +1,99 @@
+#include <gtest/gtest.h>
+
+#include <cstddef>
+#include <string>  // NOTE(review): original include names lost in extraction -- confirm this set
+#include <tuple>
+#include <vector>
+
+#include "makoveeva_matmul_double_seq/seq/include/ops_seq.hpp"
+
+namespace makoveeva_matmul_double_seq {
+namespace {
+
+void ReferenceMultiply(const std::vector<double> &a, const std::vector<double> &b, std::vector<double> &c, size_t n) {
+  for (size_t i = 0; i < n; ++i) {
+    for (size_t k = 0; k < n; ++k) {
+      const double tmp = a[(i * n) + k];
+      for (size_t j = 0; j < n; ++j) {
+        c[(i * n) + j] += tmp * b[(k * n) + j];
+      }
+    }
+  }
+}
+
+// Split the execution check into even smaller helper functions
+void ValidateTask(MatmulDoubleSeqTask &task) {
+  ASSERT_TRUE(task.ValidationImpl());
+}
+
+void PreProcessTask(MatmulDoubleSeqTask &task) {
+  ASSERT_TRUE(task.PreProcessingImpl());
+}
+
+void RunTask(MatmulDoubleSeqTask &task) {
+  ASSERT_TRUE(task.RunImpl());
+}
+
+void PostProcessTask(MatmulDoubleSeqTask &task) {
+  ASSERT_TRUE(task.PostProcessingImpl());
+}
+
+void CheckTaskExecution(MatmulDoubleSeqTask &task) {
+  ValidateTask(task);
+  PreProcessTask(task);
+  RunTask(task);
+  PostProcessTask(task);
+}
+
+void CheckResults(const std::vector<double> &result, const std::vector<double> &expected) {
+  ASSERT_EQ(result.size(), expected.size());
+  const double epsilon = 1e-10;
+  for (size_t i = 0; i < result.size(); ++i) {
+    ASSERT_NEAR(result[i], expected[i], epsilon);
+  }
+}
+
+}  // namespace
+
+TEST(MatmulDoubleFunctionalTest, multiply2x2) {
+  const size_t n = 2;
+  const std::vector<double> a = {1.0, 2.0, 3.0, 4.0};
+  const std::vector<double> b = {5.0, 6.0, 7.0, 8.0};
+  const std::vector<double> expected = {19.0, 22.0, 43.0, 50.0};
+
+  auto input = std::make_tuple(n, a, b);
+  MatmulDoubleSeqTask task(input);
+
+  CheckTaskExecution(task);
+  CheckResults(task.GetResult(), expected);
+}
+
+TEST(MatmulDoubleFunctionalTest, multiply3x3) {
+  const size_t n = 3;
+  const std::vector<double> a = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
+  const std::vector<double> b = {9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0};
+  const std::vector<double> expected = {30.0, 24.0, 18.0, 84.0, 69.0, 54.0, 138.0, 114.0, 90.0};
+
+  auto input = std::make_tuple(n, a, b);
+  MatmulDoubleSeqTask task(input);
+
+  CheckTaskExecution(task);
+  CheckResults(task.GetResult(), expected);
+}
+
+TEST(MatmulDoubleFunctionalTest, multiply4x4) {
+  const size_t n = 4;
+  const std::vector<double> a = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
+  const std::vector<double> b = {16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0};
+
+  std::vector<double> expected(n * n, 0.0);
+  ReferenceMultiply(a, b, expected, n);
+
+  auto input = std::make_tuple(n, a, b);
+  MatmulDoubleSeqTask task(input);
+
+  CheckTaskExecution(task);
+  CheckResults(task.GetResult(), expected);
+}
+
+}  // namespace makoveeva_matmul_double_seq
diff --git a/tasks/makoveeva_matmul_double_seq/tests/performance/main.cpp b/tasks/makoveeva_matmul_double_seq/tests/performance/main.cpp
new file mode 100644
index 000000000..b37363ed3
--- /dev/null
+++ b/tasks/makoveeva_matmul_double_seq/tests/performance/main.cpp
@@ -0,0 +1,93 @@
+#include <gtest/gtest.h>
+
+#include <cmath>    // for std::abs
+#include <cstddef>  // for size_t
+#include <tuple>
+#include <vector>
+
+#include "makoveeva_matmul_double_seq/common/include/common.hpp"
+#include "makoveeva_matmul_double_seq/seq/include/ops_seq.hpp"
+#include "util/include/perf_test_util.hpp"
+
+namespace makoveeva_matmul_double_seq {
+namespace {
+
+void ReferenceMultiply(const std::vector<double> &a, const std::vector<double> &b, std::vector<double> &c, size_t n) {
+  // Clear c before computing -- use assign instead of a loop
+  c.assign(c.size(), 0.0);
+
+  for (size_t i = 0; i < n; ++i) {
+    for (size_t k = 0; k < n; ++k) {
+      const double tmp = a[(i * n) + k];
+      for (size_t j = 0; j < n; ++j) {
+        c[(i * n) + j] += tmp * b[(k * n) + j];
+      }
+    }
+  }
+}
+
+}  // namespace
+
+class MatmulDoublePerformanceTest : public ppc::util::BaseRunPerfTests<InType, OutType> {
+  InType input_data_;
+  std::vector<double> expected_output_;
+
+ protected:
+  void SetUp() override {
+    const size_t n = 400;
+    const size_t size = n * n;
+
+    std::vector<double> a(size);
+    std::vector<double> b(size);
+
+    for (size_t i = 0; i < size; ++i) {
+      a[i] = static_cast<double>(i + 1);
+      b[i] = static_cast<double>(size - i);
+    }
+
+    input_data_ = std::make_tuple(n, a, b);
+
+    expected_output_.resize(size);
+    ReferenceMultiply(a, b, expected_output_, n);
+  }
+
+  bool CheckTestOutputData(OutType &output_data) final {
+    const auto &expected = expected_output_;
+    const auto &actual = output_data;
+
+    if (expected.size() != actual.size()) {
+      return false;
+    }
+
+    const double epsilon = 1e-7;
+    for (size_t i = 0; i < expected.size(); ++i) {
+      if (std::abs(expected[i] - actual[i]) > epsilon) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  InType GetTestInputData() final {
+    return input_data_;
+  }
+};
+
+TEST_P(MatmulDoublePerformanceTest, RunPerfModes) { + ExecuteTest(GetParam()); +} + +namespace { + +const auto kAllPerfTasks = + ppc::util::MakeAllPerfTasks(PPC_SETTINGS_makoveeva_matmul_double_seq); + +const auto kGtestValues = ppc::util::TupleToGTestValues(kAllPerfTasks); + +const auto kPerfTestName = MatmulDoublePerformanceTest::CustomPerfTestName; + +INSTANTIATE_TEST_SUITE_P(RunModeTests, MatmulDoublePerformanceTest, kGtestValues, kPerfTestName); + +} // namespace + +} // namespace makoveeva_matmul_double_seq