
#define NDEBUG
#include "matrix.hpp"
// #include <cassert>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <mpi.h>
#include <stdio.h>
using namespace std;
using namespace std::chrono;

void PartialMatrixMultiply(const Matrix &a, const Matrix &b, Matrix &c, int row,
                           int column);
int CalculateColumnsPerProcess(int num_process, int column);

// Simple start/stop wall-clock timer built on a single static time point.
// type == 0 records the start and prints "Begin <info>"; any other value
// prints the seconds elapsed since the last start. Not thread-safe: all
// callers share the same static start time.
void MyTimer(int type, const char *info = nullptr) {
  static std::chrono::steady_clock::time_point t0;
  if (type != 0) {
    const auto t1 = std::chrono::steady_clock::now();
    const auto elapsed =
        std::chrono::duration_cast<std::chrono::duration<double>>(t1 - t0);
    printf("End %s, time taken=%lf\n", info, elapsed.count());
    return;
  }
  t0 = std::chrono::steady_clock::now();
  printf("Begin %s\n", info);
}


#define TEST
// Note: transposing B proved impractical here.
// Distribution uses scatter and gather.
int main(int argc, char *argv[]) {
  int my_rank, num_process;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &num_process);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  if (my_rank == 0 && argc != 4) {
    cerr << "./Matrix_mpi.out  N K M" << endl;
    exit(EXIT_FAILURE);
  }

  MPI_Datatype column_array;
  MPI_Datatype block_matrix, block_matrix2;
  MPI_Status status;

  int process_in_rows, process_in_columns;

  const int N = atoi(argv[1]);
  const int K = atoi(argv[2]);
  const int M = atoi(argv[3]);
  // 列必须要整除
  process_in_columns = CalculateColumnsPerProcess(num_process, M);
  process_in_rows = num_process / process_in_columns;
  //   if (process_in_columns * process_in_rows != num_process)
  //     MPI_Abort(MPI_COMM_WORLD, 0);

  int rows_per_process{N / process_in_rows},
      columns_per_process{M / process_in_columns};

  if (my_rank == 0) {
    printf(" argc =  %d\n", argc);
    printf("\n %d*%d multiply %d*%d\n", N, K, K, M);
    printf("num_process = %d\n", num_process);
    printf("process_in_rows = %d   process_in_columns = %d\n", process_in_rows,
           process_in_columns);
    printf("partial : rows_per_process = %d   columns_per_process = %d\n",
           rows_per_process, columns_per_process);
  }

  // printf("hello\n");
  if (my_rank == 0) {
    // 不能把这个提到外面, 对于其他进程, rows_per_process 可能不一样
    MPI_Type_vector(K, columns_per_process, M, MPI_DOUBLE, &column_array);
    MPI_Type_commit(&column_array);
    MPI_Type_vector(rows_per_process, columns_per_process, M, MPI_DOUBLE,
                    &block_matrix);
    MPI_Type_commit(&block_matrix);
    MPI_Type_vector(N - (process_in_rows - 1) * rows_per_process,
                    columns_per_process, M, MPI_DOUBLE, &block_matrix2);
    MPI_Type_commit(&block_matrix2);
    // printf("000000\n");
    Matrix a(N, K), b(K, M), c(N, M);
    // , d(rows_per_process, columns_per_process);
    a.RandomInitialize();
    b.RandomInitialize();

    double time_start, time_end;
    time_start = MPI_Wtime();

    vector<int> displacement(num_process), send_count(num_process);
    int count{0};
    // 分发 a  准备 scatterv的参数
    for (int i = 0; i < process_in_rows; ++i) {
      for (int j = 0; j < process_in_columns; ++j) {
        send_count[count] = rows_per_process * K;
        displacement[count] = i * rows_per_process * K;
        if (i == process_in_rows - 1) {
          send_count[count] =
              (N - (process_in_rows - 1) * rows_per_process) * K;
        }
        ++count;
      }
    }

    MPI_Scatterv(a.Data(), send_count.data(), displacement.data(), MPI_DOUBLE,
                 MPI_IN_PLACE, rows_per_process * K, MPI_DOUBLE, 0,
                 MPI_COMM_WORLD);
    // printf("000000\n");
    // 分发 b
    count = 1;
    for (int i = 0; i < process_in_rows; ++i) {
      for (int j = 0; j < process_in_columns; ++j) {
        //   i = 0  j= 0 就在本地计算
        if (i + j != 0) {

          auto buffer = b.Data() + j * columns_per_process;
          MPI_Send(buffer, 1, column_array, count, 0, MPI_COMM_WORLD);
          count++;
        }
      }
    }

    PartialMatrixMultiply(a, b, c, rows_per_process, columns_per_process);
    // printf("000000\n");

    // 收集 c
    count = 1;
    for (int i = 0; i < process_in_rows; ++i) {
      for (int j = 0; j < process_in_columns; ++j) {
        if (i + j != 0) {
          if (i == process_in_rows - 1) {
            //   收发匹配    接收是定义不同的 block_matrix 但正确接收了
            MPI_Recv(c.Data() + i * rows_per_process * M +
                         j * columns_per_process,
                     1, block_matrix2, count, 0, MPI_COMM_WORLD, &status);
            count++;

          } else {
            MPI_Recv(c.Data() + i * rows_per_process * M +
                         j * columns_per_process,
                     1, block_matrix, count, 0, MPI_COMM_WORLD, &status);
            count++;
          }
        }
      }
    }

    time_end = MPI_Wtime();
    printf("paralleral computation time = %f\n", time_end - time_start);
#ifdef TEST
    Matrix cc(N, M);
    MyTimer(0, "serial computation");
    MatrixMultiply(a, b, cc);
    MyTimer(1, "serial computation");
    // int is_equal_to_serial = cc.IsSame(c);
    // printf("is_equal_to_serial = %d\n", is_equal_to_serial);
    if (cc.IsSame(c)) {
      cout << "check passed\n" << endl;
    } else {
      cout << "not equal! there is something wrong!\n";
    }
    // if (my_rank == 0) {
    //   cout << a << endl;
    //   cout << b << endl;
    //   cout << c << endl;
    //   cout << cc << endl;
    // }

#endif
  } else {
    if (my_rank >= (process_in_rows - 1) * process_in_columns)
      rows_per_process = N - (process_in_rows - 1) * rows_per_process;

    // printf("%d\n", rows_per_process);
    // 接收者的 stride 与发送者不同   接收者的stride是本地的一块一块的间隔
    MPI_Type_vector(K, columns_per_process, columns_per_process, MPI_DOUBLE,
                    &column_array);
    MPI_Type_commit(&column_array);
    // block_matrix 块对不同的进程定义一样但数据可能不同
    MPI_Type_vector(rows_per_process, columns_per_process, columns_per_process,
                    MPI_DOUBLE, &block_matrix);
    MPI_Type_commit(&block_matrix);

    Matrix subA(rows_per_process, K), subB(K, columns_per_process),
        subC(rows_per_process, columns_per_process);
    // 接收 a
    MPI_Scatterv(nullptr, nullptr, nullptr, MPI_DOUBLE, subA.Data(),
                 rows_per_process * K, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // 接收 b
    MPI_Recv(subB.Data(), 1, column_array, 0, 0, MPI_COMM_WORLD, &status);

    MatrixMultiply(subA, subB, subC);

    MPI_Send(subC.Data(), 1, block_matrix, 0, 0, MPI_COMM_WORLD);
  }

  MPI_Finalize();
  return 0;
}

// Accumulates the top-left (row x column) block of the product a*b into c:
// c(i, k) += sum over t of a(i, t) * b(t, k).
// NOTE(review): uses +=, so it assumes c's target entries start at zero —
// presumably Matrix zero-initializes; confirm against matrix.hpp.
void PartialMatrixMultiply(const Matrix &a, const Matrix &b, Matrix &c, int row,
                           int column) {
  const int inner{a.GetColumn()};
  for (int r = 0; r < row; ++r) {
    for (int t = 0; t < inner; ++t) {
      // Hoist the left operand; the inner loop sweeps one row of b.
      const auto a_rt = a(r, t);
      for (int col = 0; col < column; ++col)
        c(r, col) += a_rt * b(t, col);
    }
  }
}

// Returns the largest integer p <= sqrt(num_process) that divides both
// num_process (so the process grid is exact) and column (so each process
// gets an equal number of matrix columns). Always returns at least 1, so
// the caller's division num_process / p is safe.
//
// The original used sqrt() plus a 1e-3 epsilon hack (and relied on <cmath>
// coming in transitively); this integer version avoids floating point
// entirely and fixes the degenerate num_process <= 0 case, which used to
// return 0.
int CalculateColumnsPerProcess(int num_process, int column) {
  // Integer square root: largest p with p*p <= num_process.
  // 1LL promotion guards against overflow near INT_MAX.
  int p = 1;
  while (1LL * (p + 1) * (p + 1) <= num_process)
    ++p;
  // Walk down until p divides both the process count and the column count.
  while (p > 1 && ((num_process % p != 0) || (column % p != 0)))
    --p;
  return p;
}