#include <assert.h>
#include <math.h>
#include <mpi.h>
#include <mpi-ext.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <vector>

/*
 * Report whether the linked MPI library is CUDA-aware, both at compile time
 * (via the MPIX_CUDA_AWARE_SUPPORT macro from <mpi-ext.h>) and at run time
 * (via MPIX_Query_cuda_support()). Output goes to stdout; every rank that
 * calls this prints its own copy.
 */
void cuda_aware_check()
{
    printf("Compile time check:\n");
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
    /* FIX: the original passed MPIX_CUDA_AWARE_SUPPORT as an extra printf
     * argument with no matching conversion specifier in the format string. */
    printf("This MPI library has CUDA-aware support.\n");
#elif defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT
    printf("This MPI library does not have CUDA-aware support.\n");
#else
    printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif /* MPIX_CUDA_AWARE_SUPPORT */

    printf("Run time check:\n");
#if defined(MPIX_CUDA_AWARE_SUPPORT)
    /* Run-time answer can differ from compile time when the library was built
     * with CUDA support but it is unavailable in the current environment. */
    if (1 == MPIX_Query_cuda_support()) {
        printf("This MPI library has CUDA-aware support.\n");
    } else {
        printf("This MPI library does not have CUDA-aware support.\n");
    }
#else /* !defined(MPIX_CUDA_AWARE_SUPPORT) */
    printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif /* MPIX_CUDA_AWARE_SUPPORT */
}

// CPU reference: computes C = (A + A*B + A*B^2 + ... + A*B^m) mod 2.
//
// n: matrix dimension (all matrices are n x n).
// m: highest power of B included in the sum.
// A, B: input matrices with entries in {0, 1}.
// C: output matrix; fully overwritten.
void SequentialCalculation(const int &n,
                           const int &m,
                           const std::vector<std::vector<int>> &A,
                           const std::vector<std::vector<int>> &B,
                           std::vector<std::vector<int>> *C) {
  // Helper: (X * Y) mod 2 for n x n matrices, reducing after every product
  // so intermediate values stay in {0, 1}.
  auto mul_mod2 = [n](const std::vector<std::vector<int>> &X,
                      const std::vector<std::vector<int>> &Y) {
    std::vector<std::vector<int>> R(n, std::vector<int>(n, 0));
    for (int r = 0; r < n; ++r) {
      for (int c = 0; c < n; ++c) {
        for (int k = 0; k < n; ++k) {
          R[r][c] = (R[r][c] + X[r][k] * Y[k][c]) % 2;
        }
      }
    }
    return R;
  };

  *C = A;                                   // t = 0 term of the sum
  std::vector<std::vector<int>> power = B;  // holds B^t, starting at t = 1
  for (int t = 1; t <= m; ++t) {
    // Accumulate the t-th term A * B^t into C (mod 2).
    const std::vector<std::vector<int>> term = mul_mod2(A, power);
    for (int r = 0; r < n; ++r) {
      for (int c = 0; c < n; ++c) {
        (*C)[r][c] = ((*C)[r][c] + term[r][c]) % 2;
      }
    }
    // Advance B^t -> B^(t+1), skipping the wasted multiply on the last pass.
    if (t < m) {
      power = mul_mod2(power, B);
    }
  }
}

// Loads the problem instance from a whitespace-separated text file laid out
// as: n m, then the n x n matrix A row by row, then the n x n matrix B.
// Returns false when the file cannot be opened, true otherwise.
bool LoadFile(const std::string &input_file_path, int *n, int *m, std::vector<std::vector<int>> *A,
              std::vector<std::vector<int>> *B) {
  std::ifstream in(input_file_path.c_str());
  if (!in.is_open()) {
    return false;
  }
  in >> *n >> *m;
  A->assign(*n, std::vector<int>(*n, 0));
  B->assign(*n, std::vector<int>(*n, 0));
  for (auto &row : *A) {
    for (auto &cell : row) {
      in >> cell;
    }
  }
  for (auto &row : *B) {
    for (auto &cell : row) {
      in >> cell;
    }
  }
  in.close();
  return true;
}

// Compares the sequential reference answer against the parallel answer and
// prints a verdict ("Correct!!!" / "Wrong Answer"), the mismatch count, and
// the element sums of both matrices (useful when diagnosing failures).
// Prints an error and returns early when the matrix shapes disagree.
void TestAnswerCorrectness(const std::vector<std::vector<int>> &sequential_answer,
                           const std::vector<std::vector<int>> &parallel_answer) {
  if (sequential_answer.size() != parallel_answer.size()) {
    std::cout << "Error! The number of sequential_answer and parallel_answer "
                 "is not the same"
              << std::endl;
    return ;
  }
  long long sum_sequential_answer = 0;
  long long sum_parallel_answer = 0;
  // FIX: accumulate in long long (int could overflow for large matrices,
  // and the totals below are already long long).
  long long sum_error = 0;
  int error_count = 0;
  // FIX: std::size_t replaces the non-standard POSIX `uint` typedef.
  for (std::size_t i = 0; i < sequential_answer.size(); i++) {
    if (sequential_answer[i].size() != parallel_answer[i].size())
    {
      std::cout << "Error! The number of sequential_answer and parallel_answer "
                 "is not the same"
              << std::endl;
      return ;
    }
    for (std::size_t j = 0; j < sequential_answer[i].size(); j++) {
      sum_error += std::abs(sequential_answer[i][j] - parallel_answer[i][j]);
      if (sequential_answer[i][j] != parallel_answer[i][j]) {
        error_count++;
      }
      sum_sequential_answer += sequential_answer[i][j];
      sum_parallel_answer += parallel_answer[i][j];
    }
  }
  std::cout << "Total number of errors: " << error_count << std::endl;
  std::cout << "sum_sequential_answer = " << sum_sequential_answer << std::endl;
  std::cout << "sum_parallel_answer = " << sum_parallel_answer << std::endl;

  if (sum_error > 0) {
    std::cout << "Wrong Answer" << std::endl;
  } else {
    std::cout << "Correct!!!" << std::endl;
  }
}

// ==============================================================
// ====    Write your functions below this line    ====
// ==============================================================
// ==============================================================

// Tile edge length for the shared-memory tiled multiply; kernels below must be
// launched with a TILE_WIDTH x TILE_WIDTH thread block.
const int TILE_WIDTH = 16;
// Computes C = (A * B) mod 2 for a `rows` x n slice of A against a full
// n x n matrix B, writing a `rows` x n result C (all row-major, flattened).
//
// Launch configuration expected by the host code:
//   block = (TILE_WIDTH, TILE_WIDTH), grid = (ceil(n/TILE), ceil(rows/TILE)).
// Shared memory: two static TILE_WIDTH x TILE_WIDTH int tiles.
//
// NOTE(review): products are summed un-reduced and only taken mod 2 at the
// end, so this assumes entries of A and B are 0/1 (then D_value <= n, which
// fits in int for any realistic n) — confirm inputs are binary matrices.
__global__ void MatrixMultiplyMod2Kernel(int n, int rows, const int* A, const int* B, int* C) {
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  // Global output coordinates this thread is responsible for.
  int row = by * blockDim.y + ty;
  int col = bx * blockDim.x + tx;

  __shared__ int B_power_s[TILE_WIDTH][TILE_WIDTH];
  __shared__ int A_s[TILE_WIDTH][TILE_WIDTH];

  int D_value = 0;
  // March both tiles along the shared dimension, one TILE_WIDTH phase at a time.
  for(int ph = 0; ph < (n + TILE_WIDTH -1)/TILE_WIDTH; ph++){
    // Stage one tile of A and one tile of B into shared memory; out-of-range
    // threads store 0 so the inner product below needs no bounds checks.
    if((ph * TILE_WIDTH + tx < n) && (row < rows))
      A_s[ty][tx] = A[row * n + ph * TILE_WIDTH + tx];
    else
      A_s[ty][tx] = 0;
    if((ph * TILE_WIDTH + ty < n) && (col < n))
      B_power_s[ty][tx] = B[(ph * TILE_WIDTH + ty) * n + col];
    else
      B_power_s[ty][tx] = 0;
    // Barrier 1: tiles fully written before any thread reads them.
    __syncthreads();

    for(int k = 0; k < TILE_WIDTH; k++){
      D_value += A_s[ty][k] * B_power_s[k][tx];
    }
    // Barrier 2: all reads done before the next phase overwrites the tiles.
    __syncthreads();
  }
  // Guard the store: the grid may overhang the matrix on both axes.
  if(row < rows && col < n){
    C[row * n + col] = D_value % 2;
  }
}

// Elementwise C = (A + B) mod 2 over a flattened `rows` x n matrix.
// Launch with any 1-D grid covering at least rows*n threads; threads past the
// end simply exit. A, B and C may alias (host code passes C as both input and
// output), which is safe for a pure elementwise operation.
__global__ void MatrixAddMod2Kernel(int n,
                                    int rows,
                                    const int *A,
                                    const int *B,
                                    int *C) {
  const int total = rows * n;
  const int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= total)
    return;
  C[i] = (A[i] + B[i]) % 2;
}

// Computes this rank's slice of C = (A + A*B + ... + A*B^m) mod 2 on the GPU.
//
// n:    full matrix dimension.
// m:    highest power of B in the sum.
// rows: number of rows of A/C owned by this rank.
// d_A:  device pointer, rows x n local slice of A (read-only).
// d_B:  device pointer, full n x n matrix B (read-only).
// d_C:  device pointer, rows x n output slice (overwritten).
// rank: unused; kept so the caller's signature stays unchanged.
//
// All kernels are issued on the default stream, which serializes them in
// submission order — the per-iteration cudaDeviceSynchronize() calls in the
// original were redundant (one of them was an outright duplicate) and defeated
// async submission. A single sync + error check at the end replaces them.
void ParallelCalculation(const int n,
                         const int m,
                         const int rows,
                         const int *d_A,
                         const int *d_B,
                         int *d_C,
                         int rank)
  {
    (void)rank;  // intentionally unused

    int *d_B_power = nullptr, *d_next_B_power = nullptr, *d_D = nullptr;
    size_t size = n * n * sizeof(int);        // full n x n buffer
    size_t row_size = rows * n * sizeof(int); // local rows x n buffer

    cudaMalloc(&d_B_power, size);
    cudaMalloc(&d_next_B_power, size);
    cudaMalloc(&d_D, row_size);
    cudaMemcpy(d_B_power, d_B, size, cudaMemcpyDeviceToDevice); // B^1
    cudaMemcpy(d_C, d_A, row_size, cudaMemcpyDeviceToDevice);   // t = 0 term

    // grid1 covers the local rows x n product; grid2 the full n x n product.
    dim3 grid1((n + TILE_WIDTH - 1) / TILE_WIDTH, (rows + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 grid2((n + TILE_WIDTH - 1) / TILE_WIDTH, (n + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 block(TILE_WIDTH, TILE_WIDTH);

    const int numThreads = 256;
    const int numBlocks = (n * rows + numThreads - 1) / numThreads;

    for (int t = 0; t < m; t++) {
      // D = (A * B^(t+1)) mod 2, then C = (C + D) mod 2.
      MatrixMultiplyMod2Kernel<<<grid1, block>>>(n, rows, d_A, d_B_power, d_D);
      MatrixAddMod2Kernel<<<numBlocks, numThreads>>>(n, rows, d_C, d_D, d_C);

      if (t == m - 1)
        break;  // last term accumulated; skip the wasted power update
      // B^(t+2) = (B^(t+1) * B) mod 2, then ping-pong the power buffers.
      MatrixMultiplyMod2Kernel<<<grid2, block>>>(n, n, d_B_power, d_B, d_next_B_power);
      std::swap(d_B_power, d_next_B_power);
    }

    // Drain the stream and surface any launch/execution error before the
    // temporaries are freed and the caller hands d_C to MPI.
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
      fprintf(stderr, "ParallelCalculation CUDA error: %s\n",
              cudaGetErrorString(err));
    }

    cudaFree(d_B_power);
    cudaFree(d_next_B_power);
    cudaFree(d_D);
  }

// ==============================================================
// ====    Write your functions above this line    ====
// ==============================================================
// ==============================================================


// Entry point: rank 0 loads the input, broadcasts B and scatters rows of A
// across ranks; each rank computes its slice of C on its GPU; results are
// gathered back to rank 0 and checked against the sequential reference.
// NOTE: assumes MPI is CUDA-aware (device pointers are passed to MPI calls)
// and that n is divisible by the number of processes — a remainder of rows
// would be silently dropped by the integer division below.
int main(int argc, char **argv) {
  int number_of_processes, rank;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &number_of_processes);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  cuda_aware_check();
  double parallel_start_time;

  int number_of_block_in_a_grid;
  int number_of_thread_in_a_block;
  int n,m;
  std::vector<std::vector<int>> A;
  std::vector<std::vector<int>> B;
  if (rank == 0) {
    if (argc < 4) {
      std::cout << "Error! Please use \"mpiexec -n [process number] "
                   "[--hostfile hostfile] multiple [number_of_block_in_a_grid] [number_of_thread_in_a_block] [data_file_name]\"\n";
      // FIX: a plain `return 1` here would leave every other rank deadlocked
      // in the MPI_Bcast below; tear down the whole job instead.
      MPI_Abort(MPI_COMM_WORLD, 1);
      return 1;
    } else {
      number_of_block_in_a_grid = std::atoi(argv[1]);
      number_of_thread_in_a_block = std::atoi(argv[2]);
      std::string input_file_path = std::string(argv[3]);
      std::cout << "number_of_block_in_a_grid:" << number_of_block_in_a_grid<< std::endl;
      std::cout << "number_of_thread_in_a_block:" << number_of_thread_in_a_block<< std::endl;
      if (!LoadFile(input_file_path, &n, &m, &A, &B)) {
        std::cout << "Error! Please check the format of input file\n";
        // FIX: same deadlock hazard as above — abort all ranks.
        MPI_Abort(MPI_COMM_WORLD, 1);
        return 1;
      }
    }
  }
  std::vector<std::vector<int>> parallel_answer;

  if (rank == 0) {
    parallel_start_time = MPI_Wtime();
  }

  // ==============================================================
  // ====    Write your implementation below this line    ====
  // ==============================================================
  // ==============================================================

  // FIX: d_A/d_C are only allocated on rank 0; initialize to nullptr so the
  // non-root ranks pass a well-defined (ignored) pointer to Scatter/Gather.
  int *d_B, *d_A = nullptr, *d_A_local, *d_C = nullptr, *d_C_local;
  MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast(&m, 1, MPI_INT, 0, MPI_COMM_WORLD);
  int rows_per_process = n / number_of_processes;
  cudaMalloc(&d_A_local, rows_per_process * n * sizeof(int));
  cudaMalloc(&d_C_local, rows_per_process * n * sizeof(int));
  cudaMalloc(&d_B, n * n * sizeof(int));
  if (rank == 0) {
    // Flatten the row-major std::vector matrices into contiguous host
    // buffers, then upload A and B to the device.
    int *h_B = (int*)malloc(n * n * sizeof(int));
    int *h_A = (int*)malloc(n * n * sizeof(int));
    cudaMalloc(&d_A, n * n * sizeof(int));
    cudaMalloc(&d_C, n * n * sizeof(int));
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        h_A[i * n + j] = A[i][j];
        h_B[i * n + j] = B[i][j];
      }
    }
    cudaMemcpy(d_A, h_A, n * n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, n * n * sizeof(int), cudaMemcpyHostToDevice);
    free(h_A);
    free(h_B);
  }
  // CUDA-aware MPI: device pointers go straight into the collectives.
  MPI_Bcast(d_B, n * n, MPI_INT, 0, MPI_COMM_WORLD);

  MPI_Scatter(d_A, rows_per_process * n, MPI_INT,
              d_A_local, rows_per_process * n, MPI_INT, 0, MPI_COMM_WORLD);
  ParallelCalculation(n, m, rows_per_process, d_A_local, d_B, d_C_local, rank);
  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Gather(d_C_local, rows_per_process * n, MPI_INT,
              d_C, rows_per_process * n, MPI_INT, 0, MPI_COMM_WORLD);
  if (rank == 0) {
    // Copy the gathered result back to the host and unflatten it.
    parallel_answer.assign(n, std::vector<int>(n, 0));
    int *h_C = (int*)malloc(n * n * sizeof(int));
    cudaMemcpy(h_C, d_C, n * n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) {
      for (int j = 0; j < n; ++j) {
        parallel_answer[i][j] = h_C[i * n + j];
      }
    }
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_C);
  }
  cudaFree(d_A_local);
  cudaFree(d_C_local);
  cudaFree(d_B);
  // ==============================================================
  // ====    Write your implementation above this line    ====
  // ==============================================================
  // ==============================================================
  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    double parallel_end_time = MPI_Wtime();
    double parallel_running_time = parallel_end_time - parallel_start_time;
    std::cout << "parallel running time:" << parallel_running_time << std::endl;
    std::vector<std::vector<int>> sequential_answer;
    double sequential_start_time = MPI_Wtime();

    SequentialCalculation(n, m, A, B, &sequential_answer);
    double sequential_end_time = MPI_Wtime();
    double sequential_running_time =
        sequential_end_time - sequential_start_time;
    std::cout << "sequential running time:" << sequential_running_time
              << std::endl;
    std::cout << "speed up:" <<  sequential_running_time/parallel_running_time
              << std::endl;
    TestAnswerCorrectness(sequential_answer, parallel_answer);
  }
  MPI_Finalize();
  return 0;
}