
// #include "../mrandom.hpp"
#include "mpi.h"
#include <cassert>
#include <iostream>
#include <string>
#include <vector>
using namespace std;
#include <climits>
#include <cstdlib>
#include <random>
// NOTE: every process default-seeds its engine, so parallel runs generate identical data.
// Draws doubles uniformly from the half-open interval [low, high).
// The engine is default-seeded, so every instance (and every process)
// reproduces the same sequence.
class RandDouble {
public:
  // Configure the sampling interval [low, high).
  RandDouble(double low, double high) : dist_{low, high} {}

  // Produce the next pseudo-random value.
  double operator()() { return dist_(engine_); }

private:
  std::default_random_engine engine_{};
  std::uniform_real_distribution<> dist_;
};

// Ranks [0, worker_start) act as random-number servers; worker ranks start here.
constexpr int worker_start = 2;
// Tag for server<->worker traffic (intercomm creation, chunk sizes, random data).
constexpr int worker_rand_tag = 0;
// Tag for worker<->collector traffic (intercomm creation, hit counts).
constexpr int worker_last_tag = 1;
// Tag used only to create the server<->collector inter-communicator.
constexpr int last_rand_tag = 2;
// Doubles generated per "thousand" unit of workload (1024, not 1000).
constexpr int chunk_size = 1024;

int main(int argc, char **argv) {

  if (argc != 2) {
    cerr << "./a.out  N_thousands" << endl;
    MPI_Abort(MPI_COMM_WORLD, -1);
  }
  string str_tmp{argv[1]};
  const int n_thousands = 2 * stoi(str_tmp);

  int global_rank, num_proc;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &global_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &num_proc);

  if (num_proc < 5) {
    cout << "num process must be large than 4!" << endl;
  }

  int color;
  // 两个进程用于生成随机数
  if (global_rank < worker_start) {
    color = 0;
  } else {
    color = 1;
  }
  // 获得计算后的pi
  if (global_rank == num_proc - 1)
    color = 2;

  // MPI_Group world_group;
  MPI_Comm local_comm;
  MPI_Comm_split(MPI_COMM_WORLD, color, global_rank, &local_comm);

  MPI_Comm inter_comm1, inter_comm2;
  if (global_rank < worker_start) {
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, worker_start,
                         worker_rand_tag, &inter_comm1);

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, num_proc - 1,
                         last_rand_tag, &inter_comm2);
  }
  if (worker_start <= global_rank && global_rank < (num_proc - 1)) {
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, 0, worker_rand_tag,
                         &inter_comm1);
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, num_proc - 1,
                         worker_last_tag, &inter_comm2);
  }
  if (global_rank == (num_proc - 1)) {
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, worker_start,
                         worker_last_tag, &inter_comm1);
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, 0, last_rand_tag,
                         &inter_comm2);
  }

  // random server
  if (global_rank < worker_start) {
    RandDouble rand_gen(-1.0, 1.0);
    const int num_worker = num_proc - worker_start - 1;

    int local_n_thousands = 1 + n_thousands / worker_start;
    // 对数据个数修正以便后续计算
    local_n_thousands = (local_n_thousands / num_worker + 1) * num_worker;
    const int n_elem = local_n_thousands * chunk_size;

    vector<double> rand_val(n_elem);

    vector<MPI_Request> requests(num_worker);
    vector<MPI_Status> status(num_worker);

    const int averge = n_elem / num_worker;
    vector<int> n_elem_per_proc(num_worker, averge);
    n_elem_per_proc[num_worker - 1] += n_elem - averge * num_worker;

    // 发送大小 对方开辟内存
    for (int i = 0; i < num_worker; ++i) {
      MPI_Isend(&n_elem_per_proc[i], 1, MPI_INT, i, worker_rand_tag,
                inter_comm1, &requests[i]);
    }

    for (int i = 0; i < rand_val.size(); ++i)
      rand_val[i] = rand_gen();
    MPI_Waitall(num_worker, requests.data(), status.data());

    for (int i = 0; i < num_worker; ++i) {
      MPI_Isend(rand_val.data() + i * averge, n_elem_per_proc[i], MPI_DOUBLE, i,
                worker_rand_tag, inter_comm1, &requests[i]);
    }
    MPI_Waitall(num_worker, requests.data(), status.data());

    MPI_Reduce(&n_elem, nullptr, 1, MPI_INT, MPI_SUM, 0, inter_comm2);
  }

  // workers 接收数据并判断
  if (worker_start <= global_rank && global_rank < (num_proc - 1)) {
    const int num_server = worker_start;

    vector<MPI_Request> requests(num_server);
    vector<MPI_Status> status(num_server);
    vector<vector<double>> recved_val(num_server);
    vector<int> num_val(num_server);

    // 接收大小
    for (int i = 0; i < num_server; ++i) {
      MPI_Irecv(&num_val[i], 1, MPI_INT, i, worker_rand_tag, inter_comm1,
                &requests[i]);
    }
    MPI_Waitall(num_server, requests.data(), status.data());

    for (int i = 0; i < num_server; ++i)
      recved_val[i].resize(num_val[i], 100.0);
    // 接收数据
    for (int i = 0; i < num_server; ++i) {
      MPI_Irecv(recved_val[i].data(), num_val[i], MPI_DOUBLE, i,
                worker_rand_tag, inter_comm1, &requests[i]);
    }
    MPI_Waitall(num_server, requests.data(), status.data());

    int num_inout_circle[2]{0, 0};
    for (int i = 0; i < num_server; ++i) {
      assert(recved_val[i].size() % 2 == 0);
      // 已经保证为偶数
      for (int j = 1; j < recved_val[i].size(); j += 2) {
        // 确保接收到了数据
        assert((recved_val[i][j] < 10.0) && (recved_val[i][j - 1] < 10.0));

        if ((recved_val[i][j] * recved_val[i][j] +
             recved_val[i][j - 1] * recved_val[i][j - 1]) < 1.0)
          ++num_inout_circle[0];
        else
          ++num_inout_circle[1];
      }
    }

    MPI_Isend(num_inout_circle, 2, MPI_INT, 0, worker_last_tag, inter_comm2,
              &requests[0]);
    MPI_Wait(&requests[0], &status[0]);
  }

  if (global_rank == (num_proc - 1)) {
    const int num_worker = num_proc - worker_start - 1;
    vector<MPI_Request> requests(num_worker);
    vector<MPI_Status> status(num_worker);
    vector<int> num_inout_circle(num_worker * 2);

    for (int i = 0; i < num_worker; ++i) {
      MPI_Irecv(&num_inout_circle[i * 2], 2, MPI_INT, i, worker_last_tag,
                inter_comm1, &requests[i]);
    }
    MPI_Waitall(num_worker, requests.data(), status.data());

    int num_in_circle{0}, all_points{0};
    for (int i = 0; i < num_worker; ++i) {
      num_in_circle += num_inout_circle[i * 2];
      all_points += (num_inout_circle[i * 2] + num_inout_circle[i * 2 + 1]);
    }

    //
    int num_points_all{0};
    MPI_Reduce(nullptr, &num_points_all, 1, MPI_INT, MPI_SUM, MPI_ROOT,
               inter_comm2);
    // 断言接收到的数据个数是否相等
    assert((num_points_all / 2 + num_points_all % 2) == all_points);

    const double pi = num_in_circle * 4.0 / all_points;
    cout << "num_in_circle = " << num_in_circle
         << "   all_points = " << all_points << endl;
    cout << "pi = " << pi << endl;
  }

  MPI_Finalize();

  return 0;
}