#include <omp.h>
#include <torch/extension.h>

#include <vector>

typedef uint64_t Grid;  // 8x8 board as a bitboard, one bit per square

// Population count: number of stones (set bits) on a bitboard.
int countSetBits(Grid n) {
  int bits = 0;
  // Kernighan's trick: each iteration clears the lowest set bit.
  for (; n; n &= n - 1) ++bits;
  return bits;
}

Grid pure_avail(Grid b, Grid w) {
  // Legal-move mask for the side holding `b` against `w`: a square is
  // available when, along some ray, a `b` disc is followed by one or more
  // `w` discs and then that empty square.
  // One mask/shift pair per ray direction; the first four rays propagate
  // with >>, the last four with <<.  Each mask clears the squares that
  // would wrap across a board edge before the shift.
  static const Grid kMask[8] = {
      ~0x0101010101010101ULL, ~0x80808080808080FFULL,
      ~0x00000000000000FFULL, ~0x01010101010101FFULL,
      ~0x8080808080808080ULL, ~0xFF01010101010101ULL,
      ~0xFF00000000000000ULL, ~0xFF80808080808080ULL};
  static const int kShift[8] = {1, 7, 8, 9, 1, 7, 8, 9};
  const Grid empty = ~w & ~b;
  Grid avail = 0;
  for (int d = 0; d < 8; ++d) {
    Grid alive = b;  // frontier: discs whose ray is still running over `w`
    if (d < 4) {
      while (alive) {
        alive = ((alive & kMask[d]) >> kShift[d]) & w;
        avail |= ((alive & kMask[d]) >> kShift[d]) & empty;
      }
    } else {
      while (alive) {
        alive = ((alive & kMask[d]) << kShift[d]) & w;
        avail |= ((alive & kMask[d]) << kShift[d]) & empty;
      }
    }
  }
  return avail;
}

Grid get_flipped(Grid b, Grid w, Grid s) {
  // Discs captured by playing the single-bit move `s` for the `b` side.
  // For each of the 8 ray directions, walk outward over opponent (`w`)
  // discs, accumulating the run; as soon as the next square holds a
  // friendly (`b`) disc, the whole run is committed as flipped.
  // The played square itself is always part of the returned board.
  static const Grid kMask[8] = {
      ~0x0101010101010101ULL, ~0x80808080808080FFULL,
      ~0x00000000000000FFULL, ~0x01010101010101FFULL,
      ~0x8080808080808080ULL, ~0xFF01010101010101ULL,
      ~0xFF00000000000000ULL, ~0xFF80808080808080ULL};
  static const int kShift[8] = {1, 7, 8, 9, 1, 7, 8, 9};
  Grid flipped = s;
  for (int d = 0; d < 8; ++d) {
    Grid probe = s, run = 0;
    if (d < 4) {
      while (probe) {
        probe = ((probe & kMask[d]) >> kShift[d]) & w;
        run |= probe;
        if (((probe & kMask[d]) >> kShift[d]) & b) flipped |= run;
      }
    } else {
      while (probe) {
        probe = ((probe & kMask[d]) << kShift[d]) & w;
        run |= probe;
        if (((probe & kMask[d]) << kShift[d]) & b) flipped |= run;
      }
    }
  }
  return flipped;
}

// Leaf evaluation used by search(); std_ai() points it at eval_init or
// eval_final depending on the game phase.  thread_local because
// reversi_std() calls std_ai() from an OpenMP parallel loop: a shared
// pointer written by every thread would be a data race, and one thread's
// evaluator choice could leak into another thread's search.
thread_local int (*eval)(Grid b, Grid w);

Grid flip_rev(Grid b) {
  // Reflect the board across the anti-diagonal: square (row, col) moves to
  // ((7 - col), (7 - row)).  The mapping is its own inverse.
  Grid out = 0;
  for (size_t src = 0; src < 64; ++src) {
    if ((b >> src) & 1) {
      const size_t row = src / 8, col = src % 8;
      out |= 1ULL << ((7 - col) * 8 + (7 - row));
    }
  }
  return out;
}

Grid flip(Grid b) {
  // Transpose the board across the main diagonal: square (row, col) moves
  // to (col, row).  The mapping is its own inverse.
  Grid out = 0;
  for (size_t src = 0; src < 64; ++src) {
    if ((b >> src) & 1) {
      const size_t row = src / 8, col = src % 8;
      out |= 1ULL << (col * 8 + row);
    }
  }
  return out;
}

int eval_init(Grid b, Grid w) {
  // Opening/midgame evaluation for the side holding `b`: heavily weighted
  // mobility difference plus a large bonus for each occupied corner.
  static const int kCorner[4] = {0, 7, 56, 63};
  int score =
      32 * (countSetBits(pure_avail(b, w)) - countSetBits(pure_avail(w, b)));
  for (int c : kCorner)
    score += 1024 * (int)((b >> c) & 1) - 1024 * (int)((w >> c) & 1);
  return score;
}

int eval_final(Grid b, Grid w) {
  // Endgame evaluation: when neither side can move the game is over and
  // the score collapses to a hard win/loss (note: a draw scores as a loss
  // here); otherwise mobility + disc difference + corner bonuses.
  const Grid my_moves = pure_avail(b, w);
  const Grid op_moves = pure_avail(w, b);
  if (!my_moves && !op_moves)
    return countSetBits(b) > countSetBits(w) ? 65536 : -65536;
  static const int kCorner[4] = {0, 7, 56, 63};
  int score = 32 * (countSetBits(my_moves) - countSetBits(op_moves)) +
              countSetBits(b) - countSetBits(w);
  for (int c : kCorner)
    score += 1024 * (int)((b >> c) & 1) - 1024 * (int)((w >> c) & 1);
  return score;
}

int search(int depth, int alpha, int beta, Grid b, Grid w) {
  if (depth == 0) return eval(b, w);

  // Generate moves
  Grid b_avail = pure_avail(b, w);

  if (!b_avail) return -search(depth - 1, -beta, -alpha, w, b);

  while (b_avail) {
    Grid selected = b_avail & -b_avail;
    Grid flipped = get_flipped(b, w, selected);
    b_avail ^= selected;

    int val = -search(depth - 1, -beta, -alpha, w & ~flipped, b | flipped);

    if (val >= beta) return beta;
    if (val > alpha) alpha = val;
  }
  return alpha;
}

int search_end(int depth, Grid b, Grid w, int alpha = -2100000000,
               int beta = 2100000000) {
  Grid b_avail = pure_avail(b, w);
  if (!b_avail && !pure_avail(w, b)) {
    auto b_cnt = countSetBits(b);
    auto w_cnt = countSetBits(w);
    if (b_cnt == w_cnt) return 0;
    return b_cnt > w_cnt ? 1 : -1;
  }
  if (depth == 0) return 2;

  // Generate moves
  if (!b_avail) {
    int val = -search_end(depth - 1, w, b, -beta, -alpha);
    return val == -2 ? 2 : val;
  }

  while (b_avail) {
    Grid selected = b_avail & -b_avail;
    Grid flipped = get_flipped(b, w, selected);
    b_avail ^= selected;

    int val = -search_end(depth - 1, w & ~flipped, b | flipped, -beta, -alpha);
    if (val == -2) return 2;

    if (val >= beta) return beta;
    if (val > alpha) alpha = val;
  }
  return alpha;
}

Grid std_ai(Grid b, Grid w) {
  // Fixed-depth root search for the side holding `b`; returns the chosen
  // move as a single-bit board (0 when no move improves on the floor).
  const int depth = 6;
  // Switch to the endgame evaluator once the board is nearly full.
  eval = countSetBits(w | b) + depth < 50 ? eval_init : eval_final;

  int best = -2100000000;
  const int beta = 2100000000;
  Grid best_move = 0;

  Grid moves = pure_avail(b, w);
  for (; moves; moves &= moves - 1) {
    const Grid move = moves & -moves;
    const Grid captured = get_flipped(b, w, move);

    const int score =
        -search(depth - 1, -beta, -best, w & ~captured, b | captured);

    if (score > best) {
      best = score;
      best_move = move;
    }
  }
  return best_move;
}

#include <iostream>

torch::Tensor reversi_std(torch::Tensor data_b, torch::Tensor data_w) {
  // Plays one std_ai move for every board in the batch, IN PLACE, and
  // returns a per-board status tensor: -1 game continues (the boards are
  // stored swapped so the opponent is next to move), 1 draw, 2 black wins,
  // 0 white wins.  Finished games are reset to the opening position.
  auto result = torch::zeros_like(data_b);
  auto batch_size = data_b.size(0);

  // Keep the contiguous tensors alive in locals for the whole loop:
  // `data_b.contiguous().data_ptr<...>()` in one expression dangles when
  // the input is non-contiguous, because the temporary tensor (and its
  // storage) is destroyed at the end of the statement.
  auto b_cont = data_b.contiguous();
  auto w_cont = data_w.contiguous();
  auto b_it = b_cont.data_ptr<int64_t>();
  auto w_it = w_cont.data_ptr<int64_t>();
  auto result_it = result.data_ptr<int64_t>();  // zeros_like is contiguous
#pragma omp parallel for
  for (int i = 0; i < batch_size; i++) {
    Grid b = b_it[i];
    Grid w = w_it[i];
    Grid avail = pure_avail(b, w);
    if (avail) {
      Grid s = std_ai(b, w);
      Grid f = get_flipped(b, w, s);
      b |= f;
      w &= ~f;
    }
    if (!pure_avail(b, w) && !pure_avail(w, b)) {
      // Game over: encode the outcome and reset to the opening position.
      if (countSetBits(w) == countSetBits(b))
        result_it[i] = 1;
      else
        result_it[i] = 2 * (countSetBits(b) > countSetBits(w));
      b_it[i] = 0x0000000810000000;
      w_it[i] = 0x0000001008000000;
    } else {
      // Game continues: swap sides so the stored "b" is the next mover.
      result_it[i] = -1;
      b_it[i] = (int64_t)(w);
      w_it[i] = (int64_t)(b);
    }
  }
  // If contiguous() had to copy, propagate the in-place updates back to
  // the caller's tensors (previously those writes were silently lost).
  if (!data_b.is_same(b_cont)) data_b.copy_(b_cont);
  if (!data_w.is_same(w_cont)) data_w.copy_(w_cont);
  return result;
}

torch::Tensor reversi_win(torch::Tensor data_b, torch::Tensor data_w) {
  // Per-board game status: 3 while the game is still running; otherwise
  // 1 if black has strictly more discs, 0 if not (a draw also yields 0).
  auto batch_size = data_b.size(0);
  auto result = torch::zeros_like(data_b);

  // Hold the contiguous tensors in locals so their storage outlives the
  // raw pointers: `.contiguous().data_ptr<...>()` on one line dangles when
  // the input needed a copy (the temporary dies at end of statement).
  auto b_cont = data_b.contiguous();
  auto w_cont = data_w.contiguous();
  auto b_it = b_cont.data_ptr<int64_t>();
  auto w_it = w_cont.data_ptr<int64_t>();
  for (int i = 0; i < batch_size; i++) {
    Grid b = b_it[i];
    Grid w = w_it[i];
    if (!pure_avail(b, w) && !pure_avail(w, b)) {
      result[i] = countSetBits(b) > countSetBits(w);
    } else {
      result[i] = 3;
    }
  }
  return result;
}

#include <cmath>

// One batched self-play step, mutating data_b/data_w IN PLACE: samples a
// legal move for every board from softmax(pred) restricted to the legal
// squares (slot 64 = pass), applies it, labels the position with an exact
// depth-12 end-search, then either resets the board or randomly reflects
// it for data augmentation.  Returns {selection, result, valid_mask}.
// Result encoding (from `1 - search_end(...)`): 2 = the side that just
// acted wins, 1 = draw, 0 = it loses, -1 = outcome unknown at this depth.
// NOTE(review): pred is read as [i][0..63] and valid_mask as i*65+j, so
// pred is assumed to have 65 columns — confirm against the caller.
std::vector<torch::Tensor> reversi_forward(torch::Tensor data_b,
                                           torch::Tensor data_w,
                                           torch::Tensor pred) {
  auto batch_size = data_b.size(0);
  auto selection = torch::zeros_like(data_b);
  auto result = torch::zeros_like(data_b);

  auto options = torch::TensorOptions().dtype(torch::kBool);
  auto valid_mask = torch::zeros_like(pred, options);

  // NOTE(review): raw data_ptr() without .contiguous() — assumes all input
  // tensors are already contiguous; verify at the call sites.
  auto b_it = data_b.data_ptr<int64_t>();
  auto w_it = data_w.data_ptr<int64_t>();
  auto result_it = result.data_ptr<int64_t>();
  auto selection_it = selection.data_ptr<int64_t>();
  auto valid_mask_it = valid_mask.data_ptr<bool>();
  auto pred_it = pred.accessor<float, 2>();
// NOTE(review): rand() is called inside this parallel region; its hidden
// global state makes that a data race (and non-reproducible) — consider a
// per-thread RNG.
#pragma omp parallel for
  for (int i = 0; i < batch_size; i++) {
    Grid b = b_it[i];
    Grid w = w_it[i];
    Grid avail = pure_avail(b, w);
    if (!avail) {
      // No legal move: record a pass (65th slot) and swap sides in place.
      selection_it[i] = 64;
      b_it[i] = (int64_t)(w);
      w_it[i] = (int64_t)(b);
      valid_mask_it[i * 65 + 64] = 1;
    } else {
      // Numerically-stable softmax over the legal squares only.
      // NOTE(review): -1e45 overflows float and becomes -inf on IEEE
      // targets; it works as a sentinel here but is fragile.
      float prob_max = -1e45;
      for (int j = 0; j < 64; j++) {
        if ((avail >> j) & 1) {
          prob_max = fmax(prob_max, pred_it[i][j]);
        }
      }
      float prob_sum = 0;
      for (int j = 0; j < 64; j++) {
        if ((avail >> j) & 1) {
          prob_sum += exp(pred_it[i][j] - prob_max);
          valid_mask_it[i * 65 + j] = 1;
        }
      }

      // Sample by walking the cumulative distribution; selection_it[i] is
      // assigned before the break, so it always ends on a legal square.
      float _rand = rand() * prob_sum / RAND_MAX;
      for (int j = 0; j < 64; j++) {
        if ((avail >> j) & 1) {
          _rand -= exp(pred_it[i][j] - prob_max);
          selection_it[i] = j;
          if (_rand <= 0) break;
        }
      }
      // Apply the move, then store the boards swapped so the stored "b"
      // is always the side to move next.  Locals b/w track the new state.
      Grid f = get_flipped(b, w, 1ULL << selection_it[i]);
      b_it[i] = w = (int64_t)(w & ~f);
      w_it[i] = b = (int64_t)(b | f);
    }
    // Exact end-search from the next player's perspective; `1 - r` remaps
    // it to the encoding documented in the header comment.
    result_it[i] = 1 - search_end(12, w, b);
    // Reset when the game is over, or with probability 1/2 once the
    // outcome is already decided; otherwise randomly transpose and/or
    // anti-transpose both boards as augmentation.
    // NOTE(review): relies on `&&` binding tighter than `||` — correct,
    // but compilers warn; parentheses would make the intent explicit.
    if (result_it[i] >= 0 && rand() % 2 ||
        !pure_avail(b, w) && !pure_avail(w, b)) {
      b_it[i] = 0x0000000810000000;
      w_it[i] = 0x0000001008000000;
    } else {
      if (rand() % 2) {
        b_it[i] = flip(b_it[i]);
        w_it[i] = flip(w_it[i]);
      }
      if (rand() % 2) {
        b_it[i] = flip_rev(b_it[i]);
        w_it[i] = flip_rev(w_it[i]);
      }
    }
  }

  return {selection, result, valid_mask};
}

// One recorded position: the boards before the move, the move square `s`
// (64 = pass), and the legal-move mask at that point.
struct Step {
  Grid b, w, s, avail;
};

// A complete replayed game: all positions plus the final outcome
// (1 draw, 2 black wins, 0 white wins — same encoding as reversi_std).
struct Record {
  std::vector<Step> steps;
  long result;
};

// A cursor into a Record, used to stream training samples ply by ply.
struct State {
  Record* rec;   // non-owning; points into the static `records` vector
  size_t step;   // index of the next Step to emit
};

std::vector<torch::Tensor> reversi_pretrain(int batch_size = 128) {
  static std::vector<Record> records;
  static std::vector<State> state;
  static std::string file_list[] = {
      "data/WTH_2001.wtb", "data/WTH_2002.wtb", "data/WTH_2003.wtb",
      "data/WTH_2004.wtb", "data/WTH_2005.wtb", "data/WTH_2006.wtb",
      "data/WTH_2007.wtb", "data/WTH_2008.wtb", "data/WTH_2009.wtb",
      "data/WTH_2010.wtb", "data/WTH_2011.wtb", "data/WTH_2012.wtb",
      "data/WTH_2013.wtb", "data/WTH_2014.wtb", "data/WTH_2015.wtb",
      "data/WTH_2016.wtb", "data/WTH_2017.wtb", "data/WTH_2018.wtb",
      "data/WTH_2019.wtb"};
  if (state.empty()) {
    unsigned char buf[64];
    for (auto file_path : file_list) {
      FILE* f = fopen(file_path.c_str(), "r");
      fread(buf, 16, 1, f);
      while (!feof(f)) {
        fread(buf, 8, 1, f);
        fread(buf, 60, 1, f);
        Record game;
        unsigned char* _buf = buf;
        Grid b = 0x0000000810000000;
        Grid w = 0x0000001008000000;
        while (true) {
          Grid avail = pure_avail(b, w);
          if (avail) {
            Grid s = (*_buf / 10 - 1) * 8 + (*_buf % 10 - 1);
            game.steps.push_back({b, w, s, avail});
            _buf++;
            Grid f = get_flipped(b, w, 1ULL << s);
            b |= f;
            w &= ~f;
          } else {
            game.steps.push_back({b, w, 64, avail});
          }
          if (!pure_avail(b, w) && !pure_avail(w, b)) {
            if (countSetBits(w) == countSetBits(b))
              game.result = 1;
            else
              game.result = 2 * (countSetBits(b) > countSetBits(w));
            break;
          }
          std::swap(b, w);
        }
        records.push_back(game);
      }
      fclose(f);
    }
    for (int i = 0; i < batch_size; i++) {
      size_t idx = rand() % records.size();
      state.push_back(
          (State){&records[idx], rand() % records[idx].steps.size()});
    }
  }

  auto options = torch::TensorOptions().dtype(torch::kLong);
  torch::Tensor data_b = torch::zeros({batch_size}, options);
  torch::Tensor data_w = torch::zeros({batch_size}, options);
  torch::Tensor select = torch::zeros({batch_size}, options);
  torch::Tensor win = torch::zeros({batch_size}, options);
  torch::Tensor valid = torch::zeros(
      {batch_size, 64}, torch::TensorOptions().dtype(torch::kBool));

  auto b_it = data_b.data_ptr<long>();
  auto w_it = data_w.data_ptr<long>();
  auto s_it = select.data_ptr<long>();
  auto v_it = valid.accessor<bool, 2>();
  auto win_it = win.data_ptr<long>();

#pragma omp parallel for
  for (int i = 0; i < batch_size; i++) {
    b_it[i] = state[i].rec->steps[state[i].step].b;
    w_it[i] = state[i].rec->steps[state[i].step].w;
    s_it[i] = state[i].rec->steps[state[i].step].s;
    Grid avail = state[i].rec->steps[state[i].step].avail;
    if (rand() % 2) {
      b_it[i] = flip(b_it[i]);
      w_it[i] = flip(w_it[i]);
      avail = flip(avail);
      if (s_it[i] != 64) {
        size_t x = s_it[i] / 8, y = s_it[i] % 8;
        s_it[i] = y * 8 + x;
      }
    }
    if (rand() % 2) {
      b_it[i] = flip_rev(b_it[i]);
      w_it[i] = flip_rev(w_it[i]);
      avail = flip_rev(avail);
      if (s_it[i] != 64) {
        size_t x = s_it[i] / 8, y = s_it[i] % 8;
        s_it[i] = (7 - y) * 8 + (7 - x);
      }
    }
    for (int j = 0; j < 64; j++) v_it[i][j] = (avail >> j) & 1;

    win_it[i] = 3;
    if (++state[i].step == state[i].rec->steps.size()) {
      win_it[i] = state[i].rec->result;
      // reset
      size_t idx = rand() % records.size();
      state[i] = (State){&records[idx], 1};
    }
  }

  return {data_b, data_w, select, valid, win};
}

torch::Tensor reversi_convert(torch::Tensor data_b, torch::Tensor data_w) {
  // Unpacks each pair of bitboards into a float tensor of shape
  // [batch, 2, 8, 8]: channel 0 = black stones, channel 1 = white stones.
  auto batch_size = data_b.size(0);
  auto options = torch::TensorOptions().dtype(torch::kFloat32);
  torch::Tensor x = torch::zeros({batch_size, 2, 8, 8}, options);

  auto b_it = data_b.data_ptr<int64_t>();
  auto w_it = data_w.data_ptr<int64_t>();
  auto x_it = x.accessor<float, 4>();
  for (int i = 0; i < batch_size; i++) {
    const Grid boards[2] = {(Grid)b_it[i], (Grid)w_it[i]};
    for (int ch = 0; ch < 2; ch++)
      for (int sq = 0; sq < 64; sq++)
        x_it[i][ch][sq / 8][sq % 8] = (boards[ch] >> sq) & 1;
  }
  return x;
}

// Python bindings for the batched self-play / training helpers.
// NOTE(review): reversi_pretrain's C++ default argument (batch_size = 128)
// is not visible to Python without a py::arg("batch_size") = 128 binding —
// Python callers must pass it explicitly; confirm this is intended.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &reversi_forward, "reversi forward");
  m.def("std", &reversi_std, "reversi std");
  m.def("win", &reversi_win, "reversi win");
  m.def("pretrain", &reversi_pretrain, "reversi pretrain");
  m.def("convert", &reversi_convert, "reversi convert");
}
