/**
 * @file Event.cc
 * @author Xiaoze Lin (linxiaoze96@gmail.com)
 * @brief The implementation of event class.
 * @version 0.1
 *
 * @copyright Copyright (c) 2024
 *
 */

#include "Event.hh"

namespace ifsim {

/**
 * @brief Construct and fully initialize the event manager.
 *
 * @param num_nodes number of gates/nodes managed.
 * @param num_thread number of worker threads.
 * @param firstNodePerLevel first node id of each level; its size defines the level count.
 */
QueueBasedEventManager::QueueBasedEventManager(int num_nodes, int num_thread, const std::vector<int>& firstNodePerLevel)
    : _eventsPtr(nullptr),
      _flagsPtr(nullptr),
      _qOffsetsPtr(nullptr),
      _qSizesPtr(nullptr),
      _numLevels(static_cast<int>(firstNodePerLevel.size())),
      _curLevel(-1),
      _numThread(num_thread)
{
  // init() re-derives _numLevels/_numThread and allocates all queue storage.
  init(num_nodes, num_thread, firstNodePerLevel);
}

/**
 * @brief Default-construct an empty manager; call init() or initPtr() before use.
 */
QueueBasedEventManager::QueueBasedEventManager()
    : _eventsPtr(nullptr), _flagsPtr(nullptr), _qOffsetsPtr(nullptr), _qSizesPtr(nullptr), _numLevels(0), _curLevel(-1), _numThread(0)
{
}

/**
 * @brief Destructor.
 *
 * Intentionally empty: the vector members release their own storage.
 * NOTE(review): the per-thread buffers created by initPtr()/
 * allocDataStructuresToNumaNode() are NOT released here — callers are
 * expected to invoke freeDataStructuresInNumaNode() themselves.
 */
QueueBasedEventManager::~QueueBasedEventManager()
{
}

/**
 * @brief Init relevant data structures (single flat layout shared by all threads).
 *
 * Sizes the flat event/flag/queue arrays for @p num_thread threads and
 * precomputes each (thread, level) queue's start offset inside _events.
 * Each thread owns a contiguous region of
 *   num_nodes + numLevels * 255 * num_thread
 * event slots; within a region, a level's queue starts at
 * firstNodePerLevel[level] plus 255 * num_thread spare slots per
 * preceding level.
 *
 * @param num_nodes number of gates/nodes managed.
 * @param num_thread number of worker threads.
 * @param firstNodePerLevel first node id of each level; its size defines the level count.
 */
void QueueBasedEventManager::init(int num_nodes, int num_thread, const std::vector<int>& firstNodePerLevel)
{
  _numLevels = static_cast<int>(firstNodePerLevel.size());
  _numThread = num_thread;

  // Total storage: the per-thread region size times num_thread threads.
  _events.resize(num_nodes * num_thread + firstNodePerLevel.size() * 255 * num_thread * num_thread);
  _events.shrink_to_fit();
  _eventsPtr = _events.data();  // data() is well-defined even for an empty vector (unlike &v[0])

  _flags.resize(num_nodes * num_thread);
  _flags.shrink_to_fit();
  _flagsPtr = _flags.data();

  _queueSizes.resize(numLevels() * num_thread, 0);
  _queueSizes.shrink_to_fit();
  _qSizesPtr = _queueSizes.data();

  _queueOffsets.resize(numLevels() * num_thread);
  _queueOffsets.shrink_to_fit();
  _qOffsetsPtr = _queueOffsets.data();
  for (int thread_id = 0; thread_id < num_thread; ++thread_id) {
    // First event slot of this thread's region inside the flat _events array.
    int start_offset_id = (num_nodes + numLevels() * 255 * num_thread) * thread_id;
    // int bound (== firstNodePerLevel.size()) avoids a signed/unsigned comparison.
    for (int offset_i = 0; offset_i < numLevels(); ++offset_i) {
      _qOffsetsPtr[numLevels() * thread_id + offset_i] = start_offset_id + firstNodePerLevel[offset_i] + 255 * num_thread * offset_i;
    }
  }

  _threadCurLevel.resize(num_thread);

  clear();
}

/**
 * @brief Reset every queue, event slot, and in-queue flag to empty.
 *
 * Uses element-wise fills (not vector::assign) on purpose: the cached raw
 * pointers (_eventsPtr, _flagsPtr, _qSizesPtr) must stay valid.
 */
void QueueBasedEventManager::clear()
{
  std::fill_n(_flags.begin(), _flags.size(), 0);
  std::fill_n(_events.begin(), _events.size(), 0);
  std::fill_n(_queueSizes.begin(), _queueSizes.size(), 0);
  _curLevel = -1;  // no current level until the first next()/insert()
}

/**
 * @brief Insert event @p g into the queue of @p level (single-thread API).
 *
 * De-duplicates via _flagsPtr: a gate already queued is ignored.
 */
void QueueBasedEventManager::insert(int g, int level)
{
  if (_flagsPtr[g] != 0) {
    return;  // already queued somewhere — keep a single pending entry per gate
  }
  _flagsPtr[g] = 1;
  const int slot = _qOffsetsPtr[level] + _qSizesPtr[level];
  _eventsPtr[slot] = g;  // append at the queue's tail
  ++_qSizesPtr[level];
}

/**
 * @brief Get next event from event queue (single-thread API).
 *
 * Pops the last element of the current level's queue (LIFO within a level),
 * advancing to the next non-empty level when the current one drains.
 *
 * @return the popped gate id, or -1 when every level queue is empty.
 */
int QueueBasedEventManager::next()
{
  // _curLevel is -1 right after clear() and once all queues drain; indexing
  // _qSizesPtr[-1] would read out of bounds, so treat that state as
  // "current queue empty" and fall into the level scan below.
  if (curLevel() == -1 || _qSizesPtr[curLevel()] == 0) {
    if ((numLevels() - 1) == curLevel()) {  // Now DFFs are in last level
      curLevel(-1);
    } else {
      int level = curLevel() + 1;

      // Scan upward for the next non-empty level queue.
      while (level < numLevels()) {
        if (_qSizesPtr[level]) {
          curLevel(level);
          break;
        }
        level += 1;
      }
      if (numLevels() == level) {  // all levels are empty after the above WHILE loop
        curLevel(-1);
      }
    }
  }
  if (curLevel() == -1) {
    // loop back to locate non-empty queue
    for (int l = 0; l < numLevels(); l++) {
      if (_qSizesPtr[l]) {
        curLevel(l);
        break;
      }
    }
  }
  if (curLevel() == -1) {
    return -1;
  }

  int pos = _qOffsetsPtr[curLevel()] + _qSizesPtr[curLevel()] - 1;
  int g = _eventsPtr[pos];      // get the last element
  _flagsPtr[g] = 0;             // clear the flag so g can be re-queued
  _qSizesPtr[curLevel()] -= 1;  // pop the last element
  return g;
}

/**
 * @brief Insert event @p g into thread @p tid's queue for @p level.
 *
 * Per-thread twin of insert(): de-duplicates via the thread's own flag array,
 * so no synchronization is involved — each tid touches only its own buffers.
 */
void QueueBasedEventManager::insertThreadEvent(int g, int level, int tid)
{
  if (_flagsPPtr[tid][g] != 0) {
    return;  // gate already pending in this thread's queues
  }
  _flagsPPtr[tid][g] = 1;
  const int slot = _qOffsetsPPtr[tid][level] + _qSizesPPtr[tid][level];
  _eventsPPtr[tid][slot] = g;  // append at the tail of this thread's level queue
  ++_qSizesPPtr[tid][level];
}

/**
 * @brief Get next event from thread @p tid's queues (per-thread twin of next()).
 *
 * Pops the last element of the thread's current level queue (LIFO within a
 * level), advancing to the next non-empty level when the current one drains.
 *
 * @return the popped gate id, or -1 when all of the thread's queues are empty.
 */
int QueueBasedEventManager::threadNextEvent(int tid)
{
  // threadCurLevel(tid) is -1 once this thread's queues drained; indexing
  // _qSizesPPtr[tid][-1] would read out of bounds, so treat that state as
  // "current queue empty" and fall into the level scan below.
  if (threadCurLevel(tid) == -1 || _qSizesPPtr[tid][threadCurLevel(tid)] == 0) {
    if ((_numLevels - 1) == threadCurLevel(tid)) {  // Now DFFs are in last level
      threadCurLevel(-1, tid);
    } else {
      int level = threadCurLevel(tid) + 1;

      // Scan upward for the next non-empty level queue of this thread.
      while (level < _numLevels) {
        if (_qSizesPPtr[tid][level]) {
          threadCurLevel(level, tid);
          break;
        }
        level += 1;
      }
      if (_numLevels == level) {  // all levels are empty after the above WHILE loop
        threadCurLevel(-1, tid);
      }
    }
  }
  if (threadCurLevel(tid) == -1) {
    // loop back to locate non-empty queue
    for (int l = 0; l < _numLevels; ++l) {
      if (_qSizesPPtr[tid][l]) {
        threadCurLevel(l, tid);
        break;
      }
    }
  }
  if (threadCurLevel(tid) == -1) {
    return -1;
  }

  int pos = _qOffsetsPPtr[tid][threadCurLevel(tid)] + _qSizesPPtr[tid][threadCurLevel(tid)] - 1;
  int g = _eventsPPtr[tid][pos];               // get the last element
  _flagsPPtr[tid][g] = 0;                      // clear the flag so g can be re-queued
  _qSizesPPtr[tid][threadCurLevel(tid)] -= 1;  // pop the last element
  return g;
}

/**
 * @brief: Init relevant pointer.
 *
 * Allocates the per-thread pointer tables used by the multi-threaded API
 * (insertThreadEvent / threadNextEvent). Each table holds one slot per
 * thread; the buffers the slots point at are allocated later by
 * allocDataStructuresToNumaNode(). The new[]'d tables are released by
 * freeDataStructuresInNumaNode().
 *
 * NOTE(review): calling initPtr() twice leaks the previously allocated
 * tables — confirm callers invoke it at most once per manager.
 */
void QueueBasedEventManager::initPtr(int num_nodes, int num_thread, const std::vector<int>& firstNodePerLevel)
{
  _numNodes = num_nodes;
  _numLevels = (int) firstNodePerLevel.size();
  _numThread = num_thread;
  // One slot per worker thread; filled in by allocDataStructuresToNumaNode().
  _eventsPPtr = new int*[num_thread];
  _flagsPPtr = new int*[num_thread];
  _qOffsetsPPtr = new int*[num_thread];
  _qSizesPPtr = new int*[num_thread];
  // resize() value-initializes the new entries, so every thread starts at level 0.
  _threadCurLevel.resize(num_thread);
}

/**
 * @brief Allocate thread @p thread_id's event buffers on NUMA node @p curr_node_id.
 *
 * Falls back to plain malloc when libnuma reports no NUMA support.
 * The events buffer carries 255 spare slots per level on top of _numNodes.
 * (@p vec_mode is currently unused here; kept for interface compatibility.)
 */
void QueueBasedEventManager::allocDataStructuresToNumaNode(int thread_id, int curr_node_id, bool vec_mode,
                                                           const std::vector<int>& firstNodePerLevel)
{
  // Byte sizes of the per-thread arrays.
  const size_t events_bytes = (_numNodes + _numLevels * 255) * sizeof(int);
  const size_t flags_bytes = _numNodes * sizeof(int);
  const size_t per_level_bytes = _numLevels * sizeof(int);

  int* events;
  int* flags;
  int* queue_sizes;
  int* queue_offsets;
  if (numa_available() != -1) {
    // Bind this thread's working set to its NUMA node for local access.
    events = static_cast<int*>(numa_alloc_onnode(events_bytes, curr_node_id));
    flags = static_cast<int*>(numa_alloc_onnode(flags_bytes, curr_node_id));
    queue_sizes = static_cast<int*>(numa_alloc_onnode(per_level_bytes, curr_node_id));
    queue_offsets = static_cast<int*>(numa_alloc_onnode(per_level_bytes, curr_node_id));
  } else {
    events = static_cast<int*>(malloc(events_bytes));
    flags = static_cast<int*>(malloc(flags_bytes));
    queue_sizes = static_cast<int*>(malloc(per_level_bytes));
    queue_offsets = static_cast<int*>(malloc(per_level_bytes));
  }

  _eventsPPtr[thread_id] = events;
  _flagsPPtr[thread_id] = flags;
  _qSizesPPtr[thread_id] = queue_sizes;
  _qOffsetsPPtr[thread_id] = queue_offsets;

  // No gate is queued yet.
  for (int gid = 0; gid < _numNodes; ++gid) {
    _flagsPPtr[thread_id][gid] = 0;
  }

  // Empty queues; each level's queue starts at its first node id plus
  // 255 spare slots per preceding level.
  for (int level = 0; level < _numLevels; ++level) {
    _qSizesPPtr[thread_id][level] = 0;
    _qOffsetsPPtr[thread_id][level] = firstNodePerLevel[level] + 255 * level;
  }
}

/**
 * @brief Free every thread's buffers and the per-thread pointer tables.
 *
 * Releases each thread's buffers with numa_free()/free() to match how they
 * were obtained in allocDataStructuresToNumaNode().
 * (@p vec_mode is currently unused here; kept for interface compatibility.)
 */
void QueueBasedEventManager::freeDataStructuresInNumaNode(bool vec_mode)
{
  for (int thread_id = 0; thread_id < _numThread; ++thread_id) {
    if (numa_available() != -1) {
      numa_free(_eventsPPtr[thread_id], (_numNodes + _numLevels * 255) * sizeof(int));
      numa_free(_flagsPPtr[thread_id], _numNodes * sizeof(int));
      numa_free(_qSizesPPtr[thread_id], _numLevels * sizeof(int));
      numa_free(_qOffsetsPPtr[thread_id], _numLevels * sizeof(int));
    } else {
      free(_eventsPPtr[thread_id]);
      free(_flagsPPtr[thread_id]);
      free(_qSizesPPtr[thread_id]);
      free(_qOffsetsPPtr[thread_id]);
    }
  }
  // The pointer tables were allocated with new[] in initPtr(); releasing a
  // new[] array with scalar delete is undefined behavior — use delete[].
  delete[] _eventsPPtr;
  delete[] _flagsPPtr;
  delete[] _qSizesPPtr;
  delete[] _qOffsetsPPtr;
  // Null the tables so a stray reuse/double-free fails loudly instead of
  // touching freed memory.
  _eventsPPtr = nullptr;
  _flagsPPtr = nullptr;
  _qSizesPPtr = nullptr;
  _qOffsetsPPtr = nullptr;
}
}  // namespace ifsim