#ifndef __THREADING_HPP__
#define __THREADING_HPP__

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cassert>
#include <mutex>
#include <thread>
#include <unordered_map>

#include <iomanip>

#include <pthread.h>
#include <unistd.h>
#include <numa.h>
#include <numaif.h>
#include <linux/sched.h>

#include <atomic>

#include "tbb/tick_count.h"
#include "tbb/combinable.h"

// Thread bookkeeping utilities: maps each std::thread::id to a small dense
// integer id, provides per-thread / serialized debug dumping, and NUMA-aware
// pinning of threads to CPUs and of pages to nodes.
//
// NOTE: the static data members (max_thread_id, threadIdMap, idThreadMap)
// are only declared here; their definitions live in a separate .cpp file.
class Threading {
  static int max_thread_id;
  static std::unordered_map<std::thread::id, int> threadIdMap;
  static std::unordered_map<int, std::thread::id> idThreadMap;

  // Serializes writes to a stream in dump(std::ostream&, std::string const&).
  static inline std::mutex &display_mutex() {
    static std::mutex the_mutex;
    return the_mutex;
  }

  static inline std::mutex &thread_count_mutex() {
    static std::mutex the_mutex;
    return the_mutex;
  }

  // Held for the lifetime of a SequentializeCurrentScope object.
  static inline std::mutex &sequentialize_mutex() {
    static std::mutex the_mutex;
    return the_mutex;
  }

  // Guards every access (reads AND writes) to threadIdMap / idThreadMap.
  // The original code probed the maps before locking (and read them with no
  // lock in my_thread_id), which is a data race when another thread inserts
  // concurrently.
  static inline std::mutex &id_map_mutex() {
    static std::mutex the_mutex;
    return the_mutex;
  }

public:
  // RAII guard: while an instance is alive, the enclosing scope holds a
  // global mutex, so such scopes execute one thread at a time.
  class SequentializeCurrentScope {
    std::mutex &the_mutex;
  public:
    SequentializeCurrentScope() : the_mutex(sequentialize_mutex())
    {
      // my_thread_id() ties an id to this thread on first use.
      the_mutex.lock();
      std::cerr << "Thread " << my_thread_id() << " acquired sequential lock\n";
    }
    // Deliberately manual lock/unlock (not lock_guard): the "released"
    // message must be printed after the mutex has actually been dropped.
    ~SequentializeCurrentScope()
    {
      the_mutex.unlock();
      std::cerr << "Thread " << my_thread_id() << " released sequential lock\n";
    }
  };

  // Assigns the calling thread a small dense id (0, 1, 2, ...) on first
  // call; subsequent calls are no-ops. Check and insert both happen under
  // id_map_mutex so concurrent first calls from different threads are safe.
  static void ensureTiedId() {
    std::lock_guard<std::mutex> guard(id_map_mutex());
    const std::thread::id tid = std::this_thread::get_id();
    if (Threading::threadIdMap.find(tid) == Threading::threadIdMap.end()) {
      Threading::idThreadMap[max_thread_id] = tid;
      Threading::threadIdMap[tid] = max_thread_id++;
    }
  }

  // Returns the dense integer id of the calling thread, assigning one if
  // needed. The map lookup is done under the lock (and only once).
  static inline int my_thread_id() {
    ensureTiedId();
    std::lock_guard<std::mutex> guard(id_map_mutex());
    auto it = Threading::threadIdMap.find(std::this_thread::get_id());
    assert(it != Threading::threadIdMap.end());
    return it->second;
  }

  // Appends s to a per-thread dump file /tmp/swarm.dmp.<id>, opening the
  // file lazily on first use. Supports at most kMaxDumpThreads threads.
  static inline void dump(std::string const &s) {
    static const int kMaxDumpThreads = 128;
    static std::fstream streams[kMaxDumpThreads];

    const int id = my_thread_id();
    assert(id < kMaxDumpThreads); // would index past streams[] otherwise

    std::fstream &my_stream = streams[id];
    if (!my_stream.is_open()) {
      std::stringstream ss;
      ss << "/tmp/swarm.dmp." << id;
      std::cerr << "Opening " << ss.str() << "\n";
      my_stream.open(ss.str(), std::fstream::out);
    }

    assert(my_stream.is_open());
    my_stream << s;
  }

  // Writes s to os while holding the display mutex so that concurrent
  // dumps from different threads do not interleave.
  static inline void dump(std::ostream & os, std::string const &s) {
    std::lock_guard<std::mutex> guard(display_mutex());
    os << s;
  }

  static void dumpThreadInfo(std::ostream & os, std::string const &s);
  static void dumpMemInfo(std::ostream & os, void *addr);

  // Advances a shared CPU counter by the stride given in environment
  // variable STRIDE_PROC_PINNING (default 1 when unset or unparsable) and
  // returns the resulting value. Used to hand out CPUs round-robin.
  static int threadCountInc() {
    // Shared counter used to pin threads to CPUs round-robin.
    static std::atomic<int> cpu_count(-1);

    int inc = 1; // default stride
    // getenv returns NULL when the variable is unset; the original fed
    // that NULL to istringstream's std::string constructor (undefined
    // behavior — not an exception, so try/catch could not help).
    if (const char *val = getenv("STRIDE_PROC_PINNING")) {
      std::istringstream buffer(val);
      if (!(buffer >> inc))
        inc = 1; // unparsable value: fall back to the default stride
    }

    // Return the value this increment produced; re-reading the atomic
    // after += could observe another thread's increment.
    return cpu_count += inc;
  }

  // Just increments a counter by the value in environment variable
  // STRIDE_PROC_PINNING and pins the calling thread to the next processor
  // modulo num_procs. Later we can extend this behavior. Must be called at
  // most once per thread (asserted via a thread-local flag).
  static void pinThread() {
#ifdef NO_PINNING
    return;
#else
    static __thread bool firstTime = true;
    assert (firstTime);
    firstTime = false;

    // The global lock below will be removed; it exists for in-order
    // debugging purposes at the moment. lock_guard also releases it on the
    // early exit() path.
    static std::mutex global;
    std::lock_guard<std::mutex> guard(global);

    cpu_set_t cpuSet;
    int cpu = Threading::threadCountInc() % numa_num_configured_cpus();

    CPU_ZERO(&cpuSet);
    CPU_SET(cpu, &cpuSet);
#if (VERBOSE >= 1)
    std::stringstream ss;
    ss << "PIN thread " << my_thread_id()
       << " to cpu " << std::dec << cpu << " node " << numa_node_of_cpu(cpu) << "\n";
    dump(std::cerr, ss.str());
#endif
    int res = pthread_setaffinity_np(pthread_self(), sizeof(cpuSet), &cpuSet);
    if (res != 0) {
      std::cerr << "sched_setaffinity failed, exit with code " << errno << std::endl;
      exit(errno);
    }
#endif // NO_PINNING
  }

  // This is very problem and processor specific.
  // Atm, just use a proof of concept implementation: slab k is moved to
  // NUMA node k (slab0 -> node 0, slab1 -> node 1, ...).
  template<typename ARRAY_TYPE>
  static void pinPages(void *A, int SLABS, int ARRAY_SIZE, ARRAY_TYPE t) {
#ifdef NO_PINNING
    return;
#else
    // mbind() could do this in one call, but we could not make membind
    // work properly; use move_pages() page by page instead.
    int pageSize = Threading::getPageSize();
    uint64_t ind, count=0;
    for (int arr=0; arr<SLABS; arr++) {
      for (ind=0; ind < ARRAY_SIZE * sizeof(decltype(t)); ind+=pageSize) {
        // Round the element address down to its page boundary.
        uint64_t ptrInt = (uint64_t)(((char *)A) +
                                     arr*ARRAY_SIZE*sizeof(decltype(t)) + ind);
        uint64_t m = ~(pageSize-1);
        ptrInt = ptrInt & m;
        void *ptr = (void *)ptrInt;

        // Touch one byte of the page so it is faulted in before we move it.
        char *c = (char *)ptrInt;
        *c = 0;

        int status = 0;
        int node = arr; // target node: one node per slab
#if (DEBUG_NUMA_MEM && VERBOSE >= 10)
        long ret = move_pages(0, 1, &ptr, &node, &status, MPOL_MF_MOVE);
        std::cerr << "status for page num=" << count++ << " with ind=" //<< std::hex
             << " " << ptr << " is " << std::dec << status << " and ret is " << ret
             << "\n" ;
#else
        count++; // -Wunused-variable suppression ...
        move_pages(0, 1, &ptr, &node, &status, MPOL_MF_MOVE);
#endif
        std::cerr << std::dec;
        if (status < 0) exit(0);
      }
    }
#endif // NO_PINNING
  }

  // Aborts (via assert) if the system page size differs from `size`.
  static void assertPageSize(int size) {
    if (size != getpagesize()) {
      std::cerr << "Page Size = " << getpagesize() << " and NOT " << size << "\n";
      assert(false);
    }
  }

  // Returns the system page size in bytes.
  static int getPageSize() {
    return getpagesize();
  }
};

// Formats y (any <<-chain) into a string and writes it to std::cerr under
// the display mutex. do { } while (0) makes the expansion a single
// statement, so `if (p) SEQUENTIAL_DUMP(x); else ...` parses correctly
// (the bare-brace version left a stray ';' that broke if/else).
#define SEQUENTIAL_DUMP(y)                      \
  do {                                          \
    std::stringstream ss;                       \
    ss << y;                                    \
    Threading::dump(std::cerr, ss.str());       \
  } while (0)

// Formats y into a string and appends it to the calling thread's private
// dump file (see Threading::dump(std::string const&)). do { } while (0)
// makes the expansion a single statement, safe in unbraced if/else.
#define SEQUENTIAL_DUMP1(y)                     \
  do {                                          \
    std::stringstream ss;                       \
    ss << y;                                    \
    Threading::dump(ss.str());                  \
  } while (0)

// Formats y into a string and writes it to stream x under the display
// mutex. do { } while (0) makes the expansion a single statement, safe in
// unbraced if/else.
#define SEQUENTIAL_DUMP2(x, y)                  \
  do {                                          \
    std::stringstream ss;                       \
    ss << y;                                    \
    Threading::dump(x, ss.str());               \
  } while (0)

#endif // __THREADING_HPP__
