/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
 * Copyright (c) 2009 INRIA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Author: Guillaume Seguin <guillaume@segu.in>
 */

#include "multithreaded-simulator-impl.h"
#include "ns3/simulator.h"
#include "ns3/uinteger.h"
#include "ns3/enum.h"
#include "ns3/boolean.h"
#include "ns3/ptr.h"
#include "ns3/pointer.h"
#include "ns3/assert.h"
#include "ns3/log.h"
#include "ns3/wall-clock-synchronizer.h"
#include "ns3/node.h"
#include "ns3/node-list.h"
#include "ns3/net-device.h"

#include <math.h>
#include <algorithm>
#include <pthread.h>

#ifdef NS3_MPI
#include <mpi.h>
#include "ns3/mpi-interface.h"
#include "ns3/lbts-message.h"
#include "ns3/mpi-receiver.h"
#endif

#if CLOCK_DEBUGGING

#include "ns3/high-precision-clock.h"

#define CLOCK_DEBUGGING_INIT_OVERHEAD()                             \
              HighPrecisionClockInitOverhead ();
#define CLOCK_DEBUGGING_INIT()                                      \
              HighPrecisionClock start, end, start2, end2;          \
              HighPrecisionClock events, barriers, barriers_actual; \
              HighPrecisionClock diff;                              \
              HighPrecisionClockInit (&start);                      \
              HighPrecisionClockInit (&end);                        \
              HighPrecisionClockInit (&start2);                     \
              HighPrecisionClockInit (&end2);                       \
              HighPrecisionClockInit (&events);                     \
              HighPrecisionClockInit (&barriers);                   \
              HighPrecisionClockInit (&barriers_actual);            \
              HighPrecisionClockInit (&diff);

#define CLOCK_DEBUGGING_NOW(clock)            \
              HighPrecisionClockNow (clock);

#define CLOCK_DEBUGGING_DIFF_AND_ACCUMULATE(dest, diff, start, end) \
              HighPrecisionClockDiff (diff, start, end);            \
              HighPrecisionClockAccumulate (dest, diff);

/*
 * Print a per-thread breakdown of where CPU cycles were spent (event
 * processing vs. barrier wait/work).  Must be expanded in a scope where
 * `context` (the thread context), `m_globalEventsLock` (used here only to
 * serialize console output) and the clocks declared by
 * CLOCK_DEBUGGING_INIT() are visible.
 * Note: std::cout performs no printf-style formatting, so a literal
 * percent sign is written as "%", not "%%".
 */
#define CLOCK_DEBUGGING_PRINT()                                                                                        \
              uint64_t events_cycles, barriers_cycles, barriers_actual_cycles, barriers_wait_cycles;                   \
              events_cycles = HighPrecisionClockGetUint64 (&events);                                                   \
              barriers_cycles = HighPrecisionClockGetUint64 (&barriers);                                               \
              barriers_actual_cycles = HighPrecisionClockGetUint64 (&barriers_actual);                                 \
              barriers_wait_cycles = barriers_cycles - barriers_actual_cycles;                                         \
              pthread_mutex_lock (&m_globalEventsLock);                                                                \
              std::cout << "Thread " << context->id << " spent " << std::endl;                                         \
              std::cout << events_cycles << " cycles at processing events, " << std::endl;                             \
              std::cout << barriers_cycles << " cycles inside barriers (waiting & processing) and " << std::endl;      \
              std::cout << barriers_actual_cycles << " cycles at processing barriers, which gives " << std::endl;      \
              std::cout << barriers_wait_cycles << " cycles at waiting barriers." << std::endl;                        \
              std::cout << "Summary :" << std::endl;                                                                   \
              double barriers_percent = ((long double) barriers_cycles) * 100.0 / ((long double) events_cycles);       \
              double waiting_percent = ((long double) barriers_wait_cycles) * 100.0 / ((long double) barriers_cycles); \
              std::cout << barriers_percent << "% of events time spent on barriers" << std::endl;                      \
              std::cout << waiting_percent << "% of barrier time spent waiting" << std::endl;                          \
              std::cout << (100.0 - waiting_percent) << "% of barrier time spent working" << std::endl;                \
              pthread_mutex_unlock (&m_globalEventsLock);

#else /* CLOCK_DEBUGGING */

#define CLOCK_DEBUGGING_INIT_OVERHEAD()
#define CLOCK_DEBUGGING_INIT()
#define CLOCK_DEBUGGING_PRINT()
#define CLOCK_DEBUGGING_NOW(clock)
#define CLOCK_DEBUGGING_DIFF_AND_ACCUMULATE(dest, diff, start, end)

#endif /* CLOCK_DEBUGGING */

NS_LOG_COMPONENT_DEFINE ("MultiThreadedSimulatorImpl");

namespace ns3 {

NS_OBJECT_ENSURE_REGISTERED (MultiThreadedSimulatorImpl);

/*
 * Helper function macro-like which peeks the next event timestamp of a
 * given partition
 */
inline uint64_t
NextPartitionEventTs (MultiThreadingPartition *partition)
{
  if (partition->events->IsEmpty ())
    {
      // No pending event: report the maximum representable simulation time
      return 0x7fffffffffffffffLL;
    }
  return partition->events->PeekNext ().key.m_ts;
}

static __thread MultiThreadingPartition *g_currentPartition = 0;

TypeId
MultiThreadedSimulatorImpl::GetTypeId (void)
{
  /* Register the TypeId once (function-local static) and expose the
   * simulator's configuration knobs as ns-3 attributes. */
  static TypeId tid = TypeId ("ns3::MultiThreadedSimulatorImpl")
    .SetParent<Object> ()
    .AddConstructor<MultiThreadedSimulatorImpl> ()
    .AddAttribute ("ThreadsCount",
                   "Number of parallel execution units to run",
                   UintegerValue (2),
                   MakeUintegerAccessor (&MultiThreadedSimulatorImpl::m_threadsCount),
                   MakeUintegerChecker<uint32_t> ())
    .AddAttribute ("BarrierType",
                   "Synchronization barriers implementation : either posix "
                   "(using pthread barriers) or spin (using global spinlock "
                   "based barriers) or spin-tree (using a tree-shaped "
                   "spinlock barrier)",
                   EnumValue (BARRIER_TREESPIN),
                   MakeEnumAccessor (&MultiThreadedSimulatorImpl::m_barrierType),
                   MakeEnumChecker (BARRIER_POSIX, "posix",
                                    BARRIER_GLOBALSPIN, "spin",
                                    BARRIER_TREESPIN, "spin-tree"))
    .AddAttribute ("ThreadDedicatedPercent",
                   "Percentage of thread-dedicated partitions",
                   UintegerValue (0),
                   MakeUintegerAccessor (&MultiThreadedSimulatorImpl::m_threadDedicatedPercent),
                   MakeUintegerChecker<uint32_t> (0, 100))
    .AddAttribute ("SharedPartitionsSets",
                   "Number of shared sets of partitions. Must be smaller than number of threads.",
                   UintegerValue (1),
                   MakeUintegerAccessor (&MultiThreadedSimulatorImpl::m_nSharedPartitionsSets),
                   MakeUintegerChecker<uint32_t> (1))
    .AddAttribute ("IsRealTime",
                   "Should the simulator synchronize event execution with real-time?",
                   BooleanValue (false),
                   MakeBooleanAccessor (&MultiThreadedSimulatorImpl::m_isRealTime),
                   MakeBooleanChecker ())
    .AddAttribute ("IsDistributed",
                   "Is hybrid (distributed-parallel) mode enabled?",
                   BooleanValue (false),
                   MakeBooleanAccessor (&MultiThreadedSimulatorImpl::m_isDistributed),
                   MakeBooleanChecker ())
    .AddAttribute ("SynchronizationMode",
                   "What to do if the simulation cannot keep up with real time.",
                   EnumValue (SYNC_HARD_LIMIT),
                   MakeEnumAccessor (&MultiThreadedSimulatorImpl::m_synchronizationMode),
                   MakeEnumChecker (SYNC_BEST_EFFORT, "BestEffort",
                                    SYNC_HARD_LIMIT, "HardLimit"))
    .AddAttribute ("HardLimit",
                   "Maximum acceptable real-time jitter (used in conjunction with SynchronizationMode=HardLimit)",
                   TimeValue (MicroSeconds (10)),
                   MakeTimeAccessor (&MultiThreadedSimulatorImpl::m_hardLimit),
                   MakeTimeChecker ())
    ;
  return tid;
}

MultiThreadedSimulatorImpl::MultiThreadedSimulatorImpl ()
  : m_uid (4),
    m_globalUid (0),
    m_globalTs (0),
    m_globalEvents (0),
    m_rank (0)
{
  NS_LOG_FUNCTION_NOARGS ();
  /* Default-attribute mutexes protecting the global event scheduler and
   * the destroy-time event list. */
  pthread_mutexattr_t lock_attr;
  pthread_mutexattr_init (&lock_attr);
  pthread_mutex_init (&m_globalEventsLock, &lock_attr);
  pthread_mutex_init (&m_destroyEventsLock, &lock_attr);
  /* The attribute object must be destroyed once the mutexes are
   * initialized; the mutexes keep their settings. */
  pthread_mutexattr_destroy (&lock_attr);

#ifdef NS3_MPI
  /* NOTE(review): the m_isDistributed guard is commented out because ns-3
   * attributes are not yet applied at construction time; Run () performs
   * the same (guarded) setup again.  As written this queries MPI state
   * unconditionally whenever NS3_MPI is defined, and m_lbtsMessages
   * appears to be allocated here AND in Run () without an intervening
   * delete[] — verify for a leak in distributed mode. */
//  if (m_isDistributed)
  	{
			m_rank = MpiInterface::GetSystemId ();
			m_numRanks = MpiInterface::GetSize ();
			// Allocate the LBTS message buffer, one slot per MPI rank
			m_lbtsMessages = new LbtsMessage[m_numRanks];
			//m_grantedTime = Seconds (0);
  	}
#endif

  m_running = false;
  m_nEvents = 0;
}

void
MultiThreadedSimulatorImpl::DoDispose (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  pthread_mutex_destroy (&m_globalEventsLock);
  pthread_mutex_destroy (&m_destroyEventsLock);
  /* m_globalEvents stays 0 until SetScheduler () is called: guard against
   * disposing a simulator that was never fully configured, which would
   * otherwise dereference a null scheduler pointer here. */
  if (m_globalEvents != 0)
    {
      /* Drain and release every still-pending global event, then drop the
       * reference held on the scheduler itself. */
      while (!m_globalEvents->IsEmpty ())
        {
          Scheduler::Event next = m_globalEvents->RemoveNext ();
          next.impl->Unref ();
        }
      m_globalEvents->Unref ();
      m_globalEvents = 0;
    }
  SimulatorImpl::DoDispose ();
}

MultiThreadedSimulatorImpl::~MultiThreadedSimulatorImpl ()
{
  NS_LOG_FUNCTION_NOARGS ();
  /* Intentionally empty: mutex destruction and the draining/release of
   * the global event scheduler are performed in DoDispose (), which the
   * Object lifecycle guarantees runs before destruction. */
}

void
MultiThreadedSimulatorImpl::Destroy (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  while (!m_destroyEvents.empty ())
    {
      Ptr<EventImpl> ev = m_destroyEvents.front ().PeekEventImpl ();
      m_destroyEvents.pop_front ();
      NS_LOG_LOGIC ("handle destroy " << ev);
      if (!ev->IsCancelled ())
        {
          ev->Invoke ();
        }
    }
  for (MultiThreadingPartitions::const_iterator i = m_partitions.begin (); i != m_partitions.end (); ++i)
    {
      MultiThreadingPartition *partition = i->second;
      delete partition;
    }
  m_partitions.clear ();
}

void
MultiThreadedSimulatorImpl::SetPartitionScheduler (MultiThreadingPartition *partition, ObjectFactory schedulerFactory)
{
  NS_LOG_FUNCTION (this << partition << schedulerFactory);

  /* Build the replacement scheduler, migrate any pending events into it,
   * then drop the reference held on the old one. */
  Ptr<Scheduler> replacement = schedulerFactory.Create<Scheduler> ();
  Scheduler *previous = partition->events;
  if (previous != 0)
    {
      while (!previous->IsEmpty ())
        {
          replacement->Insert (previous->RemoveNext ());
        }
      previous->Unref ();
    }
  partition->events = GetPointer (replacement);
}

void
MultiThreadedSimulatorImpl::SetScheduler (ObjectFactory schedulerFactory)
{
  NS_LOG_FUNCTION (this << schedulerFactory);

  /* Remember the factory, then re-create every per-partition scheduler
   * with it (pending events are migrated by SetPartitionScheduler). */
  m_schedulerFactory = schedulerFactory;
  MultiThreadingPartitions::const_iterator it;
  for (it = m_partitions.begin (); it != m_partitions.end (); ++it)
    {
      SetPartitionScheduler (it->second, schedulerFactory);
    }
  /* Replace the global event scheduler as well, keeping its events. */
  Ptr<Scheduler> replacement = schedulerFactory.Create<Scheduler> ();
  if (m_globalEvents != 0)
    {
      while (!m_globalEvents->IsEmpty ())
        {
          replacement->Insert (m_globalEvents->RemoveNext ());
        }
      m_globalEvents->Unref ();
    }
  m_globalEvents = GetPointer (replacement);
}

bool
MultiThreadedSimulatorImpl::IsFinished (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  /* The simulation is finished once no partition has a scheduled event
   * nor a queued inter-partition event message. */
  MultiThreadingPartitions::const_iterator it;
  for (it = m_partitions.begin (); it != m_partitions.end (); ++it)
    {
      MultiThreadingPartition *partition = it->second;
      bool hasWork = !partition->events->IsEmpty ()
                     || !partition->eventMessages.empty ();
      if (hasWork)
        {
          return false;
        }
    }
  return true;
}

Time
MultiThreadedSimulatorImpl::Next (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  /* Earliest pending event timestamp across all partitions, or the
   * maximum simulation time when every partition is idle. */
  uint64_t earliest = GetMaximumSimulationTs ();
  MultiThreadingPartitions::const_iterator it;
  for (it = m_partitions.begin (); it != m_partitions.end (); ++it)
    {
      MultiThreadingPartition *partition = it->second;
      if (partition->events->IsEmpty ())
        {
          continue;
        }
      uint64_t candidate = NextPartitionEventTs (partition);
      if (candidate < earliest)
        {
          earliest = candidate;
        }
    }
  return TimeStep (earliest);
}

inline uint32_t
MultiThreadedSimulatorImpl::RunPartitionUntil (MultiThreadingPartition *partition, uint64_t maxTs)
{
  NS_LOG_FUNCTION (this << partition << maxTs);

  NS_LOG_DEBUG ("Processing events at partition " << partition->id << " going from " << partition->currentTs << " until " << maxTs);
  /* Drain the partition's scheduler for as long as its next event is
   * strictly earlier than maxTs, counting executed events. */
  uint32_t executed = 0;
  for (;;)
    {
      if (partition->events->IsEmpty ()
          || NextPartitionEventTs (partition) >= maxTs)
        {
          break;
        }
      ProcessPartitionEvent (partition);
      ++executed;
    }
  return executed;
}

/*
 * Execute the next event of the given partition.  In real-time mode the
 * calling thread first sleeps (via the partition's synchronizer) until
 * the event's simulation time is reached in wall-clock terms; the wait is
 * retried if an external event interrupts it.  Caller must guarantee the
 * partition's scheduler is non-empty (PeekNext/RemoveNext are unguarded).
 */
inline void
MultiThreadedSimulatorImpl::ProcessPartitionEvent (MultiThreadingPartition *partition)
{
  NS_LOG_FUNCTION (this << partition);

  Ptr<Synchronizer> synchronizer = partition->synchronizer;

  //NS_LOG_DEBUG ("Process event at partition " << partition->id << " on rank " << m_rank << " at " << partition->currentTs);
  //NS_LOG_LOGIC ("handle " << next.key.m_ts);
  //NS_LOG_DEBUG ("Clock " << partition->id << " going from " << partition->currentTs << " to " << next.key.m_ts << " (during event processing)");

	/* All four timestamps below are only assigned inside the real-time
	 * branches; they are never read when m_isRealTime is false. */
	uint64_t tsDelay;
	uint64_t tsNext;
	uint64_t tsNow;
	uint64_t tsFinal;

	if(m_isRealTime)
		{
			/* Wait until wall-clock time catches up with the next event's
			 * timestamp.  Synchronize () returning true means the full delay
			 * elapsed; false means an external event interrupted the wait, in
			 * which case the head event is re-peeked and the wait restarted. */
			while(true)
				{
					tsDelay = 0;
					tsNext = 0;
					tsNow = 0;
					tsFinal = 0;

					tsNow = synchronizer->GetCurrentRealtime ();
					Scheduler::Event next = partition->events->PeekNext ();
				  NS_ASSERT (next.key.m_ts >= partition->currentTs);
				  NS_ASSERT (next.key.m_context != 0xffffffff);
					tsNext = next.key.m_ts;
				  //NS_LOG_DEBUG ("Process event at partition " << partition->id << " on rank " << m_rank << " at " << partition->currentTs);
					NS_LOG_DEBUG ("Started processing event on Partition (" << partition->id << "," << m_rank << ").  Clock going from " << tsNow << " to " << tsNext);

					/* Event already due (or overdue): no waiting required. */
					if (tsNext <= tsNow)
						{
							tsDelay = 0;
						}
					else
						{
							tsDelay = tsNext - tsNow;
						}

					synchronizer->SetCondition (false);

					if (synchronizer->Synchronize (tsNow, tsDelay))
						{
							//NS_LOG_DEBUG ("Partition " << partition->id << " interrupted ...");
							break;
						}
					NS_LOG_DEBUG("******PARTITION (" << partition->id << "," << m_rank << "): AN EXTERNAL EVENT HAS OCCURED REQUIRING RESYNCHRONIZATION******");
				}

			NS_LOG_DEBUG ("Partition (" << partition->id << "," << m_rank << ") thread woken up.  Event starts at: " << synchronizer->GetCurrentRealtime ());
		}// end real-time

	/* Dequeue and invoke the event, advancing the partition clock.  `next`
	 * is a value copy, so next.key remains valid after Unref () below. */
	Scheduler::Event next = partition->events->RemoveNext ();
  partition->currentTs = next.key.m_ts;
  partition->currentUid = next.key.m_uid;
  synchronizer->EventStart ();
  next.impl->Invoke ();
  synchronizer->EventEnd ();
  if(m_isRealTime)
  {
  	tsFinal = synchronizer->GetCurrentRealtime ();
  }
  next.impl->Unref ();

  if(m_isRealTime)
  {
		NS_LOG_DEBUG ("Partition " << partition->id << " event finishes at: " << tsFinal);

		/* Hard-limit policy: measure how far real time drifted from the
		 * event's nominal time.  NOTE(review): the branch condition compares
		 * against next.key.m_ts while the subtractions use tsNext, which was
		 * read from the head event before RemoveNext () — these should be the
		 * same event, but verify no other thread can insert an earlier event
		 * in between. */
		if (m_synchronizationMode == SYNC_HARD_LIMIT)
			{
				uint64_t tsJitter;

				if (tsFinal >= next.key.m_ts)
					{
						tsJitter = tsFinal - tsNext;
					}
				else
					{
						tsJitter = tsNext - tsFinal;
					}

				if (tsJitter > m_hardLimit)
					{
						/* Deliberately a log, not NS_FATAL_ERROR: overruns are
						 * reported but do not abort the simulation. */
						//NS_FATAL_ERROR
						NS_LOG_DEBUG("Partition " << partition->id << " hard real-time limit exceeded (jitter = " << tsJitter << ")");
					}
			}
  }
}

/*
 * Static thread entry point handed to pthread_create: unpacks the
 * per-thread context and forwards to the member function DoRunThread.
 *
 * \param data pointer to a struct ThreadContext describing this thread
 * \return always 0 (the thread's return value is unused)
 */
void*
MultiThreadedSimulatorImpl::RunThread (void *data)
{
  // static_cast instead of a C-style cast: the pointer really is a
  // ThreadContext*, handed to pthread_create by the spawning code.
  struct ThreadContext *context = static_cast<struct ThreadContext *> (data);
  MultiThreadedSimulatorImpl *self = context->simulator;
  self->DoRunThread (context);
  return 0;
}

uint32_t
MultiThreadedSimulatorImpl::ProcessPartition (MultiThreadingPartition *partition)
{
  NS_LOG_FUNCTION (this << partition);

  /* Under the message lock, reset the minimum pending-message timestamp
   * and move every queued inter-partition event message into the
   * partition's scheduler, stamping each with a fresh per-partition uid. */
  pthread_mutex_lock (&partition->eventMessagesLock);
  partition->minMessageTs = GetMaximumSimulationTs ();
  while (!partition->eventMessages.empty ())
    {
      struct MultiThreadingPartition::EventMessage message = partition->eventMessages.front ();
      partition->eventMessages.pop ();
      partition->nMessages--;
      NS_ASSERT (message.event != 0);
      Scheduler::Event scheduled;
      scheduled.impl = message.event;
      scheduled.key.m_ts = message.timestamp;
      scheduled.key.m_context = partition->id;
      scheduled.key.m_uid = partition->uid++;
      partition->events->Insert (scheduled);
    }
  pthread_mutex_unlock (&partition->eventMessagesLock);

  /* Execute the partition's events up to the global bound m_maxTs,
   * exposing the partition through the thread-local pointer while the
   * events run. */
  g_currentPartition = partition;
  uint32_t executed = RunPartitionUntil (partition, m_maxTs);
  g_currentPartition = 0;
  return executed;
}

/*
 * Pick the next shared partition this thread should process, or return 0
 * when none is available for the current iteration.
 *
 * Lockless variant: each slot's threadId is an atomic claim marker
 * (-1 == unclaimed, reset by thread 0 at each iteration).  Among the
 * slots this thread manages to claim that actually have work before
 * m_maxTs, the partition with the smallest next timestamp wins (ties
 * broken towards more pending messages).
 * NOTE(review): the compare-and-exchange claims a slot even when the
 * subsequent has-work test fails, so a claimed but workless slot stays
 * claimed until the next iteration's reset — presumably intended, verify
 * against the reset logic in DoRunThread.
 */
MultiThreadingPartition *
MultiThreadedSimulatorImpl::GetNextSharedPartition (struct ThreadContext *context)
{
  NS_LOG_FUNCTION (this << context);
#if LOCKLESS_SHARED_PARTITIONS
  struct SharedPartitionsSet *sharedSet = context->sharedPartitions;
  //std::cout<<"cccccccccccccccccccc current1 cc  "<<&sharedSet->currentPartition<<std::endl;
  uint16_t current = AtomicGet (&sharedSet->currentPartition);
  //std::cout<<"cccccccccccccccccccc current2 cc  "<<current<<std::endl;
  uint16_t size = (uint16_t) sharedSet->partitions.size ();
  MultiThreadingPartition* nextSharedPartition = 0;
  uint16_t i = 0;
  while (i < size)
    {
      /* Claim the slot (threadId: -1 -> our id), then check whether the
       * partition has a message or an event due before the global bound. */
      if (AtomicCompareAndExchange (&sharedSet->partitions[i].threadId,
                                    -1, context->id) == -1 &&
          (
						(!sharedSet->partitions[i].partition->eventMessages.empty() &&
								sharedSet->partitions[i].partition->minMessageTs < m_maxTs) ||
						(!sharedSet->partitions[i].partition->events->IsEmpty() &&
								NextPartitionEventTs (sharedSet->partitions[i].partition) < m_maxTs))
          )
        {
//					if( AtomicGet(&m_nStoppedPartitions) >= m_nPartitions)
//						{
//							break;
//						}
//        	NS_LOG_DEBUG("Rank " << m_rank << " partition " << sharedSet->partitions[i].partition->id << " m_maxTs " << m_maxTs <<
//        							" minMessageTs " << sharedSet->partitions[i].partition->minMessageTs  <<
//        							" NextPartitionEventTs " << NextPartitionEventTs (sharedSet->partitions[i].partition));
					MultiThreadingPartition* thisPartition = sharedSet->partitions[i].partition;
					NS_LOG_DEBUG("Partition " << thisPartition->id << " NextPartitionEventTs " << NextPartitionEventTs (thisPartition) <<
												" minMessageTs " << thisPartition->minMessageTs);
					if( nextSharedPartition == 0 )
						{
							nextSharedPartition = thisPartition;
//	      				break;
						}
					else
						{
							/* Keep whichever candidate has the earlier pending work;
							 * on an exact tie prefer the one with more messages. */
							uint64_t thisPartitionNextTs = std::min( NextPartitionEventTs (thisPartition),
																												thisPartition->minMessageTs );
							uint64_t nextPartitionNextTs = std::min( NextPartitionEventTs (nextSharedPartition),
																												nextSharedPartition->minMessageTs );
							if ( (thisPartitionNextTs == nextPartitionNextTs &&
										thisPartition->nMessages > nextSharedPartition->nMessages) ||
										thisPartitionNextTs < nextPartitionNextTs )
								{
										nextSharedPartition = thisPartition;
								}
						}
        }
      i++;
    }

  if(nextSharedPartition != 0)
  	{
  		NS_LOG_DEBUG("Partition " << nextSharedPartition->id << " being processed by thread " << context->id);
  		AtomicCompareAndExchange (&sharedSet->currentPartition, current, nextSharedPartition->id);
  		return nextSharedPartition;
  	}
  else
  	{
  		return 0;
  	}
//  if (i > current)
//    {
//      AtomicCompareAndExchange (&sharedSet->currentPartition, current, i);
//    }
//  if (i == (int) sharedSet->partitions.size ())
//    {
//      return 0;
//    }
//  return sharedSet->partitions[i].partition;
#else /* LOCKLESS_SHARED_PARTITIONS */
  /* Mutex-protected variant: hand out partitions in simple index order. */
  pthread_mutex_lock (&(context->sharedPartitions->lock));
  if (context->sharedPartitions->currentPartition < m_nSharedPartitions)
    {
      uint32_t index = context->sharedPartitions->currentPartition;
      context->sharedPartitions->currentPartition++;
      pthread_mutex_unlock (&(context->sharedPartitions->lock));
      MultiThreadingPartition *partition = context->sharedPartitions->partitions[index];
      return partition;
    }
  else
    {
      pthread_mutex_unlock (&(context->sharedPartitions->lock));
      return 0;
    }
#endif /* LOCKLESS_SHARED_PARTITIONS */
}

/*
 * Worker-thread main loop.  Every iteration has two phases separated by
 * barriers: (1) thread 0 alone computes the global safe time bound
 * m_maxTs (exchanging LBTS messages over MPI in distributed mode), resets
 * the shared partition sets and runs due global events; (2) every thread
 * then processes its dedicated partitions and grabs shared partitions
 * until none with work below the bound remains.
 */
void
MultiThreadedSimulatorImpl::DoRunThread (struct ThreadContext *context)
{
  NS_LOG_FUNCTION (this << context);
  uint32_t nExecutedByThread = 0;
  CLOCK_DEBUGGING_INIT ();
  /* Iterate until every partition (plus one for the global event list)
   * has reported stopped, or no events remain anywhere. */
  while (AtomicGet (&m_nStoppedPartitions) < m_nPartitions + 1 &&
         AtomicGet (&m_nEvents) != 0)
  //while(1)  
    {
      //std::cout<<"&m_nPartitions and &m_nEvents : "<<&m_nPartitions<<&m_nEvents<<std::endl;
      //if (AtomicGet (&m_nStoppedPartitions) >= m_nPartitions + 1 &&
         //AtomicGet (&m_nEvents) == 0)
         //break;
      //std::cout<<AtomicGet (&m_nPartitions)<<"&&&&&&&&&&&&&&&&&&&"<<AtomicGet(&m_nEvents)<<std::endl;
      CLOCK_DEBUGGING_NOW (&start);
      /* Try to get the master lock */

      //std::cout<<"-------------tid--compute m_maxts---"<<pthread_self()<<std::endl;
      /* Phase 1: only thread 0 computes m_maxTs and runs global events. */
      if (context->id == 0)//just one thread compute
        {
          CLOCK_DEBUGGING_NOW (&start2);
#ifdef NS3_MPI
					if(m_isDistributed)
						{
							MpiInterface::ReceiveMessages ();
							MpiInterface::TestSendComplete ();
						}
#endif
          /* Compute global maxTs */
          /* m_maxTs is the minimum over all local partitions of the
           * earliest pending message/event plus that partition's lookahead
           * (minDelay), further bounded by the next global event. */
          m_maxTs = GetMaximumSimulationTs ();
          for (MultiThreadingPartitions::iterator it = m_partitions.begin (); it != m_partitions.end (); ++it)
            {
              MultiThreadingPartition *partition = it->second;
              if(partition->rank == m_rank)
								{
									m_maxTs = std::min (m_maxTs, partition->minMessageTs + partition->minDelay);
                  //std::cout<<"partition->minMessageTs is================"<<partition->minMessageTs<<std::endl;
                  //std::cout<<"partition->minDelay is =================="<<partition->minDelay<<std::endl;
                  //std::cout<<"now m_maxTs is ========1===="<<m_maxTs<<std::endl;
									if (!partition->events->IsEmpty ())
										{
											m_maxTs = std::min (m_maxTs, NextPartitionEventTs (partition) + partition->minDelay);
                      //std::cout<<"now m_maxTs is =====2======="<<m_maxTs<<std::endl;
										}
								}
            }
          if (!m_globalEvents->IsEmpty () && m_globalEvents->PeekNext ().key.m_ts < m_maxTs)
            {
              std::cout<<"**********************gloabal mts"<<std::endl;
            	m_maxTs = m_globalEvents->PeekNext ().key.m_ts;
            }
#ifdef NS3_MPI
          /* Distributed LBTS exchange: gather every rank's local bound and
           * lower m_maxTs to the global minimum. */
          if(m_isDistributed)
						{
							Time smallestTime = Time(m_maxTs);
							LbtsMessage lbts (MpiInterface::GetRxCount (), MpiInterface::GetTxCount (), m_rank, smallestTime);
							m_lbtsMessages[m_rank] = lbts;
							MPI_Allgather (&lbts, sizeof (LbtsMessage), MPI_BYTE, m_lbtsMessages,
														 sizeof (LbtsMessage), MPI_BYTE, MPI_COMM_WORLD);

							// The totRx and totTx counts insure there are no transient
							// messages;  If totRx != totTx, there are transients,
							// so we don't update the granted time.
//							uint32_t totRx = m_lbtsMessages[0].GetRxCount ();
//							uint32_t totTx = m_lbtsMessages[0].GetTxCount ();

							for (uint32_t i = 0; i < m_numRanks; ++i)
								{
									if (m_lbtsMessages[i].GetSmallestTime () < smallestTime)
										{
											smallestTime = m_lbtsMessages[i].GetSmallestTime ();
										}
//									totRx += m_lbtsMessages[i].GetRxCount ();
//									totTx += m_lbtsMessages[i].GetTxCount ();

								}
							NS_LOG_DEBUG("Remote maxTs " << smallestTime.GetNanoSeconds() << " local maxTs == " << m_maxTs);
							if ((uint64_t)smallestTime.GetNanoSeconds() < m_maxTs)
								{
									m_maxTs = smallestTime.GetNanoSeconds();
								}
						}
#endif
          /* Reset shared partitions */
          #if LOCKLESS_SHARED_PARTITIONS
          for (uint32_t i = 0; i < m_nSharedPartitionsSets; ++i)
            {
              struct SharedPartitionsSet *sharedSet = &m_sharedPartitionsSets[i];
              for (std::vector<struct LockLessPartition>::iterator j = sharedSet->partitions.begin ();
                   j != sharedSet->partitions.end (); ++j)
                {
                  /* -1 marks the slot as unclaimed by any thread */
                  j->threadId = -1;
                }
              // reset partition iterator
              sharedSet->currentPartition = 0;
            }
          #else /* LOCKLESS_SHARED_PARTITIONS */
          for (uint32_t i = 0; i < m_nSharedPartitionsSets; ++i)
            {
              m_sharedPartitionsSets[i].currentPartition = 0;
            }
          #endif /* LOCKLESS_SHARED_PARTITIONS */
          /* Process global events */
          //NS_LOG_DEBUG ("Maybe processing global event if before " << m_maxTs);
          if (!m_globalEvents->IsEmpty () && m_globalEvents->PeekNext ().key.m_ts <= m_maxTs)
            {
              /* Set maxTs to the next global event date
               * Otherwise, simulation consistency errors could happen if a
               * global event sent a packet to a given partition scheduled for
               * reception at time nextGlobalTs + delay, while this partition
               * could have already advanced to a time superior to
               * nextGlobalTs + delay. Setting to nextGlobalTs ensures that
               * partitions won't be processed after nextGlobalTs, which
               * partly solves the problem.
               * The best fix here would be to have a global minimum delay
               * and set maxTs to nextGlobalTs + globalMinDelay.
               */
              //m_maxTs = m_globalEvents->PeekNext ().key.m_ts;
              while (!m_globalEvents->IsEmpty () && m_globalEvents->PeekNext ().key.m_ts <= m_maxTs)
                {
                  Scheduler::Event next = m_globalEvents->RemoveNext ();
                  NS_LOG_DEBUG ("Process global event at " << m_globalTs);
                  NS_ASSERT (next.key.m_ts >= m_globalTs);
                  NS_LOG_DEBUG ("Global clock going from " << m_globalTs << " to " << next.key.m_ts << " (during event processing)");
                  m_globalTs = next.key.m_ts;
                  m_globalUid = next.key.m_uid;
                  next.impl->Invoke ();
                  next.impl->Unref ();
                  AtomicExchangeAndAdd (&m_nEvents, -1);
                  nExecutedByThread += 1;
                }
            }
          CLOCK_DEBUGGING_NOW (&end2);
          CLOCK_DEBUGGING_DIFF_AND_ACCUMULATE(&barriers_actual, &diff, &start2, &end2);
          NS_LOG_INFO ("New iteration.");

          //MPI_Barrier(MPI_COMM_WORLD);
        }

      /* Rendezvous: no thread may start processing partitions before
       * thread 0 has published the new m_maxTs. */
      m_barrier->Wait ();

      CLOCK_DEBUGGING_NOW (&end);
      CLOCK_DEBUGGING_DIFF_AND_ACCUMULATE(&barriers, &diff, &start, &end);
      uint32_t nExecutedAtIteration = 0;
      CLOCK_DEBUGGING_NOW (&start);

      //std::cout<<"-------tid--process---"<<pthread_self()<<"------maxTs = "<<m_maxTs<<std::endl;
      /* Process dedicated partitions */
      for (std::list<MultiThreadingPartition *>::iterator it = context->dedicatedPartitions.begin (); it != context->dedicatedPartitions.end (); ++it)
        {
          MultiThreadingPartition *partition = (*it);
          uint32_t nExecuted = ProcessPartition (partition);
          AtomicExchangeAndAdd (&m_nEvents, -nExecuted);
          nExecutedByThread += nExecuted;
          nExecutedAtIteration += nExecuted;
        }
      /* Attempt to process some shared partitions */
      while (true)
        {
          MultiThreadingPartition *partition = GetNextSharedPartition (context);
          if (partition == 0)
            {
              break;
            }
          else
            {
              uint32_t nExecuted = ProcessPartition (partition);
              AtomicExchangeAndAdd (&m_nEvents, -nExecuted);
              nExecutedByThread += nExecuted;
              nExecutedAtIteration += nExecuted;
            }

//          if( AtomicGet(&m_nStoppedPartitions) >= m_nPartitions)
//          	{
//          		break;
//          	}
        }
      CLOCK_DEBUGGING_NOW (&end);
      CLOCK_DEBUGGING_DIFF_AND_ACCUMULATE(&events, &diff, &start, &end);
      NS_LOG_DEBUG ("Thread " << context->id << " processed " << nExecutedAtIteration << " events at current iteration.");

      //if(context->id == 0) MPI_Barrier(MPI_COMM_WORLD);

      /* Rendezvous: thread 0 must not recompute m_maxTs while other
       * threads are still executing this iteration's events. */
      m_barrier->Wait ();
    }
  NS_LOG_INFO ("Thread " << context->id << " processed " << nExecutedByThread << " events.");
  CLOCK_DEBUGGING_PRINT ();
}

void
MultiThreadedSimulatorImpl::Run (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  CLOCK_DEBUGGING_INIT_OVERHEAD ();

#ifdef NS3_MPI
  if (m_isDistributed)
  	{
			m_rank = MpiInterface::GetSystemId ();
			m_numRanks = MpiInterface::GetSize ();
			// Allocate the LBTS message buffer
			m_lbtsMessages = new LbtsMessage[m_numRanks];
			//m_grantedTime = Seconds (0);
  	}
#endif

  for(MultiThreadingPartitions::iterator it = m_partitions.begin (); it != m_partitions.end (); ++it)
  {
  	MultiThreadingPartition *partition = it->second;
  	partition->synchronizer->SetOrigin (partition->currentTs);
  }
  while (!m_initialEvents.empty ())
    {
      struct InitialEvent event = m_initialEvents.front ();
      m_initialEvents.pop ();
      //MultiThreadingPartition *partition = m_partitions[event.context];
      MultiThreadingPartitions::iterator it = m_partitions.find (event.context);
      if(it != m_partitions.end() && it->second->rank == m_rank)		// event belongs to this rank
				{
					MultiThreadingPartition *partition = it->second;
					Scheduler::Event ev;
					ev.impl = event.event;
					ev.key.m_ts = event.timestamp;
					ev.key.m_context = partition->id;
					ev.key.m_uid = partition->uid;
					partition->uid++;
					partition->events->Insert (ev);
				}
    }
  m_nStoppedPartitions = 0;
  m_nPartitions = (int) m_partitions.size ();
  m_running = true;
  m_maxTs = 0;
  switch (m_barrierType)
    {
      case BARRIER_POSIX:
        m_barrier = new PosixBarrier (m_threadsCount);
        break;
      case BARRIER_GLOBALSPIN:
        m_barrier = new GlobalSpinBarrier (m_threadsCount);
        break;
      case BARRIER_TREESPIN:
      default:
        m_barrier = new TreeSpinBarrier (m_threadsCount);
        break;
    }
  m_threads = new ThreadContext[m_threadsCount];
  NS_ASSERT_MSG (m_nSharedPartitionsSets <= m_threadsCount,
                 "Number of shared partitions sets must be lower or equal to "
                 "the number of threads.");
  m_sharedPartitionsSets = new struct SharedPartitionsSet[m_nSharedPartitionsSets] ();
  #if LOCKLESS_SHARED_PARTITIONS == 0
  pthread_mutexattr_t lock_attr;
  pthread_mutexattr_init (&lock_attr);
  for (uint32_t i = 0; i < m_nSharedPartitionsSets; ++i)
    {
      pthread_mutex_init (&(m_sharedPartitionsSets[i].lock), &lock_attr);
    }
  #endif /* LOCKLESS_SHARED_PARTITIONS */
  /* Initialize contexts */
  for (uint32_t i = 0, sharedSetIndex = 0; i < m_threadsCount; ++i)
    {
      struct ThreadContext *context = &m_threads[i];
      context->id = i;
      context->simulator = this;
      context->sharedPartitions = &m_sharedPartitionsSets[sharedSetIndex];
      sharedSetIndex = (sharedSetIndex + 1) % m_nSharedPartitionsSets;
    }
  /* Statically dispatch partitions over threads */
  uint32_t nThreadDedicated = m_partitions.size () * m_threadDedicatedPercent / 100;
  std::cout<<"-----------------nThreadDedicated : "<<nThreadDedicated <<std::endl;
  m_nSharedPartitions = m_partitions.size () - nThreadDedicated;
  uint32_t threadId = 0, partitionIndex = 0;//, sharedSetIndex = 0;
  for (MultiThreadingPartitions::const_iterator it = m_partitions.begin ();
       it != m_partitions.end (); ++it)
    {
      MultiThreadingPartition *partition = it->second;
      if (partitionIndex < nThreadDedicated)
        {
          std::cout<<"-----------------partitionIndex: "<<partitionIndex<<std::endl;
          m_threads[threadId].dedicatedPartitions.push_back (partition);
          threadId = (threadId + 1) % (m_threadsCount-1);//change by zxg
          std::cout<<"----------------threadId : "<<threadId<<std::endl;
        }
      else
        {
          //all switch use the last threads
          m_threads[m_threadsCount-1].dedicatedPartitions.push_back (partition);//add by zxg
          //#if LOCKLESS_SHARED_PARTITIONS
          //struct LockLessPartition item;
          //item.partition = partition;
          //m_sharedPartitionsSets[sharedSetIndex].partitions.push_back (item);
          //#else /* LOCKLESS_SHARED_PARTITIONS */
          //m_sharedPartitionsSets[sharedSetIndex].partitions.push_back (partition);
          //#endif /* LOCKLESS_SHARED_PARTITIONS */
          //sharedSetIndex = (sharedSetIndex + 1) % m_nSharedPartitionsSets;

        }
      partitionIndex++;
    }
  /* Run threads */
  for (uint32_t i = 1; i < m_threadsCount; i++)
    {
      pthread_create (&(m_threads[i].thread), NULL, &RunThread, &(m_threads[i]));
    }
  DoRunThread (&(m_threads[0]));
  /* Join threads */
  for (uint32_t i = 1; i < m_threadsCount; i++)
    {
      pthread_join (m_threads[i].thread, NULL);
    }
  delete [] m_threads;
  delete m_barrier;
  #if LOCKLESS_SHARED_PARTITIONS == 0
  for (uint32_t i = 0; i < m_nSharedPartitionsSets; ++i)
    {
      pthread_mutex_destroy (&(m_sharedPartitionsSets[i].lock));
    }
  #endif /* LOCKLESS_SHARED_PARTITIONS */
  delete [] m_sharedPartitionsSets;
  m_running = false;
}

void
MultiThreadedSimulatorImpl::RunOneEvent (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  /* Find the partition holding the globally earliest pending event and
   * process that single event.  0xffffffff is the "none found" sentinel
   * (no valid partition uses it). */
  uint64_t nextTs = GetMaximumSimulationTs ();
  uint32_t nextPartition = 0xffffffff;
  for (MultiThreadingPartitions::const_iterator it = m_partitions.begin (); it != m_partitions.end (); ++it)
    {
      MultiThreadingPartition *partition = it->second;
      if (partition->events->IsEmpty ())
        {
          continue;
        }
      /* Query the scheduler only once per partition (the original
       * called NextPartitionEventTs twice on a hit). */
      uint64_t ts = NextPartitionEventTs (partition);
      if (ts < nextTs)
        {
          nextTs = ts;
          nextPartition = partition->id;
        }
    }
  if (nextPartition != 0xffffffff)
    {
      ProcessPartitionEvent (m_partitions[nextPartition]);
    }
}

void
MultiThreadedSimulatorImpl::Stop (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  // Immediate (no-delay) stop is not supported by this implementation:
  // callers must use Stop (Time const &) to schedule a stop instead.
  NS_FATAL_ERROR ("MultiThreadedSimulatorImpl::Stop (void)");
}

// Schedule the end of the simulation: one StopOnePartition event is
// scheduled in the current context, plus one per partition owned by this
// MPI rank, so that m_nStoppedPartitions eventually accumulates.
// NOTE(review): the initial global Schedule () plus one event per local
// partition means StopOnePartition can fire (local partitions + 1) times
// per rank -- confirm this matches how m_nStoppedPartitions is compared
// against m_nPartitions in the thread run loop.
void
MultiThreadedSimulatorImpl::Stop (Time const &time)
{
  NS_LOG_FUNCTION (this << time);
  Simulator::Schedule (time, &MultiThreadedSimulatorImpl::StopOnePartition, this);
  for (MultiThreadingPartitions::iterator it = m_partitions.begin(); it != m_partitions.end(); ++it)
    {
      MultiThreadingPartition *partition = it->second;
      // Only partitions owned by this rank get a local stop event.
      if(partition->rank == m_rank)
				{
        	NS_LOG_DEBUG("Partition (" << partition->id << "," << m_rank << ") stop scheduled at " << time.GetNanoSeconds());
					Simulator::ScheduleWithContext (partition->id, time,
																					&MultiThreadedSimulatorImpl::StopOnePartition, this);
				}
    }
}

void
MultiThreadedSimulatorImpl::StopOnePartition (void)
{
  NS_LOG_FUNCTION_NOARGS ();
  // Atomically count one more stopped partition.  Presumably the thread
  // run loop compares m_nStoppedPartitions against m_nPartitions to
  // decide when to terminate (a commented-out check of that form exists
  // there) -- TODO confirm where the counter is consumed.
  AtomicExchangeAndAdd (&m_nStoppedPartitions, 1);
  NS_LOG_DEBUG("Rank " << m_rank << ": " << m_nStoppedPartitions << " out of " << m_nPartitions << " partitions stopped");
}


void
MultiThreadedSimulatorImpl::PushEventMessage (MultiThreadingPartition *partition, uint32_t from, uint64_t timestamp, EventImpl *event)
{
  NS_LOG_FUNCTION (this << partition << from << timestamp << event);

  /* Deliver a cross-partition event: build the message first, then
   * append it to the target partition's inbox under the inbox lock. */
  struct MultiThreadingPartition::EventMessage msg;
  msg.from = from;
  msg.timestamp = timestamp;
  msg.event = event;

  pthread_mutex_lock (&partition->eventMessagesLock);
  /* Track the smallest pending message timestamp for synchronization. */
  if (timestamp < partition->minMessageTs)
    {
      partition->minMessageTs = timestamp;
    }
  partition->nMessages++;
  partition->eventMessages.push (msg);
  if (m_isRealTime)
    {
      /* Wake the partition's synchronizer so it notices the new event. */
      partition->synchronizer->Signal ();
    }
  pthread_mutex_unlock (&partition->eventMessagesLock);
}

//
// Schedule an event for a _relative_ time in the future.
//
//
// Schedule an event for a _relative_ time in the future.
//
EventId
MultiThreadedSimulatorImpl::Schedule (Time const &time, EventImpl *event)
{
  NS_LOG_FUNCTION (this << time << event);

  NS_ASSERT (time.GetTimeStep () >= 0);
  AtomicExchangeAndAdd (&m_nEvents, 1);
  if (g_currentPartition == 0)
    {
      /* No current partition: this is a global event. */
      Scheduler::Event ev;
      ev.impl = event;
      ev.key.m_ts = m_globalTs + (uint64_t) time.GetTimeStep ();
      ev.key.m_context = 0xffffffff; // marker for the global context
      /* Allocate the uid and insert under the same lock: m_uid is
       * shared state, and the original incremented it outside the lock,
       * racing with concurrent global Schedule () calls. */
      pthread_mutex_lock (&m_globalEventsLock);
      ev.key.m_uid = m_uid;
      m_uid++;
      m_globalEvents->Insert (ev);
      pthread_mutex_unlock (&m_globalEventsLock);
      NS_LOG_INFO ("Scheduling global event with timestamp " << ev.key.m_ts);
      return EventId (event, ev.key.m_ts, ev.key.m_context, ev.key.m_uid);
    }
  else
    {
      /* Schedule relative to the calling partition's own clock.  The
       * partition's scheduler and uid counter are only touched from its
       * owning thread, so no lock is taken here. */
      Scheduler::Event ev;
      ev.impl = event;
      ev.key.m_ts = g_currentPartition->currentTs + (uint64_t) time.GetTimeStep ();
      ev.key.m_context = g_currentPartition->id;
      ev.key.m_uid = g_currentPartition->uid;
      g_currentPartition->uid++;
      g_currentPartition->events->Insert (ev);
      if (m_isRealTime)
        {
          /* Wake the synchronizer so realtime pacing sees the event. */
          g_currentPartition->synchronizer->Signal ();
        }
      return EventId (event, ev.key.m_ts, ev.key.m_context, ev.key.m_uid);
    }
}

//
// Schedule an event with the given context to occur at the given _absolute_ time.
//
// Dispatches on two facts: whether the context maps to a known partition
// and whether we are currently executing inside a partition thread
// (g_currentPartition != 0).
void
MultiThreadedSimulatorImpl::ScheduleWithContext (uint32_t context, Time const &time, EventImpl *event)
{
  NS_LOG_FUNCTION (this << context << time << event);

  AtomicExchangeAndAdd (&m_nEvents, 1);
  MultiThreadingPartitions::iterator it = m_partitions.find (context);
  NS_ASSERT ((m_running && it != m_partitions.end ()) || !m_running);

  /* Unknown partition and no current partition :
   * we are outside of Simulator::Run (), buffer event into m_initialEvents */
  if (it == m_partitions.end () && g_currentPartition == 0)
    {
      struct InitialEvent ev;
      ev.context = context;
      ev.timestamp = time.GetTimeStep ();
      ev.event = event;
      m_initialEvents.push (ev);
      NS_LOG_INFO ("Scheduling event at " << time.GetTimeStep () << " with context " << context << " (before Run)");
    }
  /* Known partition but no current partition :
   * we either are inside Simulator::Run processing global events
   * or, at least, after partitions were initialized.
   * Either way, schedule the event to the partition as if it was
   * coming from a global event */
  else if (it != m_partitions.end () && g_currentPartition == 0)
    {
      MultiThreadingPartition *partition = it->second;
      // NOTE: the time is treated as absolute here (no currentTs added),
      // matching the "global event" origin marker 0xffffffff.
      uint64_t timestamp = (uint64_t) time.GetTimeStep ();
      PushEventMessage (partition, 0xffffffff, timestamp, event);
      NS_LOG_INFO ("Scheduling event at " << timestamp << " with context " << context << " (from global event)");
    }
  /* Known partition and we have a current partition :
   * just schedule the event to the partition from the current one */
  else
    {
      // Same partition: fall back to a plain relative Schedule ().
      if(context == g_currentPartition->id)
      	{
      		Schedule(time, event);
      	}
      else
      	{
          // Cross-partition: resolve the (context, current partition)
          // pair to its mapped sub-partition and push a message whose
          // timestamp is absolute (current partition clock + delay).
      		MultiThreadingPartition *subPartition = GetMappedPartition(context, g_currentPartition->id);
      		NS_ASSERT(subPartition != NULL);
					uint64_t timestamp = g_currentPartition->currentTs + time.GetTimeStep ();
					PushEventMessage (subPartition, g_currentPartition->id, timestamp, event);
					NS_LOG_INFO ("Scheduling event at " << timestamp << " with context " << subPartition->id);
      	}
    }
}

/*void MultiThreadedSimulatorImpl::ScheduleWithSubContext(uint32_t context, uint32_t subContext, Time const &time, EventImpl *event)
{
	AtomicExchangeAndAdd (&m_nEvents, 1);
	ContextToPartitionMap::iterator contextIt = m_contextToPartitionMap.find (context);
	MultiThreadingPartitions::iterator it;
	uint32_t mappedContext;
	if(contextIt != m_contextToPartitionMap.end ())
		{
			std::map<uint32_t, uint32_t>::iterator subContextIt = (contextIt->second).find (subContext);
			if(subContextIt != (contextIt->second).end ())
				{
					mappedContext = subContextIt->second;
					it = m_partitions.find (mappedContext);
				}
		}

	NS_ASSERT ((m_running && it != m_partitions.end ()) || !m_running);

	MultiThreadingPartition *partition = it->second;
	uint64_t timestamp = g_currentPartition->currentTs + time.GetTimeStep ();
	PushEventMessage (partition, g_currentPartition->id, timestamp, event);
	NS_LOG_INFO ("Scheduling event at " << timestamp << " with context " << mappedContext);
}*/

EventId
MultiThreadedSimulatorImpl::ScheduleNow (EventImpl *event)
{
  NS_LOG_FUNCTION (this << event);

  // Zero relative delay: the event fires at the current partition
  // (or global) timestamp.
  return Schedule (TimeStep (0), event);
}

EventId
MultiThreadedSimulatorImpl::ScheduleDestroy (EventImpl *event)
{
  NS_LOG_FUNCTION (this << event);

  // uid 2 tags "destroy" events; they live in a dedicated list protected
  // by m_destroyEventsLock rather than in any partition scheduler.
  EventId id (Ptr<EventImpl> (event, false), (uint64_t) Now ().GetTimeStep (), 0xffffffff, 2);
  pthread_mutex_lock (&m_destroyEventsLock);
  m_destroyEvents.push_back (id);
  pthread_mutex_unlock (&m_destroyEventsLock);
  return id;
}

Time
MultiThreadedSimulatorImpl::Now (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  /* Inside a partition thread, the partition clock is authoritative. */
  if (g_currentPartition != 0)
    {
      return TimeStep (g_currentPartition->currentTs);
    }
  /* Outside any partition, report the slowest partition's clock, i.e.
   * the minimum current timestamp across all partitions. */
  uint64_t earliest = GetMaximumSimulationTs ();
  for (MultiThreadingPartitions::const_iterator it = m_partitions.begin (); it != m_partitions.end (); ++it)
    {
      if (it->second->currentTs < earliest)
        {
          earliest = it->second->currentTs;
        }
    }
  return TimeStep (earliest);
}

Time
MultiThreadedSimulatorImpl::RealtimeNow (void) const
{
  /* Realtime is only meaningful from within a partition thread.  Guard
   * against the null g_currentPartition dereference that would otherwise
   * crash when called from outside Run () (Now () makes the same check). */
  NS_ASSERT_MSG (g_currentPartition != 0,
                 "RealtimeNow () called outside of a partition context");
  return TimeStep (g_currentPartition->synchronizer->GetCurrentRealtime ());
}

Time
MultiThreadedSimulatorImpl::GetDelayLeft (const EventId &id) const
{
  NS_LOG_FUNCTION (this << &id);

  /* A pending event's remaining delay is measured against the clock of
   * the partition it belongs to; an expired event has none left. */
  if (!IsExpired (id))
    {
      MultiThreadingPartition *partition = g_currentPartition;
      /* Only the owning partition may ask about its own events. */
      NS_ASSERT (partition->id == id.GetContext ());
      return TimeStep (id.GetTs () - partition->currentTs);
    }
  return TimeStep (0);
}

void
MultiThreadedSimulatorImpl::Remove (const EventId &id)
{
  NS_LOG_FUNCTION (this << &id);

  if (id.GetUid () == 2)
    {
      pthread_mutex_lock (&m_destroyEventsLock);
      // destroy events.
      for (DestroyEvents::iterator i = m_destroyEvents.begin (); i != m_destroyEvents.end (); ++i)
        {
          if (*i == id)
            {
              m_destroyEvents.erase (i);
              break;
            }
         }
      pthread_mutex_unlock (&m_destroyEventsLock);
      return;
    }
  else
    {
      MultiThreadingPartition *partition = g_currentPartition;
      //NS_ASSERT (partition->id == id.GetContext ());???
      if (IsExpired (id))
        {
          return;
        }
      Scheduler::Event event;
      event.impl = id.PeekEventImpl ();
      event.key.m_ts = id.GetTs ();
      event.key.m_uid = id.GetUid ();
      partition->events->Remove (event);
      event.impl->Cancel ();
      // whenever we remove an event from the event list, we have to unref it.
      event.impl->Unref ();
    }
}

void
MultiThreadedSimulatorImpl::Cancel (const EventId &id)
{
  NS_LOG_FUNCTION (this << &id);

  /* Cancelling an already-expired event is a no-op. */
  if (IsExpired (id))
    {
      return;
    }
  id.PeekEventImpl ()->Cancel ();
}

bool
MultiThreadedSimulatorImpl::IsExpired (const EventId &ev) const
{
  NS_LOG_FUNCTION (this << &ev);

  if (ev.GetUid () == 2)
    {
      /* uid 2 marks "destroy" events: they are expired exactly when they
       * are no longer present in the destroy list. */
      bool pending = false;
      pthread_mutex_lock (&m_destroyEventsLock);
      for (DestroyEvents::const_iterator i = m_destroyEvents.begin (); i != m_destroyEvents.end (); ++i)
        {
          if (*i == ev)
            {
              pending = true;
              break;
            }
        }
      pthread_mutex_unlock (&m_destroyEventsLock);
      return !pending;
    }
  if (ev.GetContext () == 0xffffffff)
    {
      /* Global event: compare against the global clock and uid. */
      return ev.PeekEventImpl () == 0
          || ev.GetTs () < m_globalTs
          || (ev.GetTs () == m_globalTs && ev.GetUid () <= m_globalUid)
          || ev.PeekEventImpl ()->IsCancelled ();
    }
  /* Partition event: compare against the owning partition's clock. */
  MultiThreadingPartitions::const_iterator it = m_partitions.find (ev.GetContext ());
  NS_ASSERT (it != m_partitions.end ());
  MultiThreadingPartition *partition = it->second;
  return ev.PeekEventImpl () == 0
      || ev.GetTs () < partition->currentTs
      || (ev.GetTs () == partition->currentTs && ev.GetUid () <= partition->currentUid)
      || ev.PeekEventImpl ()->IsCancelled ();
}

// System ID for non-distributed simulation is always zero
uint32_t
MultiThreadedSimulatorImpl::GetSystemId (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  // NOTE(review): this always returns 0, yet Run () populates m_rank from
  // MpiInterface::GetSystemId () when m_isDistributed is set -- confirm
  // whether distributed runs should return m_rank here instead.
  return 0;
}

Time
MultiThreadedSimulatorImpl::GetMaximumSimulationTime (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  // Time wrapper around the raw maximum timestamp.
  return TimeStep (GetMaximumSimulationTs ());
}

uint32_t
MultiThreadedSimulatorImpl::GetContext (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  /* 0xffffffff denotes the global (no-partition) context. */
  return (g_currentPartition != 0) ? g_currentPartition->id : 0xffffffff;
}

uint64_t
MultiThreadedSimulatorImpl::GetMaximumSimulationTs (void) const
{
  NS_LOG_FUNCTION_NOARGS ();
  // Largest signed 64-bit timestamp, used as an "infinity" sentinel.
  // The LL suffix is standard since C++11 (and a common extension
  // before); std::numeric_limits<int64_t>::max () would be the fully
  // portable spelling.
  return 0x7fffffffffffffffLL;
}

void
MultiThreadedSimulatorImpl::AddPartition (uint32_t context, MultiThreadingPartition *partition)
{
  NS_LOG_FUNCTION (this << context << partition);

  // Register the partition under its context id; the context initially
  // maps to itself for the global (0xffffffff) sub-context.
  m_partitions[context] = partition;
  m_contextToPartitionMap[context][0xffffffff] = context;

  // Each partition gets its own wall-clock synchronizer (used for
  // realtime pacing) and a scheduler from the configured factory.
  partition->synchronizer = CreateObject<WallClockSynchronizer> ();

  SetPartitionScheduler (partition, m_schedulerFactory);
}

/* Clone the partition registered for `context` into a new sub-partition
 * dedicated to events coming from `subContext`, register the mapping and
 * return the newly allocated context id (0 when `context` is unknown). */
uint32_t MultiThreadedSimulatorImpl::AddSubPartition (uint32_t context, uint32_t subContext)
{
	NS_LOG_FUNCTION (this << context << subContext);

	uint32_t mappedContext = 0;

	MultiThreadingPartitions::iterator it = m_partitions.find (context);
	if (it != m_partitions.end () && !m_contextToPartitionMap.empty ())
		{
			MultiThreadingPartition *partition = it->second;
			/* Copy the parent partition's settings into the clone. */
			MultiThreadingPartition *subPartition = new MultiThreadingPartition;
			subPartition->rank = partition->rank;
			subPartition->nMessages = partition->nMessages;
			subPartition->events = 0;
			subPartition->currentUid = 0;
			// uids are allocated from 4.
			// uid 0 is "invalid" events
			// uid 1 is "now" events
			// uid 2 is "destroy" events
			subPartition->uid = 4;
			subPartition->currentTs = partition->currentTs;
			subPartition->minDelay = partition->minDelay;
			/* The mutex attribute object is only needed for this init and
			 * must be destroyed afterwards (the original initialized it
			 * unconditionally and leaked it). */
			pthread_mutexattr_t lock_attr;
			pthread_mutexattr_init (&lock_attr);
			pthread_mutex_init (&subPartition->eventMessagesLock, &lock_attr);
			pthread_mutexattr_destroy (&lock_attr);
			subPartition->synchronizer = CreateObject<WallClockSynchronizer> ();
			subPartition->synchronizer->SetOrigin (subPartition->currentTs);
			SetPartitionScheduler (subPartition, m_schedulerFactory);
			/* Allocate the next free context id and publish the new
			 * partition under the registration lock. */
			pthread_mutex_lock (&m_addPartitionLock);
			mappedContext = (uint32_t) m_partitions.rbegin ()->first + 1;
			m_contextToPartitionMap[context][subContext] = mappedContext;
			subPartition->id = mappedContext;
			m_partitions[mappedContext] = subPartition;
			m_nPartitions++;
			pthread_mutex_unlock (&m_addPartitionLock);
		}

	return mappedContext;
}

/* Resolve (context, subContext) to the partition that should receive
 * events for that pair, falling back to the context itself when no
 * dedicated sub-partition was registered.  Returns NULL when the pair
 * resolves to no known partition. */
MultiThreadingPartition* MultiThreadedSimulatorImpl::GetMappedPartition (uint32_t context, uint32_t subContext)
{
	MultiThreadingPartition* subPartition = NULL;
	ContextToPartitionMap::iterator outerIt = m_contextToPartitionMap.find (context);
	if (outerIt != m_contextToPartitionMap.end ())
		{
			uint32_t mappedContext = context;
			std::map<uint32_t, uint32_t>::iterator innerIt = (outerIt->second).find (subContext);
			if (innerIt != (outerIt->second).end ())
				{
					mappedContext = innerIt->second;
				}
			MultiThreadingPartitions::iterator it = m_partitions.find (mappedContext);
			/* Guard against a stale mapping: the original dereferenced
			 * it->second without checking against end (), which is
			 * undefined behaviour when the mapped context is missing. */
			if (it != m_partitions.end ())
				{
					subPartition = it->second;
				}
		}
	return subPartition;
}

} // namespace ns3
