/* neutronServer.cpp
 *
 * Copyright (c) 2014 Oak Ridge National Laboratory.
 * All rights reserved.
 * See file LICENSE that is included with this distribution.
 *
 * Based on MRK pvDataBaseCPP exampleServer
 *
 * @author Kay Kasemir
 */
#include <iostream>
#include <pv/standardPVField.h>
#include <workerRunnable.h>
#include "neutronServer.h"
#include "nanoTimer.h"
 // #include "TcpSocket.hpp"
#include "SocketRunnable.hpp"
#include "TcpClient.hpp"
#include <unistd.h>
#include "RbcpConfig.hpp"


using namespace epics::pvData;
using namespace epics::pvDatabase;
using namespace std;
using namespace std::tr1;

namespace epics { namespace neutronServer {

NeutronPVRecord::shared_pointer NeutronPVRecord::create(string const & recordName)
{
    FieldCreatePtr fields = getFieldCreate();
    StandardFieldPtr standard = getStandardField();
    PVDataCreatePtr dataFactory = getPVDataCreate();

    // Build the record's top-level structure:
    // timeStamp, proton_charge (NTScalar assembled by hand as a demo),
    // and the two uint arrays time_of_flight and pixel.
    PVStructurePtr structure = dataFactory->createPVStructure(
        fields->createFieldBuilder()
        ->add("timeStamp", standard->timeStamp())
        // proton_charge is assembled field-by-field; the equivalent
        // shortcut would be add("proton_charge", standard->scalar(pvDouble, ""))
        ->addNestedStructure("proton_charge")
            ->setId("epics:nt/NTScalar:1.0")
            ->add("value", pvDouble)
        ->endNested()
        ->add("time_of_flight", standard->scalarArray(pvUInt, ""))
        ->add("pixel", standard->scalarArray(pvUInt, ""))
        ->createStructure()
        );

    NeutronPVRecord::shared_pointer record(new NeutronPVRecord(recordName, structure));
    if (!record->init())
        record.reset();   // init failed: hand back an empty pointer
    return record;
}

/** Construct the record; callers should use create() which also runs init().
 *  @param recordName  PV name of the record
 *  @param pvStructure Data structure built in create()
 */
NeutronPVRecord::NeutronPVRecord(string const & recordName, PVStructurePtr const & pvStructure)
: PVRecord(recordName,pvStructure), pulse_id(0)
{
}

/** Resolve and cache pointers into the record's data structure.
 *  @return true when all expected fields were found, false otherwise
 */
bool NeutronPVRecord::init()
{
    initPVRecord();

    // Cache field pointers so update()/process() avoid repeated lookups
    if (!pvTimeStamp.attach(getPVStructure()->getSubField("timeStamp")))
        return false;

    pvProtonCharge = getPVStructure()->getSubField<PVDouble>("proton_charge.value");
    pvTimeOfFlight = getPVStructure()->getSubField<PVUIntArray>("time_of_flight.value");
    pvPixel = getPVStructure()->getSubField<PVUIntArray>("pixel.value");

    // Every one of the expected fields must have resolved
    return pvProtonCharge.get() != NULL
        && pvTimeOfFlight.get() != NULL
        && pvPixel.get() != NULL;
}

/** Record processing, invoked as part of every update:
 *  refreshes the time stamp and stores the pulse ID in its userTag.
 */
void NeutronPVRecord::process()
{
    // Update timestamp
    timeStamp.getCurrent();
    // pulse_id is unsigned, put into userTag as signed?
    timeStamp.setUserTag(static_cast<int>(pulse_id));
    pvTimeStamp.set(timeStamp);
}

/** Publish one pulse worth of data.
 *  All fields change within one beginGroupPut/endGroupPut so monitoring
 *  clients receive a single consistent update.
 *  @param id     Pulse ID, also placed into the time stamp's userTag via process()
 *  @param charge Proton charge for this pulse
 *  @param tof    Time-of-flight value per event
 *  @param pixel  Pixel ID per event
 */
void NeutronPVRecord::update(uint64 id, double charge,
                             shared_vector<const uint32> tof,
                             shared_vector<const uint32> pixel)
{
    lock();
    try
    {
        beginGroupPut();
        pulse_id = id;
        pvProtonCharge->put(charge);
        pvTimeOfFlight->replace(tof);
        pvPixel->replace(pixel);

        // TODO Create server-side overrun by updating same field
        // multiple times within one 'group put'
        // pvPulseID->put(id);

        // Refresh time stamp / userTag last, then close the group
        process();
        endGroupPut();
    }
    catch(...)
    {
        // Never leave the record locked; re-throw to the caller
        unlock();
        throw;
    }
    unlock();
}

// --------------------------------------------------------------------------------------------
// What follows is the FakeNeutronEventRunnable that creates dummy data.
// Basic profiling by periodically pausing the code in the debugger showed that
// much of the server time was spent populating the pixel and time-of-flight arrays.
// As the number of events was increased, the server hit a CPU limit in the thread
// that created and filled the two arrays.
//
// This implementation therefore provides two worker threads, one each for the
// tof_runnable and pixel_runnable, posting updated data once both deliver a result.
// NOTE: In the current run() those worker threads are constructed but never started;
// event data is instead received through the TCP client and decoded in the loop itself.
// --------------------------------------------------------------------------------------------

/** Runnable that creates an array.
 *  When creating large demo data arrays,
 *  the two arrays can be filled in separate threads / CPU cores.
 */
class ArrayRunnable : public WorkerRunnable
{
public:
    ArrayRunnable()
    : count(0), id(0), realistic(false)  // bool member: use a bool literal, not 0
    {}

    /** Start collecting events (fill array with simulated data).
     *  @param count     Number of events to generate
     *  @param id        Pulse ID used to derive dummy values
     *  @param realistic Generate semi-realistic data instead of constants
     */
    void createEvents(size_t count, uint64 id, bool realistic)
    {
        this->count = count;
        this->id = id;
        this->realistic = realistic;
        startWork();
    }

    /** Wait for data to be filled and return it.
     *  Blocks until the worker thread's doWork() has finished.
     */
    shared_vector<const uint32> getEvents()
    {
        waitForCompletion();
        return data;
    }

protected:
    /** Parameters for new data request: How many events.
     *  NOTE(review): these members are written by createEvents() and read
     *  in the worker thread; presumably startWork()/waitForCompletion()
     *  provide the handshake -- confirm in WorkerRunnable.
     */
    size_t count;
    /** Parameters for new data request: Used to create dummy events */
    uint64 id;
    /** Flag to generate semi-real looking data. */
    bool realistic;
    /** Result of a request for data */
    shared_vector<const uint32> data;
};



/** ArrayRunnable that produces the time-of-flight array. */
class TimeOfFlightRunnable : public ArrayRunnable
{
protected:
    // Fills 'data' with 'count' time-of-flight values (see definition below)
    void doWork();
};

void TimeOfFlightRunnable::doWork()
{
    shared_vector<uint32> tof(count);
    if (this->realistic == false)
        fill(tof.begin(), tof.end(), id);
    else
    {
        uint32 *p = tof.dataPtr().get();
        for (uint32 i = 0; i != tof.size(); ++i)
        {
            uint32 normal_tof = 0;
            for (uint j = 0; j < NS_TOF_NORM; ++j)
                normal_tof += rand() % (NS_TOF_MAX);
            *(p++) = int(normal_tof/NS_TOF_NORM);
        }
    }
    data = freeze(tof);
}

/** ArrayRunnable that produces the pixel ID array. */
class PixelRunnable : public ArrayRunnable
{
public:
    // Measures the time spent filling the array (read by the logging code)
    NanoTimer timer;
protected:
    // Fills 'data' with 'count' pixel IDs (see definition below)
    void doWork();
};

void PixelRunnable::doWork()
{
	// In reality, each event would have a different value,
    // which is simulated a little bit by actually looping over
    // each element.
    uint32 value = id * 10;

    // Pixels created in this thread
    shared_vector<uint32> pixel(count);

    if (this->realistic == false)
    {
        // Set elements via [] operator of shared_vector
        // This takes about 1.5 ms for 200000 elements
        // timer.start();
        // for (size_t i=0; i<count; ++i)
        //   pixel[i] = value;
        // timer.stop();

        // This is much faster, about 0.6 ms, but less realistic
        // because our code no longer accesses each array element
        // to deposit a presumably different value
        // timer.start();
        // fill(pixel.begin(), pixel.end(), value);
        // timer.stop();

        // Set elements via direct access to array memory.
        // Speed almost as good as std::fill(), about 0.65 ms,
        // and we could conceivably put different values into
        // each array element.
        timer.start();
        uint32 *p = pixel.dataPtr().get();
        for (size_t i=0; i<count; ++i)
            *(p++) = value;
        timer.stop();

    }
    else
    {
        //Pixel IDs in two detector banks.
        //Generate random number between NS_ID_MIN1 and NS_ID_MAX1, or between NS_ID_MIN2 and NS_ID_MAX2
        timer.start();
        uint32 *p = pixel.dataPtr().get();
        for (uint32 i = 0; i != pixel.size(); ++i)
        {
            if (i%2 == 0)
                *(p++) = (rand() % (NS_ID_MAX1-NS_ID_MIN1)) + NS_ID_MIN1;
            else
                *(p++) = (rand() % (NS_ID_MAX2-NS_ID_MIN2)) + NS_ID_MIN2;
        }
        timer.stop();
    }

    data = freeze(pixel);
}

/** Set up the event source: stores the update parameters, creates the
 *  shared TCP socket data structures and allocates the RBCP configuration.
 *  @param record       Record to update with event data
 *  @param delay        Seconds between updates
 *  @param event_count  Number of events per update (generator path)
 *  @param random_count Flag stored for setRandomCount()-style behavior
 *  @param realistic    Generate semi-realistic data (generator path)
 */
FakeNeutronEventRunnable::FakeNeutronEventRunnable(NeutronPVRecord::shared_pointer record,
                                                   double delay, size_t event_count, bool random_count, bool realistic)
  : record(record), is_running(true), delay(delay), event_count(event_count), random_count(random_count), realistic(realistic)
{
	createTcpSocketDataInfo();
    #if 0
    // Alternative: RBCP configuration with explicit register values
    pRbcpConfig = new RbcpConfig(200, 20, 40, 20, 40, 140, 150, 90, 90, 50, 0, 0, 1);
    #else

    pRbcpConfig = new RbcpConfig(); // front
    #endif
}


/** Placeholder for RBCP configuration logic; currently empty.
 *  NOTE(review): the #define below is handled by the preprocessor, so
 *  despite sitting inside this function body TIME_DELAY_CMD is visible to
 *  the remainder of this translation unit.
 */
void FakeNeutronEventRunnable::rbcpConfiguration() {
    #define TIME_DELAY_CMD 100000
}

/** Unpack one byte into eight bytes carrying one bit each.
 * @param inbuf  Pointer to the source byte.
 * @param outbuf Receives 8 bytes; outbuf[i] is bit i of *inbuf (LSB first),
 *               so every element is 0 or 1.
 */
void bittobytes(unsigned char *inbuf ,unsigned char *outbuf ){
    for (int i = 0; i < 8; ++i)
        outbuf[i] = (*inbuf >> i) & 0x01;
}

/** Find the first (x, y) bit coincidence in two bit maps.
 *  Scans xbuf for the first element equal to 1, then ybuf for the first
 *  element equal to 1, and combines the two indices into a position.
 * @param xbuf   x bit flags, one byte per wire (0 or 1)
 * @param ybuf   y bit flags, one byte per wire (0 or 1)
 * @param x_size Number of entries in xbuf
 * @param y_size Number of entries in ybuf
 * @return j*y_size + i for the first set x bit i and first set y bit j,
 *         or -1 when no coincidence exists.
 */
int findPosition(unsigned char *xbuf , unsigned char *ybuf , unsigned char x_size , unsigned char y_size ){
    for (unsigned int i = 0; i < x_size; ++i) {
        if (xbuf[i] != 1)
            continue;
        for (unsigned int j = 0; j < y_size; ++j) {
            if (ybuf[j] == 1)
                // NOTE(review): the stride is y_size; for the current square
                // 32x32 maps this equals j*x_size + i, but confirm the
                // intended row stride if the two sizes ever differ.
                return (int)(j * y_size + i);
        }
    }
    return -1;  // no set bit in xbuf, or none in ybuf
}

/** Map one 64-bit raw event word to a detector position.
 *  The upper 32 bits carry the x wire flags, the lower 32 bits the y wire
 *  flags, one bit per wire; the first coincidence found determines the
 *  position (see findPosition()).
 * @param raw_data Raw 64-bit event word from the pulse data
 * @return Encoded position, or (unsigned)-1 = 0xffffffff when
 *         findPosition() reports no coincidence.
 */
unsigned int calcPosition(uint64_t raw_data) {
    uint32_t high_data = ( raw_data & 0xffffffff00000000) >> 32;  // x flags
    uint32_t low_data  = (uint32_t)( raw_data & 0xffffffff);      // y flags
    unsigned char x_pos[32];
    unsigned char y_pos[32];

    // Expand every byte of the two words into 8 one-bit-per-byte flags
    // (replaces eight copy-pasted calls with one loop).
    // NOTE(review): reading the words byte-by-byte makes the bit order
    // depend on host endianness, exactly as the original code did.
    for (int byte = 0; byte < 4; ++byte)
    {
        bittobytes((unsigned char *)(&high_data) + byte, x_pos + 8 * byte);
        bittobytes((unsigned char *)(&low_data) + byte, y_pos + 8 * byte);
    }

    return findPosition(x_pos, y_pos, 32, 32);
}

/** Main processing loop (thread body).
 *  Brings up the RBCP hardware configuration (unless built with SIM_SERVER),
 *  starts the TCP client that fills pTcpSocketDataInfo->pointerRing with
 *  pulses, then for each received pulse converts the raw events into (x,y)
 *  positions and time-of-flight values and publishes them via the record.
 *  Runs until shutdown() clears is_running.
 */
void FakeNeutronEventRunnable::run()
{

    // Worker runnables/threads of the fake-data generator path.
    // NOTE(review): the threads are constructed but never started, and the
    // createEvents() calls further down are commented out -- the generator
    // path is currently disabled in favor of the TCP data path.
    shared_ptr<ArrayRunnable> tof_runnable(new TimeOfFlightRunnable());
    shared_ptr<epicsThread> tof_thread(new epicsThread(*tof_runnable, "tof_processor", epicsThreadGetStackSize(epicsThreadStackMedium)));
    //tof_thread->start();

    shared_ptr<PixelRunnable> pixel_runnable(new PixelRunnable());
    shared_ptr<epicsThread> pixel_thread(new epicsThread(*pixel_runnable, "pixel_processor", epicsThreadGetStackSize(epicsThreadStackMedium)));
    //pixel_thread->start();

    uint64 id = 0;
    size_t packets = 0, slow = 0;

    epicsTime last_run(epicsTime::getCurrent());
    epicsTime next_log(last_run);
    epicsTime next_run;
#ifdef SIM_SERVER
#else
    // Hardware bring-up sequence: stop, reset, commit configuration, start
    pRbcpConfig->stop();
    epicsThreadSleep(0.3);
    pRbcpConfig->reset();
    epicsThreadSleep(0.3);
    
    
    
    epicsThreadSleep(0.3);
    
    pRbcpConfig->configCommit();
    pRbcpConfig->start();

#endif

    // TCP client that produces pulses into pTcpSocketDataInfo->pointerRing
    TcpClient client = TcpClient(pTcpSocketDataInfo);
    client.start();


    while (is_running)
    {
        // Compute time for next run
        next_run = last_run + delay;

        // Wait until then
        // NOTE(review): 'sleep' is computed but never used, so when the
        // pointerRing is empty this loop spins at full CPU -- consider
        // waiting on consumerEvent or sleeping here instead.
        double sleep = next_run - epicsTime::getCurrent();
        int pointerRingLength = pTcpSocketDataInfo->pointerRing->getUsed();
        if (pointerRingLength == 0)
        {
            continue;
        } else {
            // Warn when the consumer is falling behind the TCP producer
            if (pointerRingLength >= 10)
            {
                printf("pop before pointerRing->getUsed is %d\n", pointerRingLength);
            }


            PulseDataRecieve *pPulseDataRecieve = pTcpSocketDataInfo->pointerRing->pop();
            //printf("pop after pointerRing->getUsed is %d\n", pTcpSocketDataInfo->pointerRing->getUsed());
            //pPulseDataRecieve->print();

            PulseData * pPulseData = pPulseDataRecieve->getPulseDataPointer();

            //printf("id = %d\n", pPulseData->ID);
            size_t eventCount = pPulseDataRecieve->getEventDataCount();
            //printf("event count is %d\n", eventCount);
            // Convert each raw event word into an (x,y) position
            shared_vector<uint32> xyPos(eventCount);
             uint32 *p = xyPos.dataPtr().get();
             for (int iCount = 0; iCount < eventCount; ++iCount)
             {
                EventRawdata mEventRawdata = pPulseData->pEventData[iCount];
                uint64_t raw_data = mEventRawdata.event_data;

                unsigned int calc_pos =  calcPosition(raw_data);
                p[iCount] = calc_pos;
                 /* code */
             }
            // get tof result: event time relative to the pulse time
            shared_vector<uint32> topData(eventCount);
            uint32 *ptopData = topData.dataPtr().get();
            uint64_t pulse_time_local0 = pPulseData->time_local0;
            uint64_t pulse_time_local1 = pPulseData->time_local1;
            // Drop the low 3 bits of the fine time before differencing
            pulse_time_local0 = pulse_time_local0 >> 3;
            uint64_t let0 = 0;
            for (int iCount = 0; iCount < eventCount; ++iCount)
            {
                EventRawdata mEventRawdata = pPulseData->pEventData[iCount];
//                uint64_t raw_data = mEventRawdata.event_data;
                uint64_t event_time_1 = mEventRawdata.event_time_1;
                uint64_t event_time_0 = mEventRawdata.event_time_0;
                uint64_t result = 0;
                if (event_time_1 - pulse_time_local1 == 0) {
            //        printf("####event_time_0 = %lld,  pulse_time_local0 = %lld, result = [ %lld ] inscreas = %lld\n",event_time_0,pulse_time_local0, result, result - let0);

                }
                event_time_0 = event_time_0 >> 3;
                // Coarse counter advanced by one: the fine counter wrapped
                // at 124999999 -- presumably a 125 MHz clock rolling over
                // each second; confirm against the firmware spec.
                if (event_time_1 - pulse_time_local1 == 1) {
                    result = 124999999 - pulse_time_local0 + event_time_0 + 1;
 //                   printf("====event_time_0 = %lld,  pulse_time_local0 = %lld, result = [ %lld ] inscreas = %lld\n",event_time_0,pulse_time_local0, result, result - let0);
//                    printf("result %d", result);
                } else {
                    result = event_time_0 - pulse_time_local0;
//                   printf("event_time_0 = %lld,  pulse_time_local0 = %lld, result = [ %lld ] inscreas = %lld\n, ",event_time_0,pulse_time_local0, result, result - let0);
                }
//            printf("event_time_0 = %lld,  pulse_time_local0 = %lld, result = [ %lld ] inscreas = %lld\n",
 //                  event_time_0,pulse_time_local0, result, result - let0
  //                 );
            let0 = result;
            ptopData[iCount] = uint32_t(result & 0xffffffff);
                /* code */
            }

            // Increment the 'ID' of the pulse
            id = pPulseDataRecieve->getPulseDataID();
//            printf("pulse[%5d]  eventCount=%5d\n",id, eventCount);
            delete pPulseDataRecieve;

            // Create fake { time-of-flight, pixel } events,
            // using the ID to get changing values, in parallel threads

            //tof_runnable->createEvents(count, id, realistic);
            //pixel_runnable->createEvents(count, id, realistic);

            // >>>> While array threads are running >>>>
            // Mark this run
            last_run = epicsTime::getCurrent();
            ++packets;

            // Every 10 second, show how many updates we generated so far
            if (last_run > next_log)
            {
            next_log = last_run + 10.0;
            #if 0
            cout << packets << " packets, " << slow << " times slow";
            cout << ", array values set in " << pixel_runnable->timer;
            cout << endl;
            #endif
            slow = 0;
            }

            // Vary a fake 'charge' based on the ID
            double charge = (1 + id % 10)*1e8;


        #if 0
    	for(int i=0; i<tof_data.size();i++) {
    		p[i] = pPulseData->pEventData[i].pos;
    		}
        #endif

    	shared_vector<const uint32> r_xyPos;
    	r_xyPos = freeze(xyPos);
        shared_vector<const uint32> r_topData;
        r_topData = freeze(topData);
        // <<<< Wait for array threads, fetch their data <<<<
        //record->update(id, charge, tof_runnable->getEvents(), pixel_runnable->getEvents());

        // TODO Overflow the server queue by posting several updates.
        // For client request "record[queueSize=2]field()", this causes overrun.
        // For queueSize=3 it's fine.
        record->update(id, charge, r_topData, r_xyPos);
       	}

    }

    pixel_runnable->shutdown();
    tof_runnable->shutdown();
    cout << "Processing thread exits\n";
    processing_done.signal();
    // NOTE(review): pRbcpConfig was allocated in the constructor but is
    // deleted here when the thread exits -- consider freeing it in a
    // destructor instead so it survives a never-started thread.
    delete pRbcpConfig;
}

/** Allocate and initialize the shared TCP socket data info:
 *  a byte ring for raw network data, a consumer event, a mutex, and the
 *  pointer ring that carries received pulses to the processing loop.
 *  NOTE(review): none of these resources are freed anywhere visible here;
 *  acceptable for process-lifetime objects, but worth confirming.
 */
void FakeNeutronEventRunnable::createTcpSocketDataInfo() {
	pTcpSocketDataInfo = new TcpSocketDataInfo();
	pTcpSocketDataInfo->ring = epicsRingBytesCreate(ByteRingSize);
	pTcpSocketDataInfo->consumerEvent = epicsEventMustCreate(epicsEventEmpty);
	pTcpSocketDataInfo->mutex = epicsMutexMustCreate();
	pTcpSocketDataInfo->pointerRing = new epicsRingPointer<PulseDataRecieve>(PointerRingSize);

	}
void FakeNeutronEventRunnable::setDelay(double seconds)
{   // No locking..
    delay = seconds;
}

/** Change the number of events per update.
 *  Plain assignment without locking; the processing loop picks the new
 *  value up on its next iteration.
 */
void FakeNeutronEventRunnable::setCount(size_t count)
{
    this->event_count = count;
}

/** Enable or disable randomized event counts.
 *  Plain assignment without locking; the processing loop picks the new
 *  value up on its next iteration.
 */
void FakeNeutronEventRunnable::setRandomCount(bool random_count)
{
    this->random_count = random_count;
}

/** Ask the processing thread to exit, then wait up to 5 seconds for it.
 *  NOTE(review): is_running is a plain flag written here and polled by
 *  run() without synchronization -- confirm this is acceptable, or make
 *  the flag atomic.
 */
void FakeNeutronEventRunnable::shutdown()
{   // Request exit from thread
    is_running = false;
    processing_done.wait(5.0);
}


}} // namespace neutronServer, epics
