/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */

/* 
 * File:   EdsPointThread.cpp
 * Author: ever
 * 
 * Created on May 1, 2016, 7:39 AM
 */

#include "EdsPointThread.h"

TL_ThreadLock g_lock; // guards the two global lookup maps below
// wizid -> (owning collector thread, EDS live id); filled by EdsPointThread::initLiveIds()
map<string, pair<EdsPointThread *, int>> _wizid_thread_liveid;
// point tag name (IESS) -> (owning collector thread, EDS live id); mirrors the map above
map<string, pair<EdsPointThread *, int>> _tgn_thread_liveid;
vector<EdsPointThread *> _eds_threads; // all collector threads

//sqlite3 * SQLITE3Ptr = nullptr;
//TL_ThreadLock SQLITE3Lock;

LiveClient* EdsPointThread::initializeClient(const char* version,
                                             const char* host,
                                             unsigned short port) {
    // Create and connect a read/write LiveClient for the given EDS protocol
    // version. Returns an owning raw pointer on success, nullptr on failure
    // (backend library missing, or connection setup failed).
    std::unique_ptr<LiveClient> client;
    try {
        LOG() << debug << "Creating LiveClient for EDS version " << version << endl;
        client.reset(new LiveClient(version));

        LOG() << debug << "Initializing LiveClient connection to " << host << ":" << port << endl;
        client->init(AccessMode_ReadWrite,
                     "0.0.0.0", // bind to all local interfaces
                     0,         // bind to any local port
                     host,
                     port,
                     50);

        LOG() << "LiveClient initialized successfully!" << endl;
        return client.release(); // hand ownership to the caller
    } catch (const BackendNotFoundError& exc) {
        LOG() << debug << "Couldn't load backend library for EDS " <<
                version << " " << exc.what() << endl;
    } catch (const LiveClientError& exc) {
        LOG() << debug << "Couldn't initialize client connection object " <<
                exc.what() << endl;
    }
    return nullptr; // unique_ptr already cleaned up any partial construction
}

ArchClient* EdsPointThread::initializeArchClient(const char* version,
                                                 const char* host,
                                                 unsigned short port) {
    // Create and connect an ArchClient (archive/trend reader) for the given
    // EDS protocol version. Returns an owning raw pointer, or nullptr when
    // the backend library can't be loaded or the connection setup fails.
    std::unique_ptr<ArchClient> client;
    try {
        LOG() << "Creating ArchClient for EDS version " << version << endl;
        client.reset(new ArchClient(version));

        // Initialize as client (read mode).
        LOG() << "Initializing ArchClient connection to " << host << ":" << port << endl;
        client->init("0.0.0.0", // bind to all local interfaces
                     0,         // bind to any local port
                     host,
                     port,
                     50);

        LOG() << "ArchClient initialized successfully!" << endl;
        return client.release(); // hand ownership to the caller
    } catch (const BackendNotFoundError& exc) {
        LOG() << "Couldn't load arch backend library for EDS " << version << "|" << exc.what() << endl;
    } catch (const ArchClientError& exc) {
        LOG() << "Couldn't initialize arch client connection object " << exc.what() << endl;
    }
    return nullptr; // unique_ptr already cleaned up any partial construction
}

EdsPointThread::EdsPointThread(LiveClient* live_client, const vector<string>& points,
                               const vector<string>& wizids) : _live_client(live_client), _arch_client(nullptr),
                                                               _points(points), _wizids(wizids) {
    _is_record_file = true;
    _is_record_kafka = false;
    _save_data_size = 3600; // keep the most recent N samples per point
    _intval_time = 1;       // default: collect once per second
    _kafka_cache.resize(points.size());
    // BUG FIX: initialize every member BEFORE the client validity check.
    // The original returned early on a null client and left _buffer,
    // _switch_data, _datafile_fd, etc. uninitialized, so the destructor
    // would free() an indeterminate pointer (undefined behavior).
    _switch_data = nullptr;
    _buffer = static_cast<char *>(malloc(EDSPOINTBUFFLEN));
    history_root_path = "/tmp";
    last_history_time_path = 0;
    _datafile_fd = -1;
    _block_id_max = -1;
    if (!checkLiveClient()) {
        LOG() << error << "EdsPointThread: init is nullptr" << endl;
    }
}

void EdsPointThread::setArchClient(ArchClient* client) {
    // Attach an archive client; a non-null value switches run() into archive
    // mode. The pointer is stored as-is (the destructor does not delete it).
    _arch_client = client;
}

void EdsPointThread::setPointFileName(const string& fname) {
    // Remember which point-list file this thread serves (shown in
    // status2Json() / zeroTsIds2Json() diagnostics).
    _point_file = fname;
}

void EdsPointThread::setSwitchData(SwitchData* switchData, const string& switch_topic_name, int parttionNum) {
    // Attach the switch-point table together with the kafka topic/partition
    // that recordSwitchData() uses when flushing switch transitions.
    _switch_data = switchData;
    _switch_partition_num = parttionNum;
    _switch_topic_name = switch_topic_name;
}

EdsPointThread::~EdsPointThread() {
    // Release the malloc'd scratch buffer allocated in the constructor.
    if (_buffer != nullptr) {
        free(_buffer);
        _buffer = nullptr;
    }
}

void EdsPointThread::liveRun() {
    // Live-subscription collection loop: resolve live ids, subscribe all
    // points, then once every _intval_time seconds synchronize, sample each
    // point, and record the results, until _running is cleared.
    initLiveIds();
    _elapsed_time = 0;
    try {
        unsetPointInput(); // drop any stale subscription before re-subscribing
        setPointInput();
        do {
            gettimeofday(&_t1, nullptr);
            synchronize();
            getPointsValue(_t1.tv_sec);
            record();
            gettimeofday(&_t2, nullptr);
            // Trim the log of live ids whose TS came back 0: keep only the
            // entries for the two most recent collection timestamps.
            if (_timezero_liveids.size() > 5) {
                map<time_t, vector<int>>::iterator it = _timezero_liveids.end();
                --it;
                --it;
                _timezero_liveids.erase(_timezero_liveids.begin(), it);
            }

            // Normalize this cycle's wall-clock duration to sec + usec.
            int usec = _t2.tv_usec - _t1.tv_usec;
            time_t sec = _t2.tv_sec - _t1.tv_sec;
            if (usec < 0) {
                usec += 1000000;
                --sec;
            }

            _elapsed_time = sec * 1000 + usec / 1000; // milliseconds
            LOG() << debug << "Thread:" << id() << " elapsed time:" << sec << "." << usec
                    << " elapsed_time:" << _elapsed_time << endl;

            // Sleep away the remainder of the collection interval.
            if (_intval_time > 0 && sec < _intval_time) {
                const unsigned long long usleep_time = 1000000 * (_intval_time - sec) - usec;
                usleep(usleep_time);
            }
        } while (_running);
    } catch (const TL_Exp& e) {
        LOG() << error << ":Exception:" << e.what() << endl;
    } catch (const std::exception& e) {
        LOG() << error << ":Exception:" << e.what() << endl;
    } catch (...) {
        LOG() << error << ":Exception:unknow." << endl;
    }
}

void EdsPointThread::archRun() {
    // Archive-mode loop: register one AVG tabular trend per resolved live id,
    // then every 60s fetch the last 5 minutes of trend rows and record them.
    //
    // live_id_pos maps trend-row column index -> live id.
    // BUG FIX: the original constructed this as
    // vector<int> live_id_pos(_liveid_pos.size()) — that many ZERO elements —
    // and then push_back'ed onto the end, so row column i was matched against
    // a padding zero instead of the real live id. reserve() keeps column i of
    // each fetched row aligned with the id registered for it.
    vector<int> live_id_pos;
    live_id_pos.reserve(_liveid_pos.size());
    try {
        initLiveIds();
        for (const auto& liveData: _liveid_pos) {
            const auto trendValue = _arch_client->getTabularTrend(liveData.first, "AVG");
            live_id_pos.push_back(liveData.first);
            _arch_client->addTabularTrend(trendValue);
        }
    } catch (const Error& exc) {
        LOG() << error << exc.what() << endl;
    }
    LOG() << debug << "Thread:" << id() << " " << _point_file
            << " _noliveid_points:" << _noliveid_points.size()
            << " live_id_pos:" << live_id_pos.size() << endl;
    constexpr long range = 5 * 60; // query window: last 5 minutes
    while (_running) {
        try {
            gettimeofday(&_t1, nullptr);
            // Execute the trends over the window with a 60s step size.
            // This call blocks until the trends are ready.
            LOG() << debug << "Thread:" << id() << " " << _point_file
                    << " _noliveid_points:" << _noliveid_points.size()
                    << " executeTabularTrends" << endl;
            _arch_client->executeTabularTrends(_t1.tv_sec - range, range, 60);
            TabularTrendRow rows;
            myPointValue tmp;
            while (_arch_client->fetchTabularTrendRow(&rows)) {
                for (size_t i = 0; i < rows.size(); ++i) {
                    const auto& pv = rows[i];
                    const auto liveid = live_id_pos[i]; // column i belongs to this id
                    myPointCache& cache = _liveid_pos[liveid];
                    fetchArchData(pv, tmp);
                    updateCache(cache, tmp);
                    shrinkCache(cache, tmp);
                }
            }
            record();
            LOG() << debug << "Thread:" << id() << " " << _point_file
                    << " rows.size:" << rows.size()
                    << " sleep:" << 60 << "s" << endl;
            sleep(60);
        } catch (const Error& exc) {
            LOG() << debug << exc.what() << endl;
        }
    }
}

void EdsPointThread::run() {
    // Thread entry point: archive mode when an ArchClient was attached via
    // setArchClient(), live-subscription mode otherwise.
    if (_arch_client != nullptr) {
        archRun();
        return;
    }
    liveRun();
}

bool EdsPointThread::checkLiveClient() const {
    // Guard used before every live-client call; logs on each failed check.
    if (_live_client != nullptr) {
        return true;
    }
    LOG() << error << "Live Client is nullptr." << endl;
    return false;
}

bool EdsPointThread::initLiveIds() {
    if (!checkLiveClient()) return false;
    LOG() << "init points.size= " << _points.size() << " begin." << endl;
    _liveid_pos.clear();
    for (int i = 0; i < _points.size(); ++i) {
        const string& point = _points[i];
        const string& wizid = _wizids[i];
        int liveid = _live_client->findByIESS(point.c_str());
        if (liveid == -1) {
            LOG() << warn << "Point " << point << " find live id failed." << endl;
            _noliveid_points.push_back(point);
        } else {
            myPointCache& cache = _liveid_pos[liveid];
            cache.pos = i; //pos是来找string 名字的
            //_pos_liveid[i] = liveid;
            TL_ThreadLock::Lock lk(g_lock);
            pair<EdsPointThread *, int>& obj_id = _wizid_thread_liveid[wizid];
            obj_id.first = this;
            obj_id.second = liveid;
            _tgn_thread_liveid[point] = obj_id;
        }
    }
    return true;
}

void EdsPointThread::setPointInput() {
    if (!checkLiveClient()) return;
    map<int, myPointCache>::const_iterator it = _liveid_pos.begin();
    while (it != _liveid_pos.end()) {
        try {
            _live_client->setInput(it->first);
        } catch (const Error* exc) {
            LOG() << warn << "Failed to setInput for Point(" << _points[it->second.pos] << ") " << exc->what() << endl;
        }
        ++it;
    }
}

void EdsPointThread::unsetPointInput() {
    if (!checkLiveClient()) return;
    map<int, myPointCache>::const_iterator it = _liveid_pos.begin();
    while (it != _liveid_pos.end()) {
        try {
            _live_client->unsetInput(it->first);
        } catch (const Error* exc) {
            LOG() << warn << "Failed to unsetInput for Point(" << _points[it->second.pos] << ") " << exc->what()
                    << endl;
        }
        ++it;
    }
}

bool EdsPointThread::synchronize() const {
    // Pull fresh input values from the server, repeating while the client
    // still reports an update is required. Returns false on error.
    if (!checkLiveClient()) return false;
    try {
        while (true) {
            _live_client->synchronizeInput();
            if (!_live_client->isUpdateRequired()) break;
        }
        return true;
    } catch (const Error& exc) {
        LOG() << warn << "Failed to synchronize input point value " << exc.what() << endl;
        return false;
    }
}

SwitchData::SwitchNode* EdsPointThread::findSwitchNode(const string& tgn) {
    // Memoized lookup into _switch_data. Misses are cached as nullptr so a
    // repeatedly-unknown tag doesn't re-query the underlying table.
    const map<string, SwitchData::SwitchNode *>::iterator cached = _switch_data_tmp_cache.find(tgn);
    if (cached != _switch_data_tmp_cache.end()) {
        return cached->second;
    }
    SwitchData::SwitchNode* node = _switch_data->findSwitchNode(tgn);
    _switch_data_tmp_cache[tgn] = node; // node may be nullptr (negative cache)
    return node;
}

void EdsPointThread::checkSwitchData(const string& tgn, time_t t, int val) {
    // Detect value transitions on switch points. A node whose tracked value
    // changed is stamped with the new time/value and queued in
    // _changed_switch_nodes for the next recordSwitchData() flush.
    if (_switch_data == nullptr) {
        return;
    }
    SwitchData::SwitchNode* node = findSwitchNode(tgn);
    if (t == 0) {
        // Zero timestamp means "no data": record it as value -1 at the
        // current collection time.
        val = -1;
        t = _t1.tv_sec;
    }
    if (node == nullptr) {
        return;
    }
    const int type = node->switchType(tgn);
    if (type == 1 && val != node->v1) {
        node->t1 = t;
        node->last_uptime = t;
        node->v1 = val;
        _changed_switch_nodes.insert(node);
    } else if (type == 2 && val != node->v2) {
        node->t2 = t;
        node->last_uptime = t;
        node->v2 = val;
        _changed_switch_nodes.insert(node);
    }
}

void EdsPointThread::fetchLiveData(int liveid, myPointValue& value) const {
    // Read one point's current sample from the live client: the "TS" field
    // (timestamp), the point record type, and the analog value + quality.
    value.ts = _live_client->readFieldInt(liveid, "TS");
    value.type = _live_client->pointRT(liveid);
    // NOTE(review): the value is always written through the float union
    // member here, yet updateCache() reads value.i for 'B'/'P' typed points —
    // confirm readAnalog() yields the intended representation for those types.
    value.value.f = _live_client->readAnalog(liveid, &value.quality);
}

void EdsPointThread::fetchArchData(const PointValue& pv, myPointValue& value) const {
    // Copy an archive trend sample into the internal representation.
    // NOTE(review): value.type is not assigned here, and the myPointValue
    // that archRun() passes in is default-constructed — updateCache() then
    // branches on a possibly-indeterminate type. Confirm and initialize.
    value.ts = pv.ts;
    value.value.f = pv.value;
    value.quality = pv.quality;
}

void EdsPointThread::shrinkCache(myPointCache& cache, const myPointValue& tmp) const {
    // Trim samples that fell out of the retention window. The +20 slack
    // means trimming kicks in only once the cache has overshot
    // _save_data_size, instead of erasing on every tick.
    if (cache.values.size() <= _save_data_size + 20) {
        return;
    }
    TL_ThreadRwLock::WLock wl(_rw_lock);
    const time_t cutoff = tmp.ts - _intval_time * _save_data_size;
    cache.values.erase(cache.values.begin(), cache.values.upper_bound(cutoff));
}

void EdsPointThread::updateCache(myPointCache& cache, const myPointValue& tmp) const {
    // Insert (or overwrite) the sample keyed by its timestamp, under the
    // write lock shared with getPointValuesFromCache()/shrinkCache().
    TL_ThreadRwLock::WLock wl(_rw_lock);
    myPointValue& slot = cache.values[tmp.ts];
    slot.type = tmp.type;
    slot.quality = tmp.quality;
    slot.ts = tmp.ts;
    // The value union is copied through the member matching the point type:
    // 'B'/'P' points use the integer member, everything else the float.
    const bool integral = (tmp.type == 'B') || (tmp.type == 'P');
    if (integral) {
        slot.value.i = tmp.value.i;
    } else {
        slot.value.f = tmp.value.f;
    }
}

bool EdsPointThread::getLivePointValue(time_t t, int liveid) {
    // Sample one point from the live client into its per-point cache.
    // Returns true only when the sample carried a non-zero timestamp;
    // zero-TS live ids are logged under collection time t in
    // _timezero_liveids instead of being cached.
    if (!checkLiveClient()) return false;
    try {
        myPointCache& cache = _liveid_pos[liveid];
        if (cache.pos >= _points.size()) {
            // Defensive: the cache entry has no valid back-reference into _points.
            LOG() << warn << "cache.pos=" << cache.pos << "," << cache.values.size() << " not found _points pos."
                    << endl;
            return false;
        }
        myPointValue tmp;
        fetchLiveData(liveid, tmp);

        const string& point = _points[cache.pos];

        // Feed switch-point transition detection (no-op without _switch_data).
        // NOTE(review): tmp.value.f (float) is narrowed to the int parameter
        // here — confirm switch points always carry integral values.
        checkSwitchData(point, tmp.ts, tmp.value.f);

        if (tmp.ts > 0) {
            updateCache(cache, tmp);
            shrinkCache(cache, tmp);
            return true;
        } else {
            //todo controller getdata after 5 min later.
            _timezero_liveids[t].push_back(liveid);
            return false;
        }
    } catch (const Error& exc) {
        LOG() << warn << "Failed to get point value " << exc.what() << endl;
    }
    return false;
}

void EdsPointThread::setHistoryRootPath(const string& hrp) {
    history_root_path = hrp;
    if (history_root_path.empty()) history_root_path = "/tmp";
}

void EdsPointThread::getPointsValue(time_t t) {
    // Refresh all subscribed inputs from the server, then pull each live
    // id's current sample into its cache (t tags this collection cycle).
    if (!synchronize()) {
        LOG() << warn << "LiveClient synchronize failed." << endl;
        return;
    }
    for (const auto& entry : _liveid_pos) {
        getLivePointValue(t, entry.first);
    }
}

void EdsPointThread::record() {
    // Flush the newest cached sample of every point to the configured sinks:
    // the "point" log file, the staged kafka batch, and the compressed KKS
    // store. Only samples newer than each point's last_record_ts are emitted.
    map<int, myPointCache>::iterator it = _liveid_pos.begin();

    enum {
        buffer_size = 256
    };
    char buffer[buffer_size];
    int kafka_cache_id = 0; // number of kafka records staged this pass
    int l = 0;              // formatted length of `buffer`
    map<time_t, string> date_time_str_cache; // memoize timestamp -> string
    TL_Datetime dt;
    unsigned char quality = 0;

    KKSManger* kksManger = KKSManger::getInstance();
    while (it != _liveid_pos.end()) {
        myPointCache& cache = it->second;
        const string& point = _points[cache.pos];
        const string& wizid = _wizids[cache.pos];
        if (cache.values.size() > 0) {
            // Newest sample = last entry of the timestamp-keyed map.
            std::map<int, myPointValue>::value_type& v = *cache.values.rbegin();
            if (cache.last_record_ts < v.first) {
                const myPointValue& pvalue = v.second;
                if (pvalue.type != 'B' && pvalue.type != 'P') {
                    l = snprintf(buffer, buffer_size, "%f|%c", pvalue.value.f, pvalue.quality);
                    quality = pvalue.quality | 0x80; // high bit flags a float payload
                } else {
                    l = snprintf(buffer, buffer_size, "%d|%c", pvalue.value.i, pvalue.quality);
                    quality = pvalue.quality;
                }
                // Timestamp formatting could be split off to a separate
                // thread to improve throughput.
                string& dtstr = date_time_str_cache[v.first];
                if (dtstr.size() == 0) {
                    dt.setTime(v.first);
                    dtstr = dt.toString();
                }

                if (_is_record_file) {
                    LOG("point") << noop << point << "|" << wizid << "|" << dtstr << "|" << buffer << endl;
                }
                if (_is_record_kafka) {
                    // Stage "point|wizid|datetime|value|quality" into the
                    // preallocated kafka cache slot.
                    string& point_record = _kafka_cache[kafka_cache_id];
                    point_record.clear();
                    point_record.assign(point);
                    point_record.append("|");
                    point_record.append(wizid);
                    point_record.append("|");
                    point_record.append(dtstr);
                    point_record.append("|");
                    point_record.append(buffer, l);
                    ++kafka_cache_id;
                }

                // Compressed store: (u32 id, u32 time, u32 raw value, u8 quality).
                // value.i carries the float's bit pattern for analog points;
                // the 0x80 quality bit set above tells readers to reinterpret.
                u32_t kid = kksManger->getKKSId(point);
                kksManger->addKKSValue(kid, v.first, pvalue.value.i, quality);
                //end compress, add by ever 20170410
                cache.last_record_ts = v.first;
            }
        }
        ++it;
    }

    if (_is_record_kafka && kafka_cache_id > 0) {
        _kafka_svr->write2Kafka(_topicname, _kafka_cache, kafka_cache_id, _partition_num, WRITE_KAFKA_TYPE_POLL);
    }

    recordSwitchData();
}

void EdsPointThread::recordSwitchData() {
    // Flush changed switch nodes (collected by checkSwitchData) to kafka as
    // "eqid|tgn1|last_uptime|v1|tgn2|last_uptime|v2".
    if (!_is_record_kafka) {
        return;
    }
    set<SwitchData::SwitchNode *>::iterator it = _changed_switch_nodes.begin();
    int kafka_cache_id = 0;

    enum {
        tmpsize = 256
    };
    char tmp[tmpsize];
    // NOTE(review): _kafka_cache was sized to _points.size() in the
    // constructor; if more switch nodes than points ever change, the
    // indexing below overruns the vector — confirm that invariant holds.
    // NOTE(review): _changed_switch_nodes is never cleared here, so every
    // previously-changed node appears to be re-sent on each call — confirm
    // this is intended.
    while (it != _changed_switch_nodes.end()) {
        SwitchData::SwitchNode* node = *it;
        string& switch_record = _kafka_cache[kafka_cache_id];

        switch_record.assign(node->eqid);
        switch_record.append("|");
        switch_record.append(node->tgn1);
        // Both halves deliberately use last_uptime (not t1/t2) as timestamp.
        int i = snprintf(tmp, tmpsize, "|%lld|%lld|", node->/*t1*/last_uptime, node->v1);
        switch_record.append(tmp, i);

        switch_record.append(node->tgn2);
        i = snprintf(tmp, tmpsize, "|%lld|%lld", node->/*t2*/last_uptime, node->v2);
        switch_record.append(tmp, i);
        ++kafka_cache_id;
        ++it;
    }
    _kafka_svr->write2Kafka(_switch_topic_name, _kafka_cache, kafka_cache_id, _switch_partition_num,
                            WRITE_KAFKA_TYPE_POLL);
}

void EdsPointThread::setInterval(int intval) {
    // Collection period in seconds; non-positive requests are clamped to 1.
    _intval_time = (intval > 0) ? intval : 1;
}

void EdsPointThread::setThreadNo(int tno) {
    // Assign this collector's ordinal (reported by status2Json()).
    _thread_no = tno;
}

int EdsPointThread::getThreadNo() {
    // Ordinal previously set via setThreadNo().
    return _thread_no;
}

void EdsPointThread::setKafkaServer(bool iskafka, KafkaServer* kafka_svr, const string& topic, int partition) {
    // Configure the kafka sink used by record(); iskafka toggles it on/off.
    _kafka_svr = kafka_svr;
    _topicname = topic;
    _partition_num = partition;
    _is_record_kafka = iskafka;
}

bool EdsPointThread::getPointValuesFromCache(int liveid, std::map<int, myPointValue>& values) {
    TL_ThreadRwLock::RLock rl(_rw_lock);
    map<int, myPointCache>::const_iterator it = _liveid_pos.find(liveid);
    if (it == _liveid_pos.end()) return false;
    values = it->second.values; //copy
    return true;
}

size_t EdsPointThread::getValidIdSize() {
    // Number of points successfully resolved to a live id.
    return _liveid_pos.size();
}

size_t EdsPointThread::getInvalidIdSize() {
    // Number of points whose live-id lookup failed in initLiveIds().
    return _noliveid_points.size();
}

string EdsPointThread::status2Json() {
    // One-line pseudo-JSON status summary for diagnostics.
    ostringstream os;
    os << "{file:'" << _point_file
       << "',threadAddr:'" << (void *) this
       << "',threadNo:" << _thread_no
       << ",intval:" << _intval_time
       << ",validIdSize:" << getValidIdSize()     // resolved points
       << ",InvalidIdSize:" << getInvalidIdSize() // unresolved points
       << ",ZeroTsSize:";
    if (_timezero_liveids.empty()) {
        os << "'no data'";
    } else {
        os << _timezero_liveids.rbegin()->second.size(); // points whose TS was 0 last cycle
    }
    os << ",elapsedTime:" << _elapsed_time << "}";
    return os.str();
}

void EdsPointThread::zeroTsIds2Json(ostream& os) {
    // Emit the tag names whose TS was 0 in the most recent collection cycle
    // as a pseudo-JSON array, wrapping every 20 names.
    os << "{file:'" << _point_file << "',";
    os << "zerotgn:[";
    if (_timezero_liveids.size() > 0) {
        // Entries of _timezero_liveids are created by push_back (see
        // getLivePointValue), so the newest vector is non-empty and the
        // --end below is safe.
        const vector<int>& zvs = _timezero_liveids.rbegin()->second;

        vector<int>::const_iterator it = zvs.begin();
        vector<int>::const_iterator end = zvs.end();
        --end; // the final element is printed separately, without a comma
        int i = 0;
        while (it != end) {
            os << "'" << _points[_liveid_pos[*it].pos] << "',";
            ++it;
            if (++i % 20 == 0) {
                os << "\n";
            }
        }
        if (it != zvs.end()) {
            os << "'" << _points[_liveid_pos[*it].pos] << "'";
        }
    }
    os << "]}";
}

void EdsPointThread::history(ostream& os, const string& tgn, time_t stime, time_t etime) {
    // Stream the stored KKS history of point `tgn` over [stime, etime] as a
    // pseudo-JSON object. The data array is delta-encoded:
    // [0, v0, dt1, v1, dt2, v2, ...] where each dt is the sample's offset
    // from the first timestamp (emitted as "stime").
    //
    // BUG FIX: `type` was declared uninitialized (`char type, quality='U';`)
    // yet streamed into the output below — an indeterminate value (UB).
    // Both placeholders now default to 'U'.
    const char type = 'U';
    const char quality = 'U';
    vector<KKSManger::KKS_Value> values;
    KKSManger* kksManger = KKSManger::getInstance();
    const u32_t kid = kksManger->getKKSId(tgn);
    kksManger->getKKSHistory(kid, stime, etime, values);

    if (values.empty()) {
        os << "{retcode:-1,retmsg=\"kid=" << kid << ",vsize=" << values.size() << "\"}";
        return;
    }

    os << "{retcode:0,type:\"" << type << "\",quality:\"" << quality << "\",tgn:\"" << tgn << "\",";
    TL_Datetime dt(stime);
    os << "range:\"" << dt.toString() << "|";
    dt.setTime(etime);
    os << dt.toString() << "\",";

    auto it = values.begin();
    const time_t stt = it->tm; // base timestamp for the delta encoding
    os << "stime:" << stt << ",data:[0,";
    // The 0x80 quality bit marks a float payload stored in the integer
    // value slot (set by record() for non-'B'/'P' points).
    if (it->quality & 0x80) {
        os << *(float *) (&it->value); // reinterpret the stored float bits
    } else {
        os << it->value;
    }
    for (++it; it != values.end(); ++it) {
        os << "," << it->tm - stt << ",";
        if (it->quality & 0x80) {
            os << *(float *) (&it->value);
        } else {
            os << it->value;
        }
    }
    os << "]}";
}
