/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */

/* 
 * File:   Udp.cpp
 * Author: ever
 * 
 * Created on 2018年1月28日, 下午8:48
 */

#include "Udp.h"

//static vector<string> empty_vector;

/**
 * Construct a UDP receiver bound to the given address.
 *
 * @param point_conf     point configuration path (not used in this ctor;
 *                       presumably consumed elsewhere — TODO confirm)
 * @param host_and_port  "host:port" string the datagram socket binds to
 *
 * Bind failures are logged and swallowed so construction always completes;
 * the caller then holds an unbound socket.  All members later read by the
 * setters / run()/record() are given safe defaults here.
 */
Udp::Udp(const string &point_conf, const string &host_and_port) {
    _is_record_file = false;
    _is_record_kafka = false;
    _kafka_svr = nullptr;      // configured later via setKafkaServer()
    _thread_no = 0;
    _partition_num = 1;
    _save_data_size = 1800;    // keep only the most recent N samples
    history_root_path = "/tmp";
    last_history_time_path = 0;
    try {
        SocketAddress address(host_and_port);
        _datagram_socket.bind(address, true); // second arg: reuse address
    } catch (...) {
        LOG() << error << "udp bind to " << host_and_port << " failed" << endl;
    }
    _buffer = (char *) malloc(DATAGRAME_BUFFER_LENGTH);
    // run() dereferences _buffer unconditionally — surface allocation
    // failure immediately instead of crashing later.
    if (_buffer == nullptr) {
        LOG() << error << "udp recv buffer alloc failed, size=" << DATAGRAME_BUFFER_LENGTH << endl;
    }
}

// Release the receive buffer allocated in the constructor.
// free(nullptr) is a well-defined no-op, so no guard is needed.
Udp::~Udp() {
    free(_buffer);
    _buffer = nullptr;
}

/**
 * Configure the Kafka output sink.
 *
 * @param iskafka    whether records should be forwarded to Kafka
 * @param kafka_svr  Kafka client used by record(); may be null, in which
 *                   case Kafka output stays disabled
 * @param topic      destination topic name
 * @param partition  partition count, clamped to a minimum of 1
 */
void Udp::setKafkaServer(bool iskafka, KafkaServer *kafka_svr, const string &topic, int partition) {
    _kafka_svr = kafka_svr;
    _topicname = topic;
    _partition_num = (partition < 1) ? 1 : partition;
    // Never enable Kafka output without a server: record() dereferences
    // _kafka_svr unconditionally when _is_record_kafka is set.
    _is_record_kafka = iskafka && kafka_svr != nullptr;
    _kafka_cache.resize(KAFKA_CACHE_MAX_SIZE);
}

// Toggle writing each newly recorded point value to the "point" log file.
void Udp::setRecordFileEnable(bool recordFileEnable) {
    _is_record_file = recordFileEnable;
}

// Assign this receiver's worker-thread index.
void Udp::setThreadNo(int tno) {
    _thread_no = tno;
}

// Return the worker-thread index set via setThreadNo().
int Udp::getThreadNo() {
    return _thread_no;
}

void Udp::setHistoryRootPath(const string &hrp) {
    history_root_path = hrp;
    if (history_root_path.empty()) history_root_path = "/tmp";
}

/**
 * Receive loop: blocks on the datagram socket forever, validates each
 * packet, stores TGN point values into _tgn_cache keyed by tag id and
 * timestamp, then flushes fresh values via record().
 *
 * Only cmd == 1 (TGN point data) is decoded; other commands are logged
 * and skipped.  Any std::exception from the socket or decoder is logged
 * and the loop continues — this method never returns normally.
 */
void Udp::run() {
    for (;;) {   // the original `keepRunning` flag was never cleared
        try {
            int len = _datagram_socket.receiveBytes(_buffer, DATAGRAME_BUFFER_LENGTH);
            _udp_packet.setData((u8_t *) _buffer, len);
            if (!_udp_packet.isValid()) {
                LOG() << debug << "recv packet: udp package len=" << len << " is invalid!" << endl;
                continue;
            }
            auto head = _udp_packet.getHead();
            u32_t num = _udp_packet.getNum();
            time_t _data_time = head->getTime();
            // num is unsigned, so `num <= 0` only ever meant `num == 0`;
            // upper bound is the max point count that fits a datagram.
            if (num == 0 || num > (65534 / 8)) {
                LOG() << debug << "recv packet:" << len << " point_num:" << num << " time:" << _data_time << endl;
                continue;
            }
            u16_t cmd = _udp_packet.getCmd();

            LOG() << debug << "recv packet:" << len << "|" << head->P << head->R
                  << "|" << (int) head->ver << "|" << (int) head->cmd << "|" << (int) head->sno << "|"
                  << head->getPackageNo()
                  << "|" << _data_time << "|" << head->getBodyLength() << "|.." << num << "..|" << endl;

            if (cmd == 1) {
                UdpPacket::TGN *tgn = _udp_packet.getTGN();
                // Write lock protects _tgn_cache while we insert values.
                TL_ThreadRwLock::WLock wl(_rw_lock);
                while (num > 0) {
                    myPointCache &point = _tgn_cache[tgn->getTagId()];
                    myPointValue &pv = point.values[_data_time];
                    pv.quality = tgn->getQuality();
                    pv.value.f = tgn->getValue();
                    pv.ts = _data_time;
                    ++tgn;
                    --num;
                }
            } else {
                LOG() << debug << "undefined cmd:" << cmd << endl;
            }
            // NOTE(review): record() walks _tgn_cache after the write lock
            // above is released — confirm no other thread touches the cache,
            // or widen the lock scope to cover record().
            record();
        } catch (const exception &e) {
            LOG() << e.what() << endl;
        }
    }
}

/**
 * Flush newly arrived point values: for each point whose newest sample is
 * more recent than the last flush, push it to the KKS manager and, when
 * enabled, to the "point" log file and/or to Kafka (batched up to
 * KAFKA_CACHE_MAX_SIZE records per write2Kafka call).  Also trims each
 * point's in-memory history to 5 samples once it exceeds 10, to bound
 * memory use.
 *
 * Record text format (consumed downstream — do not change):
 *   "<tag_id>|<timestamp>|<value>|<quality>"
 */
void Udp::record() {
    enum {
        buffer_size = 256
    };
    char buffer[buffer_size];
    int kafka_cache_id = 0; // pending record count in _kafka_cache

    for (map<int, myPointCache>::iterator it = _tgn_cache.begin(); it != _tgn_cache.end(); ++it) {
        myPointCache &cache = it->second;
        if (cache.values.empty()) {
            continue;
        }

        // values is keyed by timestamp, so rbegin() is the newest sample.
        std::map<time_t, myPointValue>::value_type &v = *cache.values.rbegin();
        if (cache.last_record_ts < v.first) {
            // New data since the last flush for this point.
            const myPointValue &pvalue = v.second;
            KKSManger::getInstance()->addKKSValue(it->first, v.first, pvalue.value.u, pvalue.quality);

            // time_t is wider than int on LP64 platforms: printing it with
            // "%d" was undefined behavior — format it as long instead.
            int len = snprintf(buffer, buffer_size, "%d|%ld|%f|%c",
                               it->first, (long) v.first, pvalue.value.f, pvalue.quality);
            // snprintf returns the would-be length on truncation (or < 0 on
            // error); clamp so append() never reads past the written bytes.
            if (len < 0) len = 0;
            if (len >= buffer_size) len = buffer_size - 1;

            if (_is_record_file) {
                LOG("point") << noop << buffer << endl;
            }

            if (_is_record_kafka) {
                string &point_record = _kafka_cache[kafka_cache_id];
                point_record.clear();
                point_record.append(buffer, len);
                if (++kafka_cache_id == KAFKA_CACHE_MAX_SIZE) {
                    _kafka_svr->write2Kafka(_topicname, _kafka_cache, kafka_cache_id, _partition_num,
                                            WRITE_KAFKA_TYPE_POLL);
                    kafka_cache_id = 0;
                }
            }
            cache.last_record_ts = v.first;
        }

        // Bound per-point memory: once the history grows past 10 samples,
        // drop the oldest entries until only the newest 5 remain.
        if (cache.values.size() > 10) {
            int clear_num = (int) cache.values.size() - 5;
            while (clear_num-- > 0) {
                cache.values.erase(cache.values.begin());
            }
        }
    }

    // Flush any partially filled Kafka batch.
    if (_is_record_kafka && kafka_cache_id > 0) {
        _kafka_svr->write2Kafka(_topicname, _kafka_cache, kafka_cache_id, _partition_num, WRITE_KAFKA_TYPE_POLL);
    }
}
