# -*- coding: utf-8 -*-
"""
Created on 2016/5/11 16:19

@author: 'Albert'
"""
import traceback

import os
import time
import random
import requests
import pandas as pd
import logging
import logging.config
import logging.handlers
import SocketServer
import struct
import pickle

from functools import partial
from sqlalchemy import create_engine
from multiprocessing import Process
from multiprocessing import Pool as ProcessPool
from bosonnlp import BosonNLP, HTTPError, TaskNotFoundError, TaskError, TimeoutError


# Configure logging for this (main) process from the INI file; worker
# processes forward their records to the TCP log server started in __main__.
logging.config.fileConfig('logging.ini')

MAX_PROCESSORS = 9  # worker process pool size — tune to CPU core count
MAX_COROUTINES = 50
# NOTE(review): credentials are hardcoded in source — consider moving to
# config/environment. Port 33060 presumably a local tunnel; verify.
MYSQL = 'root:root@127.0.0.1:33060/QRSETS?charset=utf8'


class LogStreamHandler(SocketServer.StreamRequestHandler):
    """Handles one client connection of the TCP logging server.

    Wire format (as produced by logging.handlers.SocketHandler): a 4-byte
    big-endian length prefix followed by a pickled LogRecord attribute dict.
    """

    def handle(self):
        """Read framed LogRecords off the socket until the peer disconnects.

        Fixes two defects of the naive recv() loop: a header arriving in
        fewer than 4 bytes was previously dropped (desynchronizing the
        stream), and a peer closing mid-payload caused an infinite loop on
        zero-byte recv() returns.
        """
        while True:
            header = self._recv_exactly(4)
            if header is None:
                break
            slen = struct.unpack('>L', header)[0]
            payload = self._recv_exactly(slen)
            if payload is None:
                break
            record = logging.makeLogRecord(self.unPickle(payload))
            self.handleLogRecord(record)

    def _recv_exactly(self, n):
        """Read exactly n bytes; return None if the connection closes early."""
        buf = b''
        while len(buf) < n:
            data = self.connection.recv(n - len(buf))
            if not data:  # peer closed the socket
                return None
            buf += data
        return buf

    def unPickle(self, data):
        # SECURITY: pickle.loads executes arbitrary code from the peer.
        # Acceptable only because the server binds to 127.0.0.1 — never
        # expose this port to an untrusted network.
        return pickle.loads(data)

    def handleLogRecord(self, record):
        # EVERY record gets logged: Logger.handle is normally called AFTER
        # logger-level filtering, so filter at the client end to save
        # cycles and network bandwidth.
        svr_logger = logging.getLogger('svr_main')
        svr_logger.handle(record)


class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
    """Threaded TCP server that receives pickled log records.

    One thread per client connection; records are dispatched by the
    configured handler (LogStreamHandler by default). Suitable for testing.
    """

    allow_reuse_address = 1

    def __init__(self, host='127.0.0.1',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogStreamHandler):
        SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = 0      # set truthy to stop serve_until_stopped()
        self.timeout = 1    # seconds per select() poll
        self.logname = None

    def serve_until_stopped(self):
        """Poll the listening socket and dispatch requests until aborted."""
        import select
        stopped = False
        while not stopped:
            ready = select.select([self.socket.fileno()], [], [],
                                  self.timeout)[0]
            if ready:
                self.handle_request()
            stopped = self.abort


def get_api_status(api_token):
    """Return the BosonNLP rate-limit status (parsed JSON) for a token."""
    url = 'http://api.bosonnlp.com/application/rate_limit_status.json'
    response = requests.get(url, headers={'X-Token': api_token})
    return response.json()


def check_api(response, api_token):
    """Return the smallest remaining request count across a token's limits.

    response:  an HTTP response whose JSON body has a 'limits' mapping
               (may be None when the grequests fetch failed).
    api_token: the token the response belongs to (for logging only).

    Returns 0 when the token is exhausted (any limit at zero), when no
    limits are reported, or when the response is missing/malformed — so
    callers can treat 0 uniformly as "unusable".
    """
    try:
        limits = response.json().get('limits') or {}
        remains = []
        for name in limits:
            remaining = limits[name].get('count-limit-remaining')
            if remaining == 0:
                return 0
            remains.append(remaining)
        if not remains:
            # Previously this fell through to min([]) and relied on the
            # ValueError being swallowed; handle the empty case explicitly.
            return 0
        clilogger.debug("{0} remains: {1}".format(api_token, str(remains)))
        return min(remains)
    except Exception:
        # Narrowed from a bare except: still best-effort (a dead token is
        # just unusable), but no longer swallows SystemExit/KeyboardInterrupt.
        clilogger.error(traceback.format_exc())
        return 0


def check_api_wrapper(tokens):
    """Fetch rate-limit status for each token concurrently.

    Returns a list of remaining-request counts, one per token, in order.
    """
    import grequests
    url = 'http://api.bosonnlp.com/application/rate_limit_status.json'
    pending = [grequests.get(url, headers={'X-Token': tok}) for tok in tokens]
    responses = grequests.map(pending)
    # py2 map returns a list, which is what the multiprocessing Pool
    # caller expects to pickle back.
    return map(check_api, responses, tokens)


def get_able(key_dic):
    """Filter out tokens with no remaining quota, warning on each one dropped.

    key_dic: dict mapping api_token -> remaining request count.
    Returns a dict containing only the entries with a positive remainder.
    """
    usable = {}
    for token, remain in key_dic.iteritems():
        if remain > 0:
            usable[token] = remain
        else:
            clilogger.warn(token + " count limit exceeded")
    return usable


def get_lr(mysql):
    """Return (left, right): the RAWID bounds of rows not yet analysed.

    mysql: SQLAlchemy DSN fragment, e.g. 'user:pwd@host:port/db'.

    Rows in dt_clear_data with TAG=1 and RAWID greater than the largest
    RAWID already stored in the bosonnlp results table are pending.
    NOTE(review): when nothing is pending, min/max presumably come back
    NULL (None/NaN in pandas) — verify callers handle that.
    NOTE(review): SQL is built by string concatenation; safe here only
    because `l` is a numeric value read back from the database itself.
    """
    engine = create_engine('mysql+mysqlconnector://' + mysql)
    # Largest RAWID already processed (None when the results table is empty).
    tmp = pd.read_sql_query("select max(RAWID) as r from bosonnlp", engine)
    l = tmp.iloc[0]['r']
    if l is None:
        l = 0
    # Pending-row count — used for the log message only.
    total = pd.read_sql_query(
        "select count(*) as c from dt_clear_data where TAG=1 and RAWID>" + str(l), engine).iloc[0]['c']
    clilogger.info("total row: " + str(total))
    tmp = pd.read_sql_query(
        "select min(RAWID) as l , max(RAWID) as r from dt_clear_data where TAG=1 and RAWID>" + str(l), engine)
    l, r = tmp.iloc[0]['l'], tmp.iloc[0]['r']
    return l, r


def get_task(ava, l, r):
    """Split the RAWID range [l, r) into per-token work slices.

    ava: dict mapping api_token -> remaining request quota.
    l, r: inclusive-left / exclusive-right RAWID bounds to cover.

    Each token is given a slice no larger than its quota; tokens are
    consumed until the range is covered or quotas run out (any leftover
    range is simply not assigned this run).

    Returns a list of (api_token, left, right) tuples.
    """
    tasks = []
    # .items() instead of py2-only .iteritems(): identical behavior on
    # Python 2 and forward-portable. Dict iteration order is arbitrary on
    # py2, which is fine — tokens are interchangeable.
    for token, quota in ava.items():
        if l + quota < r:
            tasks.append((token, l, l + quota))
            l += quota
        else:
            tasks.append((token, l, r))
            break
    return tasks


# noinspection PyTypeChecker
def nlp_(api_token, engine, data):
    st = time.time()
    nlp  = BosonNLP(api_token)
    text = data['TEXT'].tolist()
    # noinspection PyBroadException
    try:
        # sentiment analysis
        tmp = nlp.sentiment(text, model='news')
        sentiment = pd.DataFrame(tmp)
        sentiment.columns = ['positive', 'negative']
        # text classify
        class_set = {0: '体育', 1: '教育', 2: '财经', 3: '社会', 4: '娱乐', 5: '军事', 6: '国内', 7: '科技', 8: '互联网',
                     9: '房产', 10: '国际', 11: '女人', 12: '汽车', 13: '游戏'}
        tmp = nlp.classify(text)
        classify = pd.DataFrame(tmp)
        classify.columns = ['classify']
        classify['classify'] = classify['classify'].map(class_set)
        # extract keywords
        tmp = nlp.extract_keywords(text)
        kw_list = map(str, tmp)
        keywords = pd.DataFrame(kw_list)
        keywords.columns = ['keywords']
        # 命名实体识别（NER）
        tmp = nlp.ner(text, sensitivity=5)
        ner_list = map(str, tmp)
        ner = pd.DataFrame(ner_list)
        ner.columns = ['ner']
        # summary
        summary_list = []
        for index, row in data[['TITLE', 'TEXT']].iterrows():
            title, content = row
            summary_list.append(nlp.summary(title, content, 50))
        summary = pd.DataFrame(summary_list)
        summary.columns = ['summary']

        result_df = pd.concat([data[['ID', 'RAWID']], sentiment, classify, keywords, ner, summary], axis=1)
        result_df.to_sql('bosonnlp', engine, if_exists='append', index=False)
    except TimeoutError, e:
        clilogger.error("PID:{0} - 分析任务超时。- {1}".format(os.getpid(), e))
        clilogger.error("PID:{0} - {1} - {2} status: {3}".format(os.getpid(), type(e), nlp.token,
                                                          get_api_status(nlp.token)))
    except TaskError, e:
        clilogger.error("PID:{0} - 分析任务出错。- {1}".format(os.getpid(), e))
        clilogger.error("PID:{0} - {1} - {2} status: {3}".format(os.getpid(), type(e), nlp.token,
                                                          get_api_status(nlp.token)))
    except TaskNotFoundError, e:
        clilogger.error("PID:{0} - 任务不存在。- {1}".format(os.getpid(), e))
        clilogger.error("PID:{0} - {1} - {2} status: {3}".format(os.getpid(), type(e), nlp.token,
                                                          get_api_status(nlp.token)))
    except HTTPError, e:
        clilogger.error("PID:{0} - An HTTP error occurred.- {1}".format(os.getpid(), e))
        clilogger.error("PID:{0} - {1} - {2} status: {3}".format(os.getpid(), type(e), nlp.token,
                                                          get_api_status(nlp.token)))
    except Exception, e:
        clilogger.error("PID:{0} - An Exception occurred. {1} - {2}".format(os.getpid(), e, traceback.format_exc()))
        clilogger.error("PID:{0} - {1} - {2} status: {3}".format(os.getpid(), type(e), nlp.token,
                                                          get_api_status(nlp.token)))
    et = time.time()
    clilogger.debug("PID:{0} nlp_ interval: {1}".format(os.getpid(), (et-st)))


def analysis(mysql, task):
    """Analyse one token's RAWID slice with gevent-concurrent API calls.

    mysql: MySQL DSN fragment, e.g. 'user:pwd@host:port/db?charset=utf8'.
    task:  (api_token, l, r) tuple — process rows with l <= RAWID < r.

    The slice is cut into `block`-row chunks; each chunk runs in its own
    greenlet so the BosonNLP HTTP round-trips overlap.
    """
    import gevent
    # PEP 3113: tuple unpacking in the parameter list is Python-2-only
    # syntax; unpack explicitly instead. Call sites (partial + Pool.map)
    # are purely positional, so this is fully backward compatible.
    api_token, l, r = task
    greenlets = []
    num = r - l  # rows believed remaining; decremented as chunks are fetched
    clilogger.info("PID:{4} - start task, api_token: {0} left: {1} right: {2} total: {3}"
                .format(api_token, l, r, num, os.getpid()))
    engine = create_engine('mysql+mysqlconnector://' + mysql)
    block = 50  # rows per chunk / per greenlet
    # Full-sized chunks first ...
    while l < r and l + block < r:
        sql = "select ID, RAWID, TITLE, TEXT from dt_clear_data where TAG=1 and RAWID >= {0:d} and RAWID < {1:d}" \
            .format(l, l + block)
        data = pd.read_sql_query(sql, engine)
        rows = data.shape[0]
        if rows > 0:
            clilogger.debug("PID:{0} - {1} data.shape: {2}".format(os.getpid(), sql, data.shape))
            greenlets.append(gevent.spawn(nlp_, api_token, engine, data))
        l += block
        num -= rows
    # ... then the final partial chunk, if any range remains uncovered.
    if l < r and num > 0:
        sql = "select ID, RAWID, TITLE, TEXT from dt_clear_data where TAG=1 and RAWID >= {0:d} and RAWID < {1:d}" \
            .format(l, r)
        data = pd.read_sql_query(sql, engine)
        rows = data.shape[0]
        if rows > 0:
            clilogger.debug("PID:{0} - {1} data.shape: {2}".format(os.getpid(), sql, data.shape))
            greenlets.append(gevent.spawn(nlp_, api_token, engine, data))
            num -= rows
    # A finite timeout could cut execution time at the cost of losing some
    # data; None waits for every greenlet to finish.
    gevent.joinall(greenlets, timeout=None)
    clilogger.info("PID:{0} - {1} finish, remain: {2}".format(os.getpid(), api_token, num))


def logserver_main():
    tcpserver = LogRecordSocketReceiver()
    print 'start TCP log server...'
    tcpserver.serve_until_stopped()


if __name__ == '__main__':
    # Start the TCP log server in a daemon child process so that all worker
    # processes can forward their log records to a single sink.
    logserverd = Process(target = logserver_main)
    logserverd.daemon = True
    logserverd.start()

    clilogger = logging.getLogger('cli_main')
    print 'start BosonNLP...'

    # key.csv: the index column holds the BosonNLP API tokens.
    # NOTE(review): DataFrame.from_csv is deprecated in newer pandas;
    # pd.read_csv('key.csv', index_col=0) is the modern equivalent.
    API_TOKEN_DF = pd.DataFrame.from_csv('key.csv')
    API_TOKENS   = list(API_TOKEN_DF.index.values.tolist())
    random.shuffle(API_TOKENS)  # spread load over tokens run-to-run

    clilogger.info("total API_TOKEN: {0}".format(len(API_TOKENS)))

    # Chunk the token list across the pool. '/' is floor division on
    # Python 2 ints, so step is a whole chunk size (min 1).
    if len(API_TOKENS) > MAX_PROCESSORS * 2:
        step = len(API_TOKENS)/MAX_PROCESSORS
    else:
        step = 1

    jobs = [API_TOKENS[i: i + step] for i in xrange(0, len(API_TOKENS), step)]
    # Query every token's remaining quota in parallel worker processes.
    pool = ProcessPool(processes=MAX_PROCESSORS)
    rs = [pool.apply_async(check_api_wrapper, (job,)) for job in jobs]
    pool.close()
    pool.join()

    # Flatten per-job result lists back into one remains-per-token list;
    # order matches API_TOKENS because jobs preserve slicing order.
    remains = [i for ms in rs for i in ms.get()]

    clilogger.info("remains: " + str(remains))

    API_TOKEN_DIC = dict(zip(API_TOKENS, remains))
    available = API_TOKEN_DIC
    if remains.count(0) > 0:
        # Drop exhausted tokens (remaining quota == 0).
        available = get_able(API_TOKEN_DIC)

    # Persist the quota snapshot alongside the token list.
    API_TOKEN_DF = pd.concat([API_TOKEN_DF, pd.DataFrame.from_dict(API_TOKEN_DIC, orient='index')], axis=1)
    API_TOKEN_DF.columns = ['remain']

    if len(available) > 0:
        left, right = get_lr(MYSQL)

        clilogger.info("left: {0}, right: {1}".format(left, right))

        # Slice [left, right) into per-token tasks sized by quota and fan
        # them out over the pool (each worker runs gevent greenlets).
        tasks = get_task(available, left, right)
        task_df = pd.DataFrame(tasks, columns=['key', 'left', 'right'])
        API_TOKEN_DF = API_TOKEN_DF.join(task_df.set_index('key'))
        API_TOKEN_DF.to_csv('API_TOKEN_DF.csv')

        partial_analysis = partial(analysis, MYSQL)

        pool = ProcessPool(processes=MAX_PROCESSORS)
        pool.map_async(partial_analysis, tasks)
        pool.close()
        pool.join()
    else:
        clilogger.warn('no available key')

    # Give the log server a moment to drain buffered records, then stop it.
    time.sleep(2)
    logserverd.terminate()
