#!/usr/bin/env python
# _*_ coding:utf-8 _*_
__all__ = ['ctl_Master', 'ctl_Master_lonely', 'ctl_Master_cfg', 'ctl_ConnPool', 'sql_SR_Master_Go',
           'df_SR_Master_Go', 'start_Slave', 'SR_Task', 'SR_Result', 'start_Slaves_Process']

import os, sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
# sys.path.insert(0, '..') 上层目录
sys.path.insert(0, os.path.dirname(os.path.normpath(curdir)))
import pandas as pd
import numpy as np
#import dask.dataframe as ddf
'''
由于GIL的存在，python中的多线程其实并不是真正意义上的多线程，
I/O密集型使用多线程并发执行提高效率、计算密集型使用多进程并行执行提高效率。
'''
import random, time, queue, signal
from multiprocessing import Pool
from multiprocessing.managers import BaseManager
#window下运行防止多进程可能崩溃
from multiprocessing import freeze_support
import threading
import getopt
import pickle
import configparser
import logging
from statser.StatisticsReporter import sql_StatisticsReporter, df_StatisticsReporter, stream_StatisticsReporter, deal_require, merge_src
import SR_Utils
import SQL_Utils
import CommonUtils

logger = CommonUtils.getLogger('SR_MaterSlave')

g_sr_Task_df = []

def sg_exit(signum, frame):
    """Signal handler for SIGINT/SIGTERM: terminate the process cleanly.

    Uses sys.exit() rather than the site-provided exit() builtin: exit()
    is meant for interactive sessions and is missing when running with
    ``python -S`` or inside a frozen executable.
    """
    sys.exit(0)

def get_process_info():
    """Return a '(pid=NNN) ' prefix used to tag log lines with the worker PID."""
    return '(pid={}) '.format(os.getpid())

class MyParser(configparser.ConfigParser):
    """ConfigParser that can expose its contents as a plain nested dict."""

    def as_dict(self):
        """Return {section: {option: value}} with defaults merged in."""
        result = {}
        for section, values in self._sections.items():
            merged = dict(self._defaults, **values)
            # '__name__' is parser bookkeeping, not a real option.
            merged.pop('__name__', None)
            result[section] = merged
        return result

# Queue used to hand tasks out to slave nodes.
task_queue = queue.Queue()
# Queue used to collect results back from slave nodes.
result_queue = queue.Queue()

# Plain named functions instead of lambdas: under Python 2.7, pickle could
# not serialize lambdas, and BaseManager.register needs a picklable callable.
def get_task_queue():
    return task_queue

def get_result_queue():
    return result_queue

#### queues for workers on this same machine ######
# Queue used to hand tasks out to local worker processes.
task_queue_lonely = queue.Queue()
# Queue used to collect results back from local worker processes.
result_queue_lonely = queue.Queue()

# Plain named functions instead of lambdas: under Python 2.7, pickle could
# not serialize lambdas, and BaseManager.register needs a picklable callable.
def get_task_queue_lonely():
    return task_queue_lonely

def get_result_queue_lonely():
    return result_queue_lonely


'''
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.
#windows 运行分布式进程需要先启动 freeze_support()
#freeze_support()"冻结"时生成 Windows 可执行文件
#原因是Windows没有直接的fork()
#Window是通过创建一个新的过程代码,在子进程运行来模拟fork()
#由于代码是在技术无关的进程中运行的，所以它必须在运行之前交付
#它传递的方式首先是被pickle，然后通过管道从原始进程发送到新进程
#另外，这个新进程被告知它必须运行通过管道传递的代码通过传递
#freeze_support() 函数的任务是检查它正在运行的进程是否应该通过管道或不运行代码。
'''
freeze_support()

envConfig = {}
host = '127.0.0.1'
port = 4800
authkey = bytes('tongji', encoding='utf8')

'''
Queue之所以能通过网络访问，就是通过BaseManager实现的。由于BaseManager管理的不止一个Queue，所以，
要给每个Queue的网络调用接口起个名字，比如get_task_queue。task_worker的BaseManager注册的名字必须和task_manager中的一样。
把两个Queue都注册到网络上，callable参数关联了Queue对象，注意回调函数不能使用括号
'''
BaseManager.register('get_task_queue', callable = get_task_queue)
BaseManager.register('get_result_queue', callable = get_result_queue)
manager = BaseManager(address = (host, port), authkey = authkey)
is_manager_start = False
g_conn_pool = None

# 在本机内多进程并发计算所用queue
BaseManager.register('get_task_queue_lonely', callable = get_task_queue_lonely)
BaseManager.register('get_result_queue_lonely', callable = get_result_queue_lonely)
manager_lonely = BaseManager(address = (host, port+1), authkey = authkey)
is_manager_lonely_start = False

g_task_id = 2020


class SR_Task():
    """One unit of work sent to a slave node.

    Holds the serialized SR context (_src_dump), the source type that picks
    the handler ('sql' or 'df'), and optional handler kwargs.
    """

    def __init__(self, task_id, _src_dump, _src_type):
        global g_task_id

        # A falsy task_id means "assign one automatically" from the
        # module-wide counter.
        if not task_id:
            task_id = g_task_id
            g_task_id += 1
        self.task_id = task_id
        self._src_dump = _src_dump
        self._src_type = _src_type
        self.kwargs = {}


class SR_Result():
    """Outcome of one SR_Task, sent back from a slave to the master.

    status is 0 on success; res carries the handler's result payload and
    _src_dump the serialized (data-bearing) SR context.
    """

    def __init__(self, task_id, _src_dump, _src_type, status, res):
        self.task_id = task_id
        self._src_dump = _src_dump
        self._src_type = _src_type
        self.status = status
        self.res = res


def ctl_Master(startManager):
    """Set up or tear down the multi-node communication environment
    (nodes are not restricted to this machine).

    :param startManager: True -> start (once) and return the queues;
                         False -> shut the manager down
    :return: (taskQueue, resultQueue) when starting, else (None, None)
    """
    global manager, is_manager_start

    if not startManager:
        manager.shutdown()
        is_manager_start = False
        return None, None

    # Start the manager only the first time around.
    if not is_manager_start:
        manager.start()
        is_manager_start = True
    return manager.get_task_queue(), manager.get_result_queue()


def ctl_Master_lonely(startManager):
    """Set up or tear down the same-machine multi-process communication
    environment.

    :param startManager: True -> start (once) and return the queues;
                         False -> shut the manager down
    :return: (taskQueue, resultQueue) when starting, else (None, None)
    """
    global manager_lonely, is_manager_lonely_start

    if not startManager:
        manager_lonely.shutdown()
        is_manager_lonely_start = False
        return None, None

    # Start the manager only the first time around.
    if not is_manager_lonely_start:
        manager_lonely.start()
        is_manager_lonely_start = True
    return manager_lonely.get_task_queue_lonely(), manager_lonely.get_result_queue_lonely()


def ctl_Master_cfg(startManager, config_ini, iniInString=False):
    '''
    Initialise the runtime environment from a configuration source.

    :param startManager: True to (re)configure and start both managers,
                         False to shut them down
    :param config_ini: a dict of settings, a config-file path, or (with
                       iniInString) the ini text itself; falsy means the
                       default config.ini next to this module
    :param iniInString: treat config_ini as ini-format text, not a path
    '''
    # fix: envConfig must be declared global; previously the parsed
    # configuration was stored in a local and the module-level envConfig
    # (read by sr_worker() for its [database] section) was never updated.
    global envConfig
    global manager, manager_lonely, is_manager_start, is_manager_lonely_start
    global host, port, authkey

    if startManager:
        if not config_ini:
            config_ini = os.sep.join([curdir, 'config.ini'])

        if isinstance(config_ini, dict):
            envConfig = config_ini
        else:
            cfg = MyParser()
            if iniInString:
                cfg.read_string(config_ini)
            else:
                cfg.read(config_ini, encoding='utf8')
            envConfig = cfg.as_dict()

        # [SR_Mater] overrides the module defaults for the master address.
        srConfig = envConfig.get('SR_Mater', {})
        host = srConfig.get('host', host)
        port = int(srConfig.get('port', port))
        authkey = bytes(srConfig.get('authkey', 'tongji'), encoding='utf8')

        # Rebuild both managers on the (possibly new) address before starting.
        manager = BaseManager(address = (host, port), authkey = authkey)
        manager_lonely = BaseManager(address = (host, port+1), authkey = authkey)
        is_manager_start = False
        is_manager_lonely_start = False

    ctl_Master(startManager)
    ctl_Master_lonely(startManager)


def ctl_ConnPool(conn_pool=None):
    """Get or set the module-wide database connection pool.

    :param conn_pool: pool from SQL_Utils.getDbpool(); when truthy it
                      replaces the current global pool
    :return: the current global pool
    """
    global g_conn_pool

    g_conn_pool = conn_pool or g_conn_pool
    return g_conn_pool


def switch_src_type(x):
    """Map a task source type to its slave handler function.

    :param x: source type string, case-insensitive ('sql' or 'df')
    :return: the handler, or '' for an unknown type
    """
    handlers = {
        'sql': sql_SR_Slave_Go,
        'df': df_SR_Slave_Go,
    }
    return handlers.get(x.lower(), '')


def start_Slave(manager_, lonely):
    '''
    Run a slave-node loop: connect to the master's manager, then pull and
    execute tasks until told to stop.

    :param manager_: a BaseManager bound to the master's queue names
    :param lonely: truthy when running as a single-shot local worker
                   (handles exactly one task, see df_SR_Master_Go)
    '''
    logger.info(get_process_info() + 'SR_MaterSlave slaver started')
    while 1:
        # Retry once a second until the master is reachable.
        num = 1
        while num:
            try:
                manager_.connect()
                logger.info(get_process_info() + 'SR_MaterSlave slaver connected')
                break
            except ConnectionRefusedError:
                pass
            except (BaseException) as exc:
                logger.error(str(exc))
            time.sleep(1)
            num += 1

        if lonely:
            taskQueue = manager_.get_task_queue_lonely()
            resultQueue = manager_.get_result_queue_lonely()
        else:
            taskQueue = manager_.get_task_queue()
            resultQueue = manager_.get_result_queue()

        while 1:
            try:
                # Blocking get: wait until a task is available.
                sr_Task = taskQueue.get(block=True)

                # fix: a None task is the shutdown sentinel -- check it
                # BEFORE logging; the log line below dereferences
                # sr_Task.task_id and previously raised AttributeError
                # whenever the sentinel arrived.
                if sr_Task is None:
                    break
                logger.info(get_process_info() + 'SR_MaterSlave slaver got a task, task_id: ' + str(sr_Task.task_id))

                # Dispatch on the task's source type ('sql' or 'df').
                fun_ = switch_src_type(sr_Task._src_type)
                if fun_:
                    fun_(taskQueue, resultQueue, sr_Task)

                # A lonely worker handles exactly one task, then exits.
                if lonely:
                    return

            except (queue.Empty):
                pass
            except (EOFError, BrokenPipeError):
                # Master went away; reconnect in the outer loop.
                break
    logger.info(get_process_info() + 'SR_MaterSlave slaver exit')


def collectResultFromQueue(nslaves, resultQueue, timeoutSeconds, _src):
    """Collect nslaves results, merge the slave contexts and run the final
    aggregation step.

    :param nslaves: number of results expected
    :param resultQueue: queue the slaves put pickled SR_Result objects on
    :param timeoutSeconds: max seconds to wait per result
    :param _src: context returned unchanged on failure
    :return: (status, message, merged context); status != 0 on failure
    """
    # Clamp: Queue.get rejects timeouts above threading.TIMEOUT_MAX
    # ("OverflowError: timeout value is too large").
    timeout = min(timeoutSeconds, threading.TIMEOUT_MAX)

    failures = 0
    collected = []
    for _ in range(nslaves):
        try:
            item = resultQueue.get(timeout=timeout)
        except queue.Empty:
            return 1, '', _src
        if item:
            item = pickle.loads(item)
            failures += item.status
        else:
            # An empty payload counts as a hard failure.
            failures += 100
        collected.append(item)

    if failures:
        return 1, '', _src

    # All slaves succeeded: load the first context, register the others
    # into it, then run the final merge step.
    master_src = SR_Utils.src_Load(collected[0]._src_dump, keepData=True)
    for extra in collected[1:]:
        slave_src = SR_Utils.src_Load(extra._src_dump, keepData=True)
        master_src.registerSlaves(slave_src)
    merge_src(master_src)

    return 0, '', master_src


################################################################################


def _put_sql_task(taskQueue, _src_dump, conn_pool, use_g_conn_pool):
    """Build an SR_Task for one SQL chunk and enqueue it.

    Slaves using the shared global pool need no connection kwargs;
    otherwise the pool's connection parameters travel with the task.
    """
    sr_Task = SR_Task('', _src_dump, 'sql')
    sr_Task.kwargs = {} if use_g_conn_pool else conn_pool._kwargs
    taskQueue.put(sr_Task)


def sql_SR_Master_Go(sr_filename, _src=None, parameters={}, conn_pool=None,
                     dictResult=False, params={}, sql='', limitRows=0,
                     fun_require=deal_require, nslaves=1, timeoutSeconds=60*60*24*30):
    '''
    Run a SQL-driven SR script on several nodes in parallel.

    :param sr_filename: SR script text or script file name
    :param _src: SR analysis context
    :param parameters: SR script parameters
    :param conn_pool: database connection pool
    :param dictResult: whether database cursors return dicts
    :param params: parameters for the database SQL
    :param sql: SQL statement; when given it replaces the script's own sql
                definition. May be a list/tuple whose elements together
                yield the complete data set.
    :param limitRows: rows per chunk, used to split one query into several,
                one per compute node. The last node processes all remaining
                rows and is not limited by this number.
    :param fun_require: callback or dict ("name=value" pairs) providing the
                require variables
    :param nslaves: number of concurrent nodes
    :param timeoutSeconds: timeout in seconds
    :return: status information and the computed result
    '''
    # fix: conn / use_g_conn_pool were previously left unbound when neither
    # a pool argument nor a global pool existed, raising UnboundLocalError
    # instead of returning the documented status 2.
    conn = None
    use_g_conn_pool = False
    if conn_pool:
        conn = conn_pool.connection()
    elif ctl_ConnPool():
        conn = ctl_ConnPool().connection()
        use_g_conn_pool = True
    if not conn:
        return 2, '', _src

    # Compile the script to intermediate code when no context was supplied.
    if _src is None:
        status, res, _src = sql_StatisticsReporter(sr_filename, None, parameters=parameters,
                                    conn=conn, dictResult=dictResult, params=params, sql=sql,
                                    fun_require=fun_require, debug=False, dump=True)
        if status:
            conn.close()
            return status, res, _src
    else:
        if not _src.dump:
            _src.dump = SR_Utils.src_Dump(_src)
    conn.close()

    if (nslaves < 1):
        nslaves = 1

    taskQueue, resultQueue = ctl_Master(True)

    if not sql:
        sql = _src.get_value('sql')

    if isinstance(sql, (list, tuple)):
        # One task per SQL statement; together they cover the data set.
        nslaves = len(sql)
        for i in range(nslaves):
            _src0 = SR_Utils.src_Load(_src.dump)
            _src0.sql = sql[i]  # fix: was the undefined name sql_tmp (NameError)
            _put_sql_task(taskQueue, SR_Utils.src_Dump(_src0), conn_pool, use_g_conn_pool)
    elif not isinstance(sql, str):
        return 3, '', _src
    elif (sql.find('LIMIT_offset_rows__') < 0):
        # Without the split marker the data source cannot be chunked and the
        # classification would come out incomplete: run on a single node.
        nslaves = 1
        _src.sql = sql
        _put_sql_task(taskQueue, SR_Utils.src_Dump(_src), conn_pool, use_g_conn_pool)
    else:
        # Split the query into nslaves chunks; the last chunk takes all
        # remaining rows (no LIMIT cap) so the classification stays complete.
        nslaves_1 = nslaves - 1
        for i in range(nslaves):
            _src0 = SR_Utils.src_Load(_src.dump)
            if (limitRows > 0):
                if (i < nslaves_1):
                    sql_tmp = sql.replace('LIMIT_offset_rows__', 'LIMIT '+str(i*limitRows)+','+str(limitRows))
                else:
                    sql_tmp = sql.replace('LIMIT_offset_rows__', 'LIMIT '+str(i*limitRows)+',2147483647')
                _src0.sql = sql_tmp
            _put_sql_task(taskQueue, SR_Utils.src_Dump(_src0), conn_pool, use_g_conn_pool)

    result = collectResultFromQueue(nslaves, resultQueue, timeoutSeconds, _src)

    # Drain leftover tasks so the next transaction starts on empty queues.
    while not taskQueue.empty():
        nouse_ = taskQueue.get(block=True)

    return result


def sql_SR_Slave_Go(taskQueue, resultQueue, sr_Task):
    '''
    Execute one SQL task on a slave node and post the pickled SR_Result.

    :param taskQueue: the task queue (kept for the handler signature)
    :param resultQueue: queue the pickled SR_Result is put on
    :param sr_Task: task whose _src_dump holds the serialized SR context
    '''
    _src = SR_Utils.src_Load(sr_Task._src_dump)

    logger.info(get_process_info() + 'Enter sql_SR_Slave_Go()')

    if sr_Task.kwargs:
        # kwargs example:
        # {'host': '192.168.1.110', 'user': 'root', 'password': '...',
        #  'database': 'op_einvoicedb', 'autocommit': True,
        #  'charset': 'utf8mb4', 'cursorclass': pymysql.cursors.SSCursor}
        conn = SQL_Utils.getConn(host = sr_Task.kwargs['host'],
                                 user = sr_Task.kwargs['user'],
                                 password = sr_Task.kwargs['password'],
                                 dbname=sr_Task.kwargs['database'],
                                 dictResult = False)
        logger.info(get_process_info() + 'call SQL_Utils.getConn()')
    else:
        if ctl_ConnPool():
            conn = ctl_ConnPool().connection()
        else:
            # No way to reach the database: report failure (status 1).
            sr_Result = SR_Result(sr_Task.task_id, '', '', 1, '')
            resultQueue.put(pickle.dumps(sr_Result))
            return

    try:
        logger.info(get_process_info() + 'call StatisticsReporter.sql_StatisticsReporter()')
        status, res, _src = sql_StatisticsReporter('', _src, {}, conn, dictResult=False, debug=False)
        logger.info(get_process_info() + 'finish task, SQL: ' + _src.sql)

        _src_dump = SR_Utils.src_Dump(_src, keepData=True)
        sr_Result = SR_Result(sr_Task.task_id, _src_dump, 'sql', status, res)
        resultQueue.put(pickle.dumps(sr_Result))
    finally:
        # fix: close the connection even when the reporter raises,
        # otherwise pooled connections leak on error.
        conn.close()

################################################################################

def df_SR_Slave_Go(taskQueue, resultQueue, sr_Task):
    """Execute one DataFrame task in a local worker process and post the
    pickled SR_Result.

    The worker looks its DataFrame slice up in g_sr_Task_df, which the
    master filled before forking the worker processes.

    :param taskQueue: the task queue (kept for the handler signature)
    :param resultQueue: queue the pickled SR_Result is put on
    :param sr_Task: task whose kwargs['df_i'] selects the slice
    """
    global g_sr_Task_df

    context = SR_Utils.src_Load(sr_Task._src_dump)

    logger.info(get_process_info() + 'Enter df_SR_Slave_Go()')
    slice_index = sr_Task.kwargs.get('df_i', -1)
    frame = g_sr_Task_df[slice_index]

    logger.info(get_process_info() + 'call StatisticsReporter.df_StatisticsReporter()')
    status, res, context = df_StatisticsReporter('', context, {}, frame)
    logger.info(get_process_info() + 'finish task: df')

    dumped = SR_Utils.src_Dump(context, keepData=True)
    resultQueue.put(pickle.dumps(SR_Result(sr_Task.task_id, dumped, 'df', status, res)))


def df_SR_Master_Go(sr_filename, _src=None, parameters={}, df=None,
                    fun_require=deal_require, nslaves=0, timeoutSeconds=60*60*24*30):
    '''
    Compile the script to intermediate code from the data set, spread the
    data slices over several worker processes, and once every worker has
    finished, merge the contents and run the final step.
    !! Only usable with processes on this machine, because it relies on
    memory being shared (read-only, copy-on-write) with children created
    by fork().

    ## This function misbehaves on Windows: multiprocessing is implemented
    ## differently there than on Linux -- Windows re-imports the module
    ## that creates the process, which Linux does not.
    ## sysstr = platform.system()
    ## if (sysstr == "Linux"):
    ##    OK
    ## else:
    ##    Fail

    :param sr_filename: SR script text or script file name
    :param _src: SR analysis context
    :param parameters: SR script parameters
    :param df: data source, a Dataframe (or a list/tuple of Dataframes)
    :param fun_require: callback or dict ("name=value" pairs) providing the
                require variables
    :param nslaves: number of concurrent worker processes
    :param timeoutSeconds: timeout in seconds
    :return: status information and the computed result
    '''
    global g_sr_Task_df

    if _src is None:
        # Compile with a single-row sample of the (first) frame; the real
        # data is distributed to the workers below.
        if isinstance(df, (list, tuple)):
            df_ = df[0]
        else:
            df_ = df
        status, res, _src = df_StatisticsReporter(sr_filename, None, parameters=parameters, df=df_[:1],
                                                        fun_require=fun_require, debug=False, dump=True)
        if status:
            return status, res, _src
    else:
        if not _src.dump:
             _src.dump = SR_Utils.src_Dump(_src)

    if isinstance(df, (list, tuple)):
        # Pre-split input: one worker per frame.
        nslaves = len(df)
        g_sr_Task_df = df
    else:
        # Work out how many worker processes are needed.
        if (nslaves <= 0):
            nslaves = os.cpu_count()
        # Split the data evenly, one slice per worker.
        g_sr_Task_df = np.array_split(df, nslaves)

    taskQueue, resultQueue = ctl_Master_lonely(True)

    # Queue one task per slice BEFORE forking, so each child finds its work.
    for i in range(nslaves):
        sr_Task = SR_Task('', _src.dump, 'df')
        kwargs = {'df_i' : i}
        sr_Task.kwargs = kwargs

        taskQueue.put(sr_Task)

    # Silence logging while the pool runs; restored at the end.
    level = logger.level
    logger.setLevel(logging.CRITICAL)

    # At most nslaves processes run in the pool at the same time.
    pool = Pool(nslaves)
    for i in range(nslaves):
        # Add tasks to the pool.
        # Note: if more tasks are added than the pool has processes, the
        # extra ones are not started immediately; they wait for an earlier
        # process to finish before a new one starts.

        # Passing a bound/instance method here fails (the pool cannot
        # pickle it), so a module-level wrapper is used instead.
        #pool.apply_async(start_Slave, (manager_lonely, ))
        pool.apply_async(start_Lonely_work, ())

    pool.close() # close the pool to new submissions
    pool.join()  # block here until every child process has finished

    result = collectResultFromQueue(nslaves, resultQueue, timeoutSeconds, _src)

    # Drain anything left in the queue, otherwise the next transaction
    # would be confused by stale tasks.
    while not taskQueue.empty():
        nouse_ = taskQueue.get(block=True)
    #taskQueue.clear()
    #resultQueue.clear()

    logger.setLevel(level)

    return result

################################################################################

def start_Lonely_work():
    """Module-level entry point handed to the local process pool: run one
    single-shot slave against the same-machine (lonely) manager."""
    start_Slave(manager_lonely, lonely=1)


def sr_worker():
    """Worker entry point for a remote slave process.

    Builds the shared DB connection pool from the [database] section of
    envConfig, installs it globally, then serves tasks forever.
    """
    global manager, envConfig

    db_cfg = envConfig.get('database', {})
    pool = SQL_Utils.getDbpool(host = db_cfg.get('host', ''),
                               user = db_cfg.get('user', ''),
                               password = db_cfg.get('password', ''),
                               dbname = db_cfg.get('dbname', ''), dictResult=False)
    ctl_ConnPool(pool)
    start_Slave(manager, 0)


def _apply_sr_config(cfg_dict):
    """Install a parsed config dict into the module-level settings.

    Reads the [SR_Mater] section for host/port/authkey, keeping the
    current values for any missing key, and publishes the whole dict as
    envConfig (sr_worker reads its [database] section from there).
    """
    global envConfig, host, port, authkey

    envConfig = cfg_dict
    srConfig = envConfig.get('SR_Mater', {})
    host = srConfig.get('host', host)
    port = int(srConfig.get('port', port))
    authkey = bytes(srConfig.get('authkey', 'tongji'), encoding='utf8')


def start_Slaves_Process():
    '''
    Start the slave-node worker processes.

    Settings come from the command line (-n/--slavesnum, -c/--config,
    -h/--host, -p/--port, -a/--authkey) or, with no arguments, from
    config.ini next to this module. Blocks until all workers exit.
    :return: None
    '''
    # fix: host/port/authkey/envConfig/manager were previously plain local
    # assignments, so reading `host` before assignment raised
    # UnboundLocalError on -c, and the parsed settings never reached the
    # module-level state that sr_worker()/start_Slave() actually use.
    global host, port, authkey, manager

    signal.signal(signal.SIGINT, sg_exit)
    signal.signal(signal.SIGTERM, sg_exit)

    slaveNum = 0
    if (len(sys.argv) >= 2):
        # fix: value-taking long options need a trailing '='; previously
        # only 'authkey=' had one, so e.g. --port consumed no argument.
        opts, args = getopt.getopt(sys.argv[1:], 'n:c:h:a:p:',
                                   ['slavesnum=', 'config=', 'host=', 'authkey=', 'port='])
        for opt_name, opt_value in opts:
            if opt_name in ('-c', '--config'):
                cfg = MyParser()
                cfg.read(opt_value, encoding='utf8')
                _apply_sr_config(cfg.as_dict())
            elif opt_name in ('-h', '--host'):
                host = opt_value
            elif opt_name in ('-n', '--slavesnum'):
                slaveNum = int(opt_value)
            elif opt_name in ('-p', '--port'):
                port = int(opt_value)
            elif opt_name in ('-a', '--authkey'):
                authkey = bytes(opt_value, encoding='utf8')
    else:
        config_ini = os.sep.join([curdir, 'config.ini'])
        cfg = MyParser()
        cfg.read(config_ini, encoding='utf8')
        _apply_sr_config(cfg.as_dict())

    # Rebuild the master-side manager on the final address so the workers
    # connect to the configured host/port, not the import-time default.
    manager = BaseManager(address = (host, port), authkey = authkey)

    if (slaveNum <= 0):
        slaveNum = os.cpu_count()

    # At most slaveNum worker processes run concurrently in the pool.
    pool = Pool(slaveNum)
    for i in range(slaveNum):
        # Extra tasks beyond the pool size wait for a free process.
        pool.apply_async(sr_worker, ())

    pool.close() # no more tasks may be submitted
    pool.join()  # block until every child process has finished


if __name__ == '__main__':
    # Script entry point: run this module as a standalone slave worker pool.
    start_Slaves_Process()
