# coding=utf-8
__all__ = ['df_SR_Thread_Go', 'stream_SR_Thread_Go', 'sql_SR_Thread_Go']

import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
# sys.path.insert(0, '..')  (i.e. add the parent directory, resolved below)
sys.path.insert(0, os.path.dirname(os.path.normpath(curdir)))
import pandas as pd
import numpy as np
#import dask.dataframe as ddf
import threading
from statser.StatisticsReporter import sql_StatisticsReporter, df_StatisticsReporter, stream_StatisticsReporter, deal_require, merge_src
import SR_Utils


class SR_Thread_df(threading.Thread):
    """Worker thread that runs ``df_StatisticsReporter`` over one DataFrame partition."""

    def __init__(self, threadID, name, _src=None, df=None, fun_require=deal_require):
        super().__init__()
        self.threadID = threadID
        self.name = name
        # Per-thread SR analysis context (a private clone of the master).
        self._src = _src
        # The DataFrame partition this worker processes.
        self.df = df
        # Callback or dict supplying the "require" variables.
        self.fun_require = fun_require
        # (status, message, result) — populated by run(), read after join().
        self.retInfo = (0, '', '')

    def run(self):
        # Keep the whole result tuple so the caller can inspect status/result.
        self.retInfo = df_StatisticsReporter('', _src=self._src, df=self.df, fun_require=self.fun_require)


def df_SR_Thread_Go(sr_filename, _src=None, parameters=None, df=None, fun_require=deal_require, npartitions=0):
    '''
    Compile the SR script into intermediate code, split the data set evenly
    across worker threads, run each partition in its own thread, then merge
    all partial results into the master context and run the "final" step.

    :param sr_filename: SR script text or script file name
    :param _src: SR analysis context; when None it is compiled from sr_filename
    :param parameters: SR script parameters (dict); defaults to {}
    :param df: data source — a DataFrame, or a list/tuple of DataFrames
               (one worker per element)
    :param fun_require: callback or dict ("name=value" pairs) providing the
               "require" variables
    :param npartitions: number of worker threads (<= 0 means os.cpu_count())
    :return: (status, message, result context)
    '''
    # Avoid the shared-mutable-default pitfall: a literal {} in the signature
    # would be shared across calls.
    if parameters is None:
        parameters = {}

    # Compile the script into intermediate code if no context was supplied.
    if _src is None:
        if isinstance(df, (list, tuple)):
            df_ = df[0]
        else:
            df_ = df
        # Compile against a 1-row sample only; dump=True captures a serialized
        # copy of the context so every worker can load a private clone.
        status, res, _src = df_StatisticsReporter(sr_filename, None, parameters=parameters, df=df_[:1], fun_require=fun_require, debug=False, dump=True)
        if status:
            return status, res, _src
    elif not _src.dump:
        _src.dump = SR_Utils.src_Dump(_src)

    if isinstance(df, (list, tuple)):
        # Pre-partitioned input: one worker per DataFrame.
        npartitions = len(df)
        dfs = df
    else:
        # Decide how many worker threads to run.
        if npartitions <= 0:
            # os.cpu_count() may return None; fall back to a single thread
            # (the original would crash on range(None)).
            npartitions = os.cpu_count() or 1
        # Split the DataFrame evenly across the workers.
        dfs = np.array_split(df, npartitions)

    threads = []
    for i in range(npartitions):
        # Each worker gets its own deserialized copy of the compiled context.
        _src0 = SR_Utils.src_Load(_src.dump)
        _src0.subTaskId = str(i)
        thread_ = SR_Thread_df(i, '', _src=_src0, df=dfs[i], fun_require=fun_require)
        threads.append(thread_)
        thread_.start()

    # Wait for all workers to finish.
    for thread_ in threads:
        thread_.join()

    _src_master = threads[0]._src

    # Any non-zero per-thread status marks the whole run as failed.
    status = 0
    for thread_ in threads:
        status += thread_.retInfo[0]
    if status:
        return 1, '', _src_master

    # Merge the slave contexts into the master and run the "final" step.
    for thread_ in threads[1:]:
        _src_master.registerSlaves(thread_._src)
    merge_src(_src_master)

    return 0, '', _src_master


################################################################################


class SR_Thread_stream(threading.Thread):
    """Worker thread that runs ``stream_StatisticsReporter`` over a shared stream source."""

    def __init__(self, threadID, name, _src=None, srd=None, fun_require=deal_require):
        super().__init__()
        self.threadID = threadID
        self.name = name
        # Per-thread SR analysis context (a private clone of the master).
        self._src = _src
        # Stream data source (StreamResDict); shared by all workers.
        self.srd = srd
        # Callback or dict supplying the "require" variables.
        self.fun_require = fun_require
        # (status, message, result) — populated by run(), read after join().
        self.retInfo = (0, '', '')

    def run(self):
        # Keep the whole result tuple so the caller can inspect status/result.
        self.retInfo = stream_StatisticsReporter('', _src=self._src, srd=self.srd, fun_require=self.fun_require)


def stream_SR_Thread_Go(sr_filename, _src=None, parameters=None, srd=None, fun_require=deal_require, npartitions=0):
    '''
    Compile the SR script against a stream data source, run it on several
    worker threads that share the stream, then merge all partial results into
    the master context and run the "final" step.

    :param sr_filename: SR script text or script file name
    :param _src: SR analysis context; when None it is compiled from sr_filename
    :param parameters: SR script parameters (dict); defaults to {}
    :param srd: data source, StreamResDict type (shared by all workers)
    :param fun_require: callback or dict ("name=value" pairs) providing the
               "require" variables
    :param npartitions: number of worker threads (<= 0 means os.cpu_count())
    :return: (status, message, result context)
    '''
    # Avoid the shared-mutable-default pitfall: a literal {} in the signature
    # would be shared across calls.
    if parameters is None:
        parameters = {}

    # Compile the script into intermediate code if no context was supplied.
    if _src is None:
        status, res, _src = stream_StatisticsReporter(sr_filename, None, parameters=parameters, srd=srd, fun_require=fun_require, debug=False, dump=True)
        if status:
            return status, res, _src
    elif not _src.dump:
        _src.dump = SR_Utils.src_Dump(_src)

    # Decide how many worker threads to run.
    if npartitions <= 0:
        # os.cpu_count() may return None; fall back to a single thread
        # (the original would crash on range(None)).
        npartitions = os.cpu_count() or 1

    threads = []
    for i in range(npartitions):
        # Each worker gets its own deserialized copy of the compiled context
        # but shares the same stream source.
        _src0 = SR_Utils.src_Load(_src.dump)
        _src0.subTaskId = str(i)
        thread_ = SR_Thread_stream(i, '', _src=_src0, srd=srd, fun_require=fun_require)
        threads.append(thread_)
        thread_.start()

    # Wait for all workers to finish.
    for thread_ in threads:
        thread_.join()

    _src_master = threads[0]._src

    # Any non-zero per-thread status marks the whole run as failed.
    status = 0
    for thread_ in threads:
        status += thread_.retInfo[0]
    if status:
        return 1, '', _src_master

    # Merge the slave contexts into the master and run the "final" step.
    for thread_ in threads[1:]:
        _src_master.registerSlaves(thread_._src)
    merge_src(_src_master)

    return 0, '', _src_master


################################################################################


class SR_Thread_sql(threading.Thread):
    """Worker thread that runs ``sql_StatisticsReporter`` on its own DB connection."""

    def __init__(self, threadID, name, _src=None, conn=None, dictResult=False, params={}, sql='', fun_require=deal_require):
        super().__init__()
        self.threadID = threadID
        self.name = name
        # Per-thread SR analysis context (a private clone of the master).
        self._src = _src
        # Dedicated DB connection — each thread must own its connection;
        # the caller closes it after join().
        self.conn = conn
        # Whether the DB cursor returns dict rows.
        self.dictResult = dictResult
        # Parameters for the SQL statement.
        self.params = params
        # SQL statement this worker executes.
        self.sql = sql
        # Callback or dict supplying the "require" variables.
        self.fun_require = fun_require
        # (status, message, result) — populated by run(), read after join().
        self.retInfo = (0, '', '')

    def run(self):
        # Keep the whole result tuple so the caller can inspect status/result.
        self.retInfo = sql_StatisticsReporter('', _src=self._src, conn=self.conn, dictResult=self.dictResult, params=self.params, sql=self.sql, fun_require=self.fun_require)


def sql_SR_Thread_Go(sr_filename, _src=None, parameters=None, conn_pool=None, dictResult=False, params=None, sql='', limitRows=0, fun_require=deal_require, npartitions=0):
    '''
    Run an SR script over a SQL data source with one DB connection per worker
    thread (MySQLdb is thread-safe as long as each thread/process uses its own
    connection, cursor and result set), then merge the partial results into
    the master context and run the "final" step.

    :param sr_filename: SR script text or script file name
    :param _src: SR analysis context; when None it is compiled from sr_filename
    :param parameters: SR script parameters (dict); defaults to {}
    :param conn_pool: database connection pool; must provide .connection()
    :param dictResult: whether the DB cursor returns dict rows
    :param params: parameters for the SQL statement; defaults to {}
    :param sql: SQL statement; when not empty it overrides the script's own
                "sql" definition. May be a list/tuple of statements whose
                result sets together form the complete data set (one worker
                per statement).
    :param limitRows: rows per partition; the SQL is split into npartitions
                chunks via the 'LIMIT_offset_rows__' placeholder, with the
                last chunk taking all remaining rows (not capped)
    :param fun_require: callback or dict ("name=value" pairs) providing the
                "require" variables
    :param npartitions: number of worker threads (<= 0 means os.cpu_count())
    :return: (status, message, result context)
    '''
    # Avoid the shared-mutable-default pitfall: literal {} defaults in the
    # signature would be shared across calls.
    if parameters is None:
        parameters = {}
    if params is None:
        params = {}

    # Compile the script into intermediate code if no context was supplied;
    # the compile connection is closed before the workers start.
    conn = conn_pool.connection()
    if _src is None:
        status, res, _src = sql_StatisticsReporter(sr_filename, None, parameters=parameters, conn=conn, dictResult=dictResult, params=params, sql=sql, fun_require=fun_require, debug=False, dump=True)
        if status:
            conn.close()
            return status, res, _src
    elif not _src.dump:
        _src.dump = SR_Utils.src_Dump(_src)
    conn.close()

    # Decide how many worker threads to run.
    if npartitions <= 0:
        # os.cpu_count() may return None; fall back to a single thread.
        npartitions = os.cpu_count() or 1

    if not sql:
        sql = _src.get_value('sql')
        if not sql:
            # Neither the caller nor the script provided a SQL statement.
            return 2, '', _src

    threads = []
    if isinstance(sql, (list, tuple)):
        # Pre-split SQL: one worker per statement.
        npartitions = len(sql)
        for i in range(npartitions):
            _src0 = SR_Utils.src_Load(_src.dump)
            _src0.subTaskId = str(i)
            conn = conn_pool.connection()
            thread_ = SR_Thread_sql(i, '', _src=_src0, conn=conn, dictResult=dictResult, params=params, sql=sql[i], fun_require=fun_require)
            threads.append(thread_)
            thread_.start()
    elif sql.find('LIMIT_offset_rows__') < 0:
        # No split placeholder: the query cannot be partitioned, so run a
        # single worker. Bug fix: the original referenced the undefined names
        # `i` and `_src0` here (NameError) and reused the compile connection
        # that was already closed above.
        npartitions = 1
        _src0 = SR_Utils.src_Load(_src.dump)
        _src0.subTaskId = '0'
        conn = conn_pool.connection()
        thread_ = SR_Thread_sql(0, '', _src=_src0, conn=conn, dictResult=dictResult, params=params, sql=sql, fun_require=fun_require)
        threads.append(thread_)
        thread_.start()
    else:
        # Split the query into npartitions LIMIT chunks; the last chunk has
        # no row cap so the tail of the data set is never lost.
        npartitions_1 = npartitions - 1
        for i in range(npartitions):
            _src0 = SR_Utils.src_Load(_src.dump)
            _src0.subTaskId = str(i)
            conn = conn_pool.connection()

            if limitRows > 0:
                if i < npartitions_1:
                    sql_tmp = sql.replace('LIMIT_offset_rows__', 'LIMIT '+str(i*limitRows)+','+str(limitRows))
                else:
                    # Last partition: take everything that remains
                    # (2147483647 = max signed 32-bit int, effectively "all").
                    sql_tmp = sql.replace('LIMIT_offset_rows__', 'LIMIT '+str(i*limitRows)+',2147483647')
                thread_ = SR_Thread_sql(i, '', _src=_src0, conn=conn, dictResult=dictResult, params=params, sql=sql_tmp, fun_require=fun_require)
            else:
                thread_ = SR_Thread_sql(i, '', _src=_src0, conn=conn, dictResult=dictResult, params=params, sql=sql, fun_require=fun_require)

            threads.append(thread_)
            thread_.start()

    # Wait for all workers to finish.
    for thread_ in threads:
        thread_.join()

    _src_master = threads[0]._src

    # Release every worker's connection; any non-zero per-thread status marks
    # the whole run as failed.
    status = 0
    for thread_ in threads:
        thread_.conn.close()
        status += thread_.retInfo[0]
    if status:
        return 1, '', _src_master

    # Merge the slave contexts into the master and run the "final" step.
    for thread_ in threads[1:]:
        _src_master.registerSlaves(thread_._src)
    merge_src(_src_master)

    return 0, '', _src_master
