# -*- coding: utf-8 -*-
# @Time    : 2019/5/10 15:30
# @Author  : CHEN Wang
# @Site    : 
# @File    : parallelism.py
# @Software: PyCharm

"""


"""

import time
import pandas as pd
import threading
import multiprocessing
from abc import ABCMeta
from concurrent.futures import ThreadPoolExecutor
from quant_researcher.quant.project_tool.logger.my_logger import LOG


def get_target_processor_num(pct=0.6):
    """
    Return the number of worker processes corresponding to a target share of the CPU.

    :param pct: fraction of the machine's logical CPUs to use, in (0, 1]
    :return: usable process count, always at least 1
    """
    total_cpu_num = get_total_processor_num()
    # int() truncates; clamp to 1 so a small pct never yields 0 workers
    # (multiprocessing.Pool(0) raises ValueError).
    target_processor_num = max(1, int(total_cpu_num * pct))
    LOG.warning(f'希望使用的进程数={target_processor_num}，机器核数={total_cpu_num}，使用比例={pct}')
    return target_processor_num


def get_total_processor_num():
    """
    Return the machine's logical CPU count.

    With hyper-threading this is the thread count: one physical core may
    expose two logical CPUs.

    :return: logical CPU count reported by the OS
    """
    return multiprocessing.cpu_count()


def _echo(x):
    """Identity helper; must live at module level so multiprocessing can pickle it."""
    return x


def multiprocessing_test(processor_num=10):
    """
    Smoke-test that a pool of worker processes can run jobs.

    The original version defined the worker function locally, which cannot be
    pickled, so every job failed silently (the AsyncResults were never .get()).

    :param processor_num: number of worker processes (and jobs) to launch
    :return: list of results, ``[0, 1, ..., processor_num - 1]`` on success
    """
    with multiprocessing.Pool(processor_num) as pool:
        async_results = [pool.apply_async(_echo, args=(i,)) for i in range(processor_num)]
        # .get() re-raises any worker exception instead of hiding it in the AsyncResult
        results = [r.get() for r in async_results]
    print(results)
    return results


class MultiThread(threading.Thread):
    """
    Thread subclass whose return value (or raised exception) can be
    retrieved afterwards via get_result().
    """
    def __init__(self, func, args, name='NA', **kwargs):
        """
        Initialize the thread wrapper.

        :param func: the callable to execute in the thread
        :param args: positional arguments passed to func
        :param name: name of this thread
        :param kwargs:
            - info, dict, keyword arguments passed to func
        """
        super(MultiThread, self).__init__()
        self.name = name
        self.func = func
        self.args = args
        self.info = kwargs.get('info', {})
        self.result = None       # func's return value, filled in by run()
        self.err = None          # exception raised by func, if any
        self.is_succeed = True   # False once func has raised

    def run(self):
        try:
            self.result = self.func(*self.args, **self.info)
        except Exception as err:
            # Record the failure; get_result() re-raises it in the caller's
            # thread, so re-raising here would only produce excepthook noise.
            self.is_succeed = False
            self.err = err

    def get_result(self):
        """
        Return func's result, or re-raise the exception it failed with.

        Joins the thread first (if it was started) so the caller never
        observes a half-finished result.
        """
        if self.is_alive():
            self.join()
        if self.is_succeed:
            return self.result
        else:
            raise self.err


class Parallelism_abs(metaclass=ABCMeta):
    """Shared bookkeeping for the process- and thread-pool map classes below."""

    def __init__(self, processes=multiprocessing.cpu_count()):
        """
        :param processes: worker count, defaults to the number of CPUs
        """
        self.total_processes = 0      # number of chunks submitted
        self.completed_processes = 0  # number of chunks finished
        self.results = []             # accumulated results from all chunks
        self.data = []                # pending async result handles
        self.cores = processes        # cpu core (worker) count
        self._loginfolist = []        # buffered log messages

    def __getstate__(self):
        # The pool is not picklable; drop it when an instance is serialized
        # (e.g. when a bound method is shipped to a worker process).
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict

    def __setstate__(self, state):
        self.__dict__.update(state)

    def get_results(self):
        """Return the results collected so far."""
        return self.results

    def complete(self, result):
        """Callback for a finished chunk: collect its results, report progress."""
        self.results.extend(result)
        self.completed_processes += 1
        if self.total_processes:  # guard against division by zero
            print('Progress: {:.2f}%'.format((self.completed_processes / self.total_processes) * 100))


class Parallelism(Parallelism_abs):
    """ Process-pool map class.
        pl = ParallelSim()
        pl.run(yourFunc, yourIter)
        data = pl.get_results()
        data = list(data)
        print(data)
    """

    def __init__(self, processes=multiprocessing.cpu_count()):
        """
        :param processes: worker process count, defaults to the number of CPUs
        """
        super(Parallelism, self).__init__(processes)
        self.pool = multiprocessing.Pool(processes=processes)

    def run(self, func, iter):
        """
        Map func over iter (an iterable of argument tuples) in parallel,
        then shut the pool down — an instance can only run() once.

        :param func: picklable callable, called with each tuple unpacked
        :param iter: iterable of argument tuples for func
        """
        if isinstance(iter, list) and self.cores > 1 and len(iter) > self.cores:
            # One chunk per worker; // + 1 so the slices cover the whole list.
            chunk_len = len(iter) // self.cores + 1
            chunks = [iter[i * chunk_len:(i + 1) * chunk_len] for i in range(self.cores)]
        else:
            chunks = [iter]
        # Fix the total before dispatching so the progress percentage printed
        # by complete() is computed against the final chunk count.
        self.total_processes += len(chunks)
        for chunk in chunks:
            self.data.append(
                self.pool.starmap_async(func, chunk,
                                        callback=self.complete,
                                        error_callback=self.exception))
        for task in self.data:
            try:
                # .get() blocks until the chunk is done and re-raises worker errors.
                task.get()
            except Exception as e:
                print(e.args)
        self.pool.close()
        self.pool.join()

    def exception(self, exception=None):
        """error_callback: report a failed chunk without aborting the rest."""
        print(exception)


class Parallelism_Thread(Parallelism_abs):
    """
    Thread-pool map class. Subclasses must override do_working().
        pl = ParallelSim()
        pl.run(yourIter)
        data = list(data)
        print(data)
    """

    def __init__(self, processes=multiprocessing.cpu_count()):
        """
        :param processes: thread count, defaults to the number of CPUs
        """
        super(Parallelism_Thread, self).__init__(processes)
        self.pool = ThreadPoolExecutor(self.cores)

    def run(self, iter):
        """
        Map self.do_working over iter using concurrent.futures.ThreadPoolExecutor.

        :param iter: iterable of work items passed one-by-one to do_working
        :return: None — results are accumulated via complete()
        """
        if isinstance(iter, list) and self.cores > 1 and len(iter) > self.cores:
            # One chunk per worker; // + 1 so the slices cover the whole list.
            chunk_len = len(iter) // self.cores + 1
            for i in range(self.cores):
                self.data.append(
                    self.pool.map(self.do_working, iter[i * chunk_len:(i + 1) * chunk_len]))
                self.total_processes += 1
        else:
            self.data.append(self.pool.map(self.do_working, iter))
            self.total_processes += 1
        for i in range(self.total_processes):
            # Materializing the map iterator waits for the chunk to finish.
            adata = list(self.data[i])
            print('{} SAVED: {}'.format(len(adata), adata))
            self.complete(adata)

    def do_working(self, code):
        """Process one work item. Must be implemented by subclasses."""
        raise NotImplementedError('Subclasses must implement do_working!')


def df_parallel_apply(data, func, axis=0, method='swifter', n_jobs=10, **kwargs):
    """
    Parallel apply over a DataFrame or Series.

    :param data: dataframe or series
    :param func: function applied to each column or row
    :param axis: 0 (default) applies func to each column; 1 applies it to each row
    :param method: 'swifter' (default, recommended); also supports 'swifter2',
                   'pandarallel', 'joblib' and 'multiprocessing'; any other
                   value falls back to a plain pandas apply
    :param n_jobs: worker count for the pandarallel / joblib backends
    :param kwargs: extra keyword arguments forwarded to func
    :return: the applied result
    """
    start = time.time()

    if method == 'swifter':  # swifter accelerates apply on pandas DataFrame / Series
        import swifter
        res = data.swifter.apply(func, axis=axis, **kwargs)

    elif method == 'swifter2':
        import swifter
        import modin.pandas as pd  # modin needs pandas==1.5.0, which is problematic; not recommended yet
        swifter.register_modin()
        # Import order matters: with swifter imported before modin.pandas,
        # modin has to be registered as above.
        res = data.swifter.apply(func, axis=axis, **kwargs)

    elif method == 'pandarallel':
        from pandarallel import pandarallel
        pandarallel.initialize(progress_bar=False, nb_workers=n_jobs)
        res = data.parallel_apply(func, axis=axis, **kwargs)  # may hang; comment out before later steps

    elif method == 'joblib':
        import pandas as pd
        from joblib import Parallel, delayed
        # Fan out one task per column.
        results = Parallel(n_jobs=n_jobs)(delayed(func)(data[column_name], **kwargs) for column_name in data.columns)
        res = pd.concat(results)

    elif method == 'multiprocessing':
        import pandas as pd
        # NOTE(review): Pool.map only accepts `chunksize` as a keyword, and
        # iterating a DataFrame yields column labels, not columns — this
        # branch likely needs rework before use with kwargs. TODO confirm.
        with multiprocessing.Pool(get_target_processor_num(pct=0.5)) as pool:
            results = pool.map(func, data, **kwargs)
        res = pd.concat(results)

    else:
        # Fall back to a plain pandas apply. (Was `df.apply(...)`, which read
        # a global `df` instead of the `data` parameter — a NameError outside
        # this module's __main__ block.)
        res = data.apply(func, axis=axis, **kwargs)

    cost_time = time.time() - start
    print(f'运用方法{method} 对df进行并行apply耗时{cost_time}')

    return res


if __name__ == '__main__':
    # a = get_total_processor_num()
    # b = get_target_processor_num()
    # multiprocessing_test(processor_num=10)

    # Benchmark the parallel apply helpers on a synthetic dataframe.
    def make_frame():
        import pandas as pd
        from sklearn.datasets import make_classification
        features, _labels = make_classification(n_samples=1000000, n_classes=4, n_features=6,
                                                n_informative=4, random_state=0)
        return pd.DataFrame(features)

    def column_max(series):
        return max(series)

    df = make_frame()
    res0 = df_parallel_apply(df, column_max, axis=0, method='', n_jobs=10)  # same as df.apply(column_max, axis=0)
    res1 = df_parallel_apply(df, column_max, axis=0, method='swifter', n_jobs=10)
    res2 = df_parallel_apply(df, column_max, axis=0, method='swifter2', n_jobs=10)
    # res3 = df_parallel_apply(df, column_max, axis=0, method='pandarallel', n_jobs=10)
    # res4 = df_parallel_apply(df, column_max, axis=0, method='joblib', n_jobs=10)