"""
并行计算测试
"""
import pandas as pd
import time
import os
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
from numba import jit
import logging
import pp
import joblib


# Module-wide logging: timestamped, INFO-level messages on the root handler.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - '
                           '%(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def read_csv_pd(file):
    """Read *file* with pandas and return the 'OC NO' column as a list.

    Rows containing any NaN are dropped first, then only rows whose
    'status' is below 5 are kept.

    :param file: path to a CSV file with at least 'OC NO' and 'status' columns.
    :return: list of 'OC NO' values for the surviving rows.
    """
    frame = pd.read_csv(file).dropna()
    kept = frame.loc[frame['status'] < 5, 'OC NO']
    return kept.tolist()


def read_csv_open(file):
    """Read *file* line by line and return the ids whose status is
    non-empty and below 5.

    The first line is treated as a header and skipped.  Column 0 is the
    id, column 1 the status.

    :param file: path to a comma-separated text file.
    :return: set of id strings.
    """
    ids = set()
    with open(file, encoding='utf8') as handle:
        handle.readline()  # discard the header row
        for row in handle:
            cols = row.split(',')
            status = cols[1]
            if len(status) > 0 and int(status) < 5:
                ids.add(cols[0])
    return ids


def for_circle(files):
    """Benchmark the two readers sequentially over *files* with plain
    for loops and print the aggregate sizes and elapsed times.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    pd_ids = set()
    open_ids = set()
    start = time.time()
    for path in files:
        pd_ids.update(read_csv_pd(file=path))
    pd_done = time.time()
    for path in files:
        open_ids.update(read_csv_open(file=path))
    open_done = time.time()

    print(len(pd_ids), len(open_ids))
    # 2.1082966327667236
    print('read_csv_pd cost: {}'.format(pd_done - start))
    # 0.9119999408721924
    print('read_csv_open cost: {}'.format(open_done - pd_done))


def map_circle(files):
    """Compare consuming ``map(read_csv_open, files)`` with a for loop
    versus a list comprehension, printing the sizes and elapsed times.

    Note: the original carried a ``@git`` decorator — an undefined name
    that raised NameError the moment this module was imported
    (presumably a typo for numba's ``@jit``, which could not compile
    this I/O-bound, set-returning function anyway).  The decorator is
    removed so the module imports cleanly.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    set_id_1 = set()
    set_id_2 = set()
    t1 = time.time()
    for mid_set in map(read_csv_open, files):
        set_id_1.update(mid_set)
    t2 = time.time()
    [set_id_2.update(mid_set) for mid_set in map(read_csv_open, files)]
    t3 = time.time()
    print(len(set_id_1), len(set_id_2))
    # 0.9460015296936035
    print('for cost: {}'.format(t2 - t1))
    # 0.8980069160461426
    print('cost: {}'.format(t3 - t2))


def map_futures(files):
    """Benchmark ``ProcessPoolExecutor.map`` with three workers over
    *files* and print the aggregate size and elapsed time.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    merged = set()
    start = time.time()
    with ProcessPoolExecutor(3) as executor:
        for partial in executor.map(read_csv_open, files):
            merged.update(partial)
    finished = time.time()
    print(len(merged))
    # 1.4520056247711182
    print('map_futures cost: {}'.format(finished - start))


def for_multiprocessing(files):
    """Benchmark ``multiprocessing.Pool.map`` over *files*, then merge
    the per-file result sets and print sizes and elapsed times.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    worker_count = multiprocessing.cpu_count()
    merged = set()
    start = time.time()

    with multiprocessing.Pool(worker_count) as pool:
        chunks = pool.map(read_csv_open, files)
    pool_done = time.time()
    for chunk in chunks:
        merged.update(chunk)
    merge_done = time.time()
    print(len(merged))
    # 1.6059844493865967
    print('for multiprocessing cost: {}'.format(pool_done - start))
    print('dict update cost: {}'.format(merge_done - pool_done))


SENTINEL = 'SENTINEL'


def read_csv_open_test(q, file):
    """Worker variant of read_csv_open: parse *file* and put the id set
    on queue *q* instead of returning it.

    The first line is treated as a header and skipped; ids (column 0)
    with a non-empty status (column 1) below 5 are collected.

    :param q: queue-like object with a ``put`` method.
    :param file: path to a comma-separated text file.
    """
    ids = set()
    with open(file, encoding='utf8') as handle:
        handle.readline()  # discard the header row
        for row in handle:
            cols = row.split(',')
            status = cols[1]
            if len(status) > 0 and int(status) < 5:
                ids.add(cols[0])

    q.put(ids)


def for_multiprocessing_process(files):
    """Benchmark one ``multiprocessing.Process`` per file, collecting the
    per-file id sets through a shared Queue, and print sizes and times.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    result_q = multiprocessing.Queue()
    merged = set()
    start = time.time()
    workers = []
    for path in files:
        logger.info(path)
        proc = multiprocessing.Process(target=read_csv_open_test,
                                       kwargs={'file': path, 'q': result_q})
        workers.append(proc)
        proc.start()

    spawned = time.time()
    # One q.get() per worker; each worker puts exactly one set.
    for _ in workers:
        merged.update(result_q.get())
    drained = time.time()
    print(len(merged))
    # 0.6759865283966064
    print('multiprocessing_process cost: {}'.format(spawned - start))
    # 6.123079061508179
    print('dict update cost: {}'.format(drained - spawned))


def pp_test(files):
    """Sequentially merge read_csv_open results for *files*; shipped to a
    pp worker by pp_server_com.

    :param files: iterable of CSV paths.
    :return: set of all collected ids.
    """
    merged = set()
    for path in files:
        merged.update(read_csv_open(path))
    return merged


def pp_server_com(files):
    """Benchmark pp (Parallel Python): submit one job covering *files*.

    Fix: the original captured ``t2`` immediately after ``submit()``,
    before ``job()`` retrieved (and blocked on) the result, so the
    printed cost (0.0049s) excluded essentially all of the work.  The
    clock now stops only after the result has been fetched.

    :param files: iterable of CSV paths handed to pp_test.
    :return: None; prints the result size and elapsed time.
    """
    ppservers = ()
    job_server = pp.Server(ppservers=ppservers)
    print('start pp with {} workers'.format(job_server.get_ncpus()))

    t1 = time.time()
    job = job_server.submit(pp_test, (files,), (read_csv_open,), ())
    # job() blocks until the worker finishes — time it as part of the run.
    rs = job()
    t2 = time.time()
    print(len(rs))
    print('pp_server_com cost: {}'.format(t2 - t1))


def joblib_com(files):
    """Benchmark ``joblib.Parallel`` (4 workers) fanning read_csv_open
    out over *files*, then merge the results and print size and time.

    Fix: the cost label said 'pp_server_com cost' — copy-pasted from the
    pp benchmark — which mislabels this function's output.

    :param files: iterable of CSV paths.
    :return: None; prints to stdout.
    """
    set_id = set()
    t1 = time.time()
    rs = joblib.Parallel(4)(joblib.delayed(read_csv_open)(file)
                            for file in files)
    t2 = time.time()
    for mid_set in rs:
        set_id.update(mid_set)
    print(len(set_id))
    # 1.2500150203704834
    print('joblib_com cost: {}'.format(t2 - t1))


def run():
    """Collect every file under ./data and run the joblib benchmark.

    The other variants are left commented out so individual benchmarks
    can be toggled; measured timings are recorded inside each function.
    """
    data_dir = './data'
    files = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]

    # Alternative benchmarks:
    # for_circle(files=files)
    # map_circle(files=files)
    # map_futures(files=files)
    # for_multiprocessing(files)
    # for_multiprocessing_process(files)
    # pp_server_com(files)
    joblib_com(files)


# Only benchmark when executed as a script, not on import.
if __name__ == '__main__':
    run()
