import dask.dataframe as dd
import numpy as np
import pandas as pd
import multiprocessing
from multiprocessing import Pool, cpu_count


def read_csv4multiprocess(file, queue):
    """Read one CSV file and hand the resulting DataFrame to *queue*.

    Intended as a multiprocessing worker: the result is communicated
    through the queue instead of a return value.  Note this variant
    lets pandas infer the header row (unlike the header=None readers
    elsewhere in this module).
    """
    queue.put(pd.read_csv(file))

def read_csv_single(path):
    """Sequentially read each CSV file in *path* with no header row.

    Parameters
    ----------
    path : sequence of str
        Paths of the CSV files to read.

    Returns
    -------
    list[pandas.DataFrame]
        One DataFrame per input file, in the same order as *path*.
    """
    # Comprehension replaces the index-based loop over a pre-sized list;
    # order and per-file read options are unchanged.
    return [pd.read_csv(file, header=None) for file in path]

def read_csv_multi(path):
    """Read all CSV files in *path* via dask, then split the concatenated
    rows back into one numpy array per input file.

    dask's read_csv accepts wildcard paths.  The split assumes every
    file contributed an equal share of the total rows.  If a slice
    contains NaN values, only the left half of its columns is kept.
    NOTE(review): presumably the NaN case signals files with fewer
    columns padded by the concatenation — confirm against the data.

    Returns a list of numpy arrays, one per entry in *path*.
    """
    frame = dd.read_csv(path, header=None).compute()
    total_rows, total_cols = frame.shape
    values = frame.to_numpy()
    n_patterns = len(path)
    rows_per_file = total_rows / n_patterns
    chunks = []
    for i in range(n_patterns):
        block = values[int(i * rows_per_file):int((i + 1) * rows_per_file), ...]
        if np.isnan(block).any():
            chunks.append(block[..., 0:int(total_cols / 2)])
        else:
            chunks.append(block)
    return chunks

# def read_csv_multiprocessing(path):
#     # Use a process pool to read the files in parallel.
#     with multiprocessing.Pool() as pool:
#         dataframes = pool.map(read_csv4multiprocess, path)
#
#     # Concatenate all the DataFrames into one.
#     all_data = pd.concat(dataframes, ignore_index=True)
#     return all_data
# NOTE(review): kept disabled — pool.map passes a single argument per call,
# but read_csv4multiprocess expects (file, queue), so this would raise a
# TypeError; read_csv_pool below uses the single-argument pd_read_csv instead.

def pd_read_csv(path):
    """Read a single CSV file with no header row.

    Kept as a top-level function so it is picklable and usable as a
    multiprocessing.Pool.map worker (see read_csv_pool).
    """
    frame = pd.read_csv(path, header=None)
    return frame

def read_csv_pool(path):
    """Read the CSV files in *path* in parallel with a process pool.

    One worker per file, capped at the machine's CPU count.  Returns
    the resulting DataFrames in the same order as *path*.
    """
    n_workers = min(len(path), cpu_count())
    with Pool(processes=n_workers) as pool:
        frames = pool.map(pd_read_csv, path)
    print(f'异步读取：{n_workers}*processes')
    return frames
