import pandas as pd
import numpy as np
import cv2
import pickle
import dask.array as da
import dask.dataframe as dd
import asyncio
import aiofiles

import multiprocessing
from multiprocessing import Pool
import os


async def save_dataframe_async(dataframe, file_path):
    """Asynchronously write a pandas DataFrame to *file_path* as CSV.

    The frame is serialized to a CSV string (index column omitted) up
    front, then written through aiofiles so the event loop is not
    blocked by the file write.
    """
    payload = dataframe.to_csv(index=False)
    async with aiofiles.open(file_path, mode='w') as handle:
        await handle.write(payload)

# async def save_multiple_dataframes(dataframes, file_paths):
#     """
#     异步保存多个 DataFrame 到多个 CSV 文件
#     """
#     # 使用 asyncio.gather 并发执行保存任务
#     tasks = [
#         save_dataframe_async(df, file_path)
#         for df, file_path in zip(dataframes, file_paths)
#     ]
#     # 等待所有任务完成
#     await asyncio.gather(*tasks)

# async def main(mat, name_list):
#     # 创建多个示例 DataFrame
#     dataframes = []
#     for color in mat.color:
#         for gray_idx in range(len(mat.gray_level)):  # 假设第二个维度是灰度级别
#             # 提取对应的二维矩阵并添加到列表中
#             dataframes.append(pd.DataFrame(mat[color][..., gray_idx]))
#     # dataframes = [
#     #     pd.DataFrame(mat['R'][...,0]),
#     #     pd.DataFrame(mat['R'][...,1]),
#     #     pd.DataFrame(mat['G'][...,1]),
#     # ]
#
#     # 异步保存多个 DataFrame
#     await save_multiple_dataframes(dataframes, name_list)

# def _save_csv_asynchronous(mat_color, name_list):
#     asyncio.run(main(mat_color, name_list))


# Multiprocess invocation via the multiprocessing module
def save_task(dataframe, file_path):
    """Worker entry point used by the multiprocessing pool.

    Each worker process spins up its own fresh event loop and drives the
    async CSV writer to completion for a single DataFrame/path pair.
    """
    coroutine = save_dataframe_async(dataframe, file_path)
    asyncio.run(coroutine)

def _multi_csv(mat, name_list, is_multi):
    """Save one CSV per color/gray-level slice using a process pool.

    Builds a DataFrame for every 2-D slice of *mat* (one per color and
    gray level, last axis assumed to index gray levels — TODO confirm)
    and pairs each with the corresponding entry of *name_list*, fanning
    the writes out to ``int(is_multi)`` worker processes.

    NOTE(review): the async writer emits a header row while the
    single-process path in save_all_mat writes header=False — confirm
    whether the two outputs are meant to differ.
    """
    frames = [
        pd.DataFrame(mat[color][..., level_idx])
        for color in mat.color
        for level_idx in range(len(mat.gray_level))
    ]
    multiprocessing.freeze_support()
    with Pool(processes=int(is_multi)) as pool:
        pool.starmap(save_task, zip(frames, name_list))


## Single-threaded save entry point (pickle / CSV / BMP export)
filename_csv_list = []


def save_all_mat(brt, save_path, type, is_multi):
    """Persist the measurement cube *brt* to disk in the requested format.

    Parameters
    ----------
    brt : object
        Must expose ``color`` (iterable of channel keys), ``gray_level``
        (sequence of gray levels) and ``__getitem__`` returning, per
        color, an array whose last axis indexes the gray levels.
    save_path : str
        Target directory; paths are joined with '/' as elsewhere in
        this file.
    type : str
        'MAT' pickles the whole object, 'CSV' writes one CSV per
        color/gray-level slice, 'ROI' writes brightness-normalized BMPs.
        (Parameter name shadows the builtin ``type``; kept unchanged for
        caller compatibility.)
    is_multi : int
        For 'CSV': 1 writes synchronously in this process; >1 delegates
        to a pool of ``is_multi`` worker processes via _multi_csv.
    """
    # Bug fix: reset the module-level accumulator on every call.
    # Previously it grew across calls, so a second 'CSV' run handed
    # stale filenames to _multi_csv that no longer matched the freshly
    # built DataFrames.
    filename_csv_list.clear()
    if type == 'MAT':
        # Was "//brt.pkl" — redundant double separator.
        filename = save_path + "/brt.pkl"
        with open(filename, 'wb') as file:
            pickle.dump(brt, file)
    for color_idx in brt.color:
        for gray_idx in range(len(brt.gray_level)):
            if type == 'CSV':
                filename = save_path + f"/{color_idx}{brt.gray_level[gray_idx]}.csv"
                filename_csv_list.append(filename)
                if is_multi == 1:
                    # Single-process path: cast to float32 and write the
                    # bare matrix (no header, no index).
                    df = pd.DataFrame(brt[color_idx][..., gray_idx].astype(np.float32))
                    df.to_csv(filename, index=False, header=False)
            if type == 'ROI':
                filename = save_path + f"/{color_idx}{brt.gray_level[gray_idx]}.bmp"
                # Scale so the mean brightness lands at 128 before the
                # uint8 cast, then dump one BMP per slice.
                slice_2d = brt[color_idx][..., gray_idx]
                img = (slice_2d * (128 / np.mean(slice_2d))).astype(np.uint8)
                cv2.imwrite(filename, img)
    # Multi-process CSV save: filenames were collected above in the same
    # order that _multi_csv rebuilds the DataFrames.
    if type == 'CSV' and is_multi > 1:
        print('异步保存：' + str(is_multi) + '*processes')
        _multi_csv(brt, filename_csv_list, is_multi)
