# When there are many folders, use a loop instead of copy-pasting lines.
import pandas as pd
import numpy as np
import itertools
import os
# Folders for the 10 years (2005-2014) whose CSVs will be merged.
# Each folder name differs only by year, so generate them instead of
# listing all ten literals by hand.
filePath = [
    rf"F:\极端事件\转CF\cf_csv_historical_{year}01010130-{year}12312230"
    for year in range(2005, 2015)
]

# Map "file_list1".."file_list10" -> that year's file names, ordered by the
# integer prefix before the first underscore (e.g. "12_xxx.csv" -> 12) so the
# i-th entry of every list refers to the same station/grid cell.
file_dict = {}
for year_idx, folder in enumerate(filePath, start=1):
    names = sorted(os.listdir(folder), key=lambda name: int(name.split('_')[0]))
    file_dict[f"file_list{year_idx}"] = names
# print(file_dict)
# For each station index i, read the i-th file from every year's folder,
# stack the ten yearly tables vertically, and write the result out under the
# first year's file name. Assumes every folder holds the same number of files
# in the same sorted order — TODO confirm against the source data.
for i in range(len(file_dict['file_list1'])):
    # One path per year: folder j pairs with key "file_list{j+1}". Driving
    # this by len(filePath) replaces ten copy-pasted append lines and adapts
    # automatically if the number of folders changes.
    file_paths = [
        os.path.join(filePath[j], file_dict[f"file_list{j + 1}"][i])
        for j in range(len(filePath))
    ]
    data_frames = [pd.read_csv(file_path) for file_path in file_paths]
    # Concatenate row-wise (columns aligned by header), years in order.
    merged_data = pd.concat(data_frames)
    merged_data.to_csv(f"F:\\极端事件\\历史CF合并\\{file_dict['file_list1'][i]}", index=False)
    print(i)  # progress indicator