import os
import dask.dataframe as dd
from concurrent.futures import ThreadPoolExecutor
from dask.distributed import Client
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
import warnings
import pandas as pd
import numpy as np
from scipy.optimize import minimize
import os
from openpyxl import load_workbook

# Silence openpyxl's harmless "no default style" warning raised for some xlsx files.
warnings.filterwarnings('ignore', category=UserWarning, message='Workbook contains no default style, apply openpyxl\'s default')


# Digits 1-9 mapped to their Chinese numerals, used in progress messages.
number_map = dict(zip(range(1, 10), ["一", "二", "三", "四", "五", "六", "七", "八", "九"]))
file_num = 1  # running index of the file currently processed (review note: drop before publication)

print("1. 使用openpyxl读取大型Excel文件并手动分块")
file_path = r'D:\\文件\\大四\\毕业论文\\数据'
file_names = ['TRD_Dalyr', 'TRD_Dalyr1', 'TRD_Dalyr2', 'TRD_Dalyr3', 'TRD_Dalyr4', 'TRD_Dalyr5']

# One pandas DataFrame per input file.
dfs = []

for file_name in file_names:
    chinese_num = number_map.get(file_num, "未定义")
    print(f"正在遍历第{chinese_num}个文件")
    file_num += 1
    file_path_full = os.path.join(file_path, f'{file_name}.xlsx')
    # read_only mode streams rows instead of loading the whole sheet into memory.
    wb = load_workbook(file_path_full, read_only=True)
    ws = wb.active

    # Read the header once up front (row 1 holds the column names).
    # The original re-read ws[1] inside every chunk iteration.
    columns = list(next(ws.iter_rows(min_row=1, max_row=1, values_only=True)))

    # Manually read data rows in chunks to bound peak memory.
    chunk_size = 100000  # tune to the available memory
    data_chunks = []
    run_time = 1
    # BUG FIX: start from row 2. The original started at min_row=1, so the
    # header row was ingested as a data row in the first chunk.
    for start in range(2, ws.max_row + 1, chunk_size):
        end = min(start + chunk_size - 1, ws.max_row)
        data_chunk = list(ws.iter_rows(min_row=start, max_row=end, values_only=True))
        if data_chunk:
            data_chunks.append(pd.DataFrame(data_chunk, columns=columns))
        print(f"第{chinese_num}个文件，第{run_time}个数据块")
        run_time += 1

    # BUG FIX: read_only workbooks keep the file handle open until closed explicitly.
    wb.close()

    # Merge this file's chunks into a single DataFrame.
    if data_chunks:
        dfs.append(pd.concat(data_chunks, axis=0))

print("2. 将Pandas DataFrame转换为Dask DataFrame")
if dfs:
    combined_df = pd.concat(dfs, axis=0)
    # Drop rows containing any missing value before modelling.
    cleaned_df = combined_df.dropna()

    # Dask view for any out-of-core processing (npartitions ~ CPU core count).
    # NOTE(review): ddf is never used downstream — the modelling code below
    # works on the pandas frame; confirm whether the Dask conversion is needed.
    ddf = dd.from_pandas(cleaned_df, npartitions=2)

# BUG FIX: the original called an undefined main() here, which raises
# NameError at runtime. Downstream code treats `ddfs` as a pandas DataFrame
# (.copy(), pd.to_datetime, .dt accessors), so bind it to the cleaned pandas
# frame directly.
ddfs = cleaned_df

# Work on a copy and reset the index: the frame was built by concatenating
# chunks and dropping NaN rows, so its index is neither unique nor contiguous.
df = ddfs.copy().reset_index(drop=True)
df['交易日期'] = pd.to_datetime(df['交易日期'])
df['年份'] = df['交易日期'].dt.year
df['月份'] = df['交易日期'].dt.month
df['星期几'] = df['交易日期'].dt.dayofweek

# One-hot encode the security code (dense output so it concatenates cleanly).
encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
security_code_encoded = encoder.fit_transform(df[['证券代码']])

# BUG FIX: build the encoded frame on df's own index. The original used a
# fresh RangeIndex, so pd.concat(axis=1) aligned on mismatched labels whenever
# df's index was not 0..n-1 (e.g. after dropna), producing NaN-padded and
# duplicated rows instead of a column-wise join.
security_code_df = pd.DataFrame(
    security_code_encoded,
    columns=encoder.get_feature_names_out(['证券代码']),
    index=df.index,
)
df = pd.concat([df, security_code_df], axis=1).drop(columns=['证券代码', '交易日期'])

# Hold out 20% of the rows as a fixed test split.
X = df.drop(columns=['考虑现金红利再投资的日个股回报率'])
y = df['考虑现金红利再投资的日个股回报率']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit a baseline random forest and report its R² on the held-out test set.
rf_regressor = RandomForestRegressor(
    n_estimators=100,      # number of trees in the forest
    max_depth=10,          # cap tree depth to limit overfitting
    min_samples_split=5,   # minimum samples required to split an internal node
    random_state=42,
    n_jobs=-1,             # parallelise training across all CPU cores
)
rf_regressor.fit(X_train, y_train)

# Score the fitted model on the test split.
y_pred = rf_regressor.predict(X_test)
r2 = r2_score(y_test, y_pred)
print(f"R² Score: {r2:.4f}")

# Hyper-parameter search space for the forest.
param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [5, 10, 15],
    'min_samples_split': [2, 5, 10],
    'max_features': ['sqrt', 'log2'],
}

# Exhaustive grid search with 5-fold cross-validation, maximising R².
grid_search = GridSearchCV(
    RandomForestRegressor(random_state=42),
    param_grid,
    scoring='r2',
    cv=5,
    n_jobs=-1,
)
grid_search.fit(X_train, y_train)

# Report the winning configuration and its cross-validated score.
print("Best Parameters:", grid_search.best_params_)
print("Best R² Score:", grid_search.best_score_)

# Evaluate the refitted best estimator on the held-out test set.
best_rf = grid_search.best_estimator_
y_pred_best = best_rf.predict(X_test)
print("Best Model R² Score:", r2_score(y_test, y_pred_best))
