# -*- coding: utf-8 -*-
"""
自动化切换数据集，实现长时间自动化计算
"""

import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn.metrics import mean_squared_error, median_absolute_error, mean_squared_log_error, mean_absolute_error, explained_variance_score
from sklearn.linear_model import LinearRegression as LR, Ridge, Lasso
from sklearn.tree import DecisionTreeRegressor as DTR
from sklearn.ensemble import (
    RandomForestRegressor as RFR,
    GradientBoostingRegressor as GBR,
    AdaBoostRegressor as ADBR,
    ExtraTreesRegressor as ETR,
    BaggingRegressor as BR,
)
from sklearn.neural_network import MLPRegressor
from sklearn import svm
import pickle
import shutil

# Optional dependencies: probe whether lightgbm and xgboost are importable.
# A missing package is replaced with None and skipped by the code below.
try:
    from lightgbm import LGBMRegressor as LGBR
except ImportError:
    LGBR = None
    print("Warning: lightgbm is not installed. Skipping LGBMRegressor.")

try:
    from xgboost import XGBRegressor as XGBR
except ImportError:
    XGBR = None
    print("Warning: xgboost is not installed. Skipping XGBRegressor.")

# Monkey-patch to work around a compatibility issue between sklearn and
# some third-party estimators (newer sklearn calls __sklearn_tags__ on
# every estimator).
def sklearn_tags_patch(self):
    # NOTE(review): returns a plain empty dict; sklearn >= 1.6 expects a
    # Tags dataclass from __sklearn_tags__, so this shim may still be
    # insufficient for some sklearn utilities — confirm against the
    # installed sklearn version.
    return {}

if LGBR:
    LGBR.__sklearn_tags__ = sklearn_tags_patch
if XGBR:
    XGBR.__sklearn_tags__ = sklearn_tags_patch

# Global parameters
random_state_forall = 420  # shared RNG seed so the train/test split is reproducible

# Model short-names, in the exact order the estimators are built in Core():
# the eleven base estimators first, then the optional boosters (lgb, xgb)
# appended at the end.  The previous list was ordered differently from the
# model list (and contained 'cbr' with no matching CatBoost model), so the
# positional zip used for the accuracy table index and the pickle filenames
# mislabeled most models; this ordering restores the 1:1 name->model mapping.
name_of_models = ['lr', 'ridge', 'adb', 'bpnn',
                  'etr', 'rfr', 'svr', 'lasso',
                  'dtr', 'gbr', 'br', 'lgb', 'xgb']


# 核心函数
# Core function
def Core(data):
    """Train a battery of regression models on one dataset and score them.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw dataset.  Column 0 is dropped (identifier); after that, the
        first column is the regression target and the rest are features.

    Returns
    -------
    tuple
        ``(acc_df, trained_models)`` where ``acc_df`` is a DataFrame of
        evaluation metrics indexed by model short-name, and
        ``trained_models`` is the list of fitted estimators in the same
        order as ``acc_df``'s index.

    Raises
    ------
    ValueError
        If the test split contains fewer than two samples (R^2 needs at
        least two points).
    """
    try:
        # Preprocessing: drop the identifier column, de-duplicate rows.
        data = data.iloc[:, 1:]
        data = data.drop_duplicates().reset_index(drop=True)
        x = data.iloc[:, 1:]
        y = data.iloc[:, 0]

        # Min-max normalization.  NOTE(review): the scaler is fitted on the
        # whole dataset BEFORE the train/test split, so test-set statistics
        # leak into the scaling; kept as-is to preserve existing results.
        x = (x - x.min()) / (x.max() - x.min())
        x = x.dropna(axis=1, how='all')  # constant columns become all-NaN above
        x = x.fillna(0)                  # remaining NaNs -> 0

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(
            x, y, test_size=0.3, random_state=random_state_forall)

        # Guard: R^2 is undefined for fewer than two test samples.
        if len(Ytest) < 2:
            raise ValueError("The test set must contain at least two samples for R^2 calculation.")

        # Build (short-name, estimator) pairs together so names can never
        # drift out of sync with the model list (the old code zipped two
        # independently-ordered lists and mislabeled models); optional
        # boosters go last.
        model_specs = [
            ('lr', LR()), ('ridge', Ridge()), ('adb', ADBR()),
            ('bpnn', MLPRegressor()), ('etr', ETR()), ('rfr', RFR()),
            ('svr', svm.SVR()), ('lasso', Lasso()), ('dtr', DTR()),
            ('gbr', GBR()), ('br', BR()),
        ]
        if LGBR:
            model_specs.append(('lgb', LGBR()))
        if XGBR:
            model_specs.append(('xgb', XGBR()))

        model_names = [name for name, _ in model_specs]
        trained_models = [model.fit(Xtrain, Ytrain) for _, model in model_specs]

        # Evaluation metrics, one row per model.
        evaluations = ['R2', 'RMSE', 'MSLE', 'MEDAE', 'MAE', 'EVS']
        acc_data = []
        for model in trained_models:
            y_pred = model.predict(Xtest)
            acc_data.append([
                model.score(Xtest, Ytest),                        # R2
                mean_squared_error(Ytest, y_pred) ** 0.5,         # RMSE
                # abs() keeps MSLE defined when targets/predictions go negative
                mean_squared_log_error(abs(Ytest), abs(y_pred)),  # MSLE
                median_absolute_error(Ytest, y_pred),             # MEDAE
                mean_absolute_error(Ytest, y_pred),               # MAE
                explained_variance_score(Ytest, y_pred),          # EVS
            ])

        acc_df = pd.DataFrame(acc_data, index=model_names,
                              columns=evaluations, dtype=float)
        return acc_df, trained_models
    except Exception as e:
        print(f"Error in Core function: {e}")
        raise


# Main program: iterate over every CSV dataset, run the model battery, and
# persist the metrics table plus the pickled fitted models.
dataset_folder = "datasets"
output_folder = "results"

os.makedirs(output_folder, exist_ok=True)

for filename in os.listdir(dataset_folder):
    if not filename.endswith(".csv"):
        continue
    dataset_name = os.path.splitext(filename)[0]
    dataset_path = os.path.join(dataset_folder, filename)
    output_dataset_folder = os.path.join(output_folder, dataset_name)
    os.makedirs(output_dataset_folder, exist_ok=True)

    try:
        # Datasets are stored with a Chinese legacy encoding.
        data = pd.read_csv(dataset_path, encoding='gb2312')
        acc_df, models = Core(data)

        # Persist the model evaluation metrics.
        acc_csv_filename = os.path.join(output_dataset_folder, "modelAcc.csv")
        acc_df.to_csv(acc_csv_filename)

        # Persist each fitted model.  Names come from acc_df's index so the
        # pickle filenames always match the rows of modelAcc.csv, instead of
        # re-zipping against the module-level name list (which can drift out
        # of sync with the model order).
        save_directory = os.path.join(output_dataset_folder, "models")
        os.makedirs(save_directory, exist_ok=True)

        for name, model in zip(acc_df.index, models):
            model_filename = os.path.join(save_directory, f"model_{name}.pickle")
            with open(model_filename, 'wb') as model_file:
                pickle.dump(model, model_file)

        print(f"模型保存完成：{dataset_name}")

    except Exception as e:
        print(f"Error processing dataset {dataset_name}: {e}")
        continue