from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
import os
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, explained_variance_score, max_error, median_absolute_error

# Load the dataset for one cluster
def load_data(cluster):
    """Read Cluster_<cluster>.csv, drop rows with missing values,
    and split it into a feature matrix and the 'openrank' target."""
    csv_path = os.path.join('data', 'clustered_data_0.4', f'Cluster_{cluster}.csv')
    frame = pd.read_csv(csv_path).dropna()
    features = frame.drop(columns=['openrank', 'Date'])
    target = frame['openrank']
    return features, target

# Data preprocessing
def preprocess_data(X):
    """Scale every feature column of X into the [0, 1] range.

    Note: the fitted scaler is discarded, so this is one-shot scaling
    (no inverse transform available to the caller).
    """
    return MinMaxScaler().fit_transform(X)

# Model training and evaluation
def train_and_evaluate_model(model, X, y):
    """Fit *model* on an 80/20 train/test split and score the held-out part.

    Parameters
    ----------
    model : any estimator with fit/predict (here a grid-searched Pipeline).
    X : array-like feature matrix.
    y : array-like regression target.

    Returns
    -------
    pd.DataFrame with 'Metric' / 'Value' columns covering MSE, RMSE, MAE,
    R-squared, Explained Variance, Max Error and Median Absolute Error.
    """
    # Hold out 20% for testing; fixed seed keeps the split reproducible.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Train the model
    model.fit(X_train, y_train)

    # Predict on the held-out split
    y_pred = model.predict(X_test)

    # Compute evaluation metrics. RMSE is derived from MSE directly:
    # `mean_squared_error(..., squared=False)` was deprecated in
    # scikit-learn 1.4 and removed in 1.6, and taking the square root
    # of the already-computed MSE avoids a second metric pass anyway.
    mse = mean_squared_error(y_test, y_pred)
    rmse = mse ** 0.5
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    evs = explained_variance_score(y_test, y_pred)
    max_err = max_error(y_test, y_pred)
    med_abs_err = median_absolute_error(y_test, y_pred)

    # Collect the metrics into a tidy DataFrame
    result = pd.DataFrame({'Metric': ['MSE', 'RMSE', 'MAE', 'R-squared', 'Explained Variance', 'Max Error', 'Median Absolute Error'],
                           'Value': [mse, rmse, mae, r2, evs, max_err, med_abs_err]})
    return result

# Main entry point
def main():
    """Grid-search several regressors per cluster and save all metrics to CSV."""
    # Create the results folder (no error if it already exists)
    os.makedirs('./result', exist_ok=True)

    # Cluster labels to process
    clusters = [0, 1, 2]

    results = []  # per-model, per-cluster metric DataFrames

    for cluster in clusters:
        # Load the raw (unscaled) data for this cluster
        X, y = load_data(cluster)

        # Build the pipeline. Scaling is done INSIDE the pipeline so that
        # GridSearchCV re-fits the scaler on each training fold. Pre-scaling
        # the whole dataset up front (as well as scaling it twice) would leak
        # test-fold statistics into training, so no manual scaling here.
        pipeline = Pipeline([
            ('scaler', MinMaxScaler()),               # per-fold feature scaling
            ('model', RandomForestRegressor())        # placeholder; swapped per model below
        ])

        # Candidate models and their hyper-parameter grids. The 'model__'
        # prefix routes each parameter to the pipeline's 'model' step.
        models = [
            ('Linear Regression', LinearRegression(), {}),
            ('Decision Tree', DecisionTreeRegressor(), {'model__max_depth': range(1, 10)}),
            ('Support Vector Machine', SVR(), {'model__kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'model__C': [1, 10]}),
            ('K-Nearest Neighbors', KNeighborsRegressor(), {'model__n_neighbors': range(1, 10)}),
            ('Random Forest', RandomForestRegressor(), {'model__n_estimators': [100, 200, 300]}),
            ('Gradient Boosting', GradientBoostingRegressor(), {'model__learning_rate': [0.1, 0.01], 'model__n_estimators': [100, 200]}),
            ('AdaBoost', AdaBoostRegressor(), {'model__learning_rate': [0.1, 0.01], 'model__n_estimators': [100, 200]})
        ]

        for name, model, params in models:
            pipeline.set_params(model=model)

            # Five-fold cross-validated grid search, minimizing MSE
            grid_search = GridSearchCV(pipeline, params, scoring='neg_mean_squared_error',
                                       cv=KFold(n_splits=5, shuffle=True, random_state=42))
            grid_search.fit(X, y)

            # Re-evaluate the best pipeline on a fresh train/test split
            best_model = grid_search.best_estimator_
            result = train_and_evaluate_model(best_model, X, y)
            result['Model'] = name
            result['Cluster'] = f'Cluster {cluster}'

            # Append this model's metrics
            results.append(result)

    # Combine all per-model results into one DataFrame
    result_df = pd.concat(results)

    # Persist as CSV
    result_path = "./result/all_results_cluster_0.4_v2.csv"
    result_df.to_csv(result_path, index=False)

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
