import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV

# 1. Load the raw data (GBK-encoded CSV; presumably one row per product/price
#    observation for Hangzhou — verify schema against the source file).
df = pd.read_csv('杭州市.csv', encoding='gbk')

# 2. Date feature engineering
# Month-abbreviation lookup, built once at module level instead of on
# every call inside the parser.
_MONTH_MAP = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
              'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}

def parse_date(date_str):
    """Parse a ``<YY>-<Mon>`` date string (e.g. ``'16-Dec'``) into (year, month).

    The year is returned as written (two-digit, e.g. 16) and the month as
    an int in 1-12; the month abbreviation is case-insensitive and
    surrounding whitespace is tolerated.

    Raises:
        ValueError: with a readable message for any malformed input,
            instead of the cryptic KeyError/AttributeError the naive
            split-and-index approach produced.
    """
    try:
        year_part, month_part = date_str.strip().split('-')
        year = int(year_part)
        month = _MONTH_MAP[month_part.strip().capitalize()]
    except (AttributeError, ValueError, KeyError) as exc:
        raise ValueError(f"unrecognized date string: {date_str!r}") from exc
    return year, month

# Split the purchase-date string (e.g. "16-Dec") into two numeric columns.
# Returning pd.Series from the lambda makes apply() expand the (year, month)
# tuple into separate columns, which the two-column assignment then captures.
df[['年份','月份']] = df['商品采价日期'].apply(lambda x: pd.Series(parse_date(x)))

# 3. Encode the categorical product name as an integer feature.
#    (le is kept around so the mapping can be inverted later if needed.)
le = LabelEncoder()
df['商品名称编码'] = le.fit_transform(df['商品名称'])

# 4. Feature selection and missing-value handling
features = ['商品名称编码', '商品销量', '销售额', '年份', '月份']
X = df[features]
y = df['商品价格']

# Simple imputation for missing *feature* values.
X = X.fillna(0)
# BUG FIX: the target (price) was previously filled with 0 as well, which
# teaches the regressors that rows with a missing price cost nothing and
# biases every model toward zero. Drop those rows from both X and y instead.
valid = y.notna()
X = X[valid]
y = y[valid]

# 5. Hold out 20% of the rows as a fixed-seed test split.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Standardize features for the SVR: fit the scaler on the training split
# only, then apply that same transform to both splits (no test leakage).
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Hyper-parameter grid explored by the SVR search.
param_grid = {
    'C': [0.1, 1, 10, 100],
    'epsilon': [0.01, 0.1, 0.5, 1],
    'kernel': ['rbf', 'linear', 'poly'],
    'gamma': ['scale', 'auto'],
}

# 5-fold cross-validated grid search, scored by (negated) MSE, using all
# CPU cores. GridSearchCV refits the best configuration on the full
# training split by default.
grid_search = GridSearchCV(
    SVR(),
    param_grid,
    cv=5,
    scoring='neg_mean_squared_error',
    n_jobs=-1,
)
grid_search.fit(X_train_scaled, y_train)

print("最优参数：", grid_search.best_params_)

# Evaluate the refitted best estimator on the held-out test set.
best_svr = grid_search.best_estimator_
y_pred = best_svr.predict(X_test_scaled)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"优化后SVR模型评估: MSE={mse:.4f}, R2={r2:.4f}")

# 6. Train and evaluate the three baseline regressors.
def evaluate_model(model, name):
    """Fit *model* on the module-level training split, print and return
    its test-set metrics as an (mse, r2) tuple.

    NOTE(review): relies on the global X_train/X_test/y_train/y_test
    defined earlier in this script.
    """
    predictions = model.fit(X_train, y_train).predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    r2 = r2_score(y_test, predictions)
    print(f"{name} 评估: MSE={mse:.4f}, R2={r2:.4f}")
    return mse, r2

# Candidate models, all with a fixed seed where applicable.
models = [
    (RandomForestRegressor(n_estimators=100, random_state=42), '随机森林'),
    (GradientBoostingRegressor(n_estimators=100, random_state=42), '梯度提升'),
    (SVR(kernel='rbf'), '支持向量回归'),
]

# Collect (name, mse, r2) for every candidate.
results = [(name, *evaluate_model(model, name)) for model, name in models]

# 7. Rank by MSE (ascending) and report the winner.
results.sort(key=lambda entry: entry[1])
print("\n最优模型:")
print(f"{results[0][0]} (MSE={results[0][1]:.4f}, R2={results[0][2]:.4f})")

# 8. Model fusion via equal-weight averaging.
# Re-train each base model first: the tuned SVR on the standardized
# features, the two tree ensembles on the raw features (trees don't
# need scaling). fit() returns the estimator, so it can be chained.
best_svr.fit(X_train_scaled, y_train)
rf = RandomForestRegressor(n_estimators=100, random_state=42).fit(X_train, y_train)
gbdt = GradientBoostingRegressor(n_estimators=100, random_state=42).fit(X_train, y_train)

# Predict with each model on its matching feature representation
# (the SVR must see the scaled test set, the others the raw one).
svr_pred = best_svr.predict(X_test_scaled)
rf_pred = rf.predict(X_test)
gbdt_pred = gbdt.predict(X_test)

# Unweighted average of the three prediction vectors; the weights could
# be tuned if one model clearly dominates.
ensemble_pred = (svr_pred + rf_pred + gbdt_pred) / 3

mse_ensemble = mean_squared_error(y_test, ensemble_pred)
r2_ensemble = r2_score(y_test, ensemble_pred)
print(f"\n融合模型评估: MSE={mse_ensemble:.4f}, R2={r2_ensemble:.4f}") 