import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error
import holidays
from datetime import datetime
"""
数据准备
    自动处理时间特征（小时、周末、节假日）
    添加模拟天气数据（实际应用需接入真实API）
    15分钟粒度重采样
    数据归一化处理
模型架构：
    三路特征提取：
        1D CNN捕获局部模式
        LSTM处理时序依赖
        Transformer建模长期关系
    注意力融合层整合不同特征
    Huber损失函数增强鲁棒性
训练优化：
    学习率调度与梯度裁剪
    早停机制（通过最佳模型保存实现）
    混合精度训练支持（需CUDA环境）
可视化功能：
    预测结果与置信区间展示
    关键评估指标输出（MAE, RMSE）
"""

# Configuration parameters
class Config:
    # NOTE(review): seq_length/pred_length are counts of 15-minute steps after
    # resampling. 168 steps is ~42 hours, not "the past 7 days" as the original
    # comment claimed (7 days of 15-min data would be 672 steps) — confirm intent.
    data_path = "electricity_prices.csv"
    seq_length = 168  # look-back window length, in 15-minute steps
    pred_length = 96  # forecast horizon: 96 x 15 min = 24 hours
    features = ['price', 'temp', 'humidity', 'hour_sin', 'hour_cos', 'weekend']
    test_days = 7  # last 7 days held out as the test set
    batch_size = 64
    epochs = 200
    lr = 0.0001
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Data preprocessing
class DataProcessor:
    """Load the raw price CSV, resample it to a 15-minute grid, engineer
    time/weather features, and min-max scale the model inputs.

    Exposes ``scaler`` so downstream code can inverse-transform predictions.
    """

    def __init__(self, config):
        self.config = config
        self.scaler = MinMaxScaler()

    def load_and_process(self):
        """Return ``(df, scaled_data)``: the full feature frame and
        ``df[config.features]`` scaled to [0, 1]."""
        # Load raw data; 'datetime' column becomes the index
        df = pd.read_csv(self.config.data_path, parse_dates=['datetime'], index_col='datetime')

        # Resample to 15-minute frequency ('15min' — the '15T' alias is
        # deprecated in pandas >= 2.2), forward-filling onto the finer grid
        df = df.resample('15min').ffill()

        # Feature engineering
        df = self._add_time_features(df)
        df = self._add_external_features(df)

        # Fill any remaining gaps (interior, then leading edge)
        df = df.ffill().bfill()

        # Scale only the columns the model consumes
        scaled_data = self.scaler.fit_transform(df[self.config.features])

        return df, scaled_data

    def _add_time_features(self, df):
        """Add cyclic hour encodings, a weekend flag, and a CN-holiday flag."""
        df['hour'] = df.index.hour
        df['hour_sin'] = np.sin(2 * np.pi * df['hour'] / 24)
        df['hour_cos'] = np.cos(2 * np.pi * df['hour'] / 24)
        df['weekday'] = df.index.weekday
        df['weekend'] = (df['weekday'] >= 5).astype(int)

        # Holiday flag. The original attempt was dead code and buggy:
        # DatetimeIndex.date is a numpy array with no .map. Look up each
        # unique calendar date once, then broadcast to all 15-min rows.
        country_holidays = holidays.country_holidays('CN')
        dates = pd.Series(df.index.date, index=df.index)
        lookup = {d: int(d in country_holidays) for d in dates.unique()}
        df['holiday'] = dates.map(lookup)

        return df

    def _add_external_features(self, df):
        # Example weather data only — fetch from a real source in production.
        # Seed fixed so the simulated columns are reproducible.
        np.random.seed(42)
        df['temp'] = np.random.normal(20, 5, len(df))  # temperature
        df['humidity'] = np.random.uniform(40, 80, len(df))  # humidity
        return df


# Sliding-window dataset over the scaled feature matrix
class PriceDataset(Dataset):
    """Yield ``(x, y)`` pairs: ``x`` is a ``seq_len``-step window of all
    features, ``y`` the next ``pred_len`` values of the price column
    (assumed to be column 0 of ``data``).
    """

    def __init__(self, data, seq_len, pred_len):
        self.data = data
        self.seq_len = seq_len
        self.pred_len = pred_len

    def __len__(self):
        # +1 so the final fully-valid window is included — the original
        # formula had an off-by-one that silently dropped the last sample.
        return len(self.data) - self.seq_len - self.pred_len + 1

    def __getitem__(self, idx):
        x = self.data[idx:idx + self.seq_len]
        # Target: the price column only, over the forecast horizon
        y = self.data[idx + self.seq_len:idx + self.seq_len + self.pred_len, 0]
        return torch.FloatTensor(x), torch.FloatTensor(y)


# Hybrid deep-learning model
class HybridForecaster(nn.Module):
    """Three-branch forecaster: 1D CNN (local patterns), LSTM (sequential
    dependencies) and a Transformer encoder layer (long-range relations),
    fused by multi-head self-attention and a dense forecast head.

    Fixes vs. the original implementation:
      * ``nn.TransformerEncoderLayer(d_model=input_size, nhead=4)`` raised at
        construction whenever ``input_size`` was not divisible by ``nhead``
        (e.g. the 6 features used here); the input is now projected to 64 dims.
      * ``MaxPool1d(2)`` halved the CNN branch's time axis, so the three
        branch outputs could never be concatenated; pooling is removed so
        every branch keeps ``seq_len`` steps.
      * Branch widths (128 + 64 + 64) now sum to exactly the 256-dim
        embedding the fusion attention expects, removing the shape-mangling
        ``view(batch, -1, 256)``.
    """

    def __init__(self, input_size, output_steps):
        super().__init__()

        # CNN branch: local patterns; padding=1 keeps seq_len unchanged
        self.conv_branch = nn.Sequential(
            nn.Conv1d(input_size, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(64, 128, kernel_size=3, padding=1),
            nn.ReLU()
        )

        # LSTM branch: sequential dependencies
        self.lstm_branch = nn.LSTM(input_size, 64, batch_first=True)

        # Transformer branch: project features to a width divisible by nhead
        self.trans_proj = nn.Linear(input_size, 64)
        self.transformer = nn.TransformerEncoderLayer(
            d_model=64, nhead=4, batch_first=True)

        # Fusion: self-attention over time on the concatenated features
        self.attention = nn.MultiheadAttention(
            embed_dim=256, num_heads=4, batch_first=True)

        # Forecast head: pooled 256-dim representation -> output_steps values
        self.forecast_head = nn.Sequential(
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, output_steps)
        )

    def forward(self, x):
        # x: (batch, seq_len, input_size)
        conv_out = self.conv_branch(x.permute(0, 2, 1)).permute(0, 2, 1)  # (B, T, 128)
        lstm_out, _ = self.lstm_branch(x)                                 # (B, T, 64)
        trans_out = self.transformer(self.trans_proj(x))                  # (B, T, 64)

        # Concatenate along the feature axis: (B, T, 256)
        combined = torch.cat([conv_out, lstm_out, trans_out], dim=-1)

        # Attention fusion, then mean-pool over the time axis
        attn_out, _ = self.attention(combined, combined, combined)
        pooled = attn_out.mean(dim=1)

        return self.forecast_head(pooled)


# Trainer
class ForecastTrainer:
    """Train/evaluate/predict wrapper for a forecasting model.

    Uses AdamW with Huber loss, gradient clipping at 1.0, a
    ReduceLROnPlateau schedule driven by the validation loss, and
    checkpoints the best model to 'best_model.pth'.
    """

    def __init__(self, model, config, scaler):
        self.model = model.to(config.device)
        self.config = config
        self.scaler = scaler
        self.optimizer = optim.AdamW(model.parameters(), lr=config.lr)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min', patience=5)
        self.loss_fn = nn.HuberLoss()

    def _train_one_epoch(self, train_loader):
        # One optimization pass over train_loader; returns the summed batch loss.
        self.model.train()
        running = 0.0
        for batch_x, batch_y in train_loader:
            batch_x = batch_x.to(self.config.device)
            batch_y = batch_y.to(self.config.device)

            self.optimizer.zero_grad()
            loss = self.loss_fn(self.model(batch_x), batch_y)
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            self.optimizer.step()

            running += loss.item()
        return running

    def train(self, train_loader, val_loader):
        """Run the full training loop, checkpointing on best validation loss."""
        best_loss = float('inf')
        for epoch in range(self.config.epochs):
            train_loss = self._train_one_epoch(train_loader)

            # Validate and let the scheduler react to the validation loss
            val_loss = self.evaluate(val_loader)
            self.scheduler.step(val_loss)

            # Keep only the best-performing weights on disk
            if val_loss < best_loss:
                best_loss = val_loss
                torch.save(self.model.state_dict(), 'best_model.pth')

            print(
                f"Epoch {epoch + 1}/{self.config.epochs} | Train Loss: {train_loss / len(train_loader):.4f} | Val Loss: {val_loss:.4f}")

    def evaluate(self, loader):
        """Return the mean Huber loss over ``loader`` without gradient updates."""
        self.model.eval()
        batch_losses = []
        with torch.no_grad():
            for batch_x, batch_y in loader:
                batch_x = batch_x.to(self.config.device)
                batch_y = batch_y.to(self.config.device)
                batch_losses.append(self.loss_fn(self.model(batch_x), batch_y).item())
        return sum(batch_losses) / len(loader)

    def predict(self, loader):
        """Return ``(predictions, targets)`` as concatenated numpy arrays."""
        self.model.eval()
        pred_chunks, true_chunks = [], []
        with torch.no_grad():
            for batch_x, batch_y in loader:
                out = self.model(batch_x.to(self.config.device))
                pred_chunks.append(out.cpu().numpy())
                true_chunks.append(batch_y.numpy())
        return np.concatenate(pred_chunks), np.concatenate(true_chunks)


# Visualization helper
def plot_results(true, pred, title='Electricity Price Forecast'):
    """Plot actual vs. predicted prices with a +/-10% shaded band around
    the prediction."""
    plt.figure(figsize=(16, 8))
    plt.plot(true, label='Actual Price')
    plt.plot(pred, label='Predicted Price', alpha=0.7)
    band = 0.1 * np.abs(pred)
    steps = range(len(pred))
    plt.fill_between(steps, pred - band, pred + band,
                     alpha=0.2, color='orange')
    plt.title(title)
    plt.xlabel('Time Steps (15-min)')
    plt.ylabel('Normalized Price')
    plt.legend()
    plt.grid(True)
    plt.show()


# Entry point
def main():
    """End-to-end pipeline: load data, train, evaluate, and visualize."""
    cfg = Config()

    # Data preparation
    processor = DataProcessor(cfg)
    raw_df, scaled_data = processor.load_and_process()

    # Build the sliding-window dataset
    dataset = PriceDataset(scaled_data, cfg.seq_length, cfg.pred_length)

    # Chronological train/test split. The original random_split leaked
    # future windows into the training set; for time series the test set
    # must be the final contiguous stretch.
    test_size = cfg.test_days * 24 * 4  # 96 fifteen-minute steps per day
    split = len(dataset) - test_size
    train_data = torch.utils.data.Subset(dataset, range(split))
    test_data = torch.utils.data.Subset(dataset, range(split, len(dataset)))

    train_loader = DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=cfg.batch_size, shuffle=False)

    # Initialize the model
    model = HybridForecaster(input_size=len(cfg.features), output_steps=cfg.pred_length)

    # Train (test set doubles as the validation set here)
    trainer = ForecastTrainer(model, cfg, processor.scaler)
    trainer.train(train_loader, test_loader)

    # Reload the best checkpoint; map_location lets a CPU-only host load
    # weights that were saved on GPU
    model.load_state_dict(torch.load('best_model.pth', map_location=cfg.device))
    preds, trues = trainer.predict(test_loader)

    def inverse_transform(data):
        # Undo MinMax scaling for the price column only. The scaler was fit
        # on all feature columns, so place the flattened price values in
        # column 0 of a zero matrix and invert. (The original sized the
        # matrix by data.shape[0] rows, which crashes for pred_length > 1.)
        flat = data.ravel()
        dummy = np.zeros((flat.shape[0], len(cfg.features)))
        dummy[:, 0] = flat
        return processor.scaler.inverse_transform(dummy)[:, 0]

    true_prices = inverse_transform(trues)
    pred_prices = inverse_transform(preds)

    # Evaluation metrics
    mae = mean_absolute_error(true_prices, pred_prices)
    rmse = np.sqrt(mean_squared_error(true_prices, pred_prices))
    print(f"MAE: {mae:.2f}, RMSE: {rmse:.2f}")

    # Visualize the last day's forecast (96 fifteen-minute steps)
    plot_results(true_prices[-96:], pred_prices[-96:], "24-Hour Price Forecast")


if __name__ == "__main__":
    main()