import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
import pickle
import warnings
# 新增：导入进度条库
from tqdm import tqdm

warnings.filterwarnings("ignore")


# -------------------------- 内置 Kronos 核心模块（不变）--------------------------
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (standard transformer recipe).

    Adds deterministic position information to embeddings laid out as
    (seq_len, batch, d_model), then applies dropout.
    """

    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Channel frequencies follow 1 / 10000^(2i/d_model) for even index i.
        freqs = torch.exp(-np.log(10000.0) / d_model * torch.arange(0, d_model, 2))
        positions = torch.arange(max_len).unsqueeze(1)
        angles = positions * freqs
        encoding = torch.zeros(max_len, 1, d_model)
        encoding[:, 0, 0::2] = torch.sin(angles)
        encoding[:, 0, 1::2] = torch.cos(angles)
        # Registered as a buffer: moves with .to(device), excluded from gradients.
        self.register_buffer('pe', encoding)

    def forward(self, x):
        """x: (seq_len, batch_size, d_model) -> same shape, positions added."""
        return self.dropout(x + self.pe[:x.size(0)])


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with residual add + LayerNorm.

    Convention: positions where ``mask == 0`` are blocked (filled with -1e9
    before the softmax).
    """

    def __init__(self, d_model, n_heads, dropout=0.1):
        super().__init__()
        assert d_model % n_heads == 0, "d_model must be divisible by n_heads"
        self.d_k = d_model // n_heads
        self.n_heads = n_heads
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, q, k, v, mask=None):
        """q/k/v: (batch, seq, d_model). Returns (output, attention weights)."""
        shortcut = q
        n_batch = q.size(0)

        def split_heads(proj, x):
            # (batch, seq, d_model) -> (batch, heads, seq, d_k)
            return proj(x).view(n_batch, -1, self.n_heads, self.d_k).transpose(1, 2)

        heads_q = split_heads(self.w_q, q)
        heads_k = split_heads(self.w_k, k)
        heads_v = split_heads(self.w_v, v)

        # Scaled dot-product attention.
        scores = heads_q @ heads_k.transpose(-2, -1) / np.sqrt(self.d_k)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = self.dropout(F.softmax(scores, dim=-1))
        context = attn @ heads_v

        # Merge heads, project back to d_model, then residual + LayerNorm.
        merged = context.transpose(1, 2).contiguous().view(n_batch, -1, self.n_heads * self.d_k)
        out = self.dropout(self.w_o(merged))
        out = self.layer_norm(out + shortcut)
        return out, attn


class FeedForwardNetwork(nn.Module):
    """Position-wise feed-forward block with residual add + LayerNorm."""

    def __init__(self, d_model, d_ff, activation='gelu', dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.activation = self._get_activation(activation)

    def _get_activation(self, activation):
        """Resolve an activation name to its functional form."""
        table = {'relu': F.relu, 'gelu': F.gelu, 'swish': F.silu}
        if activation not in table:
            raise ValueError(f"Unsupported activation: {activation}")
        return table[activation]

    def forward(self, x):
        shortcut = x
        # expand -> activate -> drop -> project -> drop
        hidden = self.dropout(self.activation(self.w_1(x)))
        projected = self.dropout(self.w_2(hidden))
        # Residual connection followed by layer normalization.
        return self.layer_norm(projected + shortcut)


class EncoderLayer(nn.Module):
    """One transformer encoder layer: self-attention then a feed-forward block."""

    def __init__(self, d_model, n_heads, d_ff, activation='gelu', dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.ffn = FeedForwardNetwork(d_model, d_ff, activation, dropout)

    def forward(self, x, mask=None):
        """Returns (encoded output, self-attention weights)."""
        attended, weights = self.self_attn(x, x, x, mask)
        return self.ffn(attended), weights


class DecoderLayer(nn.Module):
    """One transformer decoder layer: masked self-attention, cross-attention
    over the encoder output, then a feed-forward block."""

    def __init__(self, d_model, n_heads, d_ff, activation='gelu', dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.cross_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.ffn = FeedForwardNetwork(d_model, d_ff, activation, dropout)

    def forward(self, x, enc_output, tgt_mask=None, src_mask=None):
        """Returns (decoded output, self-attn weights, cross-attn weights)."""
        # Masked self-attention over the target sequence.
        x, self_weights = self.self_attn(x, x, x, tgt_mask)
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        x, cross_weights = self.cross_attn(x, enc_output, enc_output, src_mask)
        return self.ffn(x), self_weights, cross_weights


class Kronos(nn.Module):
    """Complete Kronos encoder-decoder forecaster (self-contained implementation).

    Input:  (batch, history_len, input_dim) feature windows.
    Output: (batch, pred_len, 2) predictions for electricity volume and fee.
    """

    def __init__(self, input_dim, history_len, pred_len, n_layers=3, d_model=64,
                 n_heads=8, d_ff=256, dropout=0.1, activation='gelu', use_norm=True, device='cpu'):
        super().__init__()
        self.input_dim = input_dim
        self.history_len = history_len
        self.pred_len = pred_len
        self.d_model = d_model
        self.device = device

        # Input projection: raw feature dim -> d_model.
        self.input_projection = nn.Linear(input_dim, d_model)
        self.use_norm = use_norm
        if use_norm:
            self.input_norm = nn.LayerNorm(d_model)

        # Positional encoding (sized to cover history + prediction horizon).
        self.positional_encoding = PositionalEncoding(d_model, max_len=history_len + pred_len, dropout=dropout)

        # Encoder stack.
        self.encoder_layers = nn.ModuleList([
            EncoderLayer(d_model, n_heads, d_ff, activation, dropout)
            for _ in range(n_layers)
        ])

        # Decoder stack.
        self.decoder_layers = nn.ModuleList([
            DecoderLayer(d_model, n_heads, d_ff, activation, dropout)
            for _ in range(n_layers)
        ])

        # Output projection: d_model -> 2 targets (electricity volume, fee).
        self.output_projection = nn.Linear(d_model, 2)

        # Initialize weights.
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize all weight matrices (1-D params/biases keep defaults)."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def generate_target_mask(self, seq_len):
        """Build a causal attention mask: 1 = may attend, 0 = future (blocked).

        Fix: the previous version returned an *additive* 0/-inf mask, but
        MultiHeadAttention applies ``scores.masked_fill(mask == 0, -1e9)``.
        With the additive mask, ``mask == 0`` matched the ALLOWED (0.0)
        entries, so causal masking was inverted: past positions were blocked
        and future positions were visible. A lower-triangular 0/1 mask
        matches the attention module's convention.
        """
        return torch.tril(torch.ones(seq_len, seq_len, device=self.device))

    def forward(self, x):
        """
        Forward pass.
        :param x: input sequence (batch_size, history_len, input_dim)
        :return: predicted sequence (batch_size, pred_len, 2)
        """
        # 1. Input projection (+ optional normalization).
        enc_input = self.input_projection(x)  # (batch, history_len, d_model)
        if self.use_norm:
            enc_input = self.input_norm(enc_input)

        # 2. Positional encoding (the PE module expects seq-first layout).
        enc_input = self.positional_encoding(enc_input.transpose(0, 1)).transpose(0, 1)

        # 3. Encoder stack (attention weights are not needed downstream).
        enc_output = enc_input
        for enc_layer in self.encoder_layers:
            enc_output, _ = enc_layer(enc_output)

        # 4. Decoder input: the last encoder time step repeated pred_len times.
        dec_input = enc_output[:, -1:, :].repeat(1, self.pred_len, 1)  # (batch, pred_len, d_model)
        dec_input = self.positional_encoding(dec_input.transpose(0, 1)).transpose(0, 1)

        # 5. Causal mask over the prediction horizon.
        tgt_mask = self.generate_target_mask(self.pred_len)  # (pred_len, pred_len)

        # 6. Decoder stack (masked self-attention + cross-attention with encoder).
        dec_output = dec_input
        for dec_layer in self.decoder_layers:
            dec_output, _, _ = dec_layer(
                dec_output, enc_output, tgt_mask=tgt_mask
            )

        # 7. Project to the 2 targets (electricity volume and fee).
        return self.output_projection(dec_output)  # (batch, pred_len, 2)


# -------------------------- Training configuration --------------------------
# Data configuration
CSV_PATH = "final_db_format_train_data.csv"  # path to the generated training-data CSV
HISTORY_LEN = 336  # history window length (336 * 15 min = 84 h = 3.5 days)
PRED_LEN = 96  # prediction window length (96 * 15 min = 24 h, matches the API output)
TARGET_COLS = ["electricity_num", "electricity_fee"]  # prediction targets: volume, fee

# Model configuration (must match kronos_wrapper.py in the API)
INPUT_DIM = 12  # input feature dimension (see feature_cols below, 12 features total)
N_LAYERS = 3  # number of encoder/decoder layers
D_MODEL = 64  # model hidden dimension
N_HEADS = 8  # number of attention heads
D_FF = 256  # feed-forward network dimension
DROPOUT = 0.1  # dropout rate
ACTIVATION = "gelu"  # activation function

# Training configuration
EPOCHS = 50  # number of training epochs
BATCH_SIZE = 32  # batch size
LEARNING_RATE = 1e-4  # learning rate
WEIGHT_DECAY = 1e-5  # L2 regularization coefficient
PATIENCE = 8  # early-stopping patience (stop after 8 epochs without val-loss improvement)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU

# Save paths
MODEL_SAVE_PATH = "models/kronos_electric_best.pth"  # model weights save path
SCALER_SAVE_PATH = "models/scaler.pkl"  # fitted scaler save path
LOG_SAVE_PATH = "models/train_log.txt"  # training-log save path


# -------------------------- 工具函数（不变）--------------------------
def get_holiday_flag(date: pd.Timestamp) -> int:
    """Return 1 if *date* is a weekend or legal holiday, else 0 (matches data generation)."""
    # Saturday (5) and Sunday (6) always count as holidays.
    if date.weekday() >= 5:
        return 1
    # Fixed (month, day) pairs treated as legal holidays.
    legal_holidays = {
        (1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
        (4, 5), (4, 6), (4, 7), (5, 1), (5, 2), (5, 3), (6, 10), (6, 11), (6, 12),
        (9, 15), (9, 16), (9, 17), (10, 1), (10, 2), (10, 3), (10, 4), (10, 5), (10, 6), (10, 7)
    }
    return 1 if (date.month, date.day) in legal_holidays else 0


def create_sequences(data: np.ndarray, history_len: int, pred_len: int,
                     n_targets: int = 2) -> tuple:
    """
    Build sliding-window training sequences.

    Generalized: the number of target columns is now a parameter instead of a
    hard-coded ``2`` (default keeps the original behavior: volume + fee).

    :param data: normalized feature matrix (n_samples, n_features); the first
                 ``n_targets`` columns are the prediction targets
    :param history_len: history window length
    :param pred_len: prediction window length
    :param n_targets: number of leading columns to predict (default 2)
    :return: X (n_sequences, history_len, n_features),
             y (n_sequences, pred_len, n_targets)
    """
    X, y = [], []
    # Each window of history_len rows ending at i predicts the next pred_len rows.
    for i in range(history_len, len(data) - pred_len + 1):
        X.append(data[i - history_len:i])
        y.append(data[i:i + pred_len, :n_targets])
    return np.array(X), np.array(y)


def write_log(content: str):
    """Append a timestamped line to the training log, then echo it to stdout."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(LOG_SAVE_PATH, "a", encoding="utf-8") as log_file:
        log_file.write(f"[{stamp}] {content}\n")
    print(content)


# -------------------------- 数据预处理（不变）--------------------------
def preprocess_data() -> tuple:
    """Load the CSV, engineer features, scale, window, and wrap in DataLoaders.

    Returns (train_loader, val_loader, scaler).
    Raises FileNotFoundError when the training CSV is missing.
    """
    write_log("=== 开始数据预处理 ===")

    # 1. Load the CSV.
    if not os.path.exists(CSV_PATH):
        raise FileNotFoundError(f"训练数据文件不存在：{CSV_PATH}")
    df = pd.read_csv(CSV_PATH)
    write_log(f"原始数据条数：{len(df)}")

    # 2. Parse timestamps and sort chronologically.
    df["create_time"] = pd.to_datetime(df["create_time"])
    df = df.sort_values("create_time").set_index("create_time")
    write_log(f"时间范围：{df.index.min()} 至 {df.index.max()}")

    # 3. Feature columns (must match the API configuration).
    feature_cols = TARGET_COLS + [
        "power_factor",  # static feature
        "sharp_electricity", "peak_electricity", "flat_electricity",  # time-of-use features
        "valley_electricity", "deep_valley_electricity",
        "hour", "weekday", "month", "holiday"  # calendar features (derived below)
    ]

    # 4. Derive calendar features from the DatetimeIndex.
    df["hour"] = df.index.hour
    df["weekday"] = df.index.weekday
    df["month"] = df.index.month
    # Wrap the index in a Series so .apply works (DatetimeIndex has no .apply).
    df["holiday"] = pd.Series(df.index).apply(get_holiday_flag).values

    # 5. Keep only the feature columns actually present.
    valid_feature_cols = [col for col in feature_cols if col in df.columns]
    df_features = df[valid_feature_cols].copy()
    write_log(f"有效特征列：{valid_feature_cols}（共{len(valid_feature_cols)}个）")

    # 6. Fill gaps forward then backward.
    #    Fix: fillna(method=...) is deprecated (and removed in pandas 2.x);
    #    use the dedicated ffill()/bfill() methods instead.
    df_features = df_features.ffill().bfill()
    write_log(f"预处理后数据条数：{len(df_features)}")

    # 7. Min-max scale to [0, 1] (the scaler is saved for the API to reuse).
    scaler = MinMaxScaler(feature_range=(0, 1))
    data_scaled = scaler.fit_transform(df_features)
    write_log(f"归一化完成（特征维度：{data_scaled.shape[1]}）")

    # 8. Persist the scaler.
    os.makedirs("models", exist_ok=True)
    with open(SCALER_SAVE_PATH, "wb") as f:
        pickle.dump(scaler, f)
    write_log(f"归一化器已保存到：{SCALER_SAVE_PATH}")

    # 9. Build sliding-window sequences.
    X, y = create_sequences(data_scaled, HISTORY_LEN, PRED_LEN)
    write_log(f"序列构造完成：X形状={X.shape}, y形状={y.shape}")

    # 10. Chronological 80/20 split (no shuffling, to avoid leakage).
    train_size = int(0.8 * len(X))
    X_train, X_val = X[:train_size], X[train_size:]
    y_train, y_val = y[:train_size], y[train_size:]
    write_log(f"训练集：X={X_train.shape}, y={y_train.shape}")
    write_log(f"验证集：X={X_val.shape}, y={y_val.shape}")

    # 11. Convert to tensors on the training device.
    X_train_tensor = torch.tensor(X_train, dtype=torch.float32).to(DEVICE)
    y_train_tensor = torch.tensor(y_train, dtype=torch.float32).to(DEVICE)
    X_val_tensor = torch.tensor(X_val, dtype=torch.float32).to(DEVICE)
    y_val_tensor = torch.tensor(y_val, dtype=torch.float32).to(DEVICE)

    # 12. Build DataLoaders.
    #     Fix: the validation loader previously used drop_last=True, which
    #     silently discarded the tail of the validation set even though the
    #     reported averages are taken over the whole set. Validation should
    #     see every sample; drop_last only makes sense for training batches.
    train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
    val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, drop_last=False)

    write_log("=== 数据预处理完成 ===")
    return train_loader, val_loader, scaler


# -------------------------- 模型初始化与训练（添加进度显示）--------------------------
def train_model(train_loader: DataLoader, val_loader: DataLoader):
    """Train the Kronos model with gradient clipping, LR decay, and early stopping.

    Saves the best weights (lowest validation loss) to MODEL_SAVE_PATH and
    logs progress through write_log. Returns None.

    Fixes vs. the previous version:
    - average losses are divided by the number of samples actually processed,
      not len(loader.dataset) (with drop_last=True the two can differ, which
      biased both reported losses downward);
    - torch.load uses map_location so a GPU-saved checkpoint reloads on CPU.
    """
    write_log("\n=== 开始模型训练 ===")
    write_log(f"训练配置：设备={DEVICE},  epoch={EPOCHS}, 批次大小={BATCH_SIZE}, 学习率={LEARNING_RATE}")

    # 1. Build the Kronos model (in-file implementation).
    model = Kronos(
        input_dim=INPUT_DIM,
        history_len=HISTORY_LEN,
        pred_len=PRED_LEN,
        n_layers=N_LAYERS,
        d_model=D_MODEL,
        n_heads=N_HEADS,
        d_ff=D_FF,
        dropout=DROPOUT,
        activation=ACTIVATION,
        use_norm=True,
        device=DEVICE
    ).to(DEVICE)
    write_log(f"Kronos模型初始化完成（参数总数：{sum(p.numel() for p in model.parameters()):,}）")

    # 2. Loss and optimizer.
    criterion = nn.MSELoss()  # mean squared error (regression)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=LEARNING_RATE,
        weight_decay=WEIGHT_DECAY
    )
    # No `verbose` kwarg: keeps compatibility with older PyTorch releases.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode="min", factor=0.5, patience=4
    )  # halve the LR after 4 stagnant validation epochs

    # 3. Bookkeeping.
    best_val_loss = float("inf")
    patience_counter = 0  # early-stopping counter

    # 4. Training loop.
    for epoch in range(EPOCHS):
        write_log(f"\n📌 Epoch [{epoch + 1:2d}/{EPOCHS}] 开始训练")

        # --- Training phase (batch-level progress bar) ---
        model.train()
        train_loss = 0.0
        train_samples = 0  # samples actually seen (drop_last=True skips the tail)
        train_pbar = tqdm(train_loader, desc="训练批次", total=len(train_loader), leave=False)
        for X_batch, y_batch in train_pbar:
            # Forward pass.
            outputs = model(X_batch)  # (batch_size, pred_len, 2)
            loss = criterion(outputs, y_batch)

            # Backward pass and optimization.
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # gradient clipping
            optimizer.step()

            # Accumulate sample-weighted loss.
            train_loss += loss.item() * X_batch.size(0)
            train_samples += X_batch.size(0)
            train_pbar.set_postfix({"批次损失": f"{loss.item():.6f}"})

        train_pbar.close()
        # Fix: normalize by samples actually processed, not full dataset size.
        avg_train_loss = train_loss / max(train_samples, 1)

        # --- Validation phase (batch-level progress bar) ---
        model.eval()
        val_loss = 0.0
        val_samples = 0
        val_pbar = tqdm(val_loader, desc="验证批次", total=len(val_loader), leave=False)
        with torch.no_grad():
            for X_batch, y_batch in val_pbar:
                outputs = model(X_batch)
                loss = criterion(outputs, y_batch)
                val_loss += loss.item() * X_batch.size(0)
                val_samples += X_batch.size(0)
                val_pbar.set_postfix({"批次损失": f"{loss.item():.6f}"})

        val_pbar.close()
        avg_val_loss = val_loss / max(val_samples, 1)

        # LR decay (logged manually, since `verbose` is unavailable on old PyTorch).
        old_lr = optimizer.param_groups[0]['lr']
        scheduler.step(avg_val_loss)
        new_lr = optimizer.param_groups[0]['lr']
        lr_log = ""
        if new_lr != old_lr:
            lr_log = f" | 学习率调整：{old_lr:.6f} → {new_lr:.6f}"

        write_log(
            f"Epoch [{epoch + 1:2d}/{EPOCHS}] | 平均训练损失：{avg_train_loss:.6f} | 平均验证损失：{avg_val_loss:.6f}{lr_log}"
        )

        # Checkpoint on improvement; otherwise advance the early-stopping counter.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            torch.save(model.state_dict(), MODEL_SAVE_PATH)
            write_log(f"✅ 保存最优模型（验证损失：{best_val_loss:.6f}）")
            patience_counter = 0
        else:
            patience_counter += 1
            write_log(f"❌ 验证损失未下降（已连续{patience_counter}轮）")

        if patience_counter >= PATIENCE:
            write_log(f"\n=== 早停触发（连续{PATIENCE}轮验证损失未下降）===")
            break

    # 5. Summary (read the actual current LR from the optimizer).
    write_log(f"\n=== 模型训练完成 ===")
    write_log(f"最优验证损失：{best_val_loss:.6f}")
    write_log(f"最终学习率：{optimizer.param_groups[0]['lr']:.6f}")
    write_log(f"模型权重已保存到：{MODEL_SAVE_PATH}")

    # 6. Quick sanity check on one validation batch.
    #    Fix: map_location keeps this working when the checkpoint was saved
    #    on GPU but reloaded on CPU (or vice versa).
    model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
    model.eval()
    with torch.no_grad():
        X_sample, y_sample = next(iter(val_loader))
        y_pred = model(X_sample)
        # Per-target mean absolute error on this sample batch.
        mae_electricity = torch.mean(torch.abs(y_pred[:, :, 0] - y_sample[:, :, 0])).item()
        mae_fee = torch.mean(torch.abs(y_pred[:, :, 1] - y_sample[:, :, 1])).item()
        write_log(f"样本预测效果：电量MAE={mae_electricity:.4f}, 电价MAE={mae_fee:.6f}")


# -------------------------- Entry point --------------------------
if __name__ == "__main__":
    # Start from a clean log for each run.
    if os.path.exists(LOG_SAVE_PATH):
        os.remove(LOG_SAVE_PATH)

    try:
        # 1. Data preprocessing.
        train_loader, val_loader, scaler = preprocess_data()

        # 2. Model training.
        train_model(train_loader, val_loader)

        write_log("\n🎉 全部训练流程完成！")
        write_log(f"📁 生成文件：")
        write_log(f"  - 模型权重：{MODEL_SAVE_PATH}")
        write_log(f"  - 归一化器：{SCALER_SAVE_PATH}")
        write_log(f"  - 训练日志：{LOG_SAVE_PATH}")
        write_log(f"\n💡 可直接将 models 目录复制到 Flask API 项目中使用")

    except Exception as e:
        error_msg = f"训练失败：{str(e)}"
        write_log(f"❌ {error_msg}")
        # Fix: bare `raise` re-raises with the original traceback intact
        # (`raise e` restarts the traceback at this line).
        raise