import os
import math
import numpy as np
import pandas as pd
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# === Reuses your existing code structure directly ===
# Assumes kronos.py and module.py live in the same directory (or under model/)
from kronos import KronosTokenizer, Kronos, auto_regressive_inference, calc_time_stamps


# ===========================
# 1. Dataset definitions
# ===========================

class EnergySeriesDataset(Dataset):
    """
    Dataset for training the tokenizer.

    Each sample is a sliding window over one normalized series:
      - window: continuous values, shape (seq_len, 1)
      - stamps: timestamp features, shape (seq_len, 5), consumed later by Kronos
    """

    def __init__(self, df, seq_len=512):
        """
        df: DataFrame with ['datetime', 'value'] columns; sorted here by time.
        seq_len: length of each training window.
        """
        self.seq_len = seq_len

        ordered = df.sort_values('datetime').reset_index(drop=True)
        self.values = ordered['value'].values.astype(np.float32)
        self.timestamps = pd.to_datetime(ordered['datetime'])

        # Simple global z-score normalization (one mean/std for the whole series).
        self.mean = self.values.mean()
        self.std = self.values.std() + 1e-6
        self.values_norm = ((self.values - self.mean) / self.std).astype(np.float32)

        # How many full windows fit into the series.
        self.num_samples = len(self.values_norm) - seq_len + 1
        if self.num_samples <= 0:
            raise ValueError("数据长度太短，无法切出任何长度为 seq_len 的序列")

        # Precompute per-timestamp features once.
        # NOTE(review): assumed to be (N, 5) per the comments — produced by calc_time_stamps.
        time_df = calc_time_stamps(self.timestamps)
        self.time_feat = time_df.values.astype(np.float32)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        lo, hi = idx, idx + self.seq_len

        window = torch.from_numpy(self.values_norm[lo:hi]).unsqueeze(-1)  # (seq_len, 1)
        stamps = torch.from_numpy(self.time_feat[lo:hi, :])               # (seq_len, 5)
        return window, stamps


class EnergyTokenDataset(Dataset):
    """
    Dataset for training Kronos on token sequences.

    __getitem__ encodes a value window with tokenizer.encode into two token
    streams (s1_ids, s2_ids) and builds a next-token prediction pair:
      - inputs: first seq_len-1 tokens
      - targets: last seq_len-1 tokens
    """

    def __init__(self, df, tokenizer, seq_len=512, device='cpu'):
        """
        df: DataFrame with ['datetime', 'value'] columns.
        tokenizer: trained KronosTokenizer used to encode windows on the fly.
        seq_len: window length (before the input/target shift).
        device: device on which tokenizer.encode runs.
            NOTE(review): with device='cuda' use num_workers=0 — issuing CUDA
            calls inside DataLoader worker processes is unsafe.
        """
        self.seq_len = seq_len
        self.tokenizer = tokenizer.to(device)
        # Fix: encode in eval mode. The tokenizer is built with dropout layers
        # (see its ffn/attn/resid dropout args), so leaving it in train mode
        # would make the emitted token ids nondeterministic across epochs.
        self.tokenizer.eval()
        self.device = device

        df = df.sort_values('datetime').reset_index(drop=True)
        self.values = df['value'].values.astype(np.float32)
        self.timestamps = pd.to_datetime(df['datetime'])

        # Global z-score normalization, matching EnergySeriesDataset.
        self.mean = self.values.mean()
        self.std = self.values.std() + 1e-6
        self.values_norm = ((self.values - self.mean) / self.std).astype(np.float32)

        self.num_samples = len(self.values_norm) - seq_len + 1
        if self.num_samples <= 0:
            raise ValueError("数据长度太短，无法切出任何长度为 seq_len 的序列")

        time_df = calc_time_stamps(self.timestamps)
        self.time_feat = time_df.values.astype(np.float32)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        start = idx
        end = idx + self.seq_len

        x_seq = self.values_norm[start:end]            # (seq_len,)
        t_seq = self.time_feat[start:end, :]           # (seq_len, 5)

        x_tensor = torch.from_numpy(x_seq).unsqueeze(-1).unsqueeze(0).to(self.device)  # (1, seq_len, 1)
        # Encode to tokens (half=True yields [s1_ids, s2_ids]).
        with torch.no_grad():
            z_ids = self.tokenizer.encode(x_tensor, half=True)
        s1_ids = z_ids[0].squeeze(0)   # (seq_len,)
        s2_ids = z_ids[1].squeeze(0)   # (seq_len,)

        # Autoregressive shift: inputs are the first seq_len-1 tokens,
        # targets are the last seq_len-1 tokens.
        s1_in = s1_ids[:-1]
        s2_in = s2_ids[:-1]
        s1_tgt = s1_ids[1:]
        s2_tgt = s2_ids[1:]

        # NOTE(review): float -> long truncates fractional values — confirm
        # calc_time_stamps emits integer-valued features.
        stamp = torch.from_numpy(t_seq[:-1, :]).to(torch.long)   # (seq_len-1, 5)

        return s1_in.cpu(), s2_in.cpu(), s1_tgt.cpu(), s2_tgt.cpu(), stamp.cpu()


# ===========================
# 2. Training
# ===========================

def train_tokenizer(tokenizer, train_loader, device='cuda', epochs=10, lr=1e-4):
    """
    Train the KronosTokenizer with an autoencoding objective.

    Loss = MSE reconstruction (on the full-codebook output) + BSQ quantization
    loss, both taken from the tokenizer's forward pass.

    Args:
        tokenizer: module whose forward returns ((z_pre, z_full), bsq_loss, _, _).
        train_loader: iterable of (x_seq, _) batches, x_seq of shape (B, T, 1).
        device: device to train on.
        epochs: number of passes over train_loader.
        lr: Adam learning rate.

    Returns:
        The trained tokenizer (same object, moved to `device`).
    """
    tokenizer = tokenizer.to(device)
    optimizer = optim.Adam(tokenizer.parameters(), lr=lr)
    # Hoisted out of the loop: no need to build a fresh criterion per batch.
    mse = nn.MSELoss()

    for epoch in range(1, epochs + 1):
        tokenizer.train()
        total_loss = 0.0
        total_rec = 0.0
        total_bsq = 0.0
        count = 0

        for x_seq, _ in train_loader:
            # x_seq: (B, T, 1)
            x_seq = x_seq.to(device)

            optimizer.zero_grad()
            (z_pre, z_full), bsq_loss, _, _ = tokenizer(x_seq)

            # Reconstruction MSE against the full-codebook decoding.
            rec_loss = mse(z_full, x_seq)
            loss = rec_loss + bsq_loss

            loss.backward()
            optimizer.step()

            bsz = x_seq.size(0)
            total_loss += loss.item() * bsz
            total_rec += rec_loss.item() * bsz
            total_bsq += bsq_loss.item() * bsz
            count += bsz

        # Guard against an empty loader (drop_last=True can produce one),
        # which would otherwise raise ZeroDivisionError here.
        denom = max(count, 1)
        print(f"[Tokenizer] Epoch {epoch}/{epochs} "
              f"loss={total_loss / denom:.6f} "
              f"rec={total_rec / denom:.6f} "
              f"bsq={total_bsq / denom:.6f}")

    return tokenizer


def train_kronos(model, tokenizer, train_loader, device='cuda', epochs=20, lr=1e-4):
    """
    Autoregressively train Kronos to predict the (s1, s2) token pair at each
    step with teacher forcing, averaging the two cross-entropy losses.

    Note: `tokenizer` is only moved to `device` here; the loop itself consumes
    pre-encoded token batches from `train_loader`.
    """
    model = model.to(device)
    tokenizer = tokenizer.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(1, epochs + 1):
        model.train()
        loss_sum = 0.0
        s1_sum = 0.0
        s2_sum = 0.0
        n_seen = 0

        for batch in train_loader:
            # Unpack and move every tensor onto the training device.
            s1_in, s2_in, s1_tgt, s2_tgt, stamp = (t.to(device) for t in batch)

            optimizer.zero_grad()
            # Teacher-forced forward pass; logits come back as (B, L, vocab).
            s1_logits, s2_logits = model(
                s1_in, s2_in,
                stamp=stamp,
                padding_mask=None,
                use_teacher_forcing=True,
                s1_targets=s1_tgt
            )

            # Flatten (B, L, V) -> (B*L, V) for cross-entropy.
            loss_s1 = criterion(s1_logits.reshape(-1, s1_logits.size(-1)), s1_tgt.reshape(-1))
            loss_s2 = criterion(s2_logits.reshape(-1, s2_logits.size(-1)), s2_tgt.reshape(-1))
            loss = (loss_s1 + loss_s2) / 2.0

            loss.backward()
            optimizer.step()

            batch_n = s1_in.size(0)
            loss_sum += loss.item() * batch_n
            s1_sum += loss_s1.item() * batch_n
            s2_sum += loss_s2.item() * batch_n
            n_seen += batch_n

        print(f"[Kronos] Epoch {epoch}/{epochs} "
              f"loss={loss_sum / n_seen:.6f} "
              f"s1={s1_sum / n_seen:.6f} "
              f"s2={s2_sum / n_seen:.6f}")

    return model


# ===========================
# 3. Predict the next 96 points (example)
# ===========================

def predict_next_96(df, tokenizer, model, device='cuda', max_context=512,
                    pred_len=96, freq='15min'):
    """
    Forecast the next `pred_len` points of the series in `df`.

    Args:
        df: DataFrame with ['datetime', 'value']; only the last `max_context`
            points are used as history.
        tokenizer: trained KronosTokenizer.
        model: trained Kronos model.
        device: inference device.
        max_context: maximum history length fed to the model.
        pred_len: number of future points to predict (default 96, i.e. one day
            at 15-minute resolution — generalized from the original constant).
        freq: pandas frequency string for the future timestamps (default '15min').

    Returns:
        DataFrame with columns ['datetime', 'value_pred'] of length pred_len.
    """
    df = df.sort_values('datetime').reset_index(drop=True)
    values = df['value'].values.astype(np.float32)
    ts = pd.to_datetime(df['datetime'])

    # Keep only the last max_context points as history.
    if len(values) > max_context:
        values = values[-max_context:]
        ts = ts[-max_context:]

    # Normalize with the history's own statistics; predictions are
    # de-normalized with the same mean/std below.
    mean = values.mean()
    std = values.std() + 1e-6
    values_norm = ((values - mean) / std).astype(np.float32)

    x = values_norm[np.newaxis, :, np.newaxis]  # (1, T, 1)
    # Future timestamps: pred_len steps after the last observed time, spaced
    # by `freq` (date_range starts at last_time itself, so drop element 0).
    last_time = ts.iloc[-1]
    future_index = pd.date_range(last_time, periods=pred_len + 1, freq=freq)[1:]

    # Timestamp features for history and future.
    x_time_df = calc_time_stamps(ts)
    y_time_df = calc_time_stamps(future_index)

    x_stamp = x_time_df.values[np.newaxis, :, :].astype(np.float32)
    y_stamp = y_time_df.values[np.newaxis, :, :].astype(np.float32)

    tokenizer = tokenizer.to(device)
    model = model.to(device)

    preds_norm = auto_regressive_inference(
        tokenizer=tokenizer,
        model=model,
        x=torch.from_numpy(x).float().to(device),
        x_stamp=torch.from_numpy(x_stamp).float().to(device),
        y_stamp=torch.from_numpy(y_stamp).float().to(device),
        max_context=max_context,
        pred_len=pred_len,
        clip=5,
        T=1.0,
        top_k=0,
        top_p=0.99,
        sample_count=5,
        verbose=False
    )  # averaged result, shape (1, T+pred_len, 1) per the original comment

    preds_norm = preds_norm[:, -pred_len:, 0]  # last pred_len points, (1, pred_len)
    preds = preds_norm * std + mean

    pred_df = pd.DataFrame({
        "datetime": future_index,
        "value_pred": preds.reshape(-1)
    })
    return pred_df


# ===========================
# 4. main: wire everything together
# ===========================

def main():
    """End-to-end pipeline: load data, train tokenizer and Kronos, forecast one day."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    seq_len = 512      # context length used during training
    batch_size = 32

    # ---- 4.1 Load the exported data_predict data ----
    # Expects a CSV with datetime/deviceid/type/value columns.
    raw_df = pd.read_csv("data_predict_mock.csv", parse_dates=['datetime'])

    # Example: train only on the series for deviceid=1, type=1 (energy).
    mask = (raw_df['deviceid'] == 1) & (raw_df['type'] == 1)
    series_df = raw_df[mask].copy()
    series_df = series_df[['datetime', 'value']].dropna().sort_values('datetime')

    # ---- 4.2 Build the tokenizer & Kronos model ----
    # Shrink d_model / n_layers if GPU memory is tight.
    s1_bits = 8
    s2_bits = 8

    tokenizer_cfg = dict(
        d_in=1,
        d_model=128,
        n_heads=4,
        ff_dim=256,
        n_enc_layers=3,
        n_dec_layers=3,
        ffn_dropout_p=0.1,
        attn_dropout_p=0.1,
        resid_dropout_p=0.1,
        s1_bits=s1_bits,
        s2_bits=s2_bits,
        beta=0.25,
        gamma0=1.0,
        gamma=1.0,
        zeta=0.1,
        group_size=8,
    )
    tokenizer = KronosTokenizer(**tokenizer_cfg)

    model_cfg = dict(
        s1_bits=s1_bits,
        s2_bits=s2_bits,
        n_layers=4,
        d_model=128,
        n_heads=4,
        ff_dim=256,
        ffn_dropout_p=0.1,
        attn_dropout_p=0.1,
        resid_dropout_p=0.1,
        token_dropout_p=0.1,
        learn_te=True,
    )
    kronos_model = Kronos(**model_cfg)

    # ---- 4.3 Train the tokenizer ----
    tok_dataset = EnergySeriesDataset(series_df, seq_len=seq_len)
    tok_loader = DataLoader(tok_dataset, batch_size=batch_size, shuffle=True, drop_last=True)

    tokenizer = train_tokenizer(tokenizer, tok_loader, device=device, epochs=10, lr=1e-4)

    os.makedirs("checkpoints", exist_ok=True)
    torch.save(tokenizer.state_dict(), "checkpoints/kronos_tokenizer_energy.pth")
    print("Tokenizer saved to checkpoints/kronos_tokenizer_energy.pth")

    # ---- 4.4 Train Kronos on the tokenized series ----
    token_dataset = EnergyTokenDataset(series_df, tokenizer=tokenizer, seq_len=seq_len, device=device)
    token_loader = DataLoader(token_dataset, batch_size=batch_size, shuffle=True, drop_last=True)

    kronos_model = train_kronos(kronos_model, tokenizer, token_loader, device=device, epochs=20, lr=1e-4)

    torch.save(kronos_model.state_dict(), "checkpoints/kronos_energy_model.pth")
    print("Kronos model saved to checkpoints/kronos_energy_model.pth")

    # ---- 4.5 Forecast the next day (96 points) with the trained models ----
    pred_df = predict_next_96(series_df, tokenizer, kronos_model, device=device, max_context=seq_len)
    print(pred_df.head())
    pred_df.to_csv("checkpoints/pred_next_96.csv", index=False)
    print("未来 96 个点预测结果已保存到 checkpoints/pred_next_96.csv")


# Script entry point: runs the full train-and-predict pipeline defined in main().
if __name__ == "__main__":
    main()