from glob import glob

from high_vocab3 import UniVoc
from model3 import SamOut
import torch

import pandas as pd
from tqdm import tqdm
import numpy as np
import json
import random
from typing import List

# Hyperparameters
vocab = UniVoc()
# +1 leaves room for one extra (e.g. padding) id beyond the vocabulary.
voc_size = vocab.voc_size+1

num_layers = 8
hidden_size = 2 ** 6 * num_layers
num_heads = num_layers
learning_rate = 0.00005 
batch_size =10
num_epochs = 1
max_seq_len = 512 * 4  # maximum sequence length limit  # NOTE(review): defined but never enforced below — confirm whether truncation is intended


# 自定义数据集类
# Dataset for SFT-style training: each jsonl line holds a "conversations"
# list of {role, content} dicts which is flattened into one token sequence.
class PretrainDataset():
    def __init__(self, data_path: str):
        """Load every line of *data_path* (jsonl) into memory and build a
        shuffled index stream covering one pass over the data."""
        self.data_path = data_path

        with open(data_path, "r", encoding="utf-8") as f:
            self.all_data = f.readlines()
        # Generator over a random permutation of row indices; once it is
        # exhausted get_batch() returns (None, None), ending the epoch.
        self.idx = (i for i in np.random.permutation(self.__len__()))

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Tokenize one conversation.

        Each turn is framed as [1, role_id] + tokens + [2], with role_id 5
        for the user and 6 for the assistant (other roles).
        """
        data = json.loads(self.all_data[idx])
        token_list = []
        for turn in data["conversations"]:
            role_id = 5 if turn["role"] == "user" else 6
            token_list += [1, role_id] + vocab.encode(turn["content"]) + [2]

        # Guarantee at least 2 tokens so the sequence can be split into an
        # (input, target) pair.
        if len(token_list) < 2:
            return [0, 0]
        return token_list

    def collate_fn(self, batch: List[List[int]]):
        """Right-pad a batch of token sequences with 0 and return the
        (input, target) LongTensors for next-token prediction."""
        # Drop sequences too short to split.
        batch = [seq for seq in batch if len(seq) >= 2]
        if not batch:
            # Minimal placeholder so callers never receive an empty tensor.
            return torch.zeros(1, 2, dtype=torch.long), torch.zeros(1, 2, dtype=torch.long)

        # Inputs are seq[:-1] and targets seq[1:], so their common padded
        # length is one less than the longest raw sequence.  (Previously
        # both were padded to the raw maximum, wasting one all-pad column.)
        pad_len = max(len(seq) for seq in batch) - 1

        input_batch = []
        target_batch = []
        for seq in batch:
            input_seq = seq[:-1]    # tokens 0..n-2
            target_seq = seq[1:]    # tokens 1..n-1 (shifted by one)
            input_batch.append(input_seq + [0] * (pad_len - len(input_seq)))
            target_batch.append(target_seq + [0] * (pad_len - len(target_seq)))

        input_tensor = torch.tensor(input_batch, dtype=torch.long)
        target_tensor = torch.tensor(target_batch, dtype=torch.long)
        return input_tensor, target_tensor

    def get_batch(self, batch_size):
        """Draw up to *batch_size* samples from the shuffled index stream.

        Returns (None, None) once the permutation is exhausted.  The old
        bare ``except:`` silently swallowed every error (including bad
        json); only the expected StopIteration is caught now.
        """
        batch_list = []
        for _ in range(batch_size):
            try:
                batch_list.append(self.__getitem__(int(next(self.idx))))
            except StopIteration:
                break  # one epoch of indices consumed
        if not batch_list:
            return None, None
        return self.collate_fn(batch_list)


# (Batch collation lives in PretrainDataset.collate_fn above.)


# 模型初始化
# Model initialization
def create_model(checkpoint_path: str = "model_pretrain_cap_6.pth", device: str = "cuda"):
    """Build a SamOut model, restore pretrained weights and move it to *device*.

    Args:
        checkpoint_path: state-dict file to load (default keeps the path
            that was previously hard-coded).
        device: target device string; for CUDA devices the GPU must support
            bfloat16 because training runs under bf16 autocast.

    Raises:
        ValueError: if a CUDA device is requested but bf16 is unsupported.
    """
    model = SamOut(
        voc_size=voc_size,
        hidden_size=hidden_size,
        num_heads=num_heads,
        num_layers=num_layers
    )
    model.load_state_dict(torch.load(checkpoint_path))
    # Fail fast: the training loop uses bfloat16 autocast on CUDA.
    if device.startswith("cuda") and not torch.cuda.is_bf16_supported():
        raise ValueError("当前 GPU 不支持 bfloat16，请检查硬件支持或改用 float16。")

    model.to(device)
    return model


# 训练函数
# Training function
def train_model():
    """Fine-tune the pretrained model on sft_2048.jsonl with bf16 autocast,
    saving the weights and loss history after every epoch.

    Returns:
        The trained model (still on the CUDA device).
    """
    model = create_model()

    # ignore_index=0 masks the pad positions produced by collate_fn.
    # The deprecated `reduce=None` argument was dropped — it was a no-op.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    # NOTE(review): GradScaler targets float16; under bfloat16 autocast it is
    # effectively a pass-through.  Kept for parity with the original run.
    scaler = torch.amp.GradScaler()

    # Build the dataset (loads the whole jsonl file into memory).
    dataset = PretrainDataset("sft_2048.jsonl")
    print(f"数据集已加载，包含 {len(dataset)} 个样本")

    # Training loop
    epoch_losses = []
    for epoch in range(num_epochs):
        print(f"\n开始训练周期 {epoch + 1}/{num_epochs}")
        model.train()

        running_loss = 0.0
        step_count = 0
        progress_bar = tqdm(range(len(dataset) // batch_size + 1), desc=f"Epoch {epoch + 1}")

        for _ in progress_bar:
            inputs, targets = dataset.get_batch(batch_size)
            if inputs is None:
                break  # index permutation exhausted — epoch complete
            inputs = inputs.to("cuda", non_blocking=True)
            targets = targets.to("cuda", non_blocking=True)

            # Skip empty/degenerate batches.
            if inputs.numel() == 0 or inputs.size(0) == 0:
                continue

            # Mixed-precision forward pass.
            with torch.amp.autocast('cuda', dtype=torch.bfloat16):
                outputs, _ = model(inputs)

                # Flatten (B, S, V) logits and (B, S) targets for CE loss.
                B, S, V = outputs.size()
                loss = criterion(outputs.view(B * S, V), targets.view(B * S))

            # Backward pass and optimizer step.
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            # Frees cached blocks every step; slows training slightly but
            # keeps peak memory low for long sequences.
            torch.cuda.empty_cache()

            # Progress reporting.
            running_loss += loss.item()
            step_count += 1
            progress_bar.set_postfix(loss=running_loss / step_count)

        # Record this epoch's average loss.
        epoch_loss = running_loss / step_count if step_count > 0 else 0.0
        epoch_losses.append(epoch_loss)
        print(f"周期 {epoch + 1} 完成，平均损失: {epoch_loss:.4f}")

        # Persist model weights and loss history.
        torch.save(model.state_dict(), f"model_pretrain_cap_{epoch + 1}_sft_new.pth")
        pd.to_pickle(epoch_losses, f"losses_{epoch + 1}_sft_new.pkl")

    print("训练完成！")
    return model


if __name__ == "__main__":
    train_model()
    # Debug helper: peek at the first lines of the dataset file.
    # with open("sft_2048.jsonl", "r", encoding="utf-8") as f:
    #     all_data = f.readlines()
    #     print(all_data[:2])