from glob import glob
from hex_read_sort import TokenSystem
from model3 import SamOut
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from tqdm import tqdm
import numpy as np
import json
import random
from typing import List

# Hyperparameters for model size, optimization, and data shaping.
voc_size = 12507  # vocabulary size; presumably matches the TokenSystem vocab — TODO confirm
num_layers = 8
hidden_size = 2 ** 6 * num_layers  # 64 * num_layers = 512
num_heads = num_layers  # one attention head per layer by convention here
learning_rate = 0.0005
batch_size = 32
num_epochs = 6
max_seq_len = 512  # hard cap on tokens per training sample

# 自定义数据集类
# Dataset over a JSONL corpus: one JSON document per line with a "text" field.
class PretrainDataset(Dataset):
    """Map-style dataset that tokenizes chat-formatted pretraining text.

    Each line of ``data_path`` is JSON with a ``"text"`` field containing
    segments delimited by ``<|im_start|>`` / ``<|im_end|>`` markers. Segments
    are tokenized and concatenated up to ``max_seq_len`` tokens.
    """

    def __init__(self, token_system: TokenSystem, data_path: str):
        self.token_system = token_system
        self.data_path = data_path

        # Load the whole corpus into memory once; each element is one JSON line.
        with open(data_path, "r", encoding="utf-8") as f:
            self.all_data = f.readlines()

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        line = self.all_data[idx]
        data = json.loads(line)
        text = data["text"]

        # Strip the start marker, split on the end marker; the element after
        # the final <|im_end|> is an empty trailer, so drop it with [:-1].
        segments = text.replace("<|im_start|>", "").split("<|im_end|>")[:-1]
        token_list = []
        for seg in segments:
            encoded_seg = self.token_system.encode(seg)
            if len(token_list) + len(encoded_seg) > max_seq_len:
                # Fix: previously, a sample whose FIRST segment alone exceeded
                # max_seq_len left token_list empty and the sample was silently
                # discarded (returned as [0, 0]). Truncate instead so long
                # samples still contribute training data.
                if not token_list:
                    token_list = encoded_seg[:max_seq_len]
                break
            token_list.extend(encoded_seg)

        # Guarantee at least 2 tokens so collate_fn can split into
        # (input, target); 0 is the pad id ignored by the loss.
        if len(token_list) < 2:
            return [0, 0]
        return token_list

# 修正后的批处理函数
def collate_fn(batch: List[List[int]]):
    """Collate variable-length token sequences into (input, target) tensors.

    For each sequence, the input is ``seq[:-1]`` and the target is ``seq[1:]``
    (next-token prediction). Both are right-padded with 0 — the pad id that
    the loss ignores — to the length of the longest raw sequence in the batch.

    Returns:
        Tuple of two ``torch.long`` tensors of identical shape
        ``(batch, max_len)``; a ``(1, 2)`` all-zero placeholder pair if every
        sequence was shorter than 2 tokens.
    """
    # Drop sequences too short to yield an (input, target) pair.
    batch = [seq for seq in batch if len(seq) >= 2]
    if not batch:
        # Placeholder so the training loop never receives an empty tensor.
        return torch.zeros(1, 2, dtype=torch.long), torch.zeros(1, 2, dtype=torch.long)

    # Pad everything to the longest raw sequence length in the batch.
    max_len = max(len(seq) for seq in batch)

    input_batch = []
    target_batch = []
    for seq in batch:
        input_seq = seq[:-1]   # tokens 0..n-2 predict...
        target_seq = seq[1:]   # ...tokens 1..n-1
        # input_seq and target_seq always have equal length (len(seq) - 1),
        # so padding both to max_len keeps them aligned. The original code
        # carried an unreachable "length mismatch" re-padding branch here;
        # it was dead code and has been removed.
        input_batch.append(input_seq + [0] * (max_len - len(input_seq)))
        target_batch.append(target_seq + [0] * (max_len - len(target_seq)))

    input_tensor = torch.tensor(input_batch, dtype=torch.long)
    target_tensor = torch.tensor(target_batch, dtype=torch.long)

    return input_tensor, target_tensor

# 模型初始化
def create_model():
    """Instantiate SamOut with the module-level hyperparameters and move it
    onto the default CUDA device.

    Raises:
        ValueError: if the current GPU does not support bfloat16.
    """
    # The training loop autocasts to bfloat16, so refuse to proceed on
    # hardware that cannot run it.
    if not torch.cuda.is_bf16_supported():
        raise ValueError("当前 GPU 不支持 bfloat16，请检查硬件支持或改用 float16。")

    net = SamOut(
        voc_size=voc_size,
        hidden_size=hidden_size,
        num_heads=num_heads,
        num_layers=num_layers,
    )
    net.to("cuda")
    return net

# 训练函数
# Training entry point.
def train_model():
    """Pretrain the model over the JSONL corpus with bfloat16 autocast.

    Checkpoints the model weights and the loss history to disk after every
    epoch, and returns the trained model.
    """
    # Build the model on the GPU (raises if bfloat16 is unsupported).
    model = create_model()
    
    # Token id 0 doubles as the pad id, so it is excluded from the loss.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # NOTE(review): GradScaler targets float16 autocast; with the bfloat16
    # autocast used below, gradient scaling is generally unnecessary (though
    # harmless) — confirm it is intentional.
    scaler = torch.amp.GradScaler()  # new torch.amp API (not torch.cuda.amp)
    
    # Load the tokenizer state saved under "token_system".
    token_system = TokenSystem()
    token_system.load("token_system")
    print("Token系统已加载")
    
    # Build the dataset over the pretraining corpus.
    dataset = PretrainDataset(token_system, "pretrain_hq.jsonl")
    print(f"数据集已加载，包含 {len(dataset)} 个样本")
    
    # Worker processes plus pinned memory enable fast async host-to-device copies.
    dataloader = DataLoader(
        dataset, 
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=12,
        pin_memory=True
    )
    
    # Main training loop.
    epoch_losses = []
    for epoch in range(num_epochs):
        print(f"\n开始训练周期 {epoch+1}/{num_epochs}")
        model.train()
        
        running_loss = 0.0
        step_count = 0
        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}")
        
        for inputs, targets in progress_bar:
            # Async copy to GPU (non_blocking pairs with pin_memory=True above).
            inputs = inputs.to("cuda", non_blocking=True)
            targets = targets.to("cuda", non_blocking=True)
            
            # Defensive skip of degenerate batches; collate_fn already
            # guarantees a non-empty placeholder batch, so this rarely fires.
            if inputs.numel() == 0 or inputs.size(0) == 0:
                continue
            
            # Forward pass under bfloat16 mixed precision.
            with torch.amp.autocast('cuda', dtype=torch.bfloat16):  # new torch.amp API
                # Model returns a tuple; only the logits are used here.
                outputs, _ = model(inputs)
                
                # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
                B, S, V = outputs.size()
                outputs = outputs.view(B*S, V)
                targets = targets.view(B*S)
                
                loss = criterion(outputs, targets)
            
            # Backward and optimizer step routed through the scaler.
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            
            # Show the running mean loss on the progress bar.
            running_loss += loss.item()
            step_count += 1
            avg_loss = running_loss / step_count
            progress_bar.set_postfix(loss=avg_loss)
        
        # Mean loss for this epoch (0.0 if the loader yielded no steps).
        epoch_loss = running_loss / step_count if step_count > 0 else 0.0
        epoch_losses.append(epoch_loss)
        print(f"周期 {epoch+1} 完成，平均损失: {epoch_loss:.4f}")
        
        # Checkpoint the weights and the full loss history after every epoch.
        torch.save(model.state_dict(), f"model_pretrain_cap_{epoch+1}.pth")
        pd.to_pickle(epoch_losses, f"losses_{epoch+1}.pkl")
    
    print("训练完成！")
    return model

# Script entry point: kick off pretraining when executed directly. The guard
# is also required for the DataLoader's worker processes on spawn platforms.
if __name__ == "__main__":
    train_model()