from glob import glob
# from hex_read_sort import TokenSystem
from high_vocab3 import UniVoc
# from SamOutVXP import SamOut
from model3 import SamOut
# 3Epoch 1:  40%|███▉      | 17595/44160 [16:19<22:43, 19.48it/s, loss=3.62]
# 5Epoch 1:   2%|▏         | 1069/44160 [02:30<52:34, 13.66it/s, loss=5.44]
# 3Epoch 1:   2%|▏         | 762/44160 [01:53<38:30, 18.78it/s, loss=5.38]
# 5Epoch 1:   2%|▏         | 855/44160 [02:03<42:19, 17.05it/s, loss=5.53]
# 5Epoch 1:   2%|▏         | 861/44160 [01:59<38:51, 18.57it/s, loss=5.43]
# 5Epoch 1:   2%|▏         | 966/44160 [02:08<41:33, 17.33it/s, loss=5.49]
# Epoch 1:   2%|▏         | 785/44160 [01:55<39:57, 18.09it/s, loss=5.34]
# Epoch 1:   2%|▏         | 805/44160 [01:55<37:23, 19.33it/s, loss=5.34]
# Epoch 1:   2%|▏         | 769/44160 [01:53<39:14, 18.43it/s, loss=5.35]
# Epoch 1:   2%|▏         | 785/44160 [01:52<37:47, 19.13it/s, loss=5.34]
# Epoch 1:   2%|▏         | 741/44160 [01:54<40:07, 18.04it/s, loss=5.38]
# Epoch 1:   2%|▏         | 715/44160 [01:51<39:16, 18.43it/s, loss=5.37]
# Epoch 1:   2%|▏         | 728/44160 [01:54<39:03, 18.53it/s, loss=5.35]
# Epoch 1:  38%|███▊      | 16569/44160 [17:31<27:42, 16.60it/s, loss=3.61]


import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from tqdm import tqdm
import numpy as np
import json
import random
from typing import List
vocab = UniVoc()  # project tokenizer: provides encode() and voc_size
# Hyperparameters
voc_size = vocab.voc_size+1  # +1 reserves one extra id — presumably padding (0 is used as pad/ignore below); TODO confirm
num_layers = 8
hidden_size = 2 ** 6 * num_layers  # 64 * num_layers = 512
num_heads = num_layers
learning_rate = 0.0005
batch_size = 32
num_epochs = 6
max_seq_len = 512  # hard cap on tokens per training sample

# Dataset over a JSONL pretraining corpus: one {"text": ...} object per line.
class PretrainDataset(Dataset):
    def __init__(self, data_path: str):
        """Read every line of *data_path* into memory up front."""
        self.data_path = data_path

        # Load the whole corpus at once; tokenization is deferred to __getitem__.
        with open(data_path, "r", encoding="utf-8") as f:
            self.all_data = f.readlines()

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Tokenize sample *idx*; fall back to [0, 0] on any failure."""
        try:
            record = json.loads(self.all_data[idx])
            raw_text = record["text"]

            # Drop chat-start markers, then treat each "<|im_end|>"-terminated
            # chunk as one segment (the trailing split remainder is discarded).
            chunks = raw_text.replace("<|im_start|>", "").split("<|im_end|>")[:-1]

            tokens = []
            for chunk in chunks:
                piece = [1] + vocab.encode(chunk) + [2]  # frame with BOS=1 / EOS=2
                # Enforce the global length cap: stop before overflowing.
                if len(tokens) + len(piece) > max_seq_len:
                    break
                tokens += piece

            # A sequence needs >= 2 tokens to split into input/target later.
            if len(tokens) >= 2:
                return tokens
            return [0, 0]  # minimal pad-only placeholder
        except Exception:
            # Malformed line (bad JSON, missing key, ...): best-effort fallback.
            return [0, 0]

# Collate function: pad a batch of token lists into (input, target) tensors.
def collate_fn(batch: List[List[int]]):
    """Build next-token-prediction tensors from a batch of token sequences.

    Each sequence of length n contributes input = seq[:-1] and target =
    seq[1:], both right-padded with 0 to the batch-wide maximum (n_max - 1).
    0 doubles as the padding id; the training loss uses ignore_index=0.

    Returns:
        (input_tensor, target_tensor): two long tensors of identical shape.
    """
    # Drop sequences too short to split into input/target.
    batch = [seq for seq in batch if len(seq) >= 2]
    if not batch:
        # Minimal placeholder batch so the training loop can skip it safely.
        return torch.zeros(1, 2, dtype=torch.long), torch.zeros(1, 2, dtype=torch.long)

    # Inputs/targets are one token shorter than the raw sequences, so pad to
    # (longest sequence - 1).  Padding to the full sequence length — as a
    # naive version would — appends a guaranteed all-zero column to every
    # batch and makes any later length-reconciliation branch dead code.
    pad_len = max(len(seq) for seq in batch) - 1

    input_batch = []
    target_batch = []
    for seq in batch:
        input_seq = seq[:-1]   # all but the last token
        target_seq = seq[1:]   # all but the first token (shifted by one)
        input_batch.append(input_seq + [0] * (pad_len - len(input_seq)))
        target_batch.append(target_seq + [0] * (pad_len - len(target_seq)))

    return (
        torch.tensor(input_batch, dtype=torch.long),
        torch.tensor(target_batch, dtype=torch.long),
    )

# Model factory: build SamOut, report its size, and move it to the GPU.
def create_model():
    """Instantiate SamOut on CUDA; requires bfloat16-capable hardware."""
    model = SamOut(
        voc_size=voc_size, 
        hidden_size=hidden_size, 
        num_heads=num_heads, 
        num_layers=num_layers
    )

    # Count learnable elements, skipping zero-dimensional (scalar) parameters.
    total = sum(
        p.numel() for p in model.parameters() if p.shape != torch.Size([])
    )
    print(total)

    # Training autocasts to bfloat16, so refuse to run on GPUs without it.
    if not torch.cuda.is_bf16_supported():
        raise ValueError("当前 GPU 不支持 bfloat16，请检查硬件支持或改用 float16。")

    model.to("cuda")
    return model

# Training entry point.
def train_model():
    """Run the full pretraining loop and return the trained model.

    Loads "pretrain_hq.jsonl", trains for num_epochs with bfloat16 autocast,
    and after every epoch saves the model weights plus the loss history.
    """
    # Build the model (prints parameter count, moves it to CUDA).
    model = create_model()
    
    # Loss and optimizer; ignore_index=0 makes padding positions (id 0)
    # contribute nothing to the loss.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scaler = torch.amp.GradScaler()  # new torch.amp API
    # NOTE(review): GradScaler targets float16 underflow; with the bfloat16
    # autocast used below it is typically unnecessary (harmless) — confirm.
    
    # Token-system loading (currently disabled).

    # token_system.load("token_system")
    print("Token系统已加载")
    
    # Dataset: one JSONL line per sample.
    dataset = PretrainDataset("pretrain_hq.jsonl")
    print(f"数据集已加载，包含 {len(dataset)} 个样本")
    
    # DataLoader with parallel workers and pinned host memory to speed up
    # host-to-GPU copies (pairs with non_blocking=True below).
    dataloader = DataLoader(
        dataset, 
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=12,
        pin_memory=True
    )
    
    # Training loop.
    epoch_losses = []
    for epoch in range(num_epochs):
        print(f"\n开始训练周期 {epoch+1}/{num_epochs}")
        model.train()
        
        running_loss = 0.0
        step_count = 0
        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}")
        
        for inputs, targets in progress_bar:
            # Move the batch to the GPU.
            inputs = inputs.to("cuda", non_blocking=True)
            targets = targets.to("cuda", non_blocking=True)
            
            # Skip empty or degenerate batches (collate_fn can emit
            # placeholder tensors when every sequence was filtered out).
            if inputs.numel() == 0 or inputs.size(0) == 0:
                continue
            
            # Mixed-precision forward pass.
            with torch.amp.autocast('cuda', dtype=torch.bfloat16):  # new torch.amp API
                # Model returns (logits, extra); the second value is unused here.
                outputs, _ = model(inputs)
                
                # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for the loss.
                B, S, V = outputs.size()
                outputs = outputs.view(B*S, V)
                targets = targets.view(B*S)
                
                loss = criterion(outputs, targets)
            
            # Backward pass and optimizer step through the grad scaler.
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            
            # Show the running-average loss on the progress bar.
            running_loss += loss.item()
            step_count += 1
            avg_loss = running_loss / step_count
            progress_bar.set_postfix(loss=avg_loss)
        
        # Record this epoch's mean loss (0.0 if every batch was skipped).
        epoch_loss = running_loss / step_count if step_count > 0 else 0.0
        epoch_losses.append(epoch_loss)
        print(f"周期 {epoch+1} 完成，平均损失: {epoch_loss:.4f}")
        
        # Checkpoint: weights and the loss history accumulated so far.
        torch.save(model.state_dict(), f"model_pretrain_cap_{epoch+1}.pth")
        pd.to_pickle(epoch_losses, f"losses_{epoch+1}.pkl")
    
    print("训练完成！")
    return model

# Script entry point: run pretraining when executed directly.
if __name__ == "__main__":
    train_model()


    # Past training-run logs kept for reference:
    # Epoch 1:   5%|▍         | 2203/44160 [04:04<1:17:43,  9.00it/s, loss=3.74]Epoch 1:  12%|█▏        | 5451/44160 [09:35<1:00:44, 10.62it/s, loss=3.24]
    # token.split("_")[-1].isdigit()
    # Epoch 6: 100%|██████████| 44160/44160 [50:11<00:00, 14.66it/s, loss=2.21]