import wandb
import torch
import pickle

from fire import Fire
from tqdm import tqdm
from accelerate import Accelerator
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from transformers import get_cosine_schedule_with_warmup
from transformers import Qwen2Tokenizer, Qwen2ForSequenceClassification

# Token id used as the padding value when batching (see collate_fn).
# By its name this is the Qwen2 end-of-sequence token id — TODO(review):
# confirm it matches tokenizer.eos_token_id for the checkpoint in use.
EOS_TOKEN_ID = 151643

# Dataset definition
class CustomDataset(Dataset):
    """Pickle-backed text-classification dataset with pre-tokenized contents."""

    def __init__(self, data_path, tokenizer, max_length=2048):
        """
        Build a dataset instance.

        Args:
            data_path (str): path to the pickled data file.
            tokenizer (object): tokenizer used to encode each content string.
            max_length (int): maximum encoded length; longer texts are truncated.

        Expected pickle contents:
            [{
            'content': str,  # the text
            'label': int     # the label (0 or 1)
            }, ...]
        """
        # NOTE(review): pickle.load on untrusted files is unsafe — data_path
        # is assumed to be a trusted local artifact.
        with open(data_path, 'rb') as f:  # was an unclosed open(); use a context manager
            self.data = pickle.load(f)
        # Drop very short texts (< 20 chars) and tokenize the rest once up front.
        self.data = [
            {**item, 'encoded': tokenizer(
                item['content'],
                truncation=True,
                max_length=max_length,
                return_tensors='pt'
            )}
            for item in tqdm(self.data, desc='Tokenizing')
            if len(item['content']) >= 20
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # squeeze(0) rather than squeeze(): a length-1 sequence must keep its
        # sequence dimension instead of collapsing to a 0-d tensor, which
        # would break pad_sequence in the collate function.
        input_ids = self.data[idx]['encoded']['input_ids'].squeeze(0)
        attention_mask = self.data[idx]['encoded']['attention_mask'].squeeze(0)
        label = self.data[idx]['label']
        return { 'input_ids': input_ids, 'attention_mask': attention_mask, 'label': label }

def collate_fn(batch):
    """Collate function for the DataLoader.

    Pads the variable-length samples in `batch` to a common length: input ids
    are padded with EOS_TOKEN_ID, attention masks with 0 (the pad_sequence
    default), and the per-sample labels are stacked into one tensor.
    """
    ids = [sample['input_ids'] for sample in batch]
    masks = [sample['attention_mask'] for sample in batch]
    labels = torch.tensor([sample['label'] for sample in batch])

    return {
        'input_ids': pad_sequence(ids, batch_first=True, padding_value=EOS_TOKEN_ID),
        'attention_mask': pad_sequence(masks, batch_first=True),
        'labels': labels,
    }

def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler, wandb):
    """Run the training loop for `num_epochs` epochs over `dataloader`.

    Each step zeroes gradients, runs a forward pass with labels (so the model
    returns its loss), backpropagates through accelerate, then steps the
    optimizer and the LR scheduler. The per-step loss is shown on the
    progress bar and logged to wandb.
    """
    for _epoch in range(num_epochs):
        model.train()
        progress = tqdm(dataloader)
        for batch in progress:
            optimizer.zero_grad()

            outputs = model(
                input_ids=batch['input_ids'],
                attention_mask=batch['attention_mask'],
                labels=batch['labels'],
            )
            loss = outputs.loss
            # Backward through accelerate so it handles scaling/distribution.
            accelerator.backward(loss)
            optimizer.step()
            # Update the learning rate once per optimizer step.
            scheduler.step()

            loss_value = loss.detach().item()
            progress.set_description("Loss: {:.6f}".format(loss_value))
            wandb.log({"loss": loss_value})

def main(
    data_path = "/shd/fanggx/Paper/WithZzr/Rater/data/embedding/qwen-all-loss-train.pkl",
    model_name = "/shd/fanggx/models/qwen-0.5b",
    saved_name = "saved_model_v2",
    batch_size = 4,
    num_epochs = 4,
    learning_rate = 6e-5,
):
    """Fine-tune a Qwen2 binary sequence classifier on a pickled text dataset.

    Args:
        data_path (str): path to the pickled training data (see CustomDataset).
        model_name (str): local path or hub id of the Qwen2 checkpoint.
        saved_name (str): directory the trained model/tokenizer are saved to.
        batch_size (int): per-process batch size.
        num_epochs (int): number of training epochs.
        learning_rate (float): peak AdamW learning rate.
    """
    # Initialize accelerator and wandb
    accelerator = Accelerator()
    wandb.init(project="rater", entity="fanggx")

    # Initialize tokenizer and model
    print("初始化 tokenizer 和模型")
    tokenizer = Qwen2Tokenizer.from_pretrained(model_name)
    model = Qwen2ForSequenceClassification.from_pretrained(model_name, num_labels=2)
    # collate_fn pads inputs with EOS_TOKEN_ID; the classification head needs
    # to know the pad id to locate the last real token of each padded sequence
    # (otherwise batch sizes > 1 fail with "no padding token is defined").
    model.config.pad_token_id = EOS_TOKEN_ID

    # Prepare data
    dataset = CustomDataset(data_path, tokenizer)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    model, dataloader = accelerator.prepare(model, dataloader)

    # Create optimizer and LR scheduler. total_steps is computed from the
    # prepared dataloader, i.e. the per-process number of steps.
    total_steps = len(dataloader) * num_epochs
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(0.05 * total_steps),
        num_training_steps=total_steps
    )
    # Fix: the optimizer/scheduler must also go through prepare(), otherwise
    # mixed-precision gradient scaling and distributed stepping are skipped.
    optimizer, scheduler = accelerator.prepare(optimizer, scheduler)

    # Start training
    train(num_epochs, model, dataloader, optimizer, accelerator, scheduler, wandb)

    # Save the model: wait for all workers, unwrap the accelerate wrapper, and
    # write from the main process only to avoid concurrent writes.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        accelerator.unwrap_model(model).save_pretrained(saved_name)
        tokenizer.save_pretrained(saved_name)
    wandb.finish()

if __name__ == "__main__":
    # Fire exposes main()'s keyword arguments as command-line flags.
    Fire(main)