import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from torch.optim import AdamW
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from tqdm import tqdm

# 1. Load the dataset
def load_sentiment_dataset(name="imdb"):
    """Download (or reuse from ./cache) the named HuggingFace dataset."""
    return load_dataset(name, cache_dir="./cache")

# 2. Data preprocessing
def preprocess_data(dataset, tokenizer):
    """Tokenize every split of *dataset* and expose it as PyTorch tensors.

    Each example's "text" field is padded/truncated to a fixed length of 128
    tokens; only input_ids, attention_mask and label are kept in the output
    format.
    """
    def _encode(batch):
        return tokenizer(
            batch["text"],
            padding="max_length",
            truncation=True,
            max_length=128,
        )

    encoded = dataset.map(_encode, batched=True)
    encoded.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
    return encoded

# 3. Load the pretrained model
def load_model(model_path, num_labels=2):
    """Load a pretrained sequence-classification head and place it on GPU if available."""
    classifier = AutoModelForSequenceClassification.from_pretrained(
        model_path, num_labels=num_labels
    )
    target = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # .to() moves in place and returns the same module instance.
    return classifier.to(target)

# 4. Fine-tune the model
def fine_tune_model(model, tokenized_datasets, optimizer, epochs, device, batch_size=16):
    """Standard supervised fine-tuning loop over the "train" split.

    Returns the trained model and the list of per-step loss values
    (one entry per batch, across all epochs).
    """
    loader = DataLoader(tokenized_datasets["train"], batch_size=batch_size, shuffle=True)
    model.train()
    step_losses = []
    for epoch_idx in range(epochs):
        running_loss = 0.0
        bar = tqdm(loader, desc=f"Epoch {epoch_idx + 1}/{epochs}")
        for batch in bar:
            # Move the whole batch to the training device.
            batch = {name: tensor.to(device) for name, tensor in batch.items()}
            outputs = model(
                input_ids=batch["input_ids"],
                attention_mask=batch["attention_mask"],
                labels=batch["label"],
            )
            loss = outputs.loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            step_loss = loss.item()
            running_loss += step_loss
            step_losses.append(step_loss)
            # bar.n counts batches finished before this one, so n+1 is the
            # number of batches folded into running_loss.
            bar.set_postfix({'Loss': running_loss / (bar.n + 1)})

        print(f"Epoch {epoch_idx + 1}/{epochs}, Average Loss: {running_loss / len(loader):.4f}")
        # evaluate_model(model, tokenized_datasets, device)

    return model, step_losses

# 5. Evaluate the model
def evaluate_model(model, tokenized_datasets, device, num_labels=2):
    """Evaluate *model* on the "test" split; print and return the metrics.

    Args:
        model: sequence-classification model whose outputs expose .loss/.logits.
        tokenized_datasets: dict-like container with a "test" split of tensors.
        device: torch device to run inference on.
        num_labels: number of classes; pins the per-class metric arrays so
            index i always corresponds to label i.

    Returns:
        dict with keys "accuracy", "f1", "precision", "recall", "loss".
    """
    test_dataloader = DataLoader(tokenized_datasets["test"], batch_size=16, shuffle=False)
    model.eval()
    all_preds = []
    all_labels = []
    total_loss = 0
    progress_bar = tqdm(test_dataloader, desc="Evaluating")

    with torch.no_grad():
        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["label"])
            total_loss += outputs.loss.item()
            preds = torch.argmax(outputs.logits, dim=-1)
            all_preds.extend(preds.cpu().tolist())
            all_labels.extend(batch["label"].cpu().tolist())
            progress_bar.set_postfix({'Loss': total_loss / (progress_bar.n + 1)})

    acc = accuracy_score(all_labels, all_preds)
    f1 = f1_score(all_labels, all_preds, average='weighted')
    # Fix: the original ignored num_labels. Without labels=, average=None only
    # returns entries for classes actually present in the data, so "Class i"
    # below could report the wrong class. zero_division=0 avoids warnings for
    # classes with no predicted/true samples.
    label_ids = list(range(num_labels))
    precision = precision_score(all_labels, all_preds, labels=label_ids, average=None, zero_division=0)
    recall = recall_score(all_labels, all_preds, labels=label_ids, average=None, zero_division=0)

    print(f"Accuracy: {acc:.4f}")
    print(f"F1 Score: {f1:.4f}")
    for i in range(num_labels):
        print(f"Class {i} - Precision: {precision[i]:.4f}, Recall: {recall[i]:.4f}")

    # Return metrics so callers can log/compare them without parsing stdout
    # (backward-compatible: previous callers ignored the implicit None).
    return {
        "accuracy": acc,
        "f1": f1,
        "precision": precision.tolist(),
        "recall": recall.tolist(),
        "loss": total_loss / len(test_dataloader),
    }
    
if __name__ == "__main__":
    # NOTE(review): plot_loss is only used by the commented-out call below;
    # consider dropping this import (and the sibling `train` module dependency)
    # if plotting stays disabled.
    from train import plot_loss
    import json
    
    # Hyperparameters / run configuration.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset_name = "imdb"
    num_labels = 2  # binary classification task
    model_path = "base-bert"  # presumably a local checkpoint directory — TODO confirm
    epochs = 3
    lr = 2e-5
    batch_size = 16
    
    # Pipeline: load data -> tokenize -> load model -> fine-tune -> evaluate.
    dataset = load_sentiment_dataset(dataset_name)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    tokenized_datasets = preprocess_data(dataset, tokenizer)
    model = load_model(model_path, num_labels=num_labels)
    optimizer = AdamW(model.parameters(), lr=lr)
    fine_tuned_model, loss_list = fine_tune_model(model, tokenized_datasets, optimizer, epochs, device, batch_size=batch_size)
    evaluate_model(fine_tuned_model, tokenized_datasets, device, num_labels=num_labels)
    # plot_loss(loss_list, 'FineTunning Loss')
    # Persist the per-step training losses for offline plotting/analysis.
    with open('loss_list.json', 'w') as f:
        json.dump(loss_list, f)
