import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import f1_score
import os

def train_model(model, train_loader, val_loader, criterion, optimizer, 
                num_epochs=5, save_path='model.pth'):
    """Train *model* and checkpoint the weights with the best validation F1.

    Runs `num_epochs` of training on `train_loader`, evaluates macro-F1 on
    `val_loader` after every epoch, and writes the state dict to `save_path`
    whenever the validation F1 improves. Prints per-epoch loss/F1 progress.

    Returns:
        The model after the final epoch (note: not necessarily the best
        checkpoint — that one lives on disk at `save_path`).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    best_f1 = 0

    for epoch in range(num_epochs):
        # ---- training pass ----
        model.train()
        running_loss = 0
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)

            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        # ---- validation pass (no gradients needed) ----
        model.eval()
        all_preds, all_labels = [], []
        with torch.no_grad():
            for inputs, targets in val_loader:
                logits = model(inputs.to(device))
                all_preds.extend(torch.argmax(logits, dim=1).cpu().numpy())
                # targets stay on CPU, so .numpy() is safe here
                all_labels.extend(targets.numpy())

        val_f1 = f1_score(all_labels, all_preds, average='macro')

        # Checkpoint only on improvement over the best F1 seen so far.
        if val_f1 > best_f1:
            best_f1 = val_f1
            torch.save(model.state_dict(), save_path)
            print(f'保存新的最佳模型，F1分数: {val_f1:.4f}')

        print(f'第 {epoch+1}/{num_epochs} 轮:')
        print(f'平均损失: {running_loss/len(train_loader):.4f}')
        print(f'验证集F1分数: {val_f1:.4f}\n')

    return model

def load_model(model, path, map_location=None):
    """Load a checkpoint saved by ``train_model`` into *model* and return it.

    Args:
        model: an ``nn.Module`` whose architecture matches the checkpoint.
        path: file written via ``torch.save(model.state_dict(), path)``.
        map_location: optional device remapping forwarded to ``torch.load``.
            Pass ``'cpu'`` to load a CUDA-trained checkpoint on a machine
            without a GPU — the previous implementation had no way to do
            this and would raise a RuntimeError in that situation. The
            default ``None`` keeps the original behavior.

    Returns:
        The same *model* instance with the loaded parameters.
    """
    state_dict = torch.load(path, map_location=map_location)
    model.load_state_dict(state_dict)
    return model
