import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from tqdm import tqdm
from utils.config import config
from training.metrics import accuracy

class Trainer:
    """Training/validation/test harness for a classification model.

    Wraps a model with cross-entropy loss, SGD, and a configurable LR
    scheduler (cosine annealing or step decay), records per-epoch
    loss/accuracy histories, and checkpoints the best and periodic models.
    """

    def __init__(self, model, train_loader, val_loader, test_loader):
        # nn.Module.to() moves parameters in place and returns the same
        # object, so self.model and the caller's model share parameters.
        self.model = model.to(config.DEVICE)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader

        # Loss function and optimizer. Use self.model.parameters() for
        # clarity (identical objects, since .to() is in-place for modules).
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(
            self.model.parameters(),
            lr=config.LEARNING_RATE,
            momentum=config.MOMENTUM,
            weight_decay=config.WEIGHT_DECAY,
        )

        # Learning-rate scheduler: cosine annealing over the full run,
        # otherwise step decay (x0.1 every 30 epochs).
        if config.LR_SCHEDULER == "cosine":
            self.scheduler = CosineAnnealingLR(self.optimizer, T_max=config.EPOCHS)
        else:
            self.scheduler = StepLR(self.optimizer, step_size=30, gamma=0.1)

        # Per-epoch metric histories (appended by train_one_epoch/validate).
        self.train_loss_history = []
        self.train_acc_history = []
        self.val_loss_history = []
        self.val_acc_history = []

    def train_one_epoch(self, epoch):
        """Run one training epoch and return (mean_loss, mean_accuracy).

        Also appends both values to the training history lists.
        """
        self.model.train()
        running_loss = 0.0
        running_acc = 0.0
        total = 0

        pbar = tqdm(self.train_loader, desc=f"Epoch {epoch + 1}/{config.EPOCHS}")
        for inputs, labels in pbar:
            inputs, labels = inputs.to(config.DEVICE), labels.to(config.DEVICE)

            # Forward pass.
            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels)

            # Backward pass and parameter update.
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Accumulate sample-weighted sums so unequal batch sizes
            # (e.g. the final partial batch) average correctly.
            batch_size = inputs.size(0)
            running_loss += loss.item() * batch_size
            running_acc += accuracy(outputs, labels) * batch_size
            total += batch_size

            # Show running averages on the progress bar.
            pbar.set_postfix({
                'loss': running_loss / total,
                'acc': running_acc / total,
            })

        epoch_loss = running_loss / total
        epoch_acc = running_acc / total

        self.train_loss_history.append(epoch_loss)
        self.train_acc_history.append(epoch_acc)

        return epoch_loss, epoch_acc

    def _evaluate(self, loader):
        """Evaluate the model on `loader`; return (mean_loss, mean_accuracy).

        Shared implementation behind validate() and test() — the original
        duplicated this loop verbatim in both methods.
        """
        self.model.eval()
        running_loss = 0.0
        running_acc = 0.0
        total = 0

        with torch.no_grad():
            for inputs, labels in loader:
                inputs, labels = inputs.to(config.DEVICE), labels.to(config.DEVICE)

                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)

                batch_size = inputs.size(0)
                running_loss += loss.item() * batch_size
                running_acc += accuracy(outputs, labels) * batch_size
                total += batch_size

        return running_loss / total, running_acc / total

    def validate(self):
        """Evaluate on the validation set, record history, return (loss, acc)."""
        val_loss, val_acc = self._evaluate(self.val_loader)
        self.val_loss_history.append(val_loss)
        self.val_acc_history.append(val_acc)
        return val_loss, val_acc

    def test(self):
        """Evaluate on the test set and return (loss, acc); no history kept."""
        return self._evaluate(self.test_loader)

    def save_model(self, epoch, path):
        """Write a checkpoint with model/optimizer/scheduler state and histories.

        Adds 'scheduler_state_dict' (missing in the original), which is
        required to resume training with the correct LR schedule; all
        previously saved keys are unchanged.
        """
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'train_loss': self.train_loss_history,
            'train_acc': self.train_acc_history,
            'val_loss': self.val_loss_history,
            'val_acc': self.val_acc_history,
        }, path)

    def train(self):
        """Full training loop: train/validate each epoch, checkpoint, then test.

        Returns:
            The final test-set accuracy.
        """
        best_val_acc = 0.0

        for epoch in range(config.EPOCHS):
            # Train for one epoch, then validate.
            train_loss, train_acc = self.train_one_epoch(epoch)
            val_loss, val_acc = self.validate()

            # Step the scheduler once per epoch, after optimizer updates.
            self.scheduler.step()

            # Report epoch results.
            print(f"Epoch {epoch + 1}/{config.EPOCHS}")
            print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}")
            print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")
            print("-" * 50)

            # Keep the checkpoint with the best validation accuracy so far.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                self.save_model(epoch, f"{config.SAVE_DIR}/best_model.pth")

            # Periodic checkpoint every SAVE_FREQ epochs.
            if (epoch + 1) % config.SAVE_FREQ == 0:
                self.save_model(epoch, f"{config.SAVE_DIR}/model_epoch_{epoch + 1}.pth")

        # Final evaluation on the held-out test set.
        test_loss, test_acc = self.test()
        print(f"Test Loss: {test_loss:.4f} | Test Acc: {test_acc:.4f}")

        return test_acc