import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from models.multi_branch import MoldTempModel
from utils.data_loader import MoldDataset
from utils.logger import TrainingLogger

class Trainer:
    """Two-stage trainer for the mold temperature model.

    Stage 1 trains with the environment and tundish branches frozen;
    stage 2 unfreezes every parameter for fine-tuning.
    """

    def __init__(self, config):
        # `config` is a project object; assumed to expose at least `lr`
        # plus whatever MoldTempModel reads — TODO confirm against caller.
        self.model = MoldTempModel(config)
        self.optimizer = AdamW(self.model.parameters(), lr=config.lr)
        # Huber loss: quadratic near zero, linear beyond delta — robust
        # to outlier temperature readings.
        self.criterion = torch.nn.HuberLoss(delta=1.0)
        self.logger = TrainingLogger()

        # Staged-training configuration: parameter-name substrings that
        # identify the sub-modules frozen during each stage.
        self.freeze_layers = {
            'stage1': ['env_branch', 'tundish_branch'],
            'stage2': []  # freeze nothing, i.e. unfreeze everything
        }

    def freeze_parameters(self, stage):
        """Set ``requires_grad`` on every model parameter for *stage*.

        Parameters whose name contains any entry of
        ``self.freeze_layers[stage]`` are frozen; all others are
        (re-)enabled.  Bug fix: the original only ever called
        ``requires_grad_(False)`` on matches, so parameters frozen in
        stage 1 were never unfrozen when switching to stage 2.
        """
        frozen = self.freeze_layers[stage]
        for name, param in self.model.named_parameters():
            param.requires_grad_(not any(layer in name for layer in frozen))

    def train_epoch(self, dataloader, epoch):
        """Run one optimization pass over *dataloader*, logging each batch."""
        self.model.train()
        for batch_idx, (features, targets) in enumerate(dataloader):
            self.optimizer.zero_grad()
            outputs = self.model(features)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()

            # Compute the logged MAE outside autograd so the metric does
            # not extend the computation graph.
            with torch.no_grad():
                mae = torch.abs(outputs - targets).mean().item()
            self.logger.log(
                epoch=epoch,
                batch=batch_idx,
                loss=loss.item(),
                mae=mae,
            )

    def run(self, train_data, val_data, epochs=100, save_path='model_final.pth'):
        """Execute the two-stage schedule, then save the final weights.

        NOTE(review): ``val_data`` is accepted but never used — there is
        no validation loop; confirm whether evaluation belongs here.
        ``save_path`` is new and defaults to the previously hard-coded
        location, so existing callers are unaffected.
        """
        # Stage 1: train with the 'stage1' branches frozen.
        self.freeze_parameters('stage1')
        for epoch in range(epochs // 2):
            self.train_epoch(train_data, epoch)

        # Stage 2: fine-tune with everything unfrozen.
        self.freeze_parameters('stage2')
        for epoch in range(epochs // 2, epochs):
            self.train_epoch(train_data, epoch)

        # Persist final weights.
        torch.save(self.model.state_dict(), save_path)