import os
import time
import torch
import yaml
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader, TensorDataset
from model import PoetryModel
from utils import load_dataset, save_model, load_model_state, update_training_history
from model import create_model  # import create_model from model.py (NOTE: duplicate import of `model` with line above)

class Trainer:
    """Drives the full training lifecycle of the poetry language model.

    Responsibilities: data loading, model/optimizer construction,
    per-epoch checkpointing, training-history bookkeeping, and resumable
    training from the last saved checkpoint.
    """

    def __init__(self, config):
        """Set up device, data, model, optimizer, scheduler, and loss.

        Args:
            config: nested dict with 'data', 'model', and 'training'
                sections (e.g. parsed from a YAML config file).
        """
        self.config = config
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model_dir = config['model']['save_dir']
        self.history_file = os.path.join(self.model_dir, 'training_history.yaml')

        # Ensure the checkpoint directory exists.
        os.makedirs(self.model_dir, exist_ok=True)

        # Load vocabulary mappings and the tokenized poem corpus.
        self.word2ix, self.ix2word, self.poems = load_dataset(config['data']['path'])
        self.vocab_size = len(self.word2ix)

        # Build the model and move it to the selected device.
        self.model = create_model(self.vocab_size, config['model']).to(self.device)

        # AdamW: decoupled weight decay.
        self.optimizer = torch.optim.AdamW(
            self.model.parameters(),
            lr=config['training']['learning_rate'],
            weight_decay=config['training']['weight_decay']
        )
        # Reduce the learning rate when the monitored loss plateaus.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            mode='min',
            factor=config['training']['lr_factor'],
            patience=config['training']['lr_patience']
        )

        # The padding token '</s>' is excluded from the loss.
        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=self.word2ix['</s>'])

        # Resume a previous run if a training-history file is present.
        self.start_epoch = 0
        self.best_loss = float('inf')
        if os.path.exists(self.history_file):
            self.resume_training()

    def resume_training(self):
        """Restore epoch counter, best loss, and model/optimizer state from history."""
        with open(self.history_file, 'r', encoding='utf-8') as f:  # history is UTF-8 YAML
            history = yaml.safe_load(f)

        if history and len(history) > 0:
            last_epoch = history[-1]['epoch']
            # BUGFIX: history entries written by train() record 'train_loss',
            # not 'val_loss' — the old 'val_loss' lookup always yielded inf.
            self.best_loss = min(h.get('train_loss', float('inf')) for h in history)

            model_path = os.path.join(self.model_dir, f'model_epoch_{last_epoch}.pth')
            if os.path.exists(model_path):
                # Restore model weights and optimizer state, then continue
                # from the epoch after the last completed one.
                load_model_state(self.model, self.optimizer, model_path)
                self.start_epoch = last_epoch + 1
                print(f"已从 epoch {last_epoch} 恢复训练")
            else:
                # Checkpoint is missing: restarting from scratch is safer than
                # continuing at a later epoch with freshly initialized weights.
                print(f"警告: 未找到检查点 {model_path}，将从头开始训练")
                self.start_epoch = 0
                self.best_loss = float('inf')

    def train(self):
        """Run the main training loop, checkpointing after every epoch."""
        epochs = self.config['training']['epochs']

        # Wrap the pre-tokenized poems as a tensor dataset.
        dataset = TensorDataset(torch.tensor(self.poems, dtype=torch.long))
        dataloader = DataLoader(
            dataset,
            batch_size=self.config['training']['batch_size'],
            shuffle=True,
            num_workers=self.config['training']['num_workers'],
            pin_memory=True
        )

        for epoch in range(self.start_epoch, epochs):
            start_time = time.time()
            train_loss = self.train_epoch(dataloader, epoch)
            duration = time.time() - start_time

            # Keep best_loss in sync so it is meaningful after a resume.
            self.best_loss = min(self.best_loss, train_loss)

            # Per-epoch checkpoint (model weights + optimizer state).
            model_path = os.path.join(self.model_dir, f'model_epoch_{epoch}.pth')
            save_model(self.model, self.optimizer, model_path)

            # Append this epoch's stats to the persistent training history.
            history_entry = {
                'epoch': epoch,
                'train_loss': train_loss,
                'learning_rate': self.optimizer.param_groups[0]['lr'],
                'duration': duration
            }
            update_training_history(self.history_file, history_entry)

            # No validation set is available: schedule on the training loss.
            self.scheduler.step(train_loss)

            print(f"Epoch {epoch}/{epochs-1} | Loss: {train_loss:.4f} | "
                  f"LR: {self.optimizer.param_groups[0]['lr']:.6f} | "
                  f"Time: {duration:.2f}s")

    def train_epoch(self, dataloader, epoch):
        """Train for one epoch and return the mean batch loss.

        Args:
            dataloader: yields 1-tuples of (batch, seq_len) long tensors.
            epoch: current epoch index, used only for progress display.

        Returns:
            float: average cross-entropy loss over all batches.
        """
        self.model.train()
        total_loss = 0.0

        progress_bar = tqdm(enumerate(dataloader), total=len(dataloader), desc=f"Epoch {epoch}")
        for batch_idx, (batch_data,) in progress_bar:
            batch_data = batch_data.to(self.device)

            # Next-token prediction: input is the sequence shifted right,
            # target is the sequence shifted left by one position.
            inputs = batch_data[:, :-1]
            targets = batch_data[:, 1:]

            outputs = self.model(inputs)
            loss = self.criterion(outputs.reshape(-1, self.vocab_size), targets.reshape(-1))

            self.optimizer.zero_grad()
            loss.backward()

            # Clip gradients to stabilize RNN/transformer training.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['training']['grad_clip'])

            self.optimizer.step()

            total_loss += loss.item()

            progress_bar.set_postfix({'loss': loss.item()})

        return total_loss / len(dataloader)