# utils/train.py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda.amp import GradScaler, autocast
from model.model import Transformer
from config import Config
from utils.data_loader import get_data_loaders
from utils.bleu4 import calculate_bleu4
from tqdm import tqdm
import logging
import os
import numpy as np

# Configure file logging for the whole training run.
logging.basicConfig(filename='training.log', level=logging.INFO, format='%(asctime)s - %(message)s')

class LabelSmoothing(nn.Module):
    """Label-smoothing cross-entropy loss.

    Builds a smoothed target distribution that keeps ``1 - smoothing`` on the
    gold class and spreads ``smoothing`` uniformly over the remaining classes,
    then computes cross entropy against the model's log-probabilities.

    Args:
        smoothing: total probability mass moved off the gold class.
        ignore_index: target id (e.g. the <pad> token) whose positions are
            excluded from the loss; ``None`` disables masking.
    """
    def __init__(self, smoothing=0.1, ignore_index=None):
        super(LabelSmoothing, self).__init__()
        self.smoothing = smoothing
        self.ignore_index = ignore_index
        self.confidence = 1.0 - smoothing  # mass kept on the gold class

    def forward(self, pred, target):
        """Compute the loss.

        Args:
            pred: (N, C) unnormalized logits.
            target: (N,) gold class indices.

        Returns:
            Scalar tensor: mean loss over the non-ignored positions.
        """
        pred = pred.log_softmax(dim=-1)
        with torch.no_grad():
            true_dist = torch.zeros_like(pred)
            # Spread the smoothing mass over the C-1 non-gold classes.
            true_dist.fill_(self.smoothing / (pred.size(-1) - 1))
            true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
            if self.ignore_index is not None:
                # Zero out entire rows for ignored (e.g. padded) positions.
                true_dist.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)

        per_token_loss = torch.sum(-true_dist * pred, dim=-1)
        if self.ignore_index is None:
            return per_token_loss.mean()
        # BUG FIX: the original took the mean over *all* positions, so the
        # zeroed (padded) rows diluted the loss. Average only over tokens
        # that actually contribute; clamp(min=1) guards an all-pad batch.
        num_active = (target != self.ignore_index).sum().clamp(min=1)
        return per_token_loss.sum() / num_active

def get_noam_lr(step, warmup_steps, hidden_dim, factor):
    """Return the Noam learning rate for a given optimizer step.

    Implements factor * hidden_dim^-0.5 * min(step^-0.5, step * warmup^-1.5):
    linear warmup for the first `warmup_steps` steps, then inverse-sqrt decay.
    """
    # step 0 would make step**(-0.5) divide by zero; treat it as step 1
    effective_step = max(step, 1)
    decay_term = effective_step ** (-0.5)
    warmup_term = effective_step * (warmup_steps ** (-1.5))
    return factor * (hidden_dim ** (-0.5)) * min(decay_term, warmup_term)

def train(config):
    """Train the Transformer on the configured dataset, validate each epoch,
    checkpoint the best model by dev BLEU-4, and evaluate it on the test set.

    Args:
        config: project Config object; fields read here include device,
            epochs, learning_rate, weight_decay, label_smoothing,
            warmup_steps, hidden_dim, noam_factor, clip_grad_norm, multi_gpu,
            patience, bleu4_threshold and save_model_path.

    Side effects: prints/logs progress (to stdout and training.log) and saves
    the best checkpoint's state_dict to config.save_model_path.
    """
    # Auto-detect the device, falling back to CPU when CUDA is unavailable.
    device = torch.device(config.device if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    logging.info(f"Using device: {device}")

    # Build the train/dev/test data loaders.
    train_loader, dev_loader, test_loader = get_data_loaders(config, pin_memory=torch.cuda.is_available())
    
    # Initialize the model; vocab sizes come from the training dataset.
    model = Transformer(
        src_vocab_size=len(train_loader.dataset.src_vocab),
        tgt_vocab_size=len(train_loader.dataset.tgt_vocab),
        embedding_dim=config.embedding_dim,
        hidden_dim=config.hidden_dim,
        num_heads=config.num_heads,
        num_layers=config.num_layers,
        dropout=config.dropout,
        max_seq_length=config.max_seq_length
    ).to(device)
    
    # Multi-GPU support via DataParallel.
    if config.multi_gpu and torch.cuda.device_count() > 1:
        print(f"Let's use {torch.cuda.device_count()} GPUs!")
        logging.info(f"Let's use {torch.cuda.device_count()} GPUs!")
        model = nn.DataParallel(model)
    
    # Optimizer and loss. NOTE: the lr passed here is overridden on every
    # step by the Noam schedule inside train_epoch.
    # NOTE(review): ignore_index uses the *source* vocab's <pad> id even
    # though the loss is computed over target tokens — this is only correct
    # if both vocabs assign <pad> the same id; confirm in the data loader.
    optimizer = optim.AdamW(model.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
    criterion = LabelSmoothing(smoothing=config.label_smoothing, 
                             ignore_index=train_loader.dataset.src_vocab.stoi['<pad>'])
    
    # Mixed-precision training (enabled only when CUDA is available).
    scaler = GradScaler() if torch.cuda.is_available() else None

    # Total number of optimizer steps across all epochs.
    # NOTE(review): total_steps is computed but never used below.
    total_steps = len(train_loader) * config.epochs

    # --- inner helpers -----------------------------------------------------
    def train_epoch(model, train_loader, optimizer, criterion, device, scaler=None, epoch=0):
        """Run one training epoch and return the mean per-batch loss."""
        model.train()
        total_loss = 0
        for batch_idx, batch in enumerate(tqdm(train_loader, desc="Training", unit="batch")):
            # Set the Noam learning rate for this global step.
            global_step = epoch * len(train_loader) + batch_idx + 1 # Noam step_num is typically 1-indexed
            current_lr = get_noam_lr(global_step, config.warmup_steps, config.hidden_dim, config.noam_factor)
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr # Use the calculated Noam LR

            src = batch['src'].to(device)
            tgt = batch['tgt'].to(device)
            src_mask = batch['src_mask'].to(device)
            tgt_mask = batch['tgt_mask'].to(device)

            optimizer.zero_grad()
            
            # Teacher forcing: decoder input is tgt without its last token;
            # the loss targets are tgt shifted left by one (drop <sos>).
            # NOTE(review): tgt_mask[:, :-1] slices dim 1 of the mask — this
            # assumes dim 1 is the sequence axis; confirm against the loader.
            with autocast(enabled=scaler is not None):
                output = model(src, tgt[:, :-1], src_mask, tgt_mask[:, :-1])
                output = output.contiguous().view(-1, output.size(-1))
                tgt = tgt[:, 1:].contiguous().view(-1)
                loss = criterion(output, tgt)
            
            if scaler:
                # AMP path: unscale before clipping so the norm is measured
                # on the true (unscaled) gradients.
                scaler.scale(loss).backward()
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.clip_grad_norm)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.clip_grad_norm)
                optimizer.step()
            total_loss += loss.item()
        return total_loss / len(train_loader)

    def validate_epoch(model, dev_loader, criterion, device):
        """Evaluate on dev_loader; return (mean loss, mean per-batch BLEU-4).

        BLEU-4 is computed from teacher-forced argmax predictions (not from
        autoregressive decoding), so it is an optimistic proxy. Also prints
        and logs one sample translation preview per call.
        """
        model.eval()
        total_loss = 0
        total_bleu4 = 0
        preview_shown = False  # only print one translation preview per call
        
        with torch.no_grad():
            for batch in tqdm(dev_loader, desc="Validation", unit="batch"):
                src = batch['src'].to(device)
                tgt = batch['tgt'].to(device)
                src_mask = batch['src_mask'].to(device)
                tgt_mask = batch['tgt_mask'].to(device)
                    
                # Forward pass (same teacher-forced shift as training).
                output = model(src, tgt[:, :-1], src_mask, tgt_mask[:, :-1])
                    
                # Compute the loss.
                output_flat = output.contiguous().view(-1, output.size(-1))
                tgt_flat = tgt[:, 1:].contiguous().view(-1)
                loss = criterion(output_flat, tgt_flat)
                total_loss += loss.item()

                # Greedy (argmax) token ids for BLEU-4 scoring.
                pred_tokens = torch.argmax(output, dim=-1).cpu().numpy()
                tgt_tokens = tgt[:, 1:].cpu().numpy()  # drop <sos>
                
                # Per-sentence BLEU-4.
                batch_bleu4 = []
                for i in range(len(pred_tokens)):
                    # Strip <pad>/<sos>/<eos> and any out-of-range ids.
                    pred_seq = [dev_loader.dataset.tgt_vocab.itos[idx] for idx in pred_tokens[i] 
                               if idx < len(dev_loader.dataset.tgt_vocab.itos) and dev_loader.dataset.tgt_vocab.itos[idx] not in ['<pad>', '<sos>', '<eos>']]
                    tgt_seq = [dev_loader.dataset.tgt_vocab.itos[idx] for idx in tgt_tokens[i] 
                              if idx < len(dev_loader.dataset.tgt_vocab.itos) and dev_loader.dataset.tgt_vocab.itos[idx] not in ['<pad>', '<sos>', '<eos>']]
                    
                    if len(pred_seq) == 0 or len(tgt_seq) == 0:
                        continue
                    
                    bleu4 = calculate_bleu4(tgt_seq, pred_seq)
                    batch_bleu4.append(bleu4)

                    # Show a single sample preview (first scored sentence).
                    if not preview_shown:
                        # Recover the source text.
                        src_seq = [dev_loader.dataset.src_vocab.itos[idx] for idx in src[i].cpu().numpy()
                                 if idx < len(dev_loader.dataset.src_vocab.itos) and 
                                 dev_loader.dataset.src_vocab.itos[idx] not in ['<pad>', '<sos>', '<eos>']]
                        
                        preview_text = f"\nTranslation Preview:\n"
                        preview_text += f"Source: {''.join(src_seq)}\n"  # Chinese source: join without spaces
                        preview_text += f"Translation: {' '.join(pred_seq)}\n"
                        preview_text += f"Reference: {' '.join(tgt_seq)}\n"
                        preview_text += f"BLEU-4: {bleu4:.4f}\n"
                        
                        print(preview_text)
                        logging.info(preview_text)
                        preview_shown = True
                
                if batch_bleu4:
                    total_bleu4 += sum(batch_bleu4) / len(batch_bleu4)
        
        avg_loss = total_loss / len(dev_loader)
        # NOTE(review): batches whose sentences were all filtered out add 0
        # to total_bleu4 but still count in the denominator, slightly
        # deflating the average.
        avg_bleu4 = total_bleu4 / len(dev_loader)
        return avg_loss, avg_bleu4

    # --- main training loop -------------------------------------------------
    best_bleu4 = 0.0
    patience = config.patience
    no_improve_epochs = 0

    for epoch in range(config.epochs):
        train_loss = train_epoch(model, train_loader, optimizer, criterion, device, scaler, epoch)
        dev_loss, dev_bleu4 = validate_epoch(model, dev_loader, criterion, device)
        
        print(f'Epoch [{epoch+1}/{config.epochs}], Train Loss: {train_loss:.4f}, Dev Loss: {dev_loss:.4f}, Dev BLEU-4: {dev_bleu4:.4f}')
        logging.info(f'Epoch [{epoch+1}/{config.epochs}], Train Loss: {train_loss:.4f}, Dev Loss: {dev_loss:.4f}, Dev BLEU-4: {dev_bleu4:.4f}')
        
        # Early stopping / checkpointing on best dev BLEU-4.
        if dev_bleu4 > best_bleu4:
            best_bleu4 = dev_bleu4
            no_improve_epochs = 0
            # Save the underlying module when wrapped in DataParallel so the
            # checkpoint loads into a plain (single-GPU/CPU) model.
            if isinstance(model, nn.DataParallel):
                torch.save(model.module.state_dict(), config.save_model_path)
            else:
                torch.save(model.state_dict(), config.save_model_path)
            print(f"Model saved with BLEU-4: {dev_bleu4:.4f}")
            logging.info(f"Model saved with BLEU-4: {dev_bleu4:.4f}")
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= patience:
                print(f"Early stopping at epoch {epoch+1}")
                logging.info(f"Early stopping at epoch {epoch+1}")
                break

        # Stop early once BLEU-4 reaches the configured threshold.
        if dev_bleu4 >= config.bleu4_threshold:
            print(f"BLEU-4 score reached the threshold {config.bleu4_threshold:.4f}, stopping training early.")
            logging.info(f"BLEU-4 score reached the threshold {config.bleu4_threshold:.4f}, stopping training early.")
            break

    # Final evaluation on the test set with the best checkpoint.
    print("Loading best model for testing...")
    if os.path.exists(config.save_model_path):
        if isinstance(model, nn.DataParallel):
            model.module.load_state_dict(torch.load(config.save_model_path, map_location=device))
        else:
            model.load_state_dict(torch.load(config.save_model_path, map_location=device))
        
        test_loss, test_bleu4 = validate_epoch(model, test_loader, criterion, device)
        print(f'Test Loss: {test_loss:.4f}, Test BLEU-4: {test_bleu4:.4f}')
        logging.info(f'Test Loss: {test_loss:.4f}, Test BLEU-4: {test_bleu4:.4f}')
    else:
        print("No saved model found. Skipping testing.")
        logging.info("No saved model found. Skipping testing.")