# Main GAN training script

import torch
import logging
import os

from datetime import datetime
from torch.utils.data import DataLoader
from torch.optim import Adam
from src.models.struct_encoder import StructEncoder
from src.models.style_encoder import StyleEncoder
from src.models.decoder import GlyphDecoder
from src.models.discriminator import Discriminator
from src.training.losses import PathLoss
from src.data.dataset import FontDataset

# Module-level logger (named after this module) and the compute device:
# prefer CUDA when a GPU is available, otherwise fall back to CPU.
logger = logging.getLogger(__name__)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train(config):
    """Train the glyph GAN: structure/style encoders, decoder, and discriminator.

    Args:
        config: dict expected to contain 'batch_size', 'lr_g', 'lr_d',
            and 'epochs'.

    Returns:
        Tuple ``(struct_encoder, style_encoder, decoder, discriminator)``
        of the trained modules.

    Raises:
        Re-raises any exception from model construction after logging it.
    """
    try:
        # Model initialization.
        struct_encoder = StructEncoder(vocab_size=100, d_model=512).to(device)
        style_encoder = StyleEncoder(latent_dim=256).to(device)
        decoder = GlyphDecoder(struct_dim=256).to(device)
        discriminator = Discriminator().to(device)
    except Exception as e:
        logger.error('Model initialization failed: %s', e)
        raise

    # Dataset and loader. The original built these twice: the first copy
    # referenced an undefined `base_config` (a NameError at call time) and
    # was then shadowed — only the valid construction is kept.
    dataset = FontDataset()
    dataloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)

    # Optimizer configuration.
    # NOTE(review): struct_encoder's parameters are in neither optimizer, yet
    # its output feeds the generator graph, so it accumulates gradients that
    # are never applied. If it is meant to stay frozen, wrap its forward in
    # torch.no_grad(); if not, add its parameters to opt_g — confirm intent.
    opt_g = Adam(
        list(style_encoder.parameters()) +
        list(decoder.parameters()),
        lr=config['lr_g'],
        betas=(0.5, 0.999)
    )
    opt_d = Adam(
        discriminator.parameters(),
        lr=config['lr_d'],
        weight_decay=1e-5,
        betas=(0.5, 0.999)
    )

    # Instantiate the loss module once, outside the inner loop.
    path_criterion = PathLoss()

    # Timestamped directory for checkpoints/logs.
    log_dir = os.path.join('logs', datetime.now().strftime('%Y%m%d_%H%M%S'))
    os.makedirs(log_dir, exist_ok=True)

    # Training loop.
    for epoch in range(config['epochs']):
        for batch_idx, batch in enumerate(dataloader):
            # Batches are dicts: 'image' is the style reference image,
            # 'tokens' is the ground-truth glyph path token sequence.
            style_img = batch['image'].to(device).float()
            true_path = batch['tokens'].to(device).long()

            # Forward pass.
            struct_feat = struct_encoder({'tokens': true_path})
            style_feat = style_encoder({'image': style_img}, struct_feat)
            pred_path = decoder({'style_feat': style_feat})

            # Discriminator (critic) update. The generator maximizes D(fake)
            # via adv_loss = -D(fake).mean(), so the critic must maximize
            # D(real) - D(fake), i.e. minimize fake_score - real_score.
            # The original minimized real + fake, which wrongly pushes the
            # critic's score on REAL samples down as well.
            opt_d.zero_grad()
            real_score = discriminator(true_path).mean()
            fake_score = discriminator(pred_path.detach()).mean()
            d_loss = fake_score - real_score
            d_loss.backward()
            torch.nn.utils.clip_grad_norm_(discriminator.parameters(), 1.0)
            opt_d.step()

            # Generator update: adversarial term plus path reconstruction.
            opt_g.zero_grad()
            adv_loss = -discriminator(pred_path).mean()
            path_loss = path_criterion(pred_path, true_path)
            total_g_loss = adv_loss + path_loss
            total_g_loss.backward()
            # Clip every module opt_g updates (the original clipped only the
            # decoder, leaving the style encoder's gradients unclipped).
            torch.nn.utils.clip_grad_norm_(
                list(style_encoder.parameters()) + list(decoder.parameters()),
                1.0,
            )
            opt_g.step()

            # Periodic checkpoint and progress log.
            if batch_idx % 100 == 0:
                torch.save({
                    'struct_encoder': struct_encoder.state_dict(),
                    'style_encoder': style_encoder.state_dict(),
                    'decoder': decoder.state_dict(),
                    'discriminator': discriminator.state_dict(),
                    'opt_g': opt_g.state_dict(),
                    'opt_d': opt_d.state_dict(),
                }, os.path.join(log_dir, f'ckpt_epoch{epoch}_batch{batch_idx}.pt'))

                logger.info(
                    'Epoch %d/%d | Batch %d | D_loss: %.4f | G_adv: %.4f | G_path: %.4f',
                    epoch + 1, config['epochs'], batch_idx,
                    d_loss.item(), adv_loss.item(), path_loss.item(),
                )

    return struct_encoder, style_encoder, decoder, discriminator