import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from model.model import SPIN
from data import get_dataloaders
import time
import os
from tqdm import tqdm
from torch.utils.flop_counter import FlopCounterMode

# ---------- tqdm terminal-display handling ----------
# Detect whether stdout is an interactive terminal; tqdm progress bars are
# disabled (or forced to ASCII) when output is redirected to a file/pipe.
import sys

is_terminal = sys.stdout.isatty()

# Training configuration
class Config:
    """Central container for all training/model hyper-parameters and paths.

    Instantiating this class also ensures the checkpoint directory exists.
    """

    def __init__(self):
        # --- data settings ---
        self.root_dir = 'SR_Datasets'   # dataset root directory
        self.scale_factor = 4           # super-resolution upscale factor
        self.batch_size = 2             # samples per mini-batch
        self.patch_size = 16            # crop size of training patches
        self.num_workers = 8            # DataLoader worker processes

        # --- model hyper-parameters ---
        self.height = 128
        self.width = 128
        self.grid_size = (8, 8)
        self.num_iters = 2
        self.hidden_dim = 64
        self.num_heads = 8
        self.patch_size_model = 8
        self.overlap = 0
        self.num_blocks = 4

        # --- optimization settings ---
        self.lr = 1e-4                  # initial learning rate
        self.num_epochs = 40            # total training epochs
        self.save_interval = 2          # epochs between periodic checkpoints
        self.val_interval = 2           # epochs between validation passes
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # --- output location (created eagerly so saves never fail) ---
        self.checkpoint_dir = 'checkpoints'
        os.makedirs(self.checkpoint_dir, exist_ok=True)

# Training entry point
def train():
    """Run the full SPIN training loop: train, validate, checkpoint.

    All hyper-parameters come from ``Config``. The best model (lowest
    validation L1 loss) is saved as ``best_model.pth`` and periodic
    checkpoints as ``model_epoch_{n}.pth`` under ``cfg.checkpoint_dir``.
    """
    cfg = Config()

    # Build the model on the configured device.
    model = SPIN(
        height=cfg.height,
        width=cfg.width,
        grid_size=cfg.grid_size,
        num_iters=cfg.num_iters,
        hidden_dim=cfg.hidden_dim,
        num_heads=cfg.num_heads,
        patch_size=cfg.patch_size_model,
        overlap=cfg.overlap,
        num_blocks=cfg.num_blocks,
        upscale_factor=cfg.scale_factor
    ).to(cfg.device)

    # L1 loss is the common choice for super-resolution training.
    criterion = nn.L1Loss()

    optimizer = optim.Adam(model.parameters(), lr=cfg.lr)

    # Halve the LR when validation loss plateaus for `patience` checks.
    # NOTE: the deprecated `verbose=True` flag was dropped (removed in
    # recent torch releases).
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=5
    )

    # Data loaders
    print("loading...")
    train_loader, val_loader = get_dataloaders(
        batch_size=cfg.batch_size,
        num_workers=cfg.num_workers
    )
    print("loaded")

    best_val_loss = float('inf')
    val_loss = 'N/A'  # placeholder until the first validation pass runs
    print("Begin training...")
    for epoch in tqdm(
        range(1, cfg.num_epochs + 1),
        disable=not is_terminal,
        ascii=not is_terminal,
    ):
        # ---- training phase ----
        model.train()
        train_loss = 0.0
        start_time = time.time()

        for batch in tqdm(train_loader, disable=not is_terminal):
            # Move the batch to the device once (the original code moved
            # the dict and then each tensor again — redundant work).
            lr_images = batch['lr'].to(cfg.device, non_blocking=True)
            hr_images = batch['hr'].to(cfg.device, non_blocking=True)

            # Forward + backward + parameter update.
            optimizer.zero_grad()
            outputs = model(lr_images)
            loss = criterion(outputs, hr_images)
            # BUG FIX: loss.backward() was missing, so optimizer.step()
            # had no gradients and the model never learned.
            loss.backward()
            optimizer.step()

            # BUG FIX: the running loss was never accumulated, so the
            # reported epoch loss was always 0. Weight by batch size so
            # the final division by the dataset size yields a true mean.
            train_loss += loss.item() * lr_images.size(0)

        # Mean training loss over all samples.
        train_loss /= len(train_loader.dataset)

        # ---- validation phase ----
        if epoch % cfg.val_interval == 0:
            model.eval()
            val_loss = 0.0
            with torch.no_grad():
                for batch in val_loader:
                    lr_images = batch['lr'].to(cfg.device)
                    hr_images = batch['hr'].to(cfg.device)
                    outputs = model(lr_images)
                    loss = criterion(outputs, hr_images)
                    val_loss += loss.item() * lr_images.size(0)

            val_loss /= len(val_loader.dataset)

            # LR schedule tracks the validation loss.
            scheduler.step(val_loss)

            # Keep the best model by validation loss.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                torch.save(model.state_dict(),
                           os.path.join(cfg.checkpoint_dir, 'best_model.pth'))
                print(f"保存最佳模型，验证损失: {val_loss:.6f}")

        # Periodic checkpoint, independent of validation quality.
        if epoch % cfg.save_interval == 0:
            torch.save(model.state_dict(),
                       os.path.join(cfg.checkpoint_dir, f'model_epoch_{epoch}.pth'))

        # Per-epoch summary (the original printed this twice; once is enough).
        epoch_time = time.time() - start_time
        print(f"Epoch {epoch}/{cfg.num_epochs} - "
              f"训练损失: {train_loss:.6f} - "
              f"验证损失: {val_loss if isinstance(val_loss, float) else 'N/A'} - "
              f"时间: {epoch_time:.2f}s")

    print("训练完成!")

if __name__ == '__main__':
    # train()

    # FLOP measurement: count the FLOPs of a single SPIN forward pass at
    # the training input resolution.
    cfg = Config()

    model = SPIN(
        height=cfg.height,
        width=cfg.width,
        grid_size=cfg.grid_size,
        num_iters=cfg.num_iters,
        hidden_dim=cfg.hidden_dim,
        num_heads=cfg.num_heads,
        patch_size=cfg.patch_size_model,
        overlap=cfg.overlap,
        num_blocks=cfg.num_blocks,
        upscale_factor=cfg.scale_factor
    ).to(cfg.device)

    # Dummy input of shape (batch, channels, height, width); resolution is
    # taken from the config instead of being hard-coded (same values: 128x128).
    inputs = torch.rand(2, 3, cfg.height, cfg.width, device=cfg.device)

    flop_counter = FlopCounterMode(model)
    with flop_counter:
        # Call the module itself rather than .forward() so __call__ hooks
        # and dispatch machinery run normally.
        model(inputs)
    print(flop_counter.get_total_flops())