from satvideo_dataset import SatVideoTrainSetLoader, SatVideoTestSetLoader
from metrics import *
from utils import *
import model.Config as config
from model.SCTransNet import SCTransNet as SCTransNet

# Optional dependency: try to import TensorBoard; fall back to no-op logging
# when it is not installed so training can still run.
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    SummaryWriter = None
    TENSORBOARD_AVAILABLE = False
    print("Warning: tensorboard not available, training will continue without logging")
else:
    TENSORBOARD_AVAILABLE = True
from torch.utils.data import DataLoader
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import os
import time
from collections import OrderedDict

# Command-line arguments for training configuration.
parser = argparse.ArgumentParser(description="PyTorch SatVideoIRSDT train")
parser.add_argument("--model_name", default='SCTransNet', type=str, help="model name")
parser.add_argument("--dataset_dir", default=r'./SatVideoIRSDT', type=str, help="dataset directory")
parser.add_argument("--optimizer_name", default='Adam', type=str, help="optimizer name: AdamW, Adam, Adagrad, SGD")
parser.add_argument("--epochs", default=1000, type=int, help="number of epochs")
parser.add_argument("--batchSize", type=int, default=16, help="training batch size")
parser.add_argument("--patchSize", type=int, default=256, help="training patch size")
parser.add_argument("--lr", type=float, default=0.0005, help="learning rate")
parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader")
parser.add_argument("--save_path", default='./results/', type=str, help="save path")
parser.add_argument("--every_save_pth", default=100, type=int, help="save model every N epochs")
parser.add_argument("--every_print", default=10, type=int, help="print loss every N iterations")
parser.add_argument("--resume", default=None, type=str, help="path to checkpoint to resume")

# Parsed at import time; `opt` is read as a module-level global by train().
opt = parser.parse_args()

# Image normalization config consumed by the dataset loaders.
# NOTE(review): mean=0/std=1 is an identity normalization — presumably the
# loaders normalize per-image otherwise; confirm against SatVideoTrainSetLoader.
opt.img_norm_cfg = {'mean': 0.0, 'std': 1.0}

def weights_init_kaiming(m):
    """Kaiming-style initializer; pass to nn.Module.apply().

    Conv layers get fan-in Kaiming normal weights; Linear layers get fan-out
    Kaiming normal weights with zero bias; BatchNorm layers get N(1, 0.02)
    weights with zero bias. Other layer types are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in layer_type:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        nn.init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)

class Net(nn.Module):
    """Wrapper that builds the requested segmentation model and exposes its loss.

    Args:
        model_name: currently only 'SCTransNet' is supported.
        mode: forwarded to the underlying model (e.g. 'train').

    Raises:
        ValueError: if model_name is not recognized.
    """
    def __init__(self, model_name, mode):
        super(Net, self).__init__()
        if model_name == 'SCTransNet':
            config_vit = config.get_SCTrans_config()
            self.net = SCTransNet(config_vit, mode=mode, n_classes=1, deepsuper=True)
        else:
            raise ValueError(f"Unknown model: {model_name}")

        # Kaiming initialization for all Conv/Linear/BatchNorm submodules.
        self.net.apply(weights_init_kaiming)

    def forward(self, x):
        return self.net(x)

    def loss(self, pred, target):
        """BCE-with-logits loss, averaged over heads under deep supervision.

        `pred` is either a single logit tensor or a tuple/list of logit
        tensors (one per deep-supervision head), each shaped like `target`.
        """
        # Hoisted: the original constructed a new BCEWithLogitsLoss per head.
        criterion = nn.BCEWithLogitsLoss()
        # Accept list as well as tuple — deep-supervision outputs may be
        # returned as either; the original crashed on a list.
        if isinstance(pred, (tuple, list)):
            loss_total = sum(criterion(p, target) for p in pred)
            return loss_total / len(pred)
        return criterion(pred, target)

def save_checkpoint(state, save_path, filename):
    """Serialize `state` with torch.save to save_path/filename.

    Creates save_path (including parents) if it does not exist.
    """
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(...)` pattern.
    os.makedirs(save_path, exist_ok=True)
    torch.save(state, os.path.join(save_path, filename))

def train():
    """Full training run: data, model, optimization, logging, checkpointing.

    Reads all configuration from the module-level `opt` namespace and
    requires a CUDA device. Writes checkpoints and (optionally) TensorBoard
    logs under opt.save_path/SatVideoIRSDT/<model_name>/.
    """
    print(f"Training {opt.model_name} on SatVideoIRSDT dataset")
    print(f"Dataset directory: {opt.dataset_dir}")
    print(f"Batch size: {opt.batchSize}, Patch size: {opt.patchSize}")
    print(f"Learning rate: {opt.lr}, Epochs: {opt.epochs}")

    # Data pipeline
    train_set = SatVideoTrainSetLoader(dataset_dir=opt.dataset_dir,
                                       patch_size=opt.patchSize,
                                       img_norm_cfg=opt.img_norm_cfg)
    train_loader = DataLoader(dataset=train_set,
                              num_workers=opt.threads,
                              batch_size=opt.batchSize,
                              shuffle=True)

    print(f"Training samples: {len(train_set)}")

    # Model
    net = Net(model_name=opt.model_name, mode='train').cuda()

    # Optimizer
    if opt.optimizer_name == 'Adam':
        optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    elif opt.optimizer_name == 'AdamW':
        optimizer = optim.AdamW(net.parameters(), lr=opt.lr)
    elif opt.optimizer_name == 'SGD':
        optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9)
    else:
        raise ValueError(f"Unknown optimizer: {opt.optimizer_name}")

    # LR schedule: halve the learning rate every 200 epochs
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5)

    # Output directory
    save_path = os.path.join(opt.save_path, 'SatVideoIRSDT', opt.model_name)
    os.makedirs(save_path, exist_ok=True)

    # TensorBoard (if available)
    writer = None
    if TENSORBOARD_AVAILABLE:
        writer = SummaryWriter(os.path.join(save_path, 'logs'))
        print("TensorBoard logging enabled")
    else:
        print("TensorBoard logging disabled")

    # Resume from checkpoint
    start_epoch = 0
    if opt.resume:
        if os.path.isfile(opt.resume):
            print(f"Loading checkpoint '{opt.resume}'")
            # map_location='cpu' lets a checkpoint saved on another device
            # load here; load_state_dict moves tensors to the model's device.
            checkpoint = torch.load(opt.resume, map_location='cpu')
            start_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            # BUG FIX: the original never restored the LR schedule, so a
            # resumed run restarted at the initial learning rate. Prefer the
            # saved scheduler state; replay steps for old checkpoints that
            # predate the 'scheduler' key.
            if 'scheduler' in checkpoint:
                scheduler.load_state_dict(checkpoint['scheduler'])
            else:
                for _ in range(start_epoch):
                    scheduler.step()
            print(f"Loaded checkpoint '{opt.resume}' (epoch {checkpoint['epoch']})")
        else:
            print(f"No checkpoint found at '{opt.resume}'")

    # Training loop
    net.train()
    # BUG FIX: defined up front so the final save below cannot hit a
    # NameError when start_epoch >= opt.epochs (loop body never runs).
    avg_loss = 0.0
    for epoch in range(start_epoch, opt.epochs):
        epoch_loss = 0.0
        start_time = time.time()

        for i, (img, mask) in enumerate(train_loader):
            img = img.cuda()
            mask = mask.cuda()

            optimizer.zero_grad()
            pred = net(img)
            loss = net.loss(pred, mask)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            # Periodic progress print
            if (i + 1) % opt.every_print == 0:
                print(f'Epoch [{epoch+1}/{opt.epochs}], Step [{i+1}/{len(train_loader)}], Loss: {loss.item():.6f}')

        # Step once per epoch
        scheduler.step()

        # max(..., 1) guards against ZeroDivisionError on an empty loader
        avg_loss = epoch_loss / max(len(train_loader), 1)
        epoch_time = time.time() - start_time

        print(f'Epoch [{epoch+1}/{opt.epochs}] completed in {epoch_time:.2f}s, Average Loss: {avg_loss:.6f}, LR: {scheduler.get_last_lr()[0]:.8f}')

        # TensorBoard scalars (if enabled)
        if writer is not None:
            writer.add_scalar('Loss/Train', avg_loss, epoch)
            writer.add_scalar('Learning_Rate', scheduler.get_last_lr()[0], epoch)

        # Periodic checkpoint; now includes the scheduler so resume is exact
        if (epoch + 1) % opt.every_save_pth == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
                'loss': avg_loss
            }, save_path, f'checkpoint_epoch_{epoch+1}.pth')
            print(f'Model saved at epoch {epoch+1}')

    # Final checkpoint
    save_checkpoint({
        'epoch': opt.epochs,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'loss': avg_loss
    }, save_path, 'final_model.pth')

    if writer is not None:
        writer.close()
    print('Training completed!')

# Script entry point: run a full training session when executed directly.
if __name__ == '__main__':
    train()