import os
import re
import time
import torch

from lightning import Trainer, seed_everything
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint, Callback
from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger
from .networks import AEAPraNet_PVT
from .command_helper import Command
from .dataloaders import get_test_data_loader, get_data_loader
from .losses import get_loss_function
from .models import get_network_model
from .simple_image_segmentation_model import SimpleImageSegmentationModel
from .progress_bar import CustomProgressBar
from .utils import dual_print

# Filename stem for the checkpoint written by ModelCheckpoint in train().
best_model_name = 'best_model'
# NOTE(review): the two keys below are not referenced in this file —
# presumably consumed by sibling modules; confirm before removing.
best_train_loss = 'best_train_loss'
best_val_loss = 'best_val_loss'


class PerformanceMonitorCallback(Callback):
    """Lightning callback that tracks per-epoch wall-clock time and GPU memory.

    Every 10 epochs it prints the average duration of the last (up to) five
    epochs and, when CUDA is available, the currently allocated GPU memory.
    At the end of ``fit`` it prints total and average epoch times.
    """

    def __init__(self):
        super().__init__()
        self.epoch_times = []   # wall-clock seconds of each completed train epoch
        self.start_time = None  # timestamp set at epoch start; None until first epoch

    def on_train_epoch_start(self, trainer, pl_module):
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module):
        # Fix: compare explicitly against None — the old truthiness check
        # (`if self.start_time:`) would silently skip the measurement for a
        # falsy timestamp, and hides the intent (unset vs. zero).
        if self.start_time is not None:
            epoch_time = time.time() - self.start_time
            self.epoch_times.append(epoch_time)

            # Report performance info every 10 epochs.
            current_epoch = trainer.current_epoch
            if current_epoch > 0 and current_epoch % 10 == 0:
                # Average over the most recent (up to) 5 epochs.
                avg_time = sum(self.epoch_times[-5:]) / min(len(self.epoch_times), 5)
                dual_print(f"📊 Epoch {current_epoch}: 近5个epoch平均时间 {avg_time:.2f}s")

                # GPU memory currently allocated (GiB), if CUDA is available.
                if torch.cuda.is_available():
                    gpu_memory = torch.cuda.memory_allocated() / 1024**3
                    dual_print(f"   GPU内存使用: {gpu_memory:.2f}GB")

    def on_fit_end(self, trainer, pl_module):
        """Print aggregate timing statistics once training finishes."""
        if self.epoch_times:
            total_time = sum(self.epoch_times)
            avg_epoch_time = total_time / len(self.epoch_times)
            dual_print(f"🎯 训练完成统计:")
            dual_print(f"   - 总训练时间: {total_time/3600:.2f} 小时")
            dual_print(f"   - 平均epoch时间: {avg_epoch_time:.2f} 秒")


def _check_gpu_config(params):
    """Print a summary of the detected GPU setup plus tuning hints.

    Reads ``batch_size`` and ``mixed_precision`` from *params* (via ``.get``)
    and prints batch-size / mixed-precision suggestions derived from the
    total memory of GPU 0. Output-only; nothing is returned or mutated.
    """
    if not torch.cuda.is_available():
        dual_print("⚠️  未检测到GPU")
        return

    gpu_count = torch.cuda.device_count()
    gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
    gpu_name = torch.cuda.get_device_name(0)
    dual_print(f"🔧 GPU配置: {gpu_count}个GPU, {gpu_name}, 显存: {gpu_memory:.1f}GB")

    # Batch-size hints based on available memory.
    batch_size = params.get('batch_size', 32)
    if gpu_memory < 8 and batch_size > 16:
        dual_print(f"💡 显存较小({gpu_memory:.1f}GB), 建议batch_size <= 16")
        dual_print(f"💡 建议启用混合精度训练节省显存")
    elif gpu_memory > 20 and batch_size < 32:
        dual_print(f"💡 显存充足({gpu_memory:.1f}GB), 可以增加batch_size")

    # Mixed-precision hints.
    if params.get('mixed_precision', False):
        dual_print("⚡ 混合精度训练已启用")
        dual_print("   优点: 显存使用减少~50%, 训练速度提升15-50%")
        dual_print("   注意: 如遇训练不稳定，可关闭混合精度")
    elif any(tag in gpu_name for tag in ("RTX", "V100", "A100", "T4")):
        # GPU family looks AMP-capable; suggest enabling mixed precision.
        dual_print(f"💡 您的GPU ({gpu_name}) 支持混合精度训练，可考虑启用")
        if gpu_memory < 12:
            dual_print("   特别推荐：显存<12GB时启用混合精度可显著节省显存")


def get_best_model_checkpoint(opt):
    """Locate the newest ``lightning_logs`` version directory under ``opt.run_dir``.

    Scans ``<opt.run_dir>/lightning_logs`` for entries containing a number
    (e.g. ``version_3``) and selects the highest one.

    Args:
        opt: Options object exposing a ``run_dir`` attribute.

    Returns:
        tuple: ``(version_number, best_ckpt_path, last_ckpt_path)``. When no
        versioned directory exists, falls back consistently to version 0 /
        ``version_0``. (Previously the number stayed at -1000 while the path
        said ``version_0``, so callers built ``version_-1000`` directories;
        a missing ``lightning_logs`` directory also crashed ``os.listdir``.)
    """
    best_path = None
    best_number = -1  # below any real version number, including 0
    base_path = os.path.join(opt.run_dir, 'lightning_logs')
    # Guard against a missing logs directory instead of letting listdir raise.
    if os.path.isdir(base_path):
        for entry in os.listdir(base_path):
            match = re.search(r'\d+', entry)
            if match:
                number = int(match.group())
                if number > best_number:
                    best_number = number
                    best_path = entry
    if best_path is None:
        # Keep the reported number consistent with the fallback path.
        best_path = 'version_0'
        best_number = 0

    checkpoints_dir = os.path.join(base_path, best_path, 'checkpoints')
    best_ckpt_path = os.path.join(checkpoints_dir, f'{best_model_name}.ckpt')
    last_ckpt_path = os.path.join(checkpoints_dir, 'last.ckpt')

    return best_number, best_ckpt_path, last_ckpt_path


def test(args=None):
    """Evaluate the model on the test split using the best saved checkpoint.

    Resolves the newest ``lightning_logs`` version (unless an explicit
    checkpoint path is supplied via ``best_model_path``), builds the test
    loader and model, and runs ``Trainer.test``.
    """
    command = Command(isTest=True, args=args)

    best_number, best_ckpt_path, _ = get_best_model_checkpoint(command.params)

    # An explicitly supplied checkpoint path overrides the auto-detected one.
    if command.params.best_model_path is not None:
        best_ckpt_path = command.params.best_model_path

    run_dir = getattr(command.params, 'run_dir', './runs')
    command.params.run_dir = os.path.join(
        str(run_dir), 'lightning_logs', f'version_{best_number}'
    )

    loader = get_test_data_loader(command.params)
    network = get_network_model(command.params, isTrain=False)
    # No loss function needed at test time.
    model = SimpleImageSegmentationModel(net=network, loss_func=None, opt=command.params)

    progress = CustomProgressBar(command.params.dataset_name, command.params.model_name)
    trainer = Trainer(
        default_root_dir=command.params.run_dir,
        benchmark=True,
        inference_mode=False,
        callbacks=[progress],
    )
    trainer.test(model, loader, ckpt_path=best_ckpt_path)


def train(args=None):
    """Train AEAPraNet_PVT with PyTorch Lightning using the parsed options.

    Builds data loaders, model, callbacks (checkpointing on best ``val_MIOU``,
    optional performance monitor and early stopping) and CSV/TensorBoard
    loggers, then runs ``Trainer.fit``. Progress is printed via ``dual_print``.

    Args:
        args: Optional argument list forwarded to ``Command``; when None the
            process command line is used.
    """
    command = Command(args=args)

    # Report GPU setup and batch-size / mixed-precision hints.
    _check_gpu_config(command.params)

    if command.params.custom_seed is not None:
        seed_everything(command.params.custom_seed)

    # Build the training / validation data loaders.
    dual_print("🚀 创建数据加载器...")
    train_loader, valid_loader = get_data_loader(command.params)

    network = AEAPraNet_PVT(num_classes=command.params.classes)

    # A network may declare that it computes its own loss; otherwise use the
    # configured loss function. (Fix: replaces a bare `except: pass` probe
    # that silently swallowed *every* exception, not just a missing attribute.)
    use_custom_loss_function = getattr(network, 'use_custom_loss_function', False)
    loss_func = None if use_custom_loss_function else get_loss_function(command.params)

    model = SimpleImageSegmentationModel(net=network, loss_func=loss_func, opt=command.params)

    # Callbacks: progress bar + checkpoint of the best validation mIoU.
    callbacks = [
        CustomProgressBar(command.params.dataset_name, command.params.model_name),
        ModelCheckpoint(
            filename=best_model_name,
            monitor='val_MIOU',
            mode='max',
            save_top_k=1,
            save_last=False,
            save_weights_only=True,
            save_on_train_epoch_end=True,
            enable_version_counter=False,
        )
    ]

    # Performance monitoring (enabled by default).
    if command.params.get('enable_performance_monitor', True):
        callbacks.append(PerformanceMonitorCallback())
        dual_print("📊 已启用性能监控")

    if command.params.get('need_early_stop', False):
        callbacks.append(EarlyStopping(monitor="val_MIOU", mode="max", patience=50))

    # Make sure the run directory exists before the loggers write into it.
    run_dir = str(command.params.get('run_dir', './runs'))
    os.makedirs(run_dir, exist_ok=True)

    csv_logger = CSVLogger(run_dir)

    trainer_kwargs = {
        'accelerator': command.params.get('accelerator', 'auto'),
        'devices': command.params.get('devices', 'auto'),
        'default_root_dir': run_dir,
        'logger': [
            csv_logger,
            # Reuse the CSV logger's version so both loggers write into the
            # same lightning_logs/version_N directory.
            TensorBoardLogger(run_dir, version=csv_logger.version)
        ],
        'benchmark': True,
        'max_epochs': command.params.get('end_epoch', 100),
        'callbacks': callbacks
    }

    # Optional mixed-precision training.
    if command.params.get('mixed_precision', False):
        trainer_kwargs['precision'] = '16-mixed'
        dual_print("⚡ 启用混合精度训练")

    trainer = Trainer(**trainer_kwargs)

    # Start training.
    start_time = time.time()
    dual_print(f"🚀 开始训练 {command.params.model_name}")
    dual_print(f"📊 数据集: {command.params.dataset_name}")
    dual_print(f"📦 批次大小: {command.params.batch_size}")
    dual_print(f"🔢 总epoch数: {command.params.get('end_epoch', 100)}")

    trainer.fit(model, train_loader, valid_loader)

    # Training finished.
    total_time = time.time() - start_time
    dual_print(f"✅ 训练完成! 总用时: {total_time/3600:.2f} 小时")
