from pathlib import Path
from functools import partial
import argparse
import yaml
import glob
from tqdm import tqdm
import importlib
from typing import Dict, Any, Tuple, Optional, List, Union

import torch
import torch.nn as nn
import torchvision.transforms as transforms

from model.myunet import HighResolutionModel
from utils.sensors import preprocess_sensor_data
from data.pa_us_dataset import PaUsDataset


def load_experiment_config(experiment_dir: str) -> Dict[str, Any]:
    """Load the experiment's YAML configuration.

    Args:
        experiment_dir: Directory expected to contain ``experiment_config.yaml``.

    Returns:
        The parsed configuration as a dictionary.

    Raises:
        FileNotFoundError: If the config file does not exist.
    """
    config_path = Path(experiment_dir) / "experiment_config.yaml"
    if not config_path.exists():
        raise FileNotFoundError(f"Config file not found: {config_path.absolute()}")

    with open(config_path, 'r', encoding='utf-8') as f:
        return yaml.safe_load(f)


def find_checkpoint(experiment_dir: str, ckpt_filename: Optional[str] = None) -> str:
    """Locate a checkpoint under ``<experiment_dir>/checkpoints``.

    Args:
        experiment_dir: Experiment directory containing a ``checkpoints`` folder.
        ckpt_filename: Exact checkpoint filename to use; when omitted, the
            most recently modified ``*.ckpt`` file is chosen.

    Returns:
        Path to the checkpoint file as a string.

    Raises:
        FileNotFoundError: If the checkpoints directory, the named file, or
            any ``*.ckpt`` file cannot be found.
    """
    checkpoint_dir = Path(experiment_dir) / "checkpoints"
    if not checkpoint_dir.exists():
        raise FileNotFoundError(f"Checkpoints directory not found: {checkpoint_dir}")

    # Explicit filename takes priority over auto-discovery.
    if ckpt_filename:
        candidate = checkpoint_dir / ckpt_filename
        if not candidate.exists():
            raise FileNotFoundError(f"Specified checkpoint not found: {candidate}")
        return str(candidate)

    # Otherwise pick the newest checkpoint by modification time.
    candidates = sorted(checkpoint_dir.glob("*.ckpt"), key=lambda p: p.stat().st_mtime)
    if not candidates:
        raise FileNotFoundError(f"No checkpoint files found in: {checkpoint_dir}")
    return str(candidates[-1])


def save_individual_images(images: torch.Tensor, output_dir: Union[str, Path], prefix: str, 
                          filenames: Optional[List[str]] = None, labels: Optional[List[str]] = None) -> None:
    """Save each image of a batch as a separate JPEG file (no mosaicking).

    Args:
        images: Batch of image tensors, iterated along dim 0. Non-tensor
            entries are skipped (only the log line is printed).
        output_dir: Directory to write into (created if missing).
        prefix: Filename prefix, e.g. "predict" -> "predict_<name>.jpg".
        filenames: Optional source filenames whose stems name the outputs.
        labels: Optional labels used for naming when no filename is given.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    
    for i, img in enumerate(images):
        # Naming priority: source-file stem > label > zero-padded index.
        if filenames and i < len(filenames):
            base_name = Path(filenames[i]).stem
            filename = f"{prefix}_{base_name}.jpg"
        elif labels and i < len(labels):
            filename = f"{prefix}_{labels[i]}.jpg"
        else:
            filename = f"{prefix}_{i:04d}.jpg"
        
        filepath = output_dir / filename
        
        if isinstance(img, torch.Tensor):
            img_np = img.squeeze().cpu().numpy()
            
            # CHW -> HWC so PIL interprets the channel axis correctly.
            if img_np.ndim == 3 and img_np.shape[0] in [1, 3]:
                img_np = img_np.transpose(1, 2, 0)
            
            # Heuristic: negative values imply [-1, 1] normalization; map to [0, 1].
            if img_np.min() < 0:
                img_np = (img_np + 1) / 2
            
            # FIX: clip before the uint8 cast so out-of-range values cannot
            # wrap around (e.g. 1.004 * 255 -> 0 instead of 255).
            img_np = (img_np.clip(0.0, 1.0) * 255).astype('uint8')
            
            # FIX: only drop a trailing singleton *channel* axis; the original
            # unguarded check would squeeze a 2-D array of width 1 to 1-D.
            if img_np.ndim == 3 and img_np.shape[-1] == 1:
                img_np = img_np.squeeze(-1)
            
            from PIL import Image  # local import: PIL only needed when saving
            if img_np.ndim == 2:
                img_pil = Image.fromarray(img_np, mode='L')
            else:
                img_pil = Image.fromarray(img_np, mode='RGB')
            
            img_pil.save(filepath)
        
        print(f"Saved {prefix} image: {filepath}")


def process_batch(model: nn.Module, sensor_batch: torch.Tensor, bfimg_batch: torch.Tensor, 
                 enable_segmentation: bool, device: torch.device) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Run one inference batch and return results moved back to CPU.

    Args:
        model: Network taking (sensor, beamformed image) inputs.
        sensor_batch: Batch of sensor inputs.
        bfimg_batch: Batch of beamformed-image inputs.
        enable_segmentation: When True, request the segmentation output too.
        device: Device to run inference on.

    Returns:
        ``(prediction, segmentation)`` on CPU; ``segmentation`` is ``None``
        when segmentation is disabled.
    """
    sensor_on_device = sensor_batch.to(device)
    bfimg_on_device = bfimg_batch.to(device)

    with torch.no_grad():
        if not enable_segmentation:
            prediction = model(sensor_on_device, bfimg_on_device)
            return prediction.cpu(), None
        prediction, segmentation = model(sensor_on_device, bfimg_on_device, return_segmentation=True)
        return prediction.cpu(), segmentation.cpu()


def load_model(model: nn.Module, checkpoint_path: str, device: torch.device) -> nn.Module:
    """Load Lightning-style checkpoint weights into ``model`` in place.

    Keys under ``state_dict`` prefixed with ``model.`` are stripped of the
    prefix, filtered against the model's own keys, and loaded; mismatches
    are reported as warnings rather than errors.

    Args:
        model: Target module to receive the weights.
        checkpoint_path: Path to the ``.ckpt`` file.
        device: ``map_location`` target for ``torch.load``.

    Returns:
        The same ``model`` instance with weights loaded.

    Raises:
        KeyError: If the checkpoint has no ``state_dict`` entry.
        ValueError: If no key carries the ``model.`` prefix.
    """
    checkpoint = torch.load(checkpoint_path, map_location=device)

    if 'state_dict' not in checkpoint:
        raise KeyError(f"Checkpoint missing 'state_dict' key. Available keys: {list(checkpoint.keys())}")

    # Strip the Lightning wrapper prefix from parameter names.
    prefix = 'model.'
    stripped = {
        key[len(prefix):]: value
        for key, value in checkpoint['state_dict'].items()
        if key.startswith(prefix)
    }

    if not stripped:
        raise ValueError("No model parameters found in checkpoint with 'model.' prefix")

    own_keys = set(model.state_dict().keys())
    usable = {key: value for key, value in stripped.items() if key in own_keys}

    extra_keys = set(stripped) - own_keys
    absent_keys = own_keys - set(usable)

    if extra_keys:
        print(f"Warning: Found {len(extra_keys)} unexpected keys in checkpoint (will be ignored)")
        if __debug__:
            print(f"First 5 unexpected keys: {list(extra_keys)[:5]}")

    if absent_keys:
        print(f"Warning: Found {len(absent_keys)} missing keys in model")
        if __debug__:
            print(f"First 5 missing keys: {list(absent_keys)[:5]}")

    # Prefer a strict load; degrade gracefully when keys are incomplete.
    try:
        model.load_state_dict(usable, strict=True)
        print("Model loaded successfully")
    except RuntimeError as e:
        print(f"Warning: Loading with strict=False due to: {e}")
        model.load_state_dict(usable, strict=False)

    return model


def create_model(model_type: str, model_config: Dict[str, Any], args: argparse.Namespace) -> nn.Module:
    """Instantiate a network by type name.

    Config values take priority; CLI arguments serve as fallbacks.

    Args:
        model_type: Model identifier (case-insensitive).
        model_config: ``model`` section of the experiment config.
        args: Parsed CLI arguments used as fallback values.

    Returns:
        The constructed model.

    Raises:
        ValueError: If ``model_type`` is not a supported identifier.
    """
    model_type = model_type.lower()

    def cfg(key, fallback):
        # Prefer the experiment config; fall back to the CLI argument.
        return model_config.get(key, fallback)

    dynamic_kwargs = {
        'in_channels': cfg('in_channels', 3),
        'up_mode': cfg('up_mode', 'upsample'),
        'merge_mode': cfg('merge_mode', 'concat'),
        'enable_segmentation': cfg('enable_segmentation', args.enable_segmentation),
        'encoder_mode': cfg('encoder_mode', args.encoder_mode),
        'enable_cam': cfg('enable_cam', args.enable_cam),
        'seg_use_multiscale': cfg('seg_use_multiscale', args.seg_use_multiscale),
        'seg_use_dropout': cfg('seg_use_dropout', args.seg_use_dropout),
        'seg_dropout_rate': cfg('seg_dropout_rate', args.seg_dropout_rate),
    }

    if model_type == 'myunet':
        model = HighResolutionModel()
        print("Using HighResolutionModel (Enhanced U-Net)")
        return model

    if model_type in ('aynet', 'model_aynet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'):
        return _create_dynamic_model(model_type, dynamic_kwargs)

    supported_types = ['aynet', 'model_aynet', 'myunet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat']
    raise ValueError(f"Unsupported model type: {model_type}. Supported types: {supported_types}")


def _create_dynamic_model(model_type: str, model_kwargs: Dict[str, Any]) -> nn.Module:
    """Import ``model.<model_type>`` and instantiate its network class.

    Modules containing 'aynet' are expected to export ``AYNet``; all others
    export ``YNet``. Keyword arguments are filtered against the class's
    ``__init__`` signature before instantiation.

    Args:
        model_type: Module suffix under the ``model`` package.
        model_kwargs: Candidate constructor keyword arguments.

    Returns:
        The constructed model instance.

    Raises:
        ImportError: If the module cannot be imported.
        AttributeError: If the expected class is missing from the module.
    """
    try:
        module_name = f"model.{model_type}"
        model_module = importlib.import_module(module_name)

        class_name = 'AYNet' if 'aynet' in model_type else 'YNet'
        ModelClass = getattr(model_module, class_name)

        # Keep only kwargs the constructor actually accepts.
        import inspect
        accepted = set(inspect.signature(ModelClass.__init__).parameters.keys())
        init_kwargs = {name: value for name, value in model_kwargs.items() if name in accepted}

        model = ModelClass(**init_kwargs)

        param_info = ", ".join(f"{name}={value}" for name, value in init_kwargs.items() if name != 'in_channels')
        print(f"Using {ModelClass.__name__} from {module_name} with {param_info}")

        return model

    except ImportError as e:
        raise ImportError(f"Failed to import model {model_type}: {e}")
    except AttributeError as e:
        raise AttributeError(f"Model class not found in {model_type}: {e}")


def setup_model(config: Dict[str, Any], checkpoint_path: str, device: torch.device, 
               args: argparse.Namespace) -> Tuple[nn.Module, bool]:
    """Build the model, place it on ``device`` in eval mode, and load weights.

    Args:
        config: Full experiment configuration (the ``model`` section is used).
        checkpoint_path: Checkpoint file to load weights from.
        device: Target device for the model.
        args: CLI arguments used as fallbacks for missing config values.

    Returns:
        ``(model, enable_segmentation)`` where the flag mirrors the config
        (or the CLI fallback).
    """
    model_config = config.get('model', {})
    model_type = model_config.get('model_type', args.model_type)

    model = create_model(model_type, model_config, args)
    model.to(device)
    model.eval()

    loaded_model = load_model(model, checkpoint_path, device)
    segmentation_flag = model_config.get('enable_segmentation', args.enable_segmentation)
    return loaded_model, segmentation_flag


def validate_config(config: Dict[str, Any], args: argparse.Namespace) -> Tuple[str, str]:
    """Resolve the dataset path and modality from config with CLI fallbacks.

    Resolution order for the dataset path:
    ``data.dataset_path`` -> ``args.dataset_path`` -> ``data.val_file`` ->
    ``data.train_file``.

    Args:
        config: Parsed experiment configuration dictionary.
        args: Parsed CLI arguments used as fallback values.

    Returns:
        A ``(dataset_path, modality)`` tuple.
        (FIX: the annotation previously claimed ``Tuple[str, str, float, float]``
        but the function has always returned exactly two values.)

    Raises:
        ValueError: If no dataset path can be resolved at all.
    """
    dataset_config = config.get('data', {})

    dataset_path = dataset_config.get('dataset_path', args.dataset_path)
    if not dataset_path:
        dataset_path = dataset_config.get('val_file', dataset_config.get('train_file', ''))
    if not dataset_path:
        raise ValueError("No dataset path found in config or provided as argument")

    modality = dataset_config.get('modality', args.modality)

    return dataset_path, modality


def create_prediction_report(output_dir: Path, processed_samples: int, batch_size: int, 
                           enable_segmentation: bool) -> None:
    """Write a markdown summary of the prediction run.

    Args:
        output_dir: Directory in which ``prediction_report.md`` is created.
        processed_samples: Number of samples that were processed.
        batch_size: Batch size used during inference.
        enable_segmentation: Whether segmentation outputs were produced.
    """
    import datetime

    # Assemble the report body first, then write it in one call.
    report_lines = [
        "# 预测结果报告\n\n",
        "## 基本信息\n\n",
        f"- 样本数量: {processed_samples}\n",
        f"- 批处理大小: {batch_size}\n",
        f"- 预测日期: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n",
        f"- 分割功能: {'启用' if enable_segmentation else '禁用'}\n",
        "\n## 说明\n\n",
        "预测结果已保存为单独的图像文件，按批次处理并立即保存。\n",
        "包含输入图像和预测图像。\n",
    ]
    if enable_segmentation:
        report_lines.append("分割预测结果也已保存。\n")

    report_file = output_dir / "prediction_report.md"
    with open(report_file, 'w', encoding='utf-8') as f:
        f.writelines(report_lines)

    print(f"Prediction report saved to {report_file}")


def main(args: argparse.Namespace) -> None:
    """Run batched inference over a dataset and save result images.

    Loads the experiment config and checkpoint from ``args.experiment_dir``,
    builds the model, iterates the dataset in manually collated batches, and
    writes input / prediction / ground-truth (and optionally segmentation)
    images to subdirectories of ``args.output_dir`` as they are produced.
    """
    config = load_experiment_config(args.experiment_dir)
    
    # NOTE(review): `modality` is resolved here but never used below — verify
    # whether the dataset should receive it.
    dataset_path, modality = validate_config(config, args)
    print(f"Using dataset: {dataset_path}")
    
    checkpoint_path = find_checkpoint(args.experiment_dir, args.ckpt_filename)
    print(f"Using checkpoint: {checkpoint_path}")
    
    # Fall back to CPU when CUDA is requested but unavailable.
    device = torch.device(args.device)
    if device.type == 'cuda' and not torch.cuda.is_available():
        print("Warning: CUDA requested but not available, falling back to CPU")
        device = torch.device('cpu')
    
    model, enable_segmentation = setup_model(config, checkpoint_path, device, args)

    # Resize to 256x128 and map pixel values from [0, 1] to [-1, 1].
    image_transform = transforms.Compose([
        transforms.Resize((256, 128)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
    ])
    sensor_transform = partial(preprocess_sensor_data, 
                              target_shape=(2560, 64))
    
    dataset = PaUsDataset(dataset_path, 
                         image_transform=image_transform, 
                         sensor_transform=sensor_transform,
                         return_filestem=True)

    # Determine how many samples to process (-1 means the whole dataset).
    total_samples = len(dataset)
    if args.num_samples == -1:
        n = total_samples
        print(f"Processing all {n} samples")
    else:
        n = min(args.num_samples, total_samples)
        print(f"Processing {n} out of {total_samples} samples")
    
    batch_size = args.batch_size
    print(f"Using batch size: {batch_size}")
    
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # One subdirectory per image category.
    predict_dir = output_dir / "predictions"
    bfimg_dir = output_dir / "inputs"
    gt_dir = output_dir / "ground_truth"
    predict_dir.mkdir(parents=True, exist_ok=True)
    bfimg_dir.mkdir(parents=True, exist_ok=True)
    gt_dir.mkdir(parents=True, exist_ok=True)
    
    if enable_segmentation:
        seg_pred_dir = output_dir / "segmentation_predictions"
        seg_pred_dir.mkdir(parents=True, exist_ok=True)
    
    processed_samples = 0
    for start_idx in tqdm(range(0, n, batch_size), total=(n-1)//batch_size + 1, desc="Processing batches"):
        end_idx = min(start_idx + batch_size, n)
        current_batch_size = end_idx - start_idx
        
        print(f"Processing batch {start_idx//batch_size + 1}/{(n-1)//batch_size + 1}: samples {start_idx}-{end_idx-1}")
        
        # Fetch samples individually and collate by hand (no DataLoader).
        batch_indices = list(range(start_idx, end_idx))
        batch_data = [dataset[i] for i in batch_indices]
        
        # Each item is unpacked as (sensor, gt-or-None, beamformed image,
        # file stem) — assumed from the 4-way unpack; TODO confirm against
        # PaUsDataset.__getitem__.
        sensor_batch_list, gt_batch_list, bfimg_batch_list, current_batch_filenames = zip(*batch_data)
        
        sensor_batch = torch.stack(sensor_batch_list)
        # NOTE(review): torch.stack raises on an empty list — if every gt in
        # the batch is None this line fails; verify GT is always present or
        # add a guard.
        gt_batch = torch.stack([gt for gt in gt_batch_list if gt is not None])
        bfimg_batch = torch.stack(bfimg_batch_list)
        
        predict_batch, seg_prediction_batch = process_batch(
            model, sensor_batch, bfimg_batch, enable_segmentation, device
        )
        
        save_individual_images(predict_batch, predict_dir, "predict", current_batch_filenames)
        save_individual_images(bfimg_batch, bfimg_dir, "input", current_batch_filenames)
        
        # Save GT images if they exist
        if len(gt_batch) > 0:
            # Filter filenames for samples that have GT images
            gt_filenames = [fname for i, fname in enumerate(current_batch_filenames) 
                           if gt_batch_list[i] is not None]
            save_individual_images(gt_batch, gt_dir, "gt", gt_filenames)
        
        if enable_segmentation and seg_prediction_batch is not None:
            # Threshold sigmoid probabilities at 0.5 to get a binary mask.
            seg_pred_probs = torch.sigmoid(seg_prediction_batch)
            seg_pred_binary = (seg_pred_probs > 0.5).float()
            save_individual_images(seg_pred_binary, seg_pred_dir, "seg_pred", current_batch_filenames)
        
        processed_samples += current_batch_size
        print(f"Batch {start_idx//batch_size + 1} completed. Processed {processed_samples}/{n} samples.")
        
        # Release cached GPU memory between batches to limit peak usage.
        if device.type == 'cuda':
            torch.cuda.empty_cache()
    
    print(f"All {processed_samples} samples processed and saved.")
    print(f"Predicted images saved to {predict_dir}")
    print(f"Input images saved to {bfimg_dir}")
    print(f"Ground truth images saved to {gt_dir}")
    
    if enable_segmentation:
        print(f"Segmentation predictions saved to {seg_pred_dir}")
    
    create_prediction_report(output_dir, processed_samples, args.batch_size, enable_segmentation)


if __name__ == '__main__':
    # CLI entry point. Every argument below acts as a fallback for a value
    # that may be missing from the experiment's saved configuration file.
    parser = argparse.ArgumentParser(description='运行预训练模型进行预测和可视化')
    
    # Experiment / checkpoint / data locations.
    parser.add_argument('--experiment_dir', type=str, 
                        default=r"logs/lightning_logs/version_88",
                        help='实验目录路径，包含experiment_config.yaml和checkpoints')
    parser.add_argument('--ckpt_filename', type=str, 
                        default=None,
                        help='指定检查点文件名')
    parser.add_argument('--dataset_path', type=str, 
                        default=r'D:\project\dataset\实验数据',
                        help='数据集路径（备用，如果配置文件中没有,优先选择val_file，然后train_file）')
    parser.add_argument('--modality', type=str, 
                        default='',
                        help='使用的模态（备用，如果配置文件中没有）')
    # Inference batching and output.
    parser.add_argument('--num_samples', type=int, 
                        default=12,
                        help='要处理的样本数量（-1表示处理所有样本）')
    parser.add_argument('--batch_size', type=int, 
                        default=4,
                        help='批处理大小，避免显存溢出')
    parser.add_argument('--output_dir', type=str, 
                        default="outputs",
                        help='输出图像的目录')
    parser.add_argument('--model_type', type=str, 
                        default='myunet',
                        choices=['aynet', 'model_aynet', 'myunet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'],
                        help='模型类型（备用，如果配置文件中没有）：支持的模型类型包括 aynet, model_aynet, myunet, model_ynet, model_ynet_conv, model_ynet_dascat, model_ynet_rdcat')
    parser.add_argument('--device', type=str, 
                        default="cpu",
                        help='运行设备 (cpu 或 cuda)')
    
    # Model-architecture fallbacks (used only when absent from the config).
    parser.add_argument('--enable_segmentation', action='store_true',
                        default=False,
                        help='是否启用分割功能（备用，如果配置文件中没有）')
    parser.add_argument('--seg_use_multiscale', action='store_true',
                        default=False,
                        help='是否使用多尺度分割头（备用，如果配置文件中没有）')
    parser.add_argument('--seg_use_dropout', action='store_true',
                        default=False,
                        help='是否在分割头中使用dropout（备用，如果配置文件中没有）')
    parser.add_argument('--seg_dropout_rate', type=float,
                        default=0.1,
                        help='分割头dropout率（备用，如果配置文件中没有）')
    parser.add_argument('--encoder_mode', type=str,
                        default='patch',
                        choices=['patch', 'reshape'],
                        help='编码器模式（备用，如果配置文件中没有）：patch 或 reshape')
    parser.add_argument('--enable_cam', action='store_true',
                        default=False,
                        help='是否启用CAM可解释性功能（备用，如果配置文件中没有）')
    
    args = parser.parse_args()
    
    main(args)

