#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
检查微表情识别模型加载状态
"""

import os
import torch
import logging
import json
import importlib.util
import sys
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch import nn, optim

# 设置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def load_config():
    """Load the JSON configuration from ./config.json.

    Returns:
        dict: parsed configuration, or None when config.json does not exist.
    """
    path = "config.json"
    if not os.path.exists(path):
        return None
    with open(path, 'r', encoding='utf-8') as fh:
        return json.load(fh)

def load_model(config):
    """Instantiate a SwinTransformer3D and load pretrained weights from a checkpoint.

    Dynamically imports ``models.swin_transformer_3d``, builds a Swin-T style
    3D model with a 7-class head, and loads the state dict referenced by
    ``config['model']['checkpoint_path']``. If only the classification head's
    shapes mismatch, the head weights are skipped and the rest is loaded with
    ``strict=False``.

    Args:
        config (dict): must contain ``config['model']['checkpoint_path']``;
            ``config['dataset']['emotion_labels']`` is used, when present, for
            a label-count sanity check against the head's output dimension.

    Returns:
        The loaded model, or None on any failure (missing module, missing
        checkpoint file, or non-head parameter mismatch). All failures are
        logged rather than raised.
    """
    try:
        # Make the project root importable so the "models" package resolves.
        # Bug fix: the original guard checked for os.path.abspath("models")
        # but appended os.path.abspath("."), so the condition never matched
        # the appended entry and a duplicate was appended on every call.
        project_root = os.path.abspath(".")
        if project_root not in sys.path:
            sys.path.append(project_root)

        # Dynamically import the SwinTransformer3D implementation.
        spec = importlib.util.find_spec("models.swin_transformer_3d")
        if spec is None:
            logger.error("无法导入models.swin_transformer_3d模块")
            return None

        swin_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(swin_module)

        SwinTransformer3D = getattr(swin_module, "SwinTransformer3D")

        # Swin-T style configuration; head sized for 7 micro-expression classes.
        model = SwinTransformer3D(
            embed_dim=96,
            depths=[2, 2, 6, 2],
            num_heads=[3, 6, 12, 24],
            patch_size=(2, 4, 4),
            window_size=(8, 7, 7),
            drop_path_rate=0.1,
            in_chans=3,
            num_classes=7,  # 7 micro-expression classes
            patch_norm=True
        )

        # Always load on CPU here; the caller can move the model afterwards.
        device = 'cpu'
        checkpoint_path = config['model']['checkpoint_path']

        if not os.path.exists(checkpoint_path):
            logger.error(f"模型文件不存在: {checkpoint_path}")
            return None

        logger.info(f"加载模型检查点: {checkpoint_path}")
        # NOTE(review): torch.load unpickles arbitrary objects — the path
        # comes from local config and is assumed trusted.
        checkpoint = torch.load(checkpoint_path, map_location=device)

        logger.info(f"模型参数类型: {type(checkpoint)}")

        # Some checkpoints wrap the weights under a 'model_state_dict' key.
        if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
            logger.info("检测到嵌套的model_state_dict")
            state_dict = checkpoint['model_state_dict']
        else:
            logger.info("直接使用state_dict")
            state_dict = checkpoint

        # Compare checkpoint keys against the freshly built model's keys.
        model_dict = model.state_dict()
        missing_keys = [k for k in model_dict if k not in state_dict]
        unexpected_keys = [k for k in state_dict if k not in model_dict]

        logger.info(f"参数总数: {len(state_dict)}")
        logger.info(f"模型期望参数数量: {len(model_dict)}")
        logger.info(f"缺失的参数: {len(missing_keys)}")
        if missing_keys:
            logger.info(f"缺失参数示例: {missing_keys[:5]}")
        logger.info(f"意外的参数: {len(unexpected_keys)}")
        if unexpected_keys:
            logger.info(f"意外参数示例: {unexpected_keys[:5]}")

        # Report classification-head geometry and sanity-check it against the
        # label list declared in the config, if any.
        if hasattr(model, 'head') and isinstance(model.head, torch.nn.Linear):
            head_shape = model.head.weight.shape
            logger.info(f"分类头权重形状: {head_shape}")
            logger.info(f"分类头输入特征数: {model.head.in_features}")
            logger.info(f"分类头输出类别数: {model.head.out_features}")

            if 'dataset' in config and 'emotion_labels' in config['dataset']:
                dataset_labels = config['dataset']['emotion_labels']
                logger.info(f"配置文件中的标签数量: {len(dataset_labels)}")
                logger.info(f"配置文件中的标签: {dataset_labels}")
                if len(dataset_labels) != model.head.out_features:
                    logger.warning(f"配置文件标签数量({len(dataset_labels)})与模型输出维度({model.head.out_features})不匹配!")

        # Detect a shape mismatch on the classification head (e.g. the
        # checkpoint was trained with a different number of classes).
        head_mismatch = False
        for k in ('head.weight', 'head.bias'):
            if k in state_dict and k in model_dict and state_dict[k].shape != model_dict[k].shape:
                pretrain_shape = str(state_dict[k].shape).replace(' ', '').replace('\n', '')
                current_shape = str(model_dict[k].shape).replace(' ', '').replace('\n', '')
                logger.warning(f"分类头参数形状不匹配: {k}, 预训练形状: {pretrain_shape}, 当前模型形状: {current_shape}")
                head_mismatch = True

        # Any key missing from the checkpoint (beyond head-shape mismatches)
        # means the checkpoint does not fit this architecture at all.
        if missing_keys:
            logger.warning("参数不匹配，无法正确加载模型")
            return None

        try:
            if head_mismatch:
                logger.warning("检测到分类头参数形状不匹配")
                logger.info("仅加载特征提取参数，跳过分类头参数")

                # Drop the incompatible head tensors, then load the rest.
                for k in ('head.weight', 'head.bias'):
                    if k in state_dict:
                        pretrain_shape = str(state_dict[k].shape).replace(' ', '').replace('\n', '')
                        current_shape = str(model_dict[k].shape).replace(' ', '').replace('\n', '')
                        logger.info(f"跳过参数: {k}, 预训练形状: {pretrain_shape}, 当前模型形状: {current_shape}")
                        state_dict.pop(k)

                # strict=False tolerates the head keys removed just above.
                model.load_state_dict(state_dict, strict=False)
                logger.info("模型加载成功 - 使用预训练特征提取器，不包含分类头")
                return model
            else:
                # All keys and shapes match: load the full pretrained model.
                model.load_state_dict(state_dict)
                logger.info("模型加载成功 - 使用完整预训练模型（包含分类头）")
                return model
        except Exception as e:
            logger.error(f"加载模型失败: {e}")
            return None
    except Exception as e:
        # Top-level boundary: log the full traceback and signal failure.
        logger.error(f"加载模型过程中出错: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return None

# 简单的示例数据集，用于微调分类头
class DummyEmotionDataset(Dataset):
    """Synthetic emotion-clip dataset of random tensors, for head fine-tuning.

    Each sample is a (clip, label) pair where the clip has shape
    (3, num_frames, *frame_size) and the label is an int in [0, num_classes).
    """

    def __init__(self, num_samples=100, num_frames=16, frame_size=(224, 224), num_classes=7):
        # Keep the construction parameters for __len__ and introspection.
        self.num_samples = num_samples
        self.num_frames = num_frames
        self.frame_size = frame_size
        self.num_classes = num_classes

        # Pre-generate all clips and labels up front (random stand-in data).
        full_shape = (num_samples, 3, num_frames) + tuple(frame_size)
        self.data = torch.randn(full_shape)
        self.labels = torch.randint(low=0, high=num_classes, size=(num_samples,))

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        # Return one (clip, label) pair.
        return self.data[idx], self.labels[idx]

def finetune_model_head(model, config, epochs=10, batch_size=8):
    """Fine-tune only the classification head of the model on dummy data.

    All feature-extractor parameters are frozen; only parameters whose name
    contains "head" are trained. The resulting weights are saved to
    ``models/finetuned_model.pth``.

    Args:
        model: the loaded SwinTransformer3D (or None, which fails fast).
        config (dict): must provide ``config['dataset']['emotion_labels']``,
            ``['num_frames']`` and ``['frame_size']``.
        epochs (int): number of training epochs.
        batch_size (int): mini-batch size for the dummy DataLoader.

    Returns:
        bool: True on success, False on any failure (logged, not raised).
    """
    if model is None:
        logger.error("模型不可用，无法进行微调")
        return False

    try:
        # Freeze everything except the classification head.
        for name, param in model.named_parameters():
            param.requires_grad = "head" in name

        logger.info("开始微调分类头")

        # Prefer GPU when available; otherwise train on CPU.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model.to(device)

        # Dummy dataset stands in for a real emotion dataset here.
        # In a real application, load the actual emotion dataset instead.
        num_classes = len(config['dataset']['emotion_labels'])
        dataset = DummyEmotionDataset(
            num_samples=50,
            num_frames=config['dataset']['num_frames'],
            frame_size=tuple(config['dataset']['frame_size']),
            num_classes=num_classes
        )

        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

        # Optimize only the parameters left unfrozen above (the head).
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)

        # Standard training loop with per-epoch loss/accuracy reporting.
        model.train()
        for epoch in range(epochs):
            running_loss = 0.0
            correct = 0
            total = 0

            for inputs, labels in dataloader:
                inputs, labels = inputs.to(device), labels.to(device)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # Accumulate loss and accuracy statistics.
                running_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

            logger.info(f'Epoch {epoch+1}/{epochs}: Loss: {running_loss/len(dataloader):.4f} | Acc: {100.*correct/total:.2f}%')

        # Bug fix: ensure the output directory exists before saving —
        # torch.save fails if 'models/' is missing.
        save_path = 'models/finetuned_model.pth'
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        torch.save(model.state_dict(), save_path)
        logger.info("分类头微调完成，模型已保存到 models/finetuned_model.pth")

        # Leave the model in evaluation mode for downstream inference.
        model.eval()
        return True

    except Exception as e:
        logger.error(f"微调分类头时出错: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return False

def main():
    """Entry point: load config and model, then optionally fine-tune the head."""
    config = load_config()
    if config is None:
        logger.error("无法加载配置文件")
        return

    model = load_model(config)
    if model is None:
        logger.info("系统未使用真实的预训练模型")
        return

    logger.info("系统正在使用真实的预训练模型")

    # Fine-tune only when explicitly enabled in the config.
    if not config.get('model', {}).get('finetune_head'):
        logger.info("配置中未启用分类头微调，使用随机初始化的分类头")
        return

    logger.info("配置中启用了分类头微调")
    if finetune_model_head(model, config):
        logger.info("分类头微调成功，系统现在使用经过微调的模型")
    else:
        logger.warning("分类头微调失败")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()