import torch
import os
import sys

# Dynamically extend sys.path so sibling modules in this directory can be imported
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from multimodal_vectorizer import MultimodalVectorizer
from scoring_model import ComprehensiveScoringModel

class InterviewPredictor:
    """
    Wraps the multimodal vectorizer and the scoring model into one predictor.

    This is the class the project should use for real-time scoring of an
    interview (a list of per-turn dialogue dicts).
    """

    def __init__(self, model_path):
        """
        Load the vectorizer and the trained scoring model weights.

        Args:
            model_path: Path to the ``.pth`` state-dict produced by train.py.

        Raises:
            FileNotFoundError: If ``model_path`` does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件未找到: {model_path}。请先运行 train.py 训练并保存模型。")

        print("--- 初始化预测器 ---")
        self.vectorizer = MultimodalVectorizer()
        print("向量化模块加载完毕。")

        # input_dim must match the feature-vector size emitted by the vectorizer.
        self.model = ComprehensiveScoringModel(input_dim=384)
        self.model.load_state_dict(torch.load(model_path, map_location='cpu'))
        self.model.eval()
        print(f"评分模型加载完毕，权重来源: {model_path}")
        print("--- 预测器准备就绪 ---")

    @staticmethod
    def _coerce_number(value):
        """Convert a numeric-looking string to float; return anything else as-is.

        Unlike the previous ``isdigit()`` check, this also handles negative
        numbers and scientific notation (e.g. "-0.5", "1e-3").
        """
        if isinstance(value, str):
            try:
                return float(value)
            except ValueError:
                return value
        return value

    @staticmethod
    def _flatten_turn(turn):
        """Return a flat copy of one dialogue turn.

        Nested "face_actions" / "body_actions" dicts are merged into the top
        level (with string values coerced to float where possible) so the
        vectorizer sees a single flat feature dict. The input is not mutated.
        """
        flat = {k: v for k, v in turn.items()
                if k not in ("face_actions", "body_actions")}
        for group in ("face_actions", "body_actions"):
            for key, value in turn.get(group, {}).items():
                flat[key] = InterviewPredictor._coerce_number(value)
        return flat

    def predict(self, interview_raw_data):
        """
        Score one interview consisting of multiple dialogue turns.

        Args:
            interview_raw_data: Non-empty list of per-turn dicts. Turns may
                carry nested "face_actions" / "body_actions" dicts, which are
                flattened before vectorization.

        Returns:
            The predicted comprehensive score as a Python float.

        Raises:
            ValueError: If the input is not a non-empty list.
        """
        if not isinstance(interview_raw_data, list) or not interview_raw_data:
            raise ValueError("输入数据必须是一个非空的列表，代表多轮对话。")

        print(f"\n开始对一场包含 {len(interview_raw_data)} 轮对话的面试进行评分...")

        flattened_data = [self._flatten_turn(turn) for turn in interview_raw_data]

        sequence_vectors = [
            torch.tensor(self.vectorizer.vectorize(turn)["feature_vector"])
            for turn in flattened_data
        ]

        # (num_turns, feature_dim) -> add a batch dimension of 1 for the model.
        interview_tensor = torch.stack(sequence_vectors)
        interview_length = torch.tensor([len(interview_tensor)])
        padded_interview = interview_tensor.unsqueeze(0)

        with torch.no_grad():
            predicted_score = self.model(padded_interview, interview_length)

        final_score = predicted_score.item()
        print(f"评分完成。预测综合分数为: {final_score:.2f}")
        return final_score

def main():
    """Demo entry point: load the trained model and score a sample interview."""
    # Resolve the project root so the model weights can be located.
    root_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    weights_path = os.path.join(root_dir, 'model', 'scoring_model.pth')

    try:
        predictor = InterviewPredictor(model_path=weights_path)
    except FileNotFoundError as err:
        print(err)
        return

    # Field order mirrors the flat per-turn schema the predictor expects.
    field_names = (
        "text", "audio_emotion", "stress", "expression_seq", "eyeBlink",
        "mouthPuff", "eyeUpLeft", "eyeUpRight", "earTouch", "noseTouch",
        "speech_rate",
    )

    # Simulated data for a "confident" candidate, one tuple per dialogue turn.
    turn_rows = [
        ("我有五年的Java后端开发经验，熟悉Spring Boot和微服务架构。",
         "Confident//Calm", 0.28, "calm//confident//calm",
         0.9, 0, 1, 0, 0, 0, "2.1字/秒"),
        ("上一个项目中，我负责搭建高并发API接口，使用了Redis进行缓存优化。",
         "Confident//Nervous", 0.35, "nervous//calm",
         1.0, 0, 0, 1, 0, 0, "2.0字/秒"),
        ("我熟悉MySQL数据库，写过复杂的SQL语句，也做过索引优化。",
         "Calm", 0.30, "calm//nervous",
         0.8, 0, 0, 0, 0, 1, "1.8字/秒"),
        ("我习惯使用JUnit进行单元测试，并使用Mock工具进行接口测试。",
         "Calm//Calm", 0.22, "calm//calm",
         0.7, 0, 1, 1, 0, 0, "1.7字/秒"),
        ("我有使用Kafka进行异步消息处理的经验，解决过分布式事务的问题。",
         "Confident//Nervous", 0.42, "confident//nervous",
         1.1, 0, 2, 0, 1, 0, "2.2字/秒"),
        ("为提高系统稳定性，我使用了Nginx负载均衡和Spring Cloud进行服务治理。",
         "Nervous//Calm", 0.50, "nervous//calm",
         1.0, 1, 1, 2, 1, 0, "1.9字/秒"),
        ("我熟练使用Docker和Kubernetes进行服务容器化和部署。",
         "Confident//Calm", 0.37, "calm//confident",
         0.8, 0, 0, 1, 0, 0, "2.3字/秒"),
        ("我注重团队沟通，会主动与产品和测试对齐需求，确保交付质量。",
         "Calm", 0.30, "calm//nervous",
         0.9, 0, 0, 0, 0, 0, "1.8字/秒"),
    ]

    confident_interview_data = [dict(zip(field_names, row)) for row in turn_rows]
    predictor.predict(confident_interview_data)


# Allow running this module directly as a demo script.
if __name__ == "__main__":
    main() 