import cv2
import os
import asyncio
import aiohttp
from asgiref.sync import sync_to_async
from django.db import transaction
from django.conf import settings
from .models import ExpressionAnalysis, ExpressionFrameData, EmotionData

class VideoAnalysisService:
    """Video analysis service.

    Extracts frames from a video at a fixed interval, analyses each frame
    with the Face++ detect API, and aggregates per-frame emotion, head-pose
    and gaze scores into a weighted concentration ("attention") score.
    """
    
    # Scoring weights taken from the evaluation-criteria table
    EMOTION_WEIGHT = 0.2185  # facial-expression weight
    POSE_WEIGHT = 0.2578     # head-pose weight  
    GAZE_WEIGHT = 0.2607     # gaze-angle weight
    
    # Concentration-level markers (new scoring system)
    # Expression and pose scores
    EMOTION_POSE_HIGH = 70      # high concentration
    EMOTION_POSE_MEDIUM = 20    # average concentration
    EMOTION_POSE_LOW = 10       # low concentration
    
    # Gaze scores
    GAZE_HIGH = 100             # gaze towards the screen
    GAZE_LOW = 0                # gaze away from the screen
    
    def __init__(self):
        # Face++ API configuration.
        # SECURITY NOTE(review): credentials are hardcoded in source control —
        # move them to Django settings / environment variables and rotate
        # these keys.
        self.api_key = "gAFij4yrT4wNrRZ7wPaz15dSJMBKUWOF"
        self.api_secret = "Sgyabih1rsOnqHgqMOxeJkHnYKYp-3I4"
        self.api_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'

    def extract_frames_from_video(self, video_path, interval_seconds=5, output_dir="video_frames"):
        """Extract frames from a video at a fixed time interval.

        Args:
            video_path: path of the video file to sample.
            interval_seconds: seconds between two extracted frames.
            output_dir: directory (placed under MEDIA_ROOT when configured)
                where the JPEG frames are written.

        Returns:
            List of file paths of the extracted frames, in timestamp order.

        Raises:
            Exception: if the video cannot be opened, reports no usable FPS,
                or the interval maps to fewer than one frame.
        """
        # Create the output directory under MEDIA_ROOT when Django provides one.
        if hasattr(settings, 'MEDIA_ROOT') and settings.MEDIA_ROOT:
            frames_dir = os.path.join(settings.MEDIA_ROOT, output_dir)
        else:
            frames_dir = output_dir
        os.makedirs(frames_dir, exist_ok=True)
        
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise Exception(f"无法打开视频文件: {video_path}")
        
        try:
            fps = cap.get(cv2.CAP_PROP_FPS)
            # Guard against broken metadata: fps == 0 would divide by zero below.
            if fps <= 0:
                raise Exception(f"无法读取视频帧率: {video_path}")
            
            frame_interval = int(fps * interval_seconds)
            if frame_interval == 0:
                raise Exception("时间间隔太短，无法计算有效的帧间隔")
            
            extracted_files = []
            frame_count = 0
            extracted_count = 0
            
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                
                # Keep every frame_interval-th frame, named by index and timestamp.
                if frame_count % frame_interval == 0:
                    timestamp = frame_count / fps
                    filename = f"frame_{extracted_count:04d}_time_{timestamp:.2f}s.jpg"
                    filepath = os.path.join(frames_dir, filename)
                    
                    cv2.imwrite(filepath, frame)
                    extracted_files.append(filepath)
                    extracted_count += 1
                
                frame_count += 1
        finally:
            # Always release the capture handle, even when an exception is
            # raised mid-loop (the original leaked it on the error paths).
            cap.release()
        
        return extracted_files

    async def async_analyze_face_with_facepp(self, session, filepath, semaphore):
        """Analyse a single image asynchronously with the Face++ detect API.

        Args:
            session: shared aiohttp.ClientSession used for the POST request.
            filepath: path of the frame image to upload.
            semaphore: asyncio.Semaphore bounding concurrent API calls.

        Returns:
            dict: the Face++ JSON response with 'frame_path' added on success,
            or a dict with 'frame_path' and 'error' on any HTTP/transport
            failure (this method never raises).
        """
        async with semaphore:
            data = aiohttp.FormData()
            data.add_field('api_key', self.api_key)
            data.add_field('api_secret', self.api_secret)
            data.add_field('return_landmark', '0')
            data.add_field('return_attributes', "smiling,headpose,facequality,blur,emotion,eyegaze")
            
            try:
                # The file must stay open for the whole request: FormData
                # streams it when the POST is sent, hence post() sits inside
                # this with-block.
                with open(filepath, 'rb') as img_file:
                    data.add_field('image_file',
                                   img_file,
                                   filename=os.path.basename(filepath),
                                   content_type='application/octet-stream')
                    
                    async with session.post(self.api_url, data=data, timeout=30) as response:
                        if response.status == 200:
                            result = await response.json()
                            result['frame_path'] = filepath
                            return result
                        else:
                            error_text = await response.text()
                            return {'frame_path': filepath, 'error': f"HTTP {response.status}: {error_text}"}
                            
            except Exception as e:
                # Broad catch is deliberate: one bad frame must not abort the batch.
                return {'frame_path': filepath, 'error': f"请求异常: {str(e)}"}

    def calculate_emotion_score(self, emotion_data):
        """计算情绪评分 (70, 20, 10) - 根据面部表情评价标准"""
        if not emotion_data:
            return 20  # 默认为一般
        
        # 获取主要情绪及其置信度
        primary_emotion = max(emotion_data.items(), key=lambda x: x[1])[0]
        confidence = max(emotion_data.values())
        
        # 根据评价标准表进行评分
        # 专注度高v1: 开心、惊讶
        if primary_emotion in ['happiness', 'surprise']:
            return self.EMOTION_POSE_HIGH  # 70
        # 专注度一般v2: 中性（平静）
        elif primary_emotion == 'neutral':
            return self.EMOTION_POSE_MEDIUM  # 20
        # 专注度低v3: 伤心、生气、厌恶、恐惧
        elif primary_emotion in ['sadness', 'anger', 'disgust', 'fear']:
            return self.EMOTION_POSE_LOW  # 10
        
        return 20  # 默认为一般

    def calculate_pose_score(self, headpose_data):
        """计算头部姿态评分 (70, 20, 10) - 根据头部姿态评价标准"""
        if not headpose_data:
            return 20  # 默认为一般
        
        yaw = abs(headpose_data.get('yaw_angle', 0))
        pitch = abs(headpose_data.get('pitch_angle', 0))
        
        # 根据评价标准表进行评分
        # 专注度高v1: |yaw| ≤ 20°且 |pitch| ≤ 25° (正常状态)
        if yaw <= 20 and pitch <= 25:
            return self.EMOTION_POSE_HIGH  # 70
        # 专注度一般v2: 20° < |yaw| ≤ 30°且 25° < |pitch| ≤ 35° (头部轻微偏移状态)  
        elif yaw <= 30 and pitch <= 35:
            return self.EMOTION_POSE_MEDIUM  # 20
        # 专注度低v3: |yaw| > 30°或 |pitch| > 35° (头部重度偏移状态)
        else:
            return self.EMOTION_POSE_LOW  # 10

    def calculate_gaze_score(self, eyegaze_data):
        """计算视线评分 (100, 0) - 根据视线角度评价标准"""
        if not eyegaze_data:
            return 0
        
        left_gaze = eyegaze_data.get('left_eye_gaze', {})
        right_gaze = eyegaze_data.get('right_eye_gaze', {})
        
        # 获取视线向量的X分量（左右偏移）
        left_x = left_gaze.get('vector_x_component', 0) if left_gaze else 0
        right_x = right_gaze.get('vector_x_component', 0) if right_gaze else 0
        
        # 计算平均偏移角度
        avg_x_component = abs((left_x + right_x) / 2)
        
        # 根据评价标准表进行评分
        # 假设vector_x_component对应偏移角度的正弦值
        if avg_x_component <= 0.6:  # 约对应θ ≤ 35° (视线朝屏幕)
            return self.GAZE_HIGH  # 100
        else:  # θ > 35° (视线不朝屏幕)
            return self.GAZE_LOW  # 0

    def calculate_weighted_concentration_score(self, emotion_score, pose_score, gaze_score, 
                                             emotion_weight=None, pose_weight=None, gaze_weight=None):
        """
        计算加权专注度分数
        
        Args:
            emotion_score: 情绪分数
            pose_score: 姿态分数
            gaze_score: 视线分数
            emotion_weight: 情绪权重 (默认使用归一化的原始权重)
            pose_weight: 姿态权重 (默认使用归一化的原始权重)
            gaze_weight: 视线权重 (默认使用归一化的原始权重)
            
        Returns:
            加权综合分数
            
        注意: 当需要权重为1和0时，可设置某个权重为1，其他为0
        例如：只考虑情绪时设置 emotion_weight=1, pose_weight=0, gaze_weight=0
        """
        # 如果没有指定权重，使用归一化后的原始权重
        if emotion_weight is None or pose_weight is None or gaze_weight is None:
            total_original_weight = self.EMOTION_WEIGHT + self.POSE_WEIGHT + self.GAZE_WEIGHT
            emotion_weight = emotion_weight or (self.EMOTION_WEIGHT / total_original_weight)
            pose_weight = pose_weight or (self.POSE_WEIGHT / total_original_weight)
            gaze_weight = gaze_weight or (self.GAZE_WEIGHT / total_original_weight)
        
        # 确保权重总和为1（归一化）
        total_weight = emotion_weight + pose_weight + gaze_weight
        if total_weight > 0:
            emotion_weight /= total_weight
            pose_weight /= total_weight
            gaze_weight /= total_weight
        
        return (emotion_score * emotion_weight + 
                pose_score * pose_weight + 
                gaze_score * gaze_weight)

    def calculate_frequency_weighted_score(self, frame_scores, emotion_weight=None, pose_weight=None, gaze_weight=None):
        """
        基于频率权重计算专注度分数
        
        Args:
            frame_scores: 帧分数列表，每个元素包含 (emotion_score, pose_score, gaze_score)
            emotion_weight: 情绪权重 (默认使用归一化的原始权重)
            pose_weight: 姿态权重 (默认使用归一化的原始权重)
            gaze_weight: 视线权重 (默认使用归一化的原始权重)
            
        Returns:
            频率加权的综合分数
        """
        if not frame_scores:
            return 0.0
        
        # 如果没有指定权重，使用归一化后的原始权重
        if emotion_weight is None or pose_weight is None or gaze_weight is None:
            total_original_weight = self.EMOTION_WEIGHT + self.POSE_WEIGHT + self.GAZE_WEIGHT
            emotion_weight = emotion_weight or (self.EMOTION_WEIGHT / total_original_weight)
            pose_weight = pose_weight or (self.POSE_WEIGHT / total_original_weight)
            gaze_weight = gaze_weight or (self.GAZE_WEIGHT / total_original_weight)
        
        # 统计各维度的专注度频率
        # 情绪和姿态：70, 20, 10
        emotion_counts = {70: 0, 20: 0, 10: 0}
        pose_counts = {70: 0, 20: 0, 10: 0}
        # 视线：100, 0
        gaze_counts = {100: 0, 0: 0}
        
        total_frames = len(frame_scores)
        
        # 统计频率
        for emotion_score, pose_score, gaze_score in frame_scores:
            if emotion_score in emotion_counts:
                emotion_counts[emotion_score] += 1
            if pose_score in pose_counts:
                pose_counts[pose_score] += 1
            if gaze_score in gaze_counts:
                gaze_counts[gaze_score] += 1
        
        # 计算频率 (0-1)
        emotion_freq = {level: count/total_frames for level, count in emotion_counts.items()}
        pose_freq = {level: count/total_frames for level, count in pose_counts.items()}
        gaze_freq = {level: count/total_frames for level, count in gaze_counts.items()}
        
        # 计算加权分数
        emotion_score = (emotion_freq[70] * 70 + emotion_freq[20] * 20 + emotion_freq[10] * 10)
        pose_score = (pose_freq[70] * 70 + pose_freq[20] * 20 + pose_freq[10] * 10)
        gaze_score = (gaze_freq[100] * 100 + gaze_freq[0] * 0)
        
        # 返回加权总分
        return (emotion_score * emotion_weight + 
                pose_score * pose_weight + 
                gaze_score * gaze_weight)

    def get_evaluation_for_weighted_score(self, weighted_score):
        """根据加权分数获取评价"""
        if weighted_score >= 80:
            return "优秀"
        elif weighted_score >= 60:
            return "良好"
        elif weighted_score >= 40:
            return "一般"
        else:
            return "较差"

    def get_evaluation(self, score):
        """根据分数获取评价"""
        if score >= 70:
            return "专注度高"
        elif score >= 20:
            return "专注度一般"
        else:
            return "专注度低"

    def save_analysis_to_database(self, video_path, interval_seconds, results, video_answer=None):
        """Persist an analysis summary and its per-frame data atomically.

        Args:
            video_path: path of the analysed video.
            interval_seconds: sampling interval used for the analysis.
            results: summary dict as produced by analyze_video().
            video_answer: optional related object to link on the analysis row.

        Returns:
            ExpressionAnalysis: the newly created parent record.

        Raises:
            Re-raises any exception after printing it; the surrounding
            atomic block rolls the whole write back.
            NOTE(review): prefer logging.exception over print here.
        """
        try:
            with transaction.atomic():
                # Parent ExpressionAnalysis record: one row per analysed video.
                analysis_data = {
                    'video_path': video_path,
                    'interval_seconds': interval_seconds,
                    'total_frames': len(results.get('frame_results', [])),
                    'success_frames': len([r for r in results.get('frame_results', []) if 'error' not in r]),
                    'average_concentration': results.get('average_concentration', 0),
                    'max_score': results.get('max_score', 0),
                    'min_score': results.get('min_score', 0),
                    'overall_evaluation': results.get('overall_evaluation', ''),
                    'emotion_avg_score': results.get('emotion_avg_score', 0),
                    'emotion_high_ratio': results.get('emotion_high_ratio', 0),
                    'pose_avg_score': results.get('pose_avg_score', 0),
                    'pose_high_ratio': results.get('pose_high_ratio', 0),
                    'gaze_avg_score': results.get('gaze_avg_score', 0),
                    'gaze_high_ratio': results.get('gaze_high_ratio', 0),
                    'analysis_status': 'completed'
                }
                
                if video_answer:
                    analysis_data['video_answer'] = video_answer
                
                analysis = ExpressionAnalysis.objects.create(**analysis_data)
                
                # Per-frame child rows; error frames are skipped, not persisted.
                for frame_result in results.get('frame_results', []):
                    if 'error' in frame_result:
                        continue
                        
                    frame_analysis = ExpressionFrameData.objects.create(
                        expression_analysis=analysis,
                        frame_name=os.path.basename(frame_result['frame_path']),
                        frame_path=frame_result['frame_path'],
                        concentration_score=frame_result.get('concentration_score', 0),
                        analysis_status='success',
                        primary_emotion=frame_result.get('primary_emotion'),
                        emotion_confidence=frame_result.get('emotion_confidence'),
                        emotion_score=frame_result.get('emotion_score'),
                        emotion_evaluation=frame_result.get('emotion_evaluation'),
                        yaw_angle=frame_result.get('yaw_angle'),
                        pitch_angle=frame_result.get('pitch_angle'),
                        roll_angle=frame_result.get('roll_angle'),
                        pose_score=frame_result.get('pose_score'),
                        pose_evaluation=frame_result.get('pose_evaluation'),
                        left_gaze_angle=frame_result.get('left_gaze_angle'),
                        right_gaze_angle=frame_result.get('right_gaze_angle'),
                        avg_gaze_angle=frame_result.get('avg_gaze_angle'),
                        gaze_score=frame_result.get('gaze_score'),
                        gaze_evaluation=frame_result.get('gaze_evaluation'),
                        api_raw_data=frame_result.get('api_raw_data', {})
                    )
                    
                    # Detailed per-emotion confidence breakdown for the frame.
                    emotion_data = frame_result.get('emotion_data', {})
                    if emotion_data:
                        EmotionData.objects.create(
                            frame_analysis=frame_analysis,
                            happiness=emotion_data.get('happiness', 0.0),
                            surprise=emotion_data.get('surprise', 0.0),
                            neutral=emotion_data.get('neutral', 0.0),
                            sadness=emotion_data.get('sadness', 0.0),
                            anger=emotion_data.get('anger', 0.0),
                            disgust=emotion_data.get('disgust', 0.0),
                            fear=emotion_data.get('fear', 0.0)
                        )
                
                return analysis
                
        except Exception as e:
            print(f"保存分析结果到数据库失败: {e}")
            raise

    def sync_analyze_video(self, video_path, interval_seconds=5, max_concurrency=3):
        """Synchronous entry point: run analyze_video() to completion.

        NOTE(review): functionally identical to _run_async_analysis below —
        one of the two is redundant. asyncio.run() raises RuntimeError when
        called from a thread that already runs an event loop.
        """
        return asyncio.run(self.analyze_video(video_path, interval_seconds, max_concurrency))

    def _run_async_analysis(self, video_path, interval_seconds, max_concurrency):
        """Run the async analysis to completion on a fresh event loop.

        NOTE(review): duplicate of sync_analyze_video above (minus defaults);
        consider delegating one to the other.
        """
        return asyncio.run(self.analyze_video(video_path, interval_seconds, max_concurrency))

    @sync_to_async
    def async_save_analysis_to_database(self, video_path, interval_seconds, results):
        """Awaitable wrapper around save_analysis_to_database via asgiref's
        sync_to_async, so async callers can persist results without blocking
        the event loop."""
        return self.save_analysis_to_database(video_path, interval_seconds, results)

    async def analyze_video(self, video_path, interval_seconds=5, max_concurrency=3):
        """Analyse a video asynchronously: extract frames, call Face++, aggregate.

        Args:
            video_path: path of the video file.
            interval_seconds: sampling interval between analysed frames.
            max_concurrency: maximum number of concurrent Face++ requests.

        Returns:
            dict summary with aggregate statistics ('average_concentration',
            'max_score', 'min_score', per-dimension averages and high-level
            ratios, 'overall_evaluation') plus the per-frame details under
            'frame_results'.
        """
        # Extract sample frames from the video.
        frame_files = self.extract_frames_from_video(video_path, interval_seconds)
        
        # Semaphore caps the number of in-flight API requests.
        semaphore = asyncio.Semaphore(max_concurrency)
        
        # Analyse all frames concurrently; exceptions become result items.
        async with aiohttp.ClientSession() as session:
            tasks = [
                self.async_analyze_face_with_facepp(session, filepath, semaphore)
                for filepath in frame_files
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
        
        processed_results = []
        frame_scores = []  # (emotion_score, pose_score, gaze_score) per good frame
        
        for result in results:
            if isinstance(result, Exception):
                processed_results.append({'error': str(result)})
                continue
            
            if 'error' in result:
                processed_results.append(result)
                continue
            
            faces = result.get('faces', [])
            if not faces:
                # Keep 'frame_path' here too, consistent with every other
                # error entry (it was previously dropped for this case).
                processed_results.append({'frame_path': result['frame_path'],
                                          'error': '未检测到人脸'})
                continue
            
            frame_result = self._process_face_result(result, faces[0])
            processed_results.append(frame_result)
            frame_scores.append((frame_result['emotion_score'],
                                 frame_result['pose_score'],
                                 frame_result['gaze_score']))
        
        return self._build_summary(processed_results, frame_scores)

    def _process_face_result(self, result, face):
        """Turn one successful Face++ response (first face) into a per-frame
        result dict with all scores, angles, and evaluations."""
        attributes = face.get('attributes', {})
        
        # Score the three dimensions from the face attributes.
        emotion_data = attributes.get('emotion', {})
        emotion_score = self.calculate_emotion_score(emotion_data)
        
        headpose_data = attributes.get('headpose', {})
        pose_score = self.calculate_pose_score(headpose_data)
        
        eyegaze_data = attributes.get('eyegaze', {})
        gaze_score = self.calculate_gaze_score(eyegaze_data)
        
        # Combined weighted concentration score for this frame.
        concentration_score = self.calculate_weighted_concentration_score(
            emotion_score, pose_score, gaze_score
        )
        
        # Dominant emotion and its confidence, computed in a single pass
        # (the original ran max() twice over the same dict).
        if emotion_data:
            primary_emotion, emotion_confidence = max(emotion_data.items(), key=lambda item: item[1])
        else:
            primary_emotion, emotion_confidence = None, 0
        
        left_x = eyegaze_data.get('left_eye_gaze', {}).get('vector_x_component')
        right_x = eyegaze_data.get('right_eye_gaze', {}).get('vector_x_component')
        
        return {
            'frame_path': result['frame_path'],
            'concentration_score': concentration_score,
            'primary_emotion': primary_emotion,
            'emotion_confidence': emotion_confidence,
            'emotion_score': emotion_score,
            'emotion_evaluation': self.get_evaluation(emotion_score),
            'yaw_angle': headpose_data.get('yaw_angle'),
            'pitch_angle': headpose_data.get('pitch_angle'),
            'roll_angle': headpose_data.get('roll_angle'),
            'pose_score': pose_score,
            'pose_evaluation': self.get_evaluation(pose_score),
            'left_gaze_angle': left_x,
            'right_gaze_angle': right_x,
            # Missing components count as 0 for the average, as before.
            'avg_gaze_angle': ((left_x or 0) + (right_x or 0)) / 2,
            'gaze_score': gaze_score,
            'gaze_evaluation': '视线朝屏幕' if gaze_score == 100 else '视线不朝屏幕',
            'emotion_data': emotion_data,
            'api_raw_data': result
        }

    def _build_summary(self, processed_results, frame_scores):
        """Aggregate per-frame scores into the overall summary dict."""
        if not frame_scores:
            # No frame analysed successfully.
            return {
                'average_concentration': 0,
                'max_score': 0,
                'min_score': 0,
                'overall_evaluation': '分析失败',
                'emotion_avg_score': 0,
                'emotion_high_ratio': 0,
                'pose_avg_score': 0,
                'pose_high_ratio': 0,
                'gaze_avg_score': 0,
                'gaze_high_ratio': 0,
                'frame_results': processed_results
            }
        
        # Frequency-weighted overall score across all good frames.
        frequency_weighted_score = self.calculate_frequency_weighted_score(frame_scores)
        
        emotion_scores = [score[0] for score in frame_scores]
        pose_scores = [score[1] for score in frame_scores]
        gaze_scores = [score[2] for score in frame_scores]
        concentration_scores = [r.get('concentration_score', 0) for r in processed_results if 'error' not in r]
        
        n = len(frame_scores)  # > 0 here, so the divisions below are safe
        return {
            'average_concentration': frequency_weighted_score,
            'max_score': max(concentration_scores) if concentration_scores else 0,
            'min_score': min(concentration_scores) if concentration_scores else 0,
            'overall_evaluation': self.get_evaluation_for_weighted_score(frequency_weighted_score),
            'emotion_avg_score': sum(emotion_scores) / n,
            'emotion_high_ratio': len([s for s in emotion_scores if s == 70]) / n,
            'pose_avg_score': sum(pose_scores) / n,
            'pose_high_ratio': len([s for s in pose_scores if s == 70]) / n,
            'gaze_avg_score': sum(gaze_scores) / n,
            'gaze_high_ratio': len([s for s in gaze_scores if s == 100]) / n,
            'frame_results': processed_results
        }