import json
import os
import shutil
import tempfile
from pathlib import Path
from urllib.parse import urlparse

import cv2
import pandas as pd
from fer.fer import FER
from funasr import AutoModel as am
import librosa
import numpy as np
from django.conf import settings
from pydub import AudioSegment
from transformers import pipeline
from Interview.service.audio_utils import process_full_audio, NoAudioError, ensure_wav
from Interview.service.tingwu_services import AliyunOfflineSTT
from InterviewAgent.celery_app import app
from Interview.models import Analysis, Interview, Answer, AnswerAnalysis
from Interview.service.video_analysis import video_analysis
from Resume.service.utils import download_from_oss
from django.utils import timezone
from django.db import transaction
import logging

from Interview.service.report import generate_interview_report  # 负责调用讯飞、聚合数据并存库

logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s %(message)s')
logger = logging.getLogger(__name__)


@app.task
def analyze_video_task(file_path: str, interview_id: int):
    """
    Download an interview video from OSS, run video analysis, persist metrics.

    Steps:
      1. Download the object named by ``file_path`` (an OSS key) to a local file.
      2. Run ``video_analysis`` on the local copy.
      3. Mark the Interview finished (video_path, end_time) and create an
         Analysis row with modality_type='video'.

    Returns a status dict; failures are logged with traceback and reported in
    the Celery result instead of being re-raised.
    """
    local_path = None
    try:
        logger.info("开始分析视频: %s", file_path)

        # 1. Download the video from OSS to a local path.
        local_path = download_from_oss(file_path)
        if not local_path:
            raise RuntimeError("视频下载失败")

        # 2. Run the (CPU-heavy) analysis on the local copy.
        results = video_analysis(local_path)

        # 3. Persist: mark the interview finished and store the metrics.
        interview = Interview.objects.get(pk=interview_id)
        interview.video_path = file_path
        interview.end_time = timezone.now()
        interview.save(update_fields=['video_path', 'end_time'])

        # Analysis keeps one row per modality; metrics is the raw result dict.
        Analysis.objects.create(
            interview_id=str(interview.interview_id),
            modality_type='video',
            metrics=results
        )
        return {"status": "success", "metrics": results}
    except Exception as e:
        logger.exception("视频分析任务失败: %s", file_path)
        return {"status": "failed", "error": str(e)}
    finally:
        # FIX: the downloaded copy was never removed, filling worker disks.
        # Assumes download_from_oss returns a per-call local copy — confirm.
        if local_path and os.path.exists(local_path):
            try:
                os.remove(local_path)
            except OSError:
                logger.warning("临时视频文件删除失败: %s", local_path)


@app.task
def analyze_audio_task(file_path: str, interview_id: int):
    """
    Offline batch transcription + local acoustic feature extraction.

    1. Download the video/audio object from OSS.
    2. Submit an Aliyun Tingwu offline transcription task and poll for the
       transcript.
    3. Extract the audio track to WAV (via process_full_audio) and split it
       into PCM chunks.
    4. Compute VAD, clarity, pitch and speech-rate metrics locally.
    5. Persist an Analysis row (modality_type='audio') and update
       Interview.audio_path.
    6. Remove the per-interview working directory.
    """
    # Per-interview scratch dir; wiped up-front so a retried task starts clean.
    working_dir = Path(settings.BASE_DIR) / "temp" / str(interview_id)
    shutil.rmtree(working_dir, ignore_errors=True)
    working_dir.mkdir(parents=True, exist_ok=True)

    try:
        logger.info("[analyze_audio_task] interview_id=%s 开始", interview_id)

        # 1. Download the source file from OSS.
        local_file = download_from_oss(file_path)
        if not local_file or not os.path.exists(local_file):
            raise RuntimeError("OSS 下载失败或文件不存在")
        logger.info("下载到本地: %s", local_file)

        # 2. Offline transcription. The STT client keeps the task id from
        #    create_task internally; get_result() blocks/polls until done.
        file_url = f"{settings.ALIYUN_OSS_CONFIG['BUCKET_URL'].rstrip('/')}/{file_path}"
        stt_client = AliyunOfflineSTT()
        stt_client.create_task(file_url)
        result = stt_client.get_result()

        # Concatenate sentence-level text; fall back to the whole-file field.
        transcript = "".join(
            sent.get("Text", "")
            for sent in result.get("Result", {}).get("Sentences", [])
        )
        if not transcript:
            transcript = result.get("Transcription", {}).get("Text", "")

        # 3. Audio extraction & splitting into <=60 s PCM chunks.
        try:
            pcm_list = process_full_audio(local_file, str(working_dir), segment_seconds=60.0)
        except NoAudioError:
            # No audio track at all: store neutral defaults and bail out.
            Analysis.objects.create(
                interview_id=str(interview_id),
                modality_type="audio",
                metrics={
                    "clarity": 5.0,
                    "pace": 5.0,
                    "tone": 5.0,
                    "fillerWordsCount": 0,
                    "speechRate": 0.0,
                    "pitchMean": 0.0,
                    "duration": 0.0,
                    "transcript": "",
                    "recommendations": "未检测到音频，跳过分析。"
                }
            )
            Interview.objects.filter(pk=interview_id).update(audio_path=file_path)
            return {"status": "no_audio"}

        logger.info("生成 %d 个 PCM 片段", len(pcm_list))

        # 4. Whole-file feature extraction on the 16 kHz mono WAV that
        #    process_full_audio wrote next to the PCM chunks.
        wav16 = working_dir / (Path(local_file).stem + "_16k_mono.wav")
        y, sr = librosa.load(str(wav16), sr=None, mono=True)
        duration = len(y) / sr

        # Energy-based VAD: frame indices whose energy exceeds 1% of the
        # mean frame energy (1024-sample windows, 512-sample hop).
        energy = np.array([np.sum(y[i:i + 1024] ** 2) for i in range(0, len(y), 512)])
        speech_frames = np.where(energy > 0.01 * np.mean(energy))[0] if energy.size else []

        # Clarity from the mean spectral centroid, scaled into 1..10.
        centroid = librosa.feature.spectral_centroid(y=y, sr=sr)
        clarity_score = float(np.mean(centroid)) / 1000 * 5
        clarity = round(min(10, max(1, clarity_score)), 1)

        # Pitch statistics over voiced VAD frames only.
        pitches, _ = librosa.piptrack(y=y, sr=sr)
        n_pitch_frames = pitches.shape[1]
        vals = [
            np.mean(pitches[:, i][pitches[:, i] > 0])
            for i in speech_frames
            # FIX: the VAD frame grid can be longer than piptrack's frame
            # grid — bound-check i to avoid an IndexError near the file end.
            if i < n_pitch_frames and np.any(pitches[:, i] > 0)
        ]
        pitch_mean = round(float(np.mean(vals)) if vals else 0.0, 2)
        pitch_std = round(float(np.std(vals)) if vals else 0.0, 2)
        tone = round(min(10, max(1, pitch_std / 10)), 1)

        # Speech rate roughly estimated from the zero-crossing rate.
        zcr = np.sum(librosa.zero_crossings(y, pad=False)) / len(y)
        est_syllables = int(zcr * duration * 1.5)
        speech_rate = round(est_syllables / duration if duration > 0 else 0.0, 2)
        pace = round(min(10, max(1, 10 - abs(speech_rate - 2.5) * 2)), 1)

        # Filler-word counting on the transcript.
        filler_list = ["嗯", "啊", "那个", "就是", "然后", "其实", "所以", "你知道"]
        filler_count = sum(transcript.count(w) for w in filler_list)

        recs = []
        if clarity < 7:
            recs.append("提高发音清晰度")
        if pace < 6:
            recs.append("调整语速")
        if tone < 6:
            recs.append("增加音调变化")
        if filler_count > 5:
            recs.append("减少填充词")
        recommendations = "、".join(recs) if recs else "语音表现良好。"

        # 5. Persist the metrics and link the audio to the interview.
        Analysis.objects.create(
            interview_id=str(interview_id),
            modality_type="audio",
            metrics={
                "transcript": transcript,
                "clarity": clarity,
                "pace": pace,
                "tone": tone,
                "fillerWordsCount": filler_count,
                "speechRate": speech_rate,
                "pitchMean": pitch_mean,
                "duration": round(duration, 1),
                "recommendations": recommendations
            }
        )
        Interview.objects.filter(pk=interview_id).update(audio_path=file_path)
        logger.info("[analyze_audio_task] interview_id=%s 完成", interview_id)
        return {"status": "success"}

    except Exception as e:
        # logger.exception keeps the traceback (the old print discarded it).
        logger.exception("[analyze_audio_task] interview_id=%s 失败", interview_id)
        return {"status": "failed", "error": str(e)}

    finally:
        # 6. Always drop the scratch dir, success or failure.
        shutil.rmtree(working_dir, ignore_errors=True)


# Text emotion analysis (labels: anger, disgust, fear, joy, neutral, sadness, surprise)
@app.task
def text_emotion_analysis_task(answer_id, interview_id):
    """
    Classify the emotions of an Answer's text content and store the full
    per-label score distribution as an AnswerAnalysis row
    (modality_type='text_emotion').
    """
    try:
        logger.info("开始对answer_id=%s的文本进行情感分析", answer_id)
        content = Answer.objects.get(answer_id=answer_id).content
        # NOTE: the model is (re)loaded on every invocation; tolerable for a
        # background task, but a module-level cache would be faster.
        # NOTE(review): return_all_scores is deprecated in recent transformers
        # (top_k=None is the replacement) but changes the output nesting —
        # kept as-is to preserve the stored metrics format.
        pipe = pipeline(
            "text-classification",
            model="j-hartmann/emotion-english-distilroberta-base",
            tokenizer="j-hartmann/emotion-english-distilroberta-base",
            framework="pt",
            return_all_scores=True
        )
        data = pipe(content)
        # data[0] is the list of {label, score} dicts for the single input.
        json_data = json.dumps(data[0], ensure_ascii=False)
        AnswerAnalysis.objects.create(
            interview_id=interview_id,
            answer_id=answer_id,
            modality_type="text_emotion",
            metrics=json_data,
        )
        logger.info("[text_emotion_analysis_task] answer_id=%s 完成", answer_id)
        return {"status": "success", "metrics": json_data}
    except Exception as e:
        logger.exception("文本情感分析任务失败: answer_id=%s", answer_id)
        return {"status": "failed", "error": str(e)}


# Audio emotion analysis (labels: anger, disgust, fearful, happy, neutral, (other), sad, surprised, (unk))
@app.task
def audio_emotion_analysis_task(answer_id, interview_id):
    """
    Run emotion2vec on an answer's audio track, split into ~10 s chunks, and
    store the mean per-label scores as an AnswerAnalysis row
    (modality_type='audio_emotion').
    """
    chunk_files = []
    wav_path = None
    try:
        logger.info("开始对answer_id=%s的进行音频情感分析", answer_id)

        # Resolve the OSS object key from the stored URL and download it.
        video_url = Answer.objects.get(answer_id=answer_id).video_path
        object_key = urlparse(video_url).path.lstrip('/')
        video = download_from_oss(object_key)

        # Extract/convert the audio track to WAV.
        # FIX: the original used a fixed shared name 'output.wav' in the CWD,
        # so concurrent tasks clobbered each other's audio — use a unique
        # temp file instead.
        wav_tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        wav_tmp.close()
        wav_path = ensure_wav(video, wav_tmp.name)
        logger.info(">>> 成功提取音频")

        # Split the audio into 10-second chunks, tracking each chunk's real
        # length (the final chunk may be shorter).
        sound = AudioSegment.from_wav(wav_path)
        duration_ms = len(sound)
        chunk_length_ms = 10_000
        chunk_spans = []  # (tmp_path, chunk_duration_ms)
        for start in range(0, duration_ms, chunk_length_ms):
            end = min(start + chunk_length_ms, duration_ms)
            tmp_chunk = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
            tmp_chunk.close()
            sound[start:end].export(tmp_chunk.name, format="wav")
            chunk_files.append(tmp_chunk.name)
            chunk_spans.append((tmp_chunk.name, end - start))

        if not chunk_spans:
            # FIX: an empty recording previously crashed later on
            # all_results[0]; fail with a clear message instead.
            raise RuntimeError("音频为空，未生成任何分析片段")

        # Score each chunk with emotion2vec.
        model = am(model="iic/emotion2vec_plus_large")
        all_results = []
        for idx, (chunk_wav, span_ms) in enumerate(chunk_spans, start=1):
            logger.info("Processing chunk %d/%d: %s", idx, len(chunk_spans), chunk_wav)
            res = model.generate(
                chunk_wav,
                output_dir="./outputs",
                granularity="utterance",
                extract_embedding=False,
                batch_size=1
            )
            all_results.append({
                "chunk": idx,
                # FIX: the original hard-coded ~10 s for every chunk.
                "duration_s": round(span_ms / 1000, 2),
                "result": res
            })
        logger.info(">>> 将分割后的%d段音频分析完成", len(all_results))

        # Average the per-label score vectors across all utterances.
        first_labels = all_results[0]["result"][0]["labels"]
        score_vectors = [
            np.array(utt["scores"], dtype=float)
            for item in all_results
            for utt in item["result"]
        ]
        mean_scores = np.stack(score_vectors, axis=0).mean(axis=0)
        avg = {label: float(score) for label, score in zip(first_labels, mean_scores)}

        json_data = json.dumps(avg, ensure_ascii=False, indent=2)
        AnswerAnalysis.objects.create(
            interview_id=interview_id,
            answer_id=answer_id,
            modality_type="audio_emotion",
            metrics=json_data
        )
        logger.info("[audio_emotion_analysis_task] answer_id=%s 完成", answer_id)
        return {"status": "success", "metrics": json_data}
    except Exception as e:
        logger.exception("音频情感分析任务失败: answer_id=%s", answer_id)
        return {"status": "failed", "error": str(e)}
    finally:
        # FIX: the per-chunk temp WAVs (delete=False) and the extracted WAV
        # were never removed, leaking files on every run.
        # NOTE(review): presumes ensure_wav writes a new file rather than
        # returning the input path — confirm before relying on the delete.
        for path in chunk_files + ([wav_path] if wav_path else []):
            try:
                os.remove(path)
            except OSError:
                pass


# Video emotion analysis (labels: angry, disgust, fear, happy, sad, surprise, neutral)
@app.task
def video_emotion_analysis_task(answer_id, interview_id):
    """
    Sample every 5th frame of the answer video, run FER face-emotion
    detection, and store the mean per-label probabilities as an
    AnswerAnalysis row (modality_type='video_emotion').
    """
    try:
        logger.info("开始对answer_id=%s的进行视频情感分析", answer_id)

        # MTCNN face detector for better detection accuracy (slower).
        detector = FER(mtcnn=True)

        # Resolve the OSS object key from the stored URL and download.
        video_path = Answer.objects.get(answer_id=answer_id).video_path
        object_key = urlparse(video_path).path.lstrip('/')
        video = download_from_oss(object_key)

        cap = cv2.VideoCapture(video)
        results = []
        try:
            frame_count = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                if frame_count % 5 == 0:  # sample every 5th frame for speed
                    emotions = detector.detect_emotions(frame)
                    if emotions:
                        # Only the first detected face is scored.
                        results.append(emotions[0]["emotions"])
                frame_count += 1
        finally:
            # FIX: release the capture even if detection raises mid-video.
            cap.release()

        emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
        if results:
            # FIX: average by column name instead of zipping positionally —
            # the original silently mislabeled scores if FER's dict order
            # ever differed from emotion_labels.
            avg = pd.DataFrame(results).mean()
            emotion_distribution = {
                label: float(avg.get(label, 0.0)) for label in emotion_labels
            }
        else:
            # No face found in any sampled frame: store an explicit all-zero
            # distribution instead of an empty/NaN one.
            emotion_distribution = {label: 0.0 for label in emotion_labels}

        json_data = json.dumps(emotion_distribution, ensure_ascii=False, indent=2)
        AnswerAnalysis.objects.create(
            interview_id=interview_id,
            answer_id=answer_id,
            modality_type="video_emotion",
            metrics=json_data,
        )
        logger.info("[video_emotion_analysis_task] answer_id=%s 完成", answer_id)
        return {"status": "success", "metrics": json_data}
    except Exception as e:
        logger.exception("视频情感分析任务失败: answer_id=%s", answer_id)
        return {"status": "failed", "error": str(e)}