import asyncio
import base64 as _b64
import hashlib
import hmac
import json
import logging
import time
from collections import OrderedDict
from email.utils import formatdate
from typing import Any, Dict
from urllib.parse import urlencode, urlparse

import backoff
import websocket
from django.conf import settings
from jinja2 import Template

from Interview.models import GeneratedQuestion, Answer, AnswerAnalysis, Analysis, Report

logger = logging.getLogger(__name__)

# ---- WebSocket 参数构造 ----
class WsParam:
    """Builds the signed WebSocket URL for the iFlytek Spark chat API."""

    def __init__(self):
        parsed = urlparse(settings.XF_SPARK_ULTRA_API_HOST)
        self.host = parsed.netloc
        self.path = parsed.path

    def create_url(self):
        """Return the wss URL with HMAC-SHA256 auth query parameters.

        The signature covers the "host", "date" and request-line headers,
        per the iFlytek Spark authentication scheme.
        """
        # RFC 1123 date.  time.strftime('%a, %d %b ...') is locale-dependent
        # and emits non-English day/month names under a non-C locale, which
        # invalidates the signature; formatdate(usegmt=True) is always
        # English and always GMT.
        date = formatdate(usegmt=True)
        sign_str = f"host: {self.host}\ndate: {date}\nGET {self.path} HTTP/1.1"
        sig = _b64.b64encode(
            hmac.new(settings.XF_API_SECRET.encode(), sign_str.encode(), hashlib.sha256).digest()
        ).decode()
        auth_str = (
            f'api_key="{settings.XF_API_KEY}", algorithm="hmac-sha256", '
            f'headers="host date request-line", signature="{sig}"'
        )
        auth_b64 = _b64.b64encode(auth_str.encode()).decode()
        params = {"authorization": auth_b64, "date": date, "host": self.host}
        return settings.XF_SPARK_ULTRA_API_HOST + "?" + urlencode(params)

# ---- Prompt templates ----
# (a redundant re-import of jinja2.Template was removed here; Template is
# already imported at the top of the file)

# System message: fixes the model's role and forbids non-JSON output.
SYSTEM_PROMPT = Template("""
你是一名高校学生面试评测专家，精通多模态（文本/语音/视频）分析与评分。
请仅以纯 JSON 格式输出，不要输出任何多余文字或模式定义。
""")

# User message template: renders every question/answer pair with its text
# scores, improvement suggestions and per-modality emotion distributions,
# followed by the whole-interview audio/video metrics.  Expects the payload
# produced by collect_interview_data() (keys: qa_pairs, audio_metrics,
# video_metrics).
USER_PROMPT = Template("""
下面是本次模拟面试的完整多模态数据，请综合分析。

{% for qa in qa_pairs %}
=== 问题 {{ loop.index }} ===
问题：{{ qa.question }}

— 回答文本 —
{{ qa.answer_text }}

— 文本评分（0–10） —
专业知识 (knowledge):      {{ qa.text_metrics.knowledge }}
技能匹配 (match):           {{ qa.text_metrics.match }}
语言表达 (clarity):         {{ qa.text_metrics.clarity }}
逻辑思维 (structure):       {{ qa.text_metrics.structure }}
创新能力 (innovation):       {{ qa.text_metrics.innovation }}
应变抗压 (adaptability):     {{ qa.text_metrics.adaptability }}

改进建议：
{% for s in qa.text_metrics.suggestions %}
  - {{ s }}
{% endfor %}

— 文本情感分布 —
{% for emo in qa.text_emotion %}
  - {{ emo.label }}：{{ (emo.score * 100)|round(1) }}%
{% endfor %}

— 音频情感分布 —
{% for emo in qa.audio_emotion %}
  - {{ emo.label }}：{{ (emo.score * 100)|round(1) }}%
{% endfor %}

— 视频情感分布 —
{% for emo in qa.video_emotion %}
  - {{ emo.label }}：{{ (emo.score * 100)|round(1) }}%
{% endfor %}

{% endfor %}

=== 整体多模态指标 ===
— 音频整体 —
{% for k, v in audio_metrics.items() %}
  - {{ k }}：{{ v }}
{% endfor %}

— 视频整体 —
{% for k, v in video_metrics.items() %}
  - {{ k }}：{{ v }}
{% endfor %}
""")

# Output-format instruction appended to the system message: describes the
# required report JSON schema.  The "注意" list was mis-numbered
# (1,2,1,2,3,4) in the original; renumbered 1-6, text otherwise unchanged.
ASSISTANT_INSTRUCTION = Template("""
请基于以上所有数据输出 **纯 JSON**，**不要回显任何多余文字或 Schema 定义**。  
生成一个包含以下字段的对象：  
- overall_score（0–100 的整数）  
- radar_data（六项能力分数字典）  
- strengths（强项名称数组）  
- weaknesses（弱项名称数组）  
- questionDetails（数组，每项包含 question、scores、feedback）  
- audio_analysis（对象）  
- video_analysis（对象）  
- learning_paths（按弱项列出学习资源的对象）   

注意：
1. 如果输入数据中任何指标缺失，AI 需要输出对应字段的空对象 {} 或空数组 []。
2. 关于建议部分，尽可能多输出一点
3. overall_score 按 PositionConfig 中的权重动态计算。
4. radar_data 包含所有六项能力的数值。
5. questionDetails 中，每题 feedback 应提炼为一句话。
6. learning_paths 给出每个弱项 2 条资源，格式为 { \"能力\": [ {\"name\":\"…\",\"url\":\"…\"}, … ] }。
严格输出上述 JSON，不得额外输出。  
""")


# ---- 数据聚合 ----
def collect_interview_data(interview_id):
    """Aggregate per-question answers, per-answer modality analyses and the
    whole-interview audio/video metrics into the payload consumed by the
    prompt templates.

    Args:
        interview_id: primary key of the interview to aggregate.

    Returns:
        dict with keys 'qa_pairs' (list of per-question records),
        'audio_metrics' and 'video_metrics' (dicts, possibly empty).
    """
    # The six ability keys scored by the text-modality analysis (0-10 each).
    ability_keys = ('knowledge', 'match', 'clarity', 'structure',
                    'innovation', 'adaptability')

    # 1. All generated questions for this interview.
    questions = list(GeneratedQuestion.objects.filter(interview_id=interview_id))
    q_ids = [str(q.generated_q_id) for q in questions]

    # 2. All answers, keyed by the question they belong to.
    answers = Answer.objects.filter(generated_q_id__in=q_ids)
    ans_map = {str(ans.generated_q_id): ans for ans in answers}

    # 3. Per-answer analyses, grouped by answer_id then modality type.
    all_anas = AnswerAnalysis.objects.filter(
        interview_id=interview_id,
        modality_type__in=['text', 'audio', 'video'],
        answer_id__in=[str(ans.answer_id) for ans in answers]
    )
    ana_map = {}
    for ana in all_anas:
        ana_map.setdefault(str(ana.answer_id), {})[ana.modality_type] = ana.metrics or {}

    # 4. Assemble one record per question.
    qa_pairs = []
    for q in questions:
        rec = {'question': q.question_content}
        ans = ans_map.get(str(q.generated_q_id))
        if ans:
            rec['answer_text'] = ans.content
            per = ana_map.get(str(ans.answer_id), {})
            text = per.get('text', {})  # hoisted: was looked up seven times
            rec['text_metrics'] = {k: text.get(k, 0) for k in ability_keys}
            rec['text_metrics']['suggestions'] = text.get('suggestions', [])
            # NOTE(review): ana_map only ever contains the keys
            # 'text'/'audio'/'video' (see the modality_type__in filter above),
            # so 'text_emotion' can never be present and this is always [] —
            # confirm where the text emotion distribution actually lives.
            rec['text_emotion'] = per.get('text_emotion', [])
            rec['audio_emotion'] = per.get('audio', [])
            rec['video_emotion'] = per.get('video', [])
        else:
            # Unanswered question: emit a fully-shaped empty record so the
            # templates can iterate without special-casing.
            rec['answer_text'] = ''
            rec['text_metrics'] = {k: 0 for k in ability_keys}
            rec['text_metrics']['suggestions'] = []
            rec['text_emotion'] = []
            rec['audio_emotion'] = []
            rec['video_emotion'] = []
        qa_pairs.append(rec)

    # 5. Latest whole-interview audio/video analyses (may be absent).
    latest_audio = (Analysis.objects
                    .filter(interview_id=interview_id, modality_type='audio')
                    .order_by('-created_at').first())
    latest_video = (Analysis.objects
                    .filter(interview_id=interview_id, modality_type='video')
                    .order_by('-created_at').first())

    # The "re-default" block that used to follow was dead code: all three
    # keys are unconditionally set right here.
    return {
        'qa_pairs': qa_pairs,
        'audio_metrics': latest_audio.metrics if latest_audio else {},
        'video_metrics': latest_video.metrics if latest_video else {},
    }


# ---- 构造消息 ----
def build_messages(data):
    """Assemble the chat messages sent to the Spark API.

    The system prompt and the output-format instruction are merged into a
    single system message; the aggregated interview data (as returned by
    collect_interview_data) is rendered into the user message.
    """
    system_text = "\n".join([SYSTEM_PROMPT.render(), ASSISTANT_INSTRUCTION.render()])
    user_text = USER_PROMPT.render(**data)
    return [
        {'role': 'system', 'content': system_text},
        {'role': 'user', 'content': user_text},
    ]


# ---- 调用星火 WebSocket ----
@backoff.on_exception(backoff.expo, Exception, max_tries=3)
def call_xf(messages, timeout=10):
    """Send *messages* to the Spark 4.0 Ultra API over WebSocket and return
    the JSON portion of the streamed reply.

    Args:
        messages: chat history as produced by build_messages(), e.g.
            [{'role': 'system', 'content': '...'},
             {'role': 'user',   'content': '...'}]
        timeout: seconds to wait for a pong before the connection is
            considered dead (the parameter was previously accepted but
            never used).

    Returns:
        str: the reply substring from the first '{' to the last '}'.

    Raises:
        RuntimeError: on an API-level error frame, a transport error, or
            when no complete JSON object was received.  @backoff retries
            the whole call up to 3 times.
    """
    result = {'content': '', 'error': None}

    def on_message(ws, message):
        data = json.loads(message)
        # A non-zero header code is an API-level error frame; stop streaming.
        if data.get('header', {}).get('code') != 0:
            result['error'] = data
            ws.close()
            return
        chunk = data['payload']['choices']['text'][0].get('content', '')
        result['content'] += chunk
        # status == 2 marks the final frame of the stream.
        if data['header'].get('status') == 2:
            ws.close()

    def on_open(ws):
        ws.send(json.dumps({
            'header': {
                'app_id': settings.XF_APPID,
                'uid': 'user_report'
            },
            'parameter': {
                'chat': {
                    'domain': '4.0Ultra',
                    'temperature': 0.7,
                    'max_tokens': 8192,
                    'top_k': 4,
                    'show_ref_label': True
                }
            },
            'payload': {
                'message': {
                    'text': messages
                }
            }
        }))

    ws = websocket.WebSocketApp(
        WsParam().create_url(),
        on_open=on_open,
        on_message=on_message,
        on_error=lambda ws, e: result.update(error=str(e)),
        on_close=lambda ws, *_: None
    )

    # ping_timeout must be strictly less than ping_interval, so stretch the
    # interval when a large timeout is requested.
    ws.run_forever(ping_interval=max(20, timeout + 1), ping_timeout=timeout)

    # Fail loudly on transport/API errors so @backoff retries with the real
    # cause instead of a generic "incomplete JSON" message.
    if result['error']:
        raise RuntimeError(f"讯飞 API 调用失败：{result['error']}")

    # Extract the JSON portion of the model reply.
    try:
        s = result['content'].index('{')
        e = result['content'].rindex('}') + 1
        return result['content'][s:e]
    except ValueError:
        logger.error("无法提取 JSON 段：%s", result['content'])
        raise RuntimeError("未收到完整 JSON 响应")

# ---- 主流程 ----
def extract_last_json(raw: str) -> str:
    """Extract the substring from the first '{' to the last '}' in *raw*.

    The span is assumed to be the model's JSON object; surrounding prose is
    discarded.  No validation happens here — the caller json.loads() it.

    Raises:
        ValueError: if *raw* contains no plausible JSON object, including
            the degenerate case where the last '}' precedes the first '{'
            (e.g. "} text {"), which the old find/rfind arithmetic silently
            turned into an empty or garbage slice.
    """
    start = raw.find('{')
    end = raw.rfind('}') + 1
    if start == -1 or end <= start:
        raise ValueError("No JSON object found in response")
    return raw[start:end]


def generate_interview_report(interview_id):
    """Generate the AI interview report for *interview_id* and persist it
    via Report.objects.update_or_create.

    Falls back to an all-empty report when the model response cannot be
    obtained or parsed as JSON.
    """
    # Correctly-typed empty value for every required report key.  The old
    # back-fill derived the type from key-name heuristics and wrongly gave
    # 'overall_score' and 'learning_paths' an empty list.
    defaults = {
        'overall_score':    0,
        'radar_data':       {},
        'strengths':        [],
        'weaknesses':       [],
        'questionDetails':  [],
        'audio_analysis':   {},
        'video_analysis':   {},
        'learning_paths':   {},
    }

    logger.info(f"[REPORT] 开始生成 report for interview {interview_id}")
    data = collect_interview_data(interview_id)
    msgs = build_messages(data)
    raw = call_xf(msgs)
    try:
        json_str = extract_last_json(raw)
        report = json.loads(json_str)
        logger.info(f"[REPORT] AI 端返回 report json: {report!r}")
        # Back-fill any key the model omitted with its typed empty default.
        for key, empty in defaults.items():
            if key not in report:
                report[key] = empty
    except Exception as e:
        logger.error("JSON 解析失败：%s\n原始内容：%s", e, raw)
        report = dict(defaults)
    logger.info("[REPORT] 正在 update_or_create Report……")
    obj, created = Report.objects.update_or_create(
        interview_id=interview_id,
        defaults={'metrics': report}
    )
    logger.info(f"[REPORT] update_or_create 完成，created={created}，对象 id={obj.report_id}")


def compare_two_reports(r1: Dict[str, Any], r2: Dict[str, Any]) -> Dict[str, Any]:
    """Compare the metrics of two interview reports.

    Args:
        r1: metrics dict of the earlier interview report.
        r2: metrics dict of the later interview report.

    Returns:
        OrderedDict with two keys:
            "diff":      structured deltas (second report minus first);
            "narrative": plain-text AI interpretation (no JSON extraction).
    """
    diff: Dict[str, Any] = {}

    # 1) overall_score delta
    s1 = r1.get("overall_score", 0)
    s2 = r2.get("overall_score", 0)
    diff["overall_score_delta"] = s2 - s1

    # 2) radar_data delta over the union of ability keys
    rd1, rd2 = r1.get("radar_data", {}), r2.get("radar_data", {})
    diff["radar_delta"] = {k: rd2.get(k, 0) - rd1.get(k, 0) for k in set(rd1) | set(rd2)}

    # 3) strengths / weaknesses comparison
    st1, st2 = set(r1.get("strengths", [])), set(r2.get("strengths", []))
    wk1, wk2 = set(r1.get("weaknesses", [])), set(r2.get("weaknesses", []))
    diff["new_strengths"]    = list(st2 - st1)
    # NOTE(review): this lists strengths *retained* across both interviews;
    # the key name "fixed_strengths" is odd — confirm intended semantics.
    diff["fixed_strengths"]  = list(st1 & st2)
    diff["new_weaknesses"]   = list(wk2 - wk1)
    # Bug fix: a weakness is "fixed" when it appears in the first report but
    # not the second.  The old `wk1 & wk2` listed *persisting* weaknesses.
    diff["fixed_weaknesses"] = list(wk1 - wk2)

    # 4) audio/video analysis deltas over the keys both reports share
    aa1, aa2 = r1.get("audio_analysis", {}), r2.get("audio_analysis", {})
    va1, va2 = r1.get("video_analysis", {}), r2.get("video_analysis", {})
    diff["audio_delta"] = {k: aa2.get(k, 0) - aa1.get(k, 0) for k in aa1.keys() & aa2.keys()}
    diff["video_delta"] = {k: va2.get(k, 0) - va1.get(k, 0) for k in va1.keys() & va2.keys()}

    # 5) questionDetails: answered count and average total score.  A question
    # counts as answered when any of its scores is positive.
    qd1, qd2 = r1.get("questionDetails", []), r2.get("questionDetails", [])
    answered1 = [q for q in qd1 if any(v > 0 for v in q.get("scores", {}).values())]
    answered2 = [q for q in qd2 if any(v > 0 for v in q.get("scores", {}).values())]
    avg1 = sum(sum(q["scores"].values()) for q in answered1) / (len(answered1) or 1)
    avg2 = sum(sum(q["scores"].values()) for q in answered2) / (len(answered2) or 1)
    diff["answered_count_delta"] = len(answered2) - len(answered1)
    diff["avg_score_delta"]      = avg2 - avg1

    # 6) AI-generated natural-language interpretation.  Unlike call_xf, the
    # reply is used verbatim as plain text (no JSON extraction).
    system  = (
        "你是一名面试测评专家，请比较下面两次模拟面试报告的主要区别，\n"
        "并总结第二次面试的最大进步，以及三条针对性改进建议。"
    )
    user  = (
        f"第一次报告：\n{json.dumps(r1, ensure_ascii=False, indent=2)}\n\n"
        f"第二次报告：\n{json.dumps(r2, ensure_ascii=False, indent=2)}"
    )
    result = {"content": "", "error": None}

    def on_message(ws, message):
        data = json.loads(message)
        # Non-zero header code is an API-level error frame; stop streaming.
        if data.get("header", {}).get("code") != 0:
            result["error"] = data
            ws.close()
            return
        chunk = data["payload"]["choices"]["text"][0].get("content", "")
        result["content"] += chunk
        # status == 2 marks the final frame of the stream.
        if data["header"].get("status") == 2:
            ws.close()

    def on_open(ws):
        ws.send(json.dumps({
            "header": {"app_id": settings.XF_APPID, "uid": "compare_report"},
            "parameter": {"chat": {"domain": "4.0Ultra", "temperature": 0.7, "max_tokens": 8192, "top_k": 4}},
            "payload": {"message": {"text": [{"role": "system", "content": system}, {"role": "user", "content": user}]}}
        }))

    ws = websocket.WebSocketApp(
        WsParam().create_url(),
        on_open=on_open,
        on_message=on_message,
        on_error=lambda ws, e: result.update(error=str(e)),
        on_close=lambda ws, *_: None
    )
    # Synchronous: blocks until the stream completes or errors out.
    ws.run_forever(ping_interval=30)

    if result["error"]:
        logger.error("对比 AI 调用失败：%s", result["error"])
        narrative = "AI 解读失败，请稍后重试。"
    else:
        narrative = result["content"].strip()

    return OrderedDict([("diff", diff), ("narrative", narrative)])
