import base64
import json
import os
import time
from datetime import datetime

import cv2
import numpy as np
import socketio
from fastapi.responses import JSONResponse

from database.config import get_db
from lagent.agents.code_agent import CodeAgent
from lagent.agents.interview_agent import InterviewAgent
from lagent.agents.aggregator.fewshot_aggregator import FewshotAggregator
from lagent.agents.question_agent import questionAgent
from lagent.agents.question_score import QuestionScore
from lagent.agents.question_search import QuestionRetrievalEngine
from lagent.llms.spark_llm import SparkDeskLLM

from lagent.agents.facialExpression_analyze import FacialExpressionAnalyze
from models.model import Resume, User, InterviewRecord
from utils.jwt import decode_access_token

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

sio = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins='*')
UPLOAD_FOLDER = 'uploadFaceImg'  # directory (under BASE_DIR) where periodic face frames are written
connected_clients = {}  # sid -> connection metadata (connected_at, status)
stream_sessions = {}  # sid -> active video-stream session state


# LLM backing the self-introduction interview agent.
# SECURITY NOTE(review): credentials are hard-coded in source — move them to
# environment variables / a secrets store and rotate these keys.
llm = SparkDeskLLM(app_id='938ba839', api_key='5252debef08498da0080040f34d4e721', api_secret='ZWIwMWI1ODIyZTRiNDllZTFmMTFhZjZj', domain='x1') #x1
global_agent = {} # sid -> InterviewAgent for the self-introduction phase
global_codeagent = {}  # sid -> CodeAgent for the coding round

def get_routes(app):
    """Register the plain-HTTP health and status routes on the FastAPI app."""

    @app.get("/")
    async def index():
        body = {"message": "WebRTC实时流传输服务器正在运行!", "type": "realtime"}
        return JSONResponse(body)

    @app.get("/status")
    async def get_status():
        status_body = {
            "server_status": "running",
            "connected_clients": len(connected_clients),
            "active_streams": len(stream_sessions),
            "message": "WebRTC实时流传输服务器正在运行",
            "type": "realtime",
        }
        return JSONResponse(status_body)



# 下面是sio事件处理函数（connect、disconnect、start_stream、video_chunk、stop_stream等）
# ...（你的Socket.IO事件代码原样放这里）...
# Socket.IO 事件
@sio.event
async def connect(sid, environ, auth):
    """Register a freshly connected Socket.IO client and acknowledge it."""
    connected_clients[sid] = {
        'connected_at': time.time(),
        'status': 'connected',
    }
    print(f"客户端连接: {sid}")
    ack = {
        'status': 'connected',
        'client_id': sid,
        'message': '成功连接到实时流服务器',
    }
    await sio.emit('connection_response', ack, to=sid)


@sio.event
async def disconnect(sid):
    """Drop all per-client state when a Socket.IO client goes away."""
    connected_clients.pop(sid, None)
    stream_sessions.pop(sid, None)
    print(f"客户端断开: {sid}")


@sio.event
async def start_stream(sid, data):
    """Build the per-client interview agent from the user's resume and open a
    streaming session, then notify the client that the stream has started."""
    # Identify the caller from the JWT carried in the event payload.
    payload = decode_access_token(data.get('token'))
    username = payload['username']

    db = next(get_db())  # manually acquire a DB session
    resume_row = db.query(Resume).filter(Resume.username == username).first()
    resume = json.loads(resume_row.resume_content)
    technology_stack = resume['technology_stack']['main_stack']

    # Flatten every project entry into one descriptive string.
    # NOTE: 'project_expericenc_list' spelling matches the stored resume schema.
    project_str_list = [
        f"项目名称：{item.get('project_name', '')}\n"
        f"项目角色：{item.get('project_role', '')}\n"
        f"项目贡献：{item.get('project_contribution', '')}"
        for item in resume['project_expericenc_list']
    ]

    project_description = ";".join(project_str_list)
    interviewResume = f"技术栈为{technology_stack};项目经历有：{project_description}"
    # System prompt seeds the interviewer persona with the candidate's resume.
    systemPrompt = ("你是一位经验丰富的计算机领域面试官，主要负责根据候选人的自我介绍和简历内容进行专业部分的提问，"
                    "考虑到求职者是大学生群体没有丰富的项目经验，尽量减少提问技术优化之类的问题。以下是求职者的简历：") + interviewResume

    agent = InterviewAgent(
        llm=llm,
        aggregator=FewshotAggregator([{"role": "system", "content": systemPrompt}]),
    )
    global_agent[sid] = agent  # keyed by sid so each client has its own agent

    session_id = f"stream_{sid}_{int(time.time())}"
    stream_sessions[sid] = {
        'session_id': session_id,
        'started_at': time.time(),
        'chunks_received': 0,
        'total_bytes': 0,
        'status': 'streaming',
    }
    print(f"开始实时流传输: {session_id}")
    started_msg = {
        'success': True,
        'session_id': session_id,
        'message': 'AI面试实时流传输已开始',
        'timestamp': time.time(),
    }
    await sio.emit('stream_started', started_msg, to=sid)


# 开启AI面试
@sio.event
async def video_chunk(sid, data):
    """Receive one base64-encoded video chunk for an active stream session.

    Updates per-session counters, and every 40 seconds decodes one frame,
    saves it under UPLOAD_FOLDER and runs facial-expression analysis,
    emitting the detected expression back to the client. Always acks the
    chunk with running session statistics.
    """
    if sid not in stream_sessions:
        await sio.emit('stream_error', {'error': '没有活动的流会话'}, to=sid)
        return
    try:
        chunk_data = data.get('chunk')
        chunk_index = data.get('index', 0)
        if not chunk_data:
            await sio.emit('stream_error', {'error': '无效的视频数据'}, to=sid)
            return
        session = stream_sessions[sid]
        session['chunks_received'] += 1
        session['last_chunk_time'] = time.time()
        chunk_size = len(chunk_data) if isinstance(chunk_data, str) else len(str(chunk_data))
        session['total_bytes'] += chunk_size

        # Initialize the periodic-save timer on the first chunk.
        if 'last_image_save_time' not in session:
            session['last_image_save_time'] = time.time()
        now = time.time()
        if now - session['last_image_save_time'] >= 40:
            # Strip a data-URL prefix ("data:image/...;base64,") if present.
            if "base64," in chunk_data:
                chunk_data = chunk_data.split("base64,")[1]

            # BUGFIX: `label` was previously referenced after the try-block even
            # when decoding failed or raised, causing a NameError; initialize it.
            label = None
            try:
                img_bytes = base64.b64decode(chunk_data)
                img_array = np.frombuffer(img_bytes, np.uint8)
                frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

                if frame is not None:
                    filename = f"{int(time.time())}.jpg"
                    filepath = os.path.join(BASE_DIR, UPLOAD_FOLDER, filename)
                    cv2.imwrite(filepath, frame)
                    print(f"图像已保存: {filepath}")

                    # Run facial-expression analysis on the saved frame.
                    faceExpression = FacialExpressionAnalyze()
                    label = faceExpression.getExpression(filename, filepath)
                    # BUGFIX: condition was inverted (`if label is None`), so the
                    # expression event fired only when analysis FAILED and then
                    # sent None. Emit only when we actually have a label.
                    if label is not None:
                        expression = label
                        print(f"人物表情是{expression}")
                        await sio.emit('facialExpression', {
                            'expression': expression
                        }, to=sid)
                else:
                    print("图像解码失败")

            except Exception as e:
                print(f"图像处理错误: {e}")

            # Reset the timer once per cycle (previously assigned twice).
            session['last_image_save_time'] = now
            print(f"每隔40秒保存图像并分析人物表情:{label}")

        print(f"接收视频块 #{chunk_index}: {chunk_size} bytes (会话: {session['session_id']})")

        await sio.emit('chunk_processed', {
            'success': True,
            'chunk_index': chunk_index,
            'chunk_size': chunk_size,
            'session_stats': {
                'chunks_received': session['chunks_received'],
                'total_bytes': session['total_bytes'],
                'duration': time.time() - session['started_at']
            },
            'timestamp': time.time()
        }, to=sid)
    except Exception as e:
        print(f"处理视频块时出错: {str(e)}")
        await sio.emit('stream_error', {'error': f'处理视频数据失败: {str(e)}'}, to=sid)

# 保存图片
def save_base64_image(base64_data, filename):
    """Decode *base64_data* and write the raw bytes to *filename*.

    Best-effort: failures are logged, never raised to the caller.
    """
    try:
        with open(filename, 'wb') as out:
            out.write(base64.b64decode(base64_data))
    except Exception as e:
        print(f"保存图像失败: {e}")


session_state = {}
# 自我介绍开启面试
@sio.event
async def startInterview(sid, data):
    """Reset this client's Q&A state and prompt for a self-introduction."""
    session_state[sid] = {'last_question': '', 'qa_pairs': []}
    await sio.emit('message', {'text': '同学你好，请做个自我介绍。'}, to=sid)

@sio.event
async def selfIntroduction(sid, data):
    """Feed the self-introduction to the agent and emit the first question."""
    intro = data.get('answer')

    interview_agent = global_agent.get(sid)
    # NOTE: the misspelled method name comes from the InterviewAgent API.
    first_question = interview_agent.generete_FistQuestion(intro)
    session_state[sid]['last_question'] = first_question
    await sio.emit('question', {'text': first_question}, to=sid)

@sio.event
async def selfAnswer(sid, data):
    """Handle one candidate answer in the self-introduction interview.

    Records the (question, answer) pair and asks the agent for the next
    question. When the agent's reply contains the hand-off marker
    '面试拷打', the whole Q&A history is scored, an InterviewRecord is
    persisted, and an 'end' event closes this phase; otherwise the next
    question is emitted.
    """
    answer_text = data.get('answer')
    last_question = session_state[sid]['last_question']

    agent = global_agent.get(sid)  # this client's interview agent
    session_state[sid]['qa_pairs'].append((last_question, answer_text))
    next_question = agent.generate_normalQuestion(last_question, answer_text)
    session_state[sid]['last_question'] = next_question

    if '面试拷打' in next_question:
        # Serialize the full Q&A history for the scoring model.
        qa = ';'.join(
            f"对于问题「{q}」，面试者的回答是「{a}」"
            for q, a in session_state[sid]['qa_pairs']
        )
        questionScore = QuestionScore()
        score, evaluate, detail_evaluate = questionScore.score_to_ans(qa)

        token = data.get('token')
        payload = decode_access_token(token)
        username = payload['username']
        db = next(get_db())
        new_interview_record = InterviewRecord(professional_knowledge=score.knowledge,
                                               adaptability=score.stress,
                                               skill_match=score.skill,
                                               innovation=score.innovate,
                                               communication=score.language,
                                               logical_thinking=score.logic,
                                               evaluation=evaluate,
                                               # BUGFIX: module imports `from datetime import datetime`,
                                               # so `datetime.datetime.now()` raised AttributeError.
                                               interview_time=datetime.now(),
                                               username=username,
                                               professional_knowledge_evaluate=score.knowledge_evaluate,
                                               adaptability_evaluate=score.stress_evaluate,
                                               skill_match_evaluate=score.skill_evaluate,
                                               innovation_evaluate=score.innovate_evaluate,
                                               communication_evaluate=score.language_evaluate,
                                               logical_thinking_evaluate=score.logic_evaluate,
                                               )
        db.add(new_interview_record)
        db.commit()
        db.refresh(new_interview_record)

        await sio.emit('end', {'qa': session_state[sid]['qa_pairs']}, to=sid)
        del session_state[sid]

    else:
        await sio.emit('question', {'text': next_question}, to=sid)


# 面试拷打——提问八股
@sio.event
async def askTechQuestion(sid, data):
    """Start the '面试拷打' technical-question round.

    Chooses a retrieval question bank from the user's position, generates a
    question list from their tech stack, stores it in the stream session,
    and emits the first question (or a placeholder when the list is empty).
    """
    payload = decode_access_token(data.get('token'))
    username = payload['username']

    db = next(get_db())  # manually acquire a DB session
    position = db.query(User).filter(User.username == username).first().position
    resume_text = db.query(Resume).filter(Resume.username == username).first().resume_content
    resume = json.loads(resume_text)
    technology_stack = resume['technology_stack']['main_stack']

    # Front-end positions use the front-end bank; every other position
    # (the original elif '后端' and else branches were identical duplicates)
    # falls back to the back-end bank.
    if '前端' in position:
        qs_Tool = QuestionRetrievalEngine("front_questions_with_vectors.json", "front_vectors.npy", "front_questions.index")
    else:
        qs_Tool = QuestionRetrievalEngine("back_questions_with_vectors.json", "back_vectors.npy", "back_questions.index")

    qsAgent = questionAgent(llm, qs_Tool)

    questionList = qsAgent.getProfessionQuestion(technology_stack, position, None)
    print("AI面试官：", questionList)

    stream_sessions[sid].update({
        'question_list': questionList,
        'question_index': 0
    })

    if questionList:
        await sio.emit('tech_next_question', {
            'question': questionList[0],
            'index': 1,
            'total': len(questionList)
        }, to=sid)
    else:
        await sio.emit('tech_next_question', {
            'question': '暂无问题',
            'index': 0,
            'total': 0
        }, to=sid)

@sio.event
async def continueTechQuestion(sid, data):
    """Advance to the next technical question, or announce the round's end."""
    answer = data.get('answer')  # candidate's previous answer (not evaluated here)

    session = stream_sessions.get(sid, {})
    questions = session.get('question_list', [])
    next_idx = session.get('question_index', 0) + 1

    if next_idx < len(questions):
        session['question_index'] = next_idx
        payload = {
            'question': questions[next_idx],
            'index': next_idx + 1,
            'total': len(questions),
        }
    else:
        payload = {
            'question': '面试拷打提问结束',
            'index': len(questions),
            'total': len(questions),
        }
    await sio.emit('tech_next_question', payload, to=sid)


# oj
@sio.event
async def codeJudge(sid, data):
    """Generate a coding problem for the user's position and send it out."""
    payload = decode_access_token(data.get('token'))
    username = payload['username']

    db = next(get_db())  # manually acquire a DB session
    position = db.query(User).filter(User.username == username).first().position

    code_agent = CodeAgent(position)
    global_codeagent[sid] = code_agent
    content, code = code_agent.generate_code()
    await sio.emit('codeoj', {'content': content, 'code': code}, to=sid)

@sio.event
async def codeEvaluate(sid, data):
    """Analyze submitted code and send back strengths plus improvement advice."""
    submitted = data.get('answer')

    code_agent = global_codeagent[sid]
    advantage, advice, score = code_agent.analyze(submitted)
    feedback = f"代码优点\n{advantage}\n代码改进建议\n{advice}"
    await sio.emit('codeoj', {'content': feedback}, to=sid)


@sio.event
async def stop_stream(sid):
    """Close out the client's streaming session and notify it, then drop state."""
    session = stream_sessions.get(sid)
    if session is None:
        await sio.emit('stream_error', {'error': '没有活动的流会话'}, to=sid)
        return

    session['ended_at'] = time.time()
    print(f"结束实时流传输: {session['session_id']}")

    completed = {
        'success': True,
        'session_id': session['session_id'],
        'message': 'AI面试实时流传输完成',
        'timestamp': time.time(),
    }
    await sio.emit('stream_completed', completed, to=sid)

    del stream_sessions[sid]


# 模拟AI分析
# def process_ai_analysis(chunk_data, chunk_index):
#     ai_responses = [
#         "检测到清晰的音视频信号",
#         "正在分析面试者表情和语调",
#         "AI正在评估回答质量",
#         "检测到良好的眼神交流",
#         "分析语音语调变化",
#         "评估专业表达能力"
#     ]
#     return {
#         'status': 'processing',
#         'message': ai_responses[chunk_index % len(ai_responses)],
#         'confidence': round(85 + (chunk_index % 15), 1),
#         'timestamp': time.time()
#     }
#
#
# # 模拟AI面试总结
# def generate_ai_interview_summary(session):
#     return {
#         'overall_score': 87.5,
#         'communication_score': 90,
#         'technical_score': 85,
#         'confidence_level': 88,
#         'recommendations': [
#             "语言表达清晰流畅",
#             "眼神交流良好",
#             "建议加强技术细节阐述",
#             "整体表现优秀"
#         ],
#         'duration_analysis': f"面试时长适中，约 {(session.get('ended_at', 0) - session['started_at']):.1f} 秒",
#         'data_quality': "音视频质量良好，数据传输稳定"
#     }
