# E:\Download\ChatRoomProject\Document\service\resumes_service.py
import asyncio
import base64
import datetime
import hashlib
import hmac
import logging
import time
from typing import Optional, List, Dict
from uuid import uuid4

import aioredis
import requests
from sqlalchemy.orm import Session

from model.resumes import Resume
from model.interviews import Interview  # Interview model

logger = logging.getLogger(__name__)
# Global Redis connection instance (injected by main.py via init_redis()).
redis_client = None
# Pool of Redis pub/sub connections — populated elsewhere; not used in this module's visible code.
redis_pubsub_pool = {}

def init_redis(client: aioredis.Redis):
    """Install the application-wide Redis connection (called once from main.py)."""
    global redis_client
    redis_client = client


class DocumentUploader:
    """Builds the signed authentication headers for the xfyun ChatDoc open API.

    Signature scheme (dictated by the third-party API, hence MD5/SHA1):
    hex(md5(appId + timestamp)) is HMAC-SHA1-signed with the API secret,
    then base64-encoded.
    """

    def __init__(self, APPId: str, APISecret: str, timestamp: str):
        self.APPId = APPId
        self.APISecret = APISecret
        self.Timestamp = timestamp

    def get_origin_signature(self) -> str:
        """Return md5(appId + timestamp) as a hex digest string."""
        raw = (self.APPId + self.Timestamp).encode("utf-8")
        return hashlib.md5(raw).hexdigest()

    def get_signature(self) -> str:
        """Return the base64-encoded HMAC-SHA1 of the origin signature."""
        mac = hmac.new(
            self.APISecret.encode('utf-8'),
            self.get_origin_signature().encode('utf-8'),
            digestmod=hashlib.sha1,
        )
        return base64.b64encode(mac.digest()).decode(encoding='utf-8')

    def get_header(self) -> Dict[str, str]:
        """Assemble the header dict the ChatDoc endpoints expect."""
        return {
            "appId": self.APPId,
            "timestamp": self.Timestamp,
            "signature": self.get_signature(),
        }


async def create_interview_record(
    db: Session,
    user_id: str,
    position: str,
    interviewid: str,
    resume_id: Optional[str] = None,
    end_time: Optional[datetime.datetime] = None
):
    """Insert a new interview record into the interviews table.

    start_time is set to "now" and status to "进行中" (in progress) internally;
    they are not caller-supplied. (The old docstring documented them as
    parameters — fixed.)

    :param db: SQLAlchemy session
    :param user_id: owning user's ID
    :param position: job position for the interview
    :param interviewid: identifier stored as interview_id on the row
    :param resume_id: optional associated resume ID
    :param end_time: optional end time; None while the interview is ongoing
    :return: the persisted Interview instance
    :raises Exception: re-raises any DB error after rolling back the session
    """
    start_time: datetime.datetime = datetime.datetime.now()  # naive local time — matches existing rows
    status: str = "进行中"  # "in progress"
    try:
        db_interview = Interview(
            interview_id=interviewid,
            user_id=user_id,
            resume_id=resume_id,
            position=position,
            start_time=start_time,
            end_time=end_time,
            status=status
        )
        db.add(db_interview)
        db.commit()
        db.refresh(db_interview)
        # Fix: removed the stray print() that duplicated this log line; use
        # lazy %-style args so formatting only happens when the level is on.
        logger.info("面试记录已成功保存到数据库: %s", db_interview.interview_id)
        return db_interview
    except Exception as e:
        db.rollback()
        logger.error("保存面试记录失败: %s", e)
        raise


async def fetch_parsed_data(fileId: str, APPId: str, APISecret: str) -> Optional[dict]:
    """Query the ChatDoc summary endpoint and cache a successful result in Redis.

    On success the summary is cached under ``file:{fileId}:summary`` with a
    36000s (10h) TTL, and ``{"summary": ...}`` is returned; any failure
    (non-200, non-zero API code, missing summary, network error) returns None.

    :param fileId: third-party file ID of the uploaded document
    :param APPId: xfyun app ID used to build the signed headers
    :param APISecret: xfyun signing secret
    :return: {"summary": <text>} or None
    """
    timestamp = str(int(time.time()))
    uploader = DocumentUploader(APPId, APISecret, timestamp)
    headers = uploader.get_header()

    body = {
        "fileId": fileId
    }

    try:
        response = requests.post(
            "https://chatdoc.xfyun.cn/openapi/v1/file/summary/query",
            data=body,
            headers=headers,
            timeout=30,  # fix: fail fast instead of hanging the caller's retry loop forever
        )
        # Fix: parse the JSON body once instead of re-parsing it three times.
        payload = response.json() if response.status_code == 200 else None
        summary = None
        if payload and payload.get("code") == 0:
            summary = payload["data"].get("summary")
        if summary:
            summary_data = {"summary": summary}
            # Cache under file:{fileId}:summary. Stored via str() (not JSON) —
            # kept as-is for backward compatibility with existing readers.
            redis_key = f"file:{fileId}:summary"
            await redis_client.set(redis_key, str(summary_data))
            await redis_client.expire(redis_key, 36000)
            return summary_data
        logger.error(f"获取 summary 失败: {response.text}")
        return None
    except Exception as e:
        logger.error(f"请求 summary 接口异常: {e}")
        return None



import asyncio

async def _invalidate_resume_cache(userid: str):
    """Delete the user's cached resume list twice, 1s apart (delayed double
    delete — guards against a concurrent reader re-caching stale data
    between the DB write and the first delete)."""
    await redis_client.delete(f"resume:{userid}")
    await asyncio.sleep(1)
    await redis_client.delete(f"resume:{userid}")


async def save_resume_to_db(db: Session, userid: str, fileId: str, APPId: str, APISecret: str):
    """Background task: persist a resume row, then poll for its parsed data.

    Step 1 inserts the resume with parsed_data=None so it is visible
    immediately; step 2 polls the summary endpoint (up to 10 attempts,
    100s apart); step 3 writes the parsed data back onto the row. The user's
    resume cache is invalidated after each DB write.

    :param db: SQLAlchemy session
    :param userid: owning user's ID
    :param fileId: third-party file ID of the uploaded document
    :param APPId: xfyun app ID
    :param APISecret: xfyun signing secret
    """
    max_retries = 10

    try:
        # Step 1: insert the base record with parsed_data left empty.
        db_resume = Resume(
            resume_id=uuid4(),
            user_id=userid,
            parsed_data=None,
            fileids=fileId
        )
        db.add(db_resume)
        db.commit()
        db.refresh(db_resume)
        logger.info("简历 %s 基础信息已保存到数据库，用户ID: %s", fileId, userid)
        await _invalidate_resume_cache(userid)

        # Step 2: poll until the parser returns data or retries are exhausted.
        parsed_data = None
        for attempt in range(1, max_retries + 1):
            parsed_data = await fetch_parsed_data(fileId, APPId, APISecret)
            if parsed_data:
                break
            logger.info("获取解析数据失败，第 %d 次重试 fileId=%s", attempt, fileId)
            await asyncio.sleep(100)

        if not parsed_data:
            logger.error("超过最大重试次数，仍无法获取解析数据，fileId=%s", fileId)
            return

        # Step 3: write the parsed data back onto the existing row.
        db_resume.parsed_data = parsed_data
        db.commit()
        db.refresh(db_resume)
        logger.info("简历 %s 的解析数据已更新到数据库", fileId)
        await _invalidate_resume_cache(userid)

    except Exception as e:
        # Background task: log and swallow — there is no caller to notify.
        db.rollback()
        logger.error("更新简历解析数据失败: %s", e)
