# -*- coding: utf-8 -*-
import asyncio
import json
import os
from typing import Tuple, Dict, List

from loguru import logger

from utils.file_utils import download_attachment
from utils.documents_utils import create_documents_from_pdf
from db.repository.qa_file import db_add_qa_attach, db_update_qa_file_attributes, db_update_qa_file_scope_gb
from db.repository.audio_file import db_update_audio_file
from db.repository.attributes import db_get_element_classification_by_name
from service.es_service import add_documents_to_es, kb_vector_store, kb_vector_store_emergency
from service.meeting_summary import xfyun_asr_client, meeting_content_processor


def process_attachments(attachments: List[Dict], file_obj, save_dir: str) -> List[Dict]:
    """Process attachments extracted from a parsed PDF.

    Image attachments are downloaded into *save_dir* and registered in the
    database via ``db_add_qa_attach``; table attachments are passed through
    unchanged. Attachments that fail to download or persist are skipped.

    Args:
        attachments: Attachment dicts produced by the PDF loader.
        file_obj: QA file ORM object; its ``id`` links attachments to the file.
        save_dir: Directory where downloaded images are saved.

    Returns:
        The list of successfully processed attachment dicts (shallow copies).

    Raises:
        Exception: Re-raised after logging if the processing loop itself fails.
    """
    processed_attachments: List[Dict] = []

    try:
        for attach in attachments:
            attach_type = attach.get("type", "")
            if attach_type in ("image", "image_in_table"):
                # {"type": "image", "id": "chunk_004", "image_id": "image_012", "content": "...", "image_url": "...", "local_save_path": "abs path"}
                image_url = attach.get("image_url", "")
                if not image_url:
                    continue

                attach_origin_name = os.path.basename(image_url)
                status, img_path = download_attachment(image_url, save_dir)
                # Skip this attachment if the download failed or returned an empty path.
                if not status or not img_path:
                    logger.warning(f"Skip attachment due to download failure or empty path: {image_url}")
                    continue

                file_size = os.path.getsize(img_path)
                attach["local_save_path"] = img_path
                attach["id"] = os.path.basename(img_path)

                try:
                    attach_obj = db_add_qa_attach(attach_origin_name, file_size, img_path, file_obj.id, image_url)
                    if attach_obj:
                        processed_attachments.append({**attach})
                except Exception as e:
                    logger.error(f"save attachment to db error: {e}")

            # BUG FIX: the original used `in ("table")`, which is a substring test
            # against the string "table" — so "tab", "able" and even "" matched.
            elif attach_type == "table":
                # {"type": "table", "id": "chunk_004", "table_id": "table_004", "content": "<table>..."}
                processed_attachments.append({**attach})

        return processed_attachments
    except Exception as e:
        logger.error(f"process attachments error: {e}")
        raise


async def parse_kb_pdf_background(file_obj, file_type, pdf_save_path: str, is_delete: bool):
    """Parse a knowledge-base PDF in the background and index it into ES.

    Args:
        file_obj: QA file ORM object the PDF belongs to.
        file_type: ``"gb"`` selects the GB-standard loader and the main KB
            index; anything else uses the common loader and the emergency index.
        pdf_save_path: Absolute path of the PDF on disk.
        is_delete: Unused here; kept for interface compatibility with callers.
    """
    # Loaders are imported lazily so the heavy PDF stack is only loaded when needed.
    if file_type == "gb":
        from document_loader import GbPdfLoaderV2
        pdf_loader = GbPdfLoaderV2(pdf_save_path, delete_old_file=True)
    else:
        from document_loader import CommonPdfLoader
        pdf_loader = CommonPdfLoader(pdf_save_path, delete_old_file=True)
    pdf_metadata = pdf_loader.load()

    file_name = pdf_metadata.get("filename", None)
    scope = pdf_metadata.get("scope", None)
    gb_num = pdf_metadata.get("gb_number", None)
    element_classification = pdf_metadata.get("category", None)
    if scope or gb_num or file_name or element_classification:
        # NOTE(review): assumes db_update_qa_file_scope_gb returns the updated
        # file object (never None) — confirm, since file_obj.id is used below.
        file_obj = db_update_qa_file_scope_gb(file_obj.id, file_name, gb_num, scope)
        # Map the element-classification *name* extracted from the PDF to its DB id.
        if element_classification:
            try:
                # Only string names can be looked up; other shapes are ignored.
                if isinstance(element_classification, str):
                    classification_obj = db_get_element_classification_by_name(element_classification)
                    if classification_obj:
                        element_classification_id = classification_obj.id
                        db_update_qa_file_attributes(file_obj.id, "element_classification", [element_classification_id])
            except Exception as e:
                logger.error(f"更新要素分类失败: {e}, category={element_classification}")

    save_dir = os.path.dirname(pdf_save_path)
    processed_attachments = process_attachments(pdf_metadata.get("attachments", []), file_obj, save_dir)

    # Convert the parsed metadata into Document objects for indexing.
    documents = create_documents_from_pdf(pdf_metadata, file_obj, processed_attachments)

    # Persist to ES: GB standards go to the main KB index, others to the
    # emergency index. The two original branches were identical except for
    # the vector store, so they are collapsed here.
    vector_store = kb_vector_store if file_type == "gb" else kb_vector_store_emergency
    logger.info(f"Adding {len(documents)} documents from {file_obj.name} to ES")
    insert_ids = add_documents_to_es(vector_store, documents)
    logger.info(f"Added {len(insert_ids)} documents to ES")
    # BUG FIX: the original always logged "gb pdf" regardless of file_type.
    logger.info(f"Finished parsing {file_type} pdf: {pdf_save_path}")


async def parse_dc_excel_background(file_obj, excel_save_path: str, delete_old_file: bool = True):
    """Parse a dual-control-list Excel file in the background.

    Loading is performed entirely for its side effects (the loader persists
    the parsed data itself); the loader's return value is intentionally
    discarded — the original bound it to an unused local.

    Args:
        file_obj: QA file ORM object the spreadsheet belongs to (unused here;
            kept for interface compatibility with callers).
        excel_save_path: Absolute path of the Excel file on disk.
        delete_old_file: Forwarded to the loader to control cleanup of any
            previously parsed artifacts.
    """
    # Imported lazily so the Excel stack is only loaded when needed.
    from document_loader.excel_loader_DualControlList import DualControlListExcelLoader
    excel_loader = DualControlListExcelLoader(excel_save_path, delete_old_file=delete_old_file)
    excel_loader.load()
    logger.info(f"Finished parsing dc excel: {excel_save_path}")


async def parse_ac_pdf_background(file_obj, file_hash_path: str, delete_old_file: bool):
    """Parse an AC PDF in the background.

    Loading is performed for its side effects; the loader's return value is
    intentionally discarded — the original bound it to an unused local.

    Args:
        file_obj: QA file ORM object the PDF belongs to (unused here; kept
            for interface compatibility with callers).
        file_hash_path: Absolute path of the PDF on disk.
        delete_old_file: Forwarded to the loader to control cleanup of any
            previously parsed artifacts.
    """
    # Imported lazily so the heavy PDF stack is only loaded when needed.
    from document_loader import AcPdfLoader
    pdf_loader = AcPdfLoader(file_hash_path, delete_old_file)
    pdf_loader.load()
    logger.info(f"Finished parsing ac pdf: {file_hash_path}")


async def parse_ep_pdf_background(file_obj, file_hash_path: str, scene_type: str, delete_old_file: bool):
    """Parse an EP PDF for the given scene type in the background.

    Loading is performed for its side effects; the loader's return value is
    intentionally discarded — the original bound it to an unused local.

    Args:
        file_obj: QA file ORM object the PDF belongs to (unused here; kept
            for interface compatibility with callers).
        file_hash_path: Absolute path of the PDF on disk.
        scene_type: Scene identifier forwarded to the loader.
        delete_old_file: Forwarded to the loader to control cleanup of any
            previously parsed artifacts.
    """
    # Imported lazily so the heavy PDF stack is only loaded when needed.
    from document_loader import EpPdfLoader
    pdf_loader = EpPdfLoader(file_hash_path, delete_old_file)
    pdf_loader.load(scene_type=scene_type)
    logger.info(f"Finished parsing ep pdf: {file_hash_path}")


async def parse_audio_background(file_obj, file_hash_path: str):
    """Transcribe an audio file via the Xfyun ASR service, then generate and
    persist a meeting summary and keywords.

    Polls the ASR order every 10 seconds until it completes (status 4) or
    fails; on failure the DB status is updated and processing stops.

    Args:
        file_obj: Audio file ORM object; its ``id`` keys the DB updates.
        file_hash_path: Absolute path of the audio file on disk; the summary
            JSON is written next to it as ``<stem>_result.json``.
    """
    # Upload the audio and obtain the transcription order id.
    order_id = xfyun_asr_client.upload_audio(file_hash_path)

    # BUG FIX: `result` was previously unbound if the very first poll raised,
    # causing a NameError at the parse step below. Initialize defensively.
    result = None
    trans_status = "failed"
    while True:
        try:
            result = xfyun_asr_client.get_transcribe_result(order_id)
            result_status = result["content"]["orderInfo"]["status"]
            if result_status == 3:
                # Status 3: still transcribing — wait and poll again.
                logger.info("音频转写中，等待10秒后再次查询…")
                logger.info(f"状态码={result_status}，描述={result.get('descInfo')}")
                # Non-blocking wait so the event loop stays responsive.
                await asyncio.sleep(10)
                trans_status = "translating"
                continue
            if result_status == 4:
                # Status 4: transcription finished successfully.
                trans_status = "success"
                break
            # BUG FIX: any other status code used to busy-loop forever with no
            # sleep; treat it as a terminal failure instead.
            logger.error(f"音频转写异常: 状态码={result_status}")
            trans_status = "failed"
            break
        except Exception as e:
            logger.error(f"音频转写异常: {e}")
            trans_status = "failed"
            break

    # BUG FIX: the original fell through and parsed `result` even after a
    # failure, which could crash or store garbage. Record the status and stop.
    if trans_status != "success":
        db_update_audio_file(file_obj.id, status=trans_status)
        logger.warning(f"音频转写失败，跳过后续处理: {file_obj.id}")
        return

    text = xfyun_asr_client.parse_order_result(result) or ""

    # Empty ASR text: record the status only, so existing content in the DB
    # is never overwritten by an empty result.
    if not text.strip():
        db_update_audio_file(file_obj.id, status=trans_status)
        logger.warning(f"ASR返回文本为空，跳过摘要与关键词生成: {file_obj.id}")
        return

    db_update_audio_file(file_obj.id, content=text, status=trans_status)
    logger.info(f"音频转写结果更新到数据库: {file_obj.id}")

    # Generate the meeting summary and keywords via the LLM processor.
    result = await meeting_content_processor.process(text)
    result_path = file_hash_path.rsplit(".", 1)[0] + "_result.json"
    # Explicit encoding so non-ASCII JSON is written consistently across platforms.
    with open(result_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(result, ensure_ascii=False))
    logger.info(f"音频转写结果保存到文件: {result_path}")

    meeting_summary = result.get("summary", [])
    meeting_keywords = result.get("keywords", [])

    db_update_audio_file(file_obj.id, summary=json.dumps(meeting_summary, ensure_ascii=False), keywords=meeting_keywords)
    logger.info(f"会议总结和关键词更新到数据库: {file_obj.id}")
