import json
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.constants import END
from langgraph.graph import add_messages
from app.utils.llm_client import LLMClient
from app.models import SessionState, CustomEncoder
import os
from typing import Dict, Any, List, Optional
from http import HTTPStatus
from pymilvus import MilvusClient
from openai import OpenAI
import dashscope
from openpyxl import load_workbook
from docx import Document

from app.utils.logger_util import user_logger

RANK_TOPK = 5    # vector-recall candidates kept per requirement field (scaled by field count)
RERANK_TOPK = 1  # reranked context texts kept per requirement field



class FieldAvailableOrNot:
    """String constants describing whether a requirement field is available."""
    AVAILABLE = "available"
    NOT_AVAILABLE = "not_available"
# JSON template for structured log messages; each node parses a fresh copy via
# json.loads(message_str) and fills in type/desc/data before logging.
message_str='''{
	"type": "",
	"message": 
		{
			"desc": "",
			"data": ""	
		}
    }'''
# Canned field-availability result used as an offline stand-in for the RAG
# pipeline (see the "Windows" switch inside field_availability_check_node).
test_result='''{
	"summary": "分析了150个字段，其中80个直接获取，50个通过加工获取，20个未找到需要采集",
	"fields_info": [
		{
			"original_field": "用户年龄",
			"status": "1",
			"simple_match_top3": ["用户表-age", "会员表-birth_year", "订单表-user_birthdate"],
			"final_match": "用户表-age",
			"match_status": "直接使用",
			"final_match_reason": "在匹配过程中，首先通过字段名称相似度算法对'用户年龄'进行匹配，得到三个候选字段：'用户表-age'（相似度95%）、'会员表-birth_year'（相似度85%）、'订单表-user_birthdate'（相似度75%）。经过数据质量评估，'用户表-age'字段数据完整度达到98%，且数据类型匹配（整数型），可直接使用无需加工。因此选择'用户表-age'作为最终匹配字段。"

		},
		{
			"original_field": "月消费金额",
			"status": "1",
			"simple_match_top3": ["订单表-total_amount", "支付表-monthly_sum", "用户表-avg_spend"],
			"final_match": "支付表-monthly_sum",
			"match_status": "加工后使用",
			"final_match_reason": "对'月消费金额'进行匹配后，得到三个候选字段：'订单表-total_amount'（单笔订单金额）、'支付表-monthly_sum'（月汇总金额）、'用户表-avg_spend'（平均消费金额）。虽然'支付表-monthly_sum'名称匹配度最高，但该字段需要按用户ID进行分组汇总，并过滤掉测试数据。经过数据加工处理（包括分组聚合和数据清洗），最终确定使用'支付表-monthly_sum'作为基础字段进行加工后使用。"
		},
		{
			"original_field": "最近登录设备",
			"status": "0",
			"simple_match_top3": ["登录表-device_type", "用户表-last_device", "设备表-model"],
			"final_match": "",
			"match_status": "待采集后使用",
			"final_match_reason": "匹配到三个相关字段：'登录表-device_type'（数据完整度仅30%）、'用户表-last_device'（字段已废弃）、'设备表-model'（缺乏设备与用户的关联信息）。现有数据源均无法提供完整可用的设备信息，需要采集新的设备日志数据并建立用户-设备映射关系后才能使用。建议新增设备信息采集流程。"
		},
		{
			"original_field": "最近登录人ID",
			"status": "0",
			"simple_match_top3": ["登录表-device_type", "用户表-last_device", "设备表-model"],
			"final_match": "",
			"match_status": "待采集后使用",
			"final_match_reason": "匹配到三个相关字段：'登录表-device_type'（数据完整度仅10%）。现有数据源均无法提供完整可用的设备信息，需要采集新的设备日志数据并建立用户-设备映射关系后才能使用。建议新增设备信息采集流程。"
		}
	]
}'''

# -- Tunable parameters --
_MAX_ATTEMPTS = 3  # maximum number of requirement-revision rounds

def _compose_original_text(state: "SessionState") -> str:
    """从多个来源稳健拼出 original_text：require_json -> user_require(list) -> 最近一条人类消息。"""
    # 1) require_json
    req = state.get("require_json")
    if isinstance(req, str):
        try:
            req = json.loads(req)
        except Exception:
            req = None
    if isinstance(req, dict):
        ot = (
            req.get("original_text")
            or (req.get("parsed_require") or {}).get("original_text")
            or ""
        )
        if ot and isinstance(ot, str):
            return ot.strip()

    # 2) user_require(list[str]
    ur = state.get("user_require") or []
    if isinstance(ur, (list, tuple)):
        joined = ", ".join([str(x) for x in ur if x])
        if joined.strip():
            return joined.strip()

    # 3) 最近一条 HumanMessage
    try:
        msgs = state.get("messages") or []
        human_texts = [m.content for m in msgs if isinstance(m, HumanMessage)]
        if human_texts:
            return str(human_texts[-1]).strip()
    except Exception:
        pass

    return ""


def construct_query_sentence(state: 'SessionState'):
    """
    Compose require_json (v1) into one natural-language Chinese query sentence.

    Only fields with status == '1' are kept.  Field clause format:
      提取<original_field...>字段，其中，<original_field>从<direct_from_table>提取
    (the "从...提取" mapping appears only for fields with a direct_from_table).

    Returns:
        tuple[str, int]: (sentence, field_count); field_count counts only
        fields with status == '1'.  On any parse failure returns ("", 0).

    Bug fix: the failure paths previously returned a dict while the success
    path returned a tuple; the caller tuple-unpacks the result, so it silently
    received the dict's keys ("sentence", "field_count") as values.  All paths
    now return a tuple.
    """
    req = state.get("require_json")

    # 1) Accept both a JSON string and an already-parsed dict.
    if isinstance(req, str):
        try:
            req = json.loads(req)
        except Exception:
            return "", 0

    if not isinstance(req, dict):
        return "", 0

    pr: Dict[str, Any] = (req.get("parsed_require") or {})
    if not isinstance(pr, dict):
        return "", 0

    # 2) Helper: a value is meaningful unless it is None/empty/"未明确".
    def present(v) -> bool:
        return not (v is None or v == "" or v == "未明确")

    cycle_map = {"Y": "年", "M": "月", "D": "日", "H": "小时", "W": "周"}

    # 3) Scalar requirement attributes.
    data_range = pr.get("data_range")
    cycle = pr.get("cycle")
    extract_methord = pr.get("extract_methord")
    effectiveness = pr.get("effectiveness")
    applications = pr.get("applications")

    # 4) Field list (status == "1" only) plus direct_from_table mappings.
    fields_info = pr.get("fields_info") or []
    original_fields: List[str] = []
    mappings: List[str] = []  # entries like "original_field从direct_from_table提取"

    if isinstance(fields_info, list):
        for f in fields_info:
            if not isinstance(f, dict):
                continue
            if str(f.get("status", "1")).strip() != "1":
                continue

            of = (f.get("original_field") or "").strip()
            df = (f.get("direct_from_table") or "").strip()

            if present(of):
                original_fields.append(of)
                if present(df):
                    # Keep only the table part of "table.column".
                    if "." in df:
                        df = df.split(".", 1)[0]
                    mappings.append(f"{of}从{df}提取")

    field_count = len(original_fields)

    # 5) Assemble the sentence piece by piece.
    parts: List[str] = []

    # Opening clause.
    if present(data_range):
        parts.append(f"请帮我查一下{data_range}")
    else:
        parts.append("请帮我查一下相关数据")

    # Field clause.
    if original_fields:
        field_clause = f"提取{'、'.join(original_fields)}"
        if mappings:
            field_clause += f"，其中，{'、'.join(mappings)}"
        parts.append(field_clause)

    # Data cycle.
    if present(cycle):
        parts.append(f"数据周期请按{cycle_map.get(cycle, cycle)}提供")

    # Extraction method.
    if present(extract_methord):
        parts.append(f"进行{extract_methord}更新")

    # Timeliness.
    if present(effectiveness):
        parts.append(f"时效性为{effectiveness}")

    # Application scenario.
    if present(applications):
        parts.append(f"主要用于{applications}")

    # Defensive: parts always has the opening clause, but keep the guard.
    if not parts:
        return "", field_count

    sentence = parts[0]
    if len(parts) > 1:
        sentence += "，" + "，".join(parts[1:])
    if not sentence.endswith("。"):
        sentence += "。"

    return sentence, field_count


def _safe_parse_field_availability(answer_pack: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """
    兼容两种返回结构：
    - {'answer': '<json str>'}
    - {'result': {...}} 或 {'result': '<json str>'}
    最终返回 dict；解析失败返回 None。
    """
    if not isinstance(answer_pack, dict):
        return None

    payload = None
    if "result" in answer_pack:
        payload = answer_pack["result"]
    elif "answer" in answer_pack:
        payload = answer_pack["answer"]

    if payload is None:
        return None

    if isinstance(payload, dict):
        return payload

    if isinstance(payload, str):
        try:
            return json.loads(payload)
        except Exception:
            return None

    # 其他类型，转成 str 再尝试
    try:
        return json.loads(str(payload))
    except Exception:
        return None


def field_availability_check_node(state: SessionState,config: dict) -> SessionState:
    """
    Field-availability check node.

    - If the user has already said they are satisfied -> go straight to
      generate_report.
    - Otherwise: build original_text and (past the attempt threshold)
      optionally normalize it with an LLM before querying the knowledge base.
    - Generate a follow-up question from the match result and route to
      human_require_confirm.
    - Once the maximum attempt count is exceeded, emit the raw result and
      move on to generate_report.

    Args:
        state: current session state (dict-like SessionState).
        config: LangGraph runtime config; thread_id is read from
            config["configurable"]["thread_id"].

    Returns:
        SessionState: a copy of *state* with "next" set to the follow-up node.
    """
    new_state = state.copy()
    thread_id=config.get("configurable",{}).get("thread_id")
    # Parse a fresh copy of the JSON log-message template for this invocation.
    message_dict = json.loads(message_str)

    # 1) Bump the revision-loop counter.
    new_state["user_loop_count"] = int(new_state.get("user_loop_count", 0)) + 1
    loop_cnt = new_state["user_loop_count"]

    # 2) User already satisfied -> finish immediately.
    if str(state.get("user_satisfaction_intention", "0")).strip() == "1":
        message_dict["type"] = "节点"
        message_dict["message"]["desc"] = "最优方案产出"
        # NOTE(review): filtered_state is built but never used in this branch;
        # looks like a leftover of an earlier logging format -- confirm.
        filtered_state = {
            key: value for key, value in new_state.items()
            if key not in ['field_availability','messages','require_json']
        }
        message_dict["message"]["data"] = "*当前已是最优方案。"
        user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))
        new_state["next"] = "generate_report"
        return new_state

    message_dict["type"] = "节点"
    message_dict["message"]["desc"] = "最优方案产出"
    # Log a state snapshot without the bulky/internal keys.
    filtered_state = {
        key: value for key, value in state.items()
        if key not in ['field_availability','messages','require_json']
    }
    # if new_state.get("field_availability") in (None, {}):
    #     filtered_state = {
    #         key: value for key, value in state.items()
    #         if key not in ['field_availability']
    #     }
    # else:
    #     filtered_state = {
    #         key: value for key, value in state.items()
    #         if key not in ['messages']
    #     }
    message_dict["message"]["data"] = "*最优方案产出：" + str(filtered_state)
    #message_dict["message"]["data"] = json.dumps(filtered_state,  cls=CustomEncoder,ensure_ascii=False)
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

    # 4) Build original_text (natural-language query) from the requirement.
    original_text, field_number = construct_query_sentence(new_state)
    message_dict["type"]="操作"
    message_dict["message"]["desc"]="记忆加载，获取用户历史所有输入"

    # NOTE(review): data_to_dump is assigned here (and twice more below) but
    # never logged; presumably an older logging payload -- confirm before removing.
    data_to_dump = {
        'data_struct': str(original_text)
    }
    message_dict["message"]["data"]="*获取用户历史全量数据："+str(new_state['user_require'])
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

    print(f"[field_availability_check_node] original_text: {original_text}")

    # 5) Past the attempt threshold, optionally rewrite the sentence to improve
    #    matching (the LLM rewrite is currently disabled -- text passes through).
    if loop_cnt >= _MAX_ATTEMPTS and original_text:
        try:
            original_text = original_text
            # original_text = reformat_sentence_with_llm(original_text)
        except Exception as e:
            # A failed cleanup must not break the main flow.
            print(f"[WARN] reformat_sentence_with_llm failed: {e}")

    # 6) Run the field-availability check against the knowledge base.
    result_obj: Optional[Dict[str, Any]] = None
    message_dict["type"]="操作"
    message_dict["message"]["desc"]="开始进行知识库检索"

    data_to_dump = {
        'data_struct': f"需要知识库检索的需求是：{state.get('require_json')}"
    }
    message_dict["message"]["data"]="*通过用户历史所有输入，进行字段级检索"
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))
    try:
        # On Windows (no Milvus available) comment out the next two lines...
        answer_pack = field_availability_check(original_text,thread_id,state,field_number)
        result_obj = _safe_parse_field_availability(answer_pack)
        #result_obj = json.loads(test_result) # ...and enable this offline stand-in instead
    except Exception as e:
        print(f"[ERROR] field_availability_check failed: {e}")
        result_obj = None
    message_dict["type"]="操作"
    message_dict["message"]["desc"]="知识库检索完成"

    data_to_dump = {
        'data_struct': str(result_obj)
    }
    message_dict["message"]["data"]="*知识库检索已完成"
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))


    if result_obj is None:
        # Fallback when retrieval or parsing failed: tell the user politely
        # and still continue to the report.
        tip = (
            "根据您所有需求，尝试进行了字段级别的分析，但暂未获取到有效结果。"
            "可能是由于系统繁忙或网络异常导致的。"
        )
        new_state["messages"] = add_messages(state.get("messages", []), [AIMessage(content=tip)])
        new_state["next"] = "generate_report"
        return new_state

    # 7) Store the successful result on the state.
    new_state["field_availability"] = result_obj
    print("[field_availability_check_node] 已完成知识库字段匹配")

    # 7.1) Generate and log a "thinking" summary (best-effort; never fatal).
    try:
        think_pack = LLMClient(thread_id=thread_id).generate_think_sync(
            purpose="字段可用性匹配与结论",
            context={
                "original_text": original_text,
                "loop_count": loop_cnt,
                "match_result_preview": result_obj,
            },
            max_tokens=280,
        )
        think_text = think_pack.get("display") if isinstance(think_pack, dict) else str(think_pack)
        # Emit one "thinking" log entry in the unified message format.
        message_dict = json.loads(message_str)
        message_dict["type"] = "思考"
        message_dict["message"]["desc"] = think_text
        message_dict["message"]["data"] = "字段匹配摘要"
        user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))
    except Exception:
        # Preserve the log format even when summary generation fails.
        try:
            message_dict = json.loads(message_str)
            message_dict["type"] = "思考"
            message_dict["message"]["desc"] = "字段匹配摘要生成失败"
            message_dict["message"]["data"] = "字段匹配摘要"
            user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))
        except Exception:
            pass

    # 8) Compose the feedback text and the follow-up question.
    content_tex = "根据您所有需求，已完成字段级别的需求分析"
    try:
        dismatch_list = _extract_missing_fields(result_obj)
        content_tex += _generate_dismatch_question(result_obj, dismatch_list, loop_cnt)
    except Exception as e:
        print(f"[WARN] 生成追问内容失败：{e}")

    # 3) Attempt limit exceeded -> wrap up with the raw result and go to the report.
    if loop_cnt > _MAX_ATTEMPTS:
        new_state["messages"] = add_messages(state.get("messages", []), [AIMessage(content="根据您所有需求，已完成字段级别的需求分析:"+str(result_obj))])
        new_state["next"] = "generate_report"
        return new_state

    # 9) Hand off to the human confirmation node.
    message_dict["type"]="用户"
    message_dict["message"]["desc"]="用户输入提示"

    data_to_dump = {
            'data_struct':  content_tex  # prompt text shown to the user
    }
    message_dict["message"]["data"]=json.dumps(data_to_dump,ensure_ascii=False)
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

    new_state["messages"] = add_messages(state.get("messages", []), [AIMessage(content=content_tex)])
    new_state["next"] = "human_require_confirm"
    return new_state



# Temporarily calling the official DashScope API; to be swapped out later.
# def field_availability_check(state,require_json) -> Dict[str, Any]:
#     result = {}
#     return result


# SECURITY(review): a live-looking API key is hard-coded and unconditionally
# overwrites any key already present in the environment.  Rotate this key and
# load it from configuration/env instead of committing it to source.
os.environ['DASHSCOPE_API_KEY'] = 'sk-f9e011a9e1d54cddbcdf2c192fbdf55b'
dashscope.api_key = os.getenv("DASHSCOPE_API_KEY")

# Module-level OpenAI-compatible DashScope client used by the embedding helper.
_llm_client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
)

#  向量化接口
def _emd_text(user_input: str) -> List[float]:
    """Embed *user_input* with DashScope text-embedding-v4.

    Returns the 1024-dimensional float embedding vector.
    """
    response = _llm_client.embeddings.create(
        model="text-embedding-v4",
        input=user_input,
        dimensions=1024,
        encoding_format="float"
    )
    return response.data[0].embedding

# rerank接口
def _dashscope_rerank(query: str, docs: List[str], top_n: int = None, return_documents: bool = True):
    """Rerank *docs* against *query* with DashScope gte-rerank-v2.

    Args:
        query: the query text.
        docs: candidate documents.
        top_n: how many results to request; defaults to len(docs).
        return_documents: whether the API should echo the document bodies.

    Returns:
        The API's result entries sorted by relevance_score, descending.

    Raises:
        RuntimeError: when the API call does not return HTTP 200.
    """
    requested = top_n or len(docs)
    response = dashscope.TextReRank.call(
        model="gte-rerank-v2",
        query=query,
        documents=docs,
        top_n=requested,
        return_documents=return_documents
    )
    if response.status_code != HTTPStatus.OK:
        raise RuntimeError(f"Rerank failed: {response}")
    entries = response.output.get("results", [])
    # Most relevant first.
    return sorted(entries, key=lambda e: e.get("relevance_score", 0.0), reverse=True)

def field_availability_check(query: str, thread_id,state, field_number) -> Dict[str, Any]:
    """
    Field-availability RAG pipeline:

    1) Embed *query* with text-embedding-v4.
    2) Recall the top RANK_TOPK * field_number texts from the Milvus
       "demo_collection" (COSINE metric).
    3) Rerank with gte-rerank-v2, keeping RERANK_TOPK * field_number texts
       as the <context>.
    4) Assemble SYSTEM_PROMPT / USER_PROMPT.
    5) Generate the final answer with qwen-plus.

    Args:
        query: natural-language query sentence describing the requirement.
        thread_id: session thread id (used for logging only).
        state: session state; require_json is read best-effort for a field count.
        field_number: number of requirement fields; scales recall/rerank sizes.

    Returns:
        dict: {"answer": raw LLM text, "retrieved": (text, distance) pairs,
        "reranked_texts": the reranked context texts}.
    """
    # 1) Milvus initialization.
    # NOTE(review): connection URI and token are hard-coded; move to config.
    client = MilvusClient(uri="http://172.20.10.12:19530", token="username:password")
    #client = MilvusClient(uri="/Users/shuaichen/Downloads/ETL_agent-master-5/milvus_demo.db")

    # client = MilvusClient(uri="http://172.20.10.12:19530", token="username:password")
    #client = MilvusClient(uri="/Users/anleike/Desktop/milvus_demo.db")


    # client = MilvusClient(uri="http://localhost:19530", token="username:password")

    collection_name = "demo_collection"
    if not client.has_collection(collection_name=collection_name):
        client.create_collection(collection_name=collection_name, dimension=1024)

    # One-off ingestion code, kept for reference:
    # text_lines = load_split_file('./新字段描述全量知识库v2.xlsx')
    #
    #
    # from tqdm import tqdm
    #
    # data = []
    #
    # for i, line in enumerate(tqdm(text_lines, desc="Creating embeddings")):
    #     data.append({"id": i, "vector": emd_text(line), "text": line})
    #
    # res = client.insert(collection_name=collection_name, data=data)
    # print(res)

    # 2) Vector recall (RANK_TOPK per field, COSINE).
    # NOTE(review): file_len is computed but never used -- field_number (passed
    # by the caller) is what actually scales the recall size; confirm intent.
    file_len=1
    try:
        file_len= len(json.loads(state.get("require_json"))['parsed_require']['fields_info'])
    except Exception as e:
        file_len=1
    embedding = _emd_text(query)
    search_res = client.search(
        collection_name=collection_name,
        data=[embedding],
        limit=RANK_TOPK*field_number,
        search_params={"metric_type": "COSINE", "params": {}},
        output_fields=["text"],
    )

    retrieved_lines_with_distances = [
        (hit["entity"]["text"], hit["distance"]) for hit in search_res[0]
    ]

    # NOTE(review): "score" = 1 - distance assumes `distance` behaves as a
    # distance (smaller = closer); with metric_type=COSINE Milvus reports a
    # similarity -- confirm which convention applies here.
    retrieved_results = [
        {"text": text, "distance": float(dist), "score": 1 - float(dist)}
        for text, dist in retrieved_lines_with_distances
    ]


    # Build short previews for the log entry.
    processed_results = []
    for item in retrieved_results:  # each item is a dict with "text"/"distance"/"score"
        text, score = item['text'], item['score']
        preview = text[:20] + "..."
        processed_results.append({"preview": preview, "score": score})
    message_dict = json.loads(message_str)
    message_dict["type"] = "操作"
    message_dict["message"]["desc"] = f"初次排序后结果&共排序*{RANK_TOPK*field_number}*个知识元素"
    # NOTE(review): data_to_dump is assigned but never logged (here and below).
    data_to_dump = {
            'data_struct':  str(processed_results)
    }
    #message_dict["message"]["data"]=json.dumps(data_to_dump,ensure_ascii=False)
    message_dict["message"]["data"]="*排序后结果："+str(processed_results)
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))


    # 3) Rerank (gte-rerank-v2), keep RERANK_TOPK per field.
    candidates = [t for (t, _) in retrieved_lines_with_distances]
    reranked = _dashscope_rerank(query, candidates, top_n=RERANK_TOPK * field_number, return_documents=True)

    # Pull document.text (or .content) out of each rerank entry.
    reranked_texts = []
    log_texts = []
    for item in reranked:
        doc = item.get("document", {})
        text_val = doc.get("text", doc.get("content", ""))
        score_val = item.get("relevance_score") or item.get("score")
        if text_val:
            reranked_texts.append(text_val)
            log_texts.append((text_val[:20], score_val))

    context = "\n".join(reranked_texts)

    message_dict["type"] = "操作"
    message_dict["message"]["desc"] = f"精细排序后结果&共排序*{RERANK_TOPK * field_number}*个知识元素"
    data_to_dump = {
            'data_struct':  str(log_texts)
    }
    # message_dict["message"]["data"]=json.dumps(data_to_dump,ensure_ascii=False)
    message_dict["message"]["data"]=""
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

    # 4) Prompts.

    SYSTEM_PROMPT = f"""
        你是一个智能需求分析助手。当用户提出问题时，你会从知识库/数据库（RAG 检索结果）中获得相关的表和字段信息。你的任务是根据检索结果和用户需求，输出最终的查询建议，并将每个需求字段的匹配情况结构化输出。

        请严格遵循以下规则：

        1. **优先单张表**
           - 如果单张表中的字段能直接满足用户需求，直接返回该表名称和字段。
           - 示例：应查询【用户表】中的字段。

        2. **单张表 + 轻度加工**
           - 如果单张表字段基本满足，但需要加工（如计算月消费金额、在网时长大于5年），返回该表并说明加工方式。
           - 示例：应查询【支付表】中的字段，并进行月度汇总统计。

        3. **多表联合**
           - 如果单表无法满足，而需要多表，则返回需要联合的表，并说明如何关联。
           - 示例：应查询【物联网用户表】与【集团客户表】，并通过集团客户标识进行关联。

        4. **结果分类**
           - 完全满足：字段均可直接匹配到表。
           - 部分满足：部分字段匹配成功，部分需要加工或推荐替代表。
           - 无法满足：无可用字段，明确说明原因。

        5. **注意事项**
           - 严禁凭空编造表或字段，只能基于 RAG 检索结果回答。
           - 输出必须为合法 JSON，不得包含自然语言或解释。
           - 输出字段级别的详细匹配过程，包括候选字段、最终选择、是否加工。

        ### 输出格式（必须遵循）
        输出一个 JSON 对象，结构如下：

        {{
            "summary": "整体总结，例如分析了多少字段，多少直接获取，多少加工，多少缺失",
            "fields_info": [
                {{
                    "original_field": "用户需求的原始字段",
                    "status": "1 或 0，1=匹配到，0=未匹配",
                    "simple_match_top3": ["候选字段1", "候选字段2", "候选字段3"],
                    "final_match": "最终使用的字段名，若没有则空字符串",
                    "match_status": "直接使用 / 加工后使用 / 待采集后使用",
                    "final_match_reason": "解释为什么选择该字段或无法匹配的原因"
                }}
            ]
        }}

        ### Few-shot 示例

        - 用户：请帮我查一下用户的年龄和月消费金额。
        - RAG返回：用户表（包含 age 字段）、支付表（包含 monthly_sum 字段）。
        - 模型输出：
        {{
            "summary": "",
            "fields_info": [
                {{
                    "original_field": "用户年龄",
                    "status": "1",
                    "simple_match_top3": ["用户表-age", "会员表-birth_year", "订单表-user_birthdate"],
                    "final_match": "用户表-age",
                    "match_status": "直接使用",
                    "final_match_reason": "字段名称匹配度最高且数据完整度达98%，类型正确，可直接使用。"
                }},
                {{
                    "original_field": "月消费金额",
                    "status": "1",
                    "simple_match_top3": ["订单表-total_amount", "支付表-monthly_sum", "用户表-avg_spend"],
                    "final_match": "支付表-monthly_sum",
                    "match_status": "加工后使用",
                    "final_match_reason": "该字段需按用户ID分组汇总后使用，并过滤异常数据，最终作为月消费金额。"
                }}
            ]
        }}

        - 用户：请帮我查一下最近登录设备和最近登录人ID。
        - RAG返回：登录表（device_type 字段）、用户表（last_device 字段，已废弃）。
        - 模型输出：
        {{
            "summary": "分析了2个字段，其中0个直接获取，0个加工获取，2个未找到需要采集",
            "fields_info": [
                {{
                    "original_field": "最近登录设备",
                    "status": "0",
                    "simple_match_top3": ["登录表-device_type", "用户表-last_device", "设备表-model"],
                    "final_match": "",
                    "match_status": "待采集后使用",
                    "final_match_reason": "候选字段数据质量低或缺乏关联信息，无法满足需求，需要新增设备日志采集。"
                }},
                {{
                    "original_field": "最近登录人ID",
                    "status": "0",
                    "simple_match_top3": ["登录表-user_id", "操作表-actor_id", "用户表-last_login_id"],
                    "final_match": "",
                    "match_status": "待采集后使用",
                    "final_match_reason": "现有字段缺失或无效，无法提供完整的最近登录人信息，需要采集新数据源。"
                }}
            ]
        }}
        """

    USER_PROMPT = f"""
 请使用以下包含在<context>标签中的信息，来回答包含在<question>标签中的问题。
<context>
{context}
</context>
<question>
{query}
</question>
"""

    # 5) Generate with qwen-plus via the OpenAI-compatible endpoint.
    llm = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    completion = llm.chat.completions.create(
        model="qwen-plus",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": USER_PROMPT},
        ],
    )
    answer = completion.choices[0].message.content
    print(answer)

    return {
        "answer": answer,
        "retrieved": retrieved_lines_with_distances,
        "reranked_texts": reranked_texts
    }


def reformat_sentence_with_llm(sentence: str, model: str = "qwen-plus") -> str:
    """Semantics-preserving "formatting rewrite" of *sentence* via DashScope.

    Calls the OpenAI-compatible DashScope endpoint, prints the model answer
    and returns it.

    Requires the DASHSCOPE_API_KEY environment variable.

    Raises:
        RuntimeError: when DASHSCOPE_API_KEY is not set.
    """
    sys_p = """
你是一个语义格式化助手。  
输入是一段自然语言请求，可能包含字段描述或需求说明。  
你的任务是根据输入内容进行语义保持的格式化：  
1. 保留原文的语义和结构，不要删减。  
2. 对指定字段进行替换或改写，但保持整体句式流畅。  
3. 输出只保留格式化后的完整句子。  

示例：  
输入：我想要研究手机号用户，需要以下字段：手机号客户、夜间常驻基站, 将手机号客户替换为用户流量  
输出：我想要研究手机号用户，需要以下字段：用户流量、夜间常驻基站
"""

    api_key = os.getenv("DASHSCOPE_API_KEY")
    if not api_key:
        raise RuntimeError("缺少环境变量 DASHSCOPE_API_KEY")

    client = OpenAI(
        api_key=api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    # Wrap the sentence in <sentence> tags for the user turn.
    user_prompt = f"""以下是需要处理的语句：
<sentence>
{sentence}
</sentence>"""

    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": sys_p},
            {"role": "user", "content": user_prompt},
        ],
        temperature=0.7,
    )

    answer = response.choices[0].message.content
    print(answer)
    return answer

def split_excel_into_blocks(file_path: str, split_column='last_column', selected_columns: List[str] = None) -> \
List[str]:
    """Split an Excel sheet into text blocks, handling merged cells.

    Rows that belong to the same merged region of *split_column* are grouped
    into one multi-line block; every other row becomes its own block.  Each
    row is rendered as "header: value | header: value | ...".

    Args:
        file_path: path of the Excel file to process.
        split_column: column used for splitting; the default 'last_column'
            means "the right-most column containing merged cells".
        selected_columns: header names to include; None means all columns.

    Returns:
        (List[str]): text blocks extracted from the sheet.

    Raises:
        FileProcessingError: when a name in *selected_columns* is not a header.
    """

    def find_last_merged_column(ws):
        """Return the right-most column index touched by any merged range (0 if none)."""
        last_merged_col = 0
        for merged_cell in ws.merged_cells.ranges:
            min_col, _, max_col, _ = merged_cell.bounds
            if max_col > last_merged_col:
                last_merged_col = max_col
        return last_merged_col

    def get_merged_regions_for_column(ws, column):
        """Return (min_row, max_row) pairs of merged regions confined to *column*."""
        regions = []
        for merged_range in ws.merged_cells.ranges:
            min_col, min_row, max_col, max_row = merged_range.bounds
            if min_col == column and max_col == column:
                regions.append((min_row, max_row))
        return regions

    def unmerge_and_fill(ws):
        """Unmerge every merged range and copy its top-left value into all of its cells."""
        merged_ranges = list(ws.merged_cells.ranges)
        for merged_cell in merged_ranges:
            min_col, min_row, max_col, max_row = merged_cell.bounds
            top_left_value = ws.cell(row=min_row, column=min_col).value
            ws.unmerge_cells(merged_cell.coord)
            for row in range(min_row, max_row + 1):
                for col in range(min_col, max_col + 1):
                    ws.cell(row=row, column=col).value = top_left_value

    wb = load_workbook(file_path)
    ws = wb.active
    data_start_row = 1
    blocks = []

    # Header row (row 1); empty headers fall back to "列<n>".
    headers = [ws.cell(row=1, column=col).value or f"列{col}" for col in range(1, ws.max_column + 1)]

    # Resolve the selected column names to 0-based indices.
    if selected_columns is not None:
        selected_indices = []
        for col_name in selected_columns:
            try:
                idx = headers.index(col_name)
                selected_indices.append(idx)
            except ValueError:
                raise FileProcessingError(f"列名 '{col_name}' 不存在于文件中")
    else:
        selected_indices = list(range(len(headers)))

    if split_column == 'last_column':
        split_column = find_last_merged_column(ws)

    # No merged cells at all: every data row becomes its own block.
    if split_column == 0:
        for row_num in range(data_start_row + 1, ws.max_row + 1):
            row_data = [ws.cell(row=row_num, column=col + 1).value for col in selected_indices]
            selected_headers = [headers[idx] for idx in selected_indices]
            block_str = " | ".join([f"{selected_headers[idx]}: {value if value is not None else 'nan'}"
                                    for idx, value in enumerate(row_data)])
            blocks.append(block_str)
        return blocks

    # Record the split column's merged regions before flattening the sheet.
    merged_regions = get_merged_regions_for_column(ws, split_column)
    unmerge_and_fill(ws)
    row_num = data_start_row + 1
    while row_num <= ws.max_row:
        in_merged = False
        for (min_row, max_row) in merged_regions:
            if min_row <= row_num <= max_row:
                # All rows of the merged region form one multi-line block.
                block_lines = []
                for r in range(min_row, max_row + 1):
                    row_data = [ws.cell(row=r, column=col + 1).value for col in selected_indices]
                    selected_headers = [headers[idx] for idx in selected_indices]
                    line = " | ".join([f"{selected_headers[idx]}: {value if value is not None else 'nan'}"
                                       for idx, value in enumerate(row_data)])
                    block_lines.append(line)
                blocks.append("\n".join(block_lines))
                row_num = max_row + 1
                in_merged = True
                break
        if not in_merged:
            row_data = [ws.cell(row=row_num, column=col + 1).value for col in selected_indices]
            selected_headers = [headers[idx] for idx in selected_indices]
            line = " | ".join([f"{selected_headers[idx]}: {value if value is not None else 'nan'}"
                               for idx, value in enumerate(row_data)])
            blocks.append(line)
            row_num += 1
    return blocks


def load_split_file(file_path: str, selected_columns: Optional[List[str]] = None) -> List[str]:
    """Load a file and split its content into text blocks by file type.

    Args:
        file_path: path of the file to process.
        selected_columns: column names to vectorize (Excel only); None means all.

    Returns:
        List[str]: text blocks extracted from the file.

    Raises:
        FileProcessingError: for unsupported file types (including .pdf,
            which is recognized but not yet implemented).
    """
    if file_path.endswith('.docx'):
        doc = Document(file_path)
        content = [para.text for para in doc.paragraphs]
    elif file_path.endswith('.xlsx'):
        content = split_excel_into_blocks(file_path, selected_columns=selected_columns)
    elif file_path.endswith('.pdf'):
        # Fix: was an f-string with no placeholders (lint F541).
        raise FileProcessingError("Unsupported file type")
    else:
        raise FileProcessingError(f"Unsupported file type: {file_path}")
    return content


class RAGError(Exception):
    """Base class for RAG-related errors."""
    pass

class FileProcessingError(RAGError):
    """Raised when a file cannot be processed (unsupported type or bad column)."""
    pass

def _extract_missing_fields(test_result: str) -> List[str]:
    """
    从测试结果中提取final_match为空的字段（original_field），组成列表返回

    参数:
        test_result: 包含字段信息的JSON字符串

    返回:
        缺失字段的列表，元素为original_field的值
    """
    # 解析JSON字符串
    result_data = test_result

    missing_fields = []

    # 遍历所有字段信息
    for field_info in result_data.get('fields_info', []):
        # 检查final_match是否为空
        if not field_info.get('final_match', '').strip():
            original_field = field_info.get('original_field', '')
            if original_field:  # 确保字段名不为空
                missing_fields.append(original_field)

    return missing_fields

def _generate_dismatch_question(result: str, missing_fields: List[str],loop_cnt:int) -> str:
    """生成未查询到的字段的问题"""
    #生成result
    summary=result.get('summary')
    if not missing_fields:
        return f"。您的字段均被检索到,当前结果为:\n{summary}\n请问您是否满意？"

    #if loop_cnt <= _MAX_ATTEMPTS:
    question = f"。当前结果为：\n{summary}\n以下字段目前数据仓库中暂不具备，请问是否需要修改需求，删改以下字段：\n\n"
    for i, field in enumerate(missing_fields, 1):
        question += f"{i}. {field}\n"
    #else:
    #     question=""

    return question
