import os
import hashlib
from typing import Iterable, Optional, List, Dict, Any

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from elasticsearch_dsl import Document, Text, Keyword, Integer, Object, Date, connections, Index

from langchain_rag.rag.document_loading import load_document
from langchain_rag.rag.tongyi_text_vectorization import initialize_tongyi_embeddings
from elasticsearch_dsl import Search
from langchain_community.vectorstores import Chroma
from openai import OpenAI


class RAGDoc(Document):
    """Elasticsearch DSL document mapping for RAG text chunks.

    Defines the index schema used by ``document_data_es`` (indexing) and
    ``chuli_es`` (BM25 retrieval).
    """

    # Full-text chunk body; standard analyzer enables BM25 keyword search.
    content = Text(analyzer="standard")
    source = Keyword()  # file path or source id
    # Page number within the source document, when applicable.
    page = Integer()
    # Arbitrary per-chunk metadata; enabled=True means sub-fields are indexed.
    metadata = Object(enabled=True)
    # Indexing timestamp field (not populated by document_data_es in this file).
    created_at = Date()

    class Index:
        # Default index name; overridable via RAGDoc.init(index=...).
        name = "langchain_rag_docs"


def _ensure_index(index_name: str) -> None:
    """Create *index_name* with the RAGDoc mapping unless it already exists."""
    if Index(index_name).exists():
        return
    # Bind the RAGDoc mapping to the requested (possibly non-default) index name.
    RAGDoc.init(index=index_name)


def _connect_es(hosts: Optional[List[str]] = None) -> Elasticsearch:
    """Register a global elasticsearch-dsl connection and return a raw ES client.

    Args:
        hosts: Explicit list of Elasticsearch host URLs. When omitted, the
            comma-separated ``ES_HOSTS`` env var is used (default
            ``http://localhost:9200``).

    Returns:
        An ``Elasticsearch`` client bound to the resolved hosts.
    """
    if hosts:
        hosts_cfg = hosts
    else:
        # FIX: strip whitespace and drop empty entries so values like
        # "http://a:9200, http://b:9200" or trailing commas still parse
        # into valid host URLs.
        raw = os.getenv("ES_HOSTS", "http://localhost:9200")
        hosts_cfg = [h.strip() for h in raw.split(",") if h.strip()]
    # Also register the connection globally for elasticsearch-dsl (Search, Index).
    connections.create_connection(hosts=hosts_cfg)
    return Elasticsearch(hosts_cfg)


def document_data_es(
    documents: Iterable[Any],
    index_name: str = "langchain_rag_docs",
    hosts: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Bulk-index LangChain-style documents into Elasticsearch.

    Each document is expected to expose ``page_content`` and ``metadata``
    attributes (as LangChain ``Document`` objects do). Documents with empty
    content are skipped.

    Args:
        documents: Iterable of objects with ``page_content`` / ``metadata``.
        index_name: Target ES index; created with the RAGDoc mapping if missing.
        hosts: Optional explicit ES hosts; defaults to env/localhost.

    Returns:
        Dict with ``indexed`` (count of successfully indexed docs) and
        ``errors`` (per-item error info from the bulk helper).
    """
    es = _connect_es(hosts)
    _ensure_index(index_name)

    actions = []
    # Idiom fix: the loop index from enumerate() was never used.
    for doc in documents:
        content = getattr(doc, "page_content", None)
        meta = getattr(doc, "metadata", {}) or {}
        if not content:
            # Skip empty chunks — nothing useful to search over.
            continue

        actions.append({
            "_op_type": "index",
            "_index": index_name,
            "content": content,
            "source": meta.get("source"),
            "page": meta.get("page"),
            "metadata": meta,
        })

    if not actions:
        return {"indexed": 0, "errors": []}

    # raise_on_error=False: collect per-item failures instead of aborting the batch.
    success, errors = bulk(es, actions, raise_on_error=False)
    return {"indexed": success, "errors": errors}

def chuli(question: str):
    """Embed a single question into a vector with Tongyi text-embedding-v4.

    Args:
        question: The natural-language query to embed.

    Returns:
        The embedding vector produced by ``embed_query``.
    """
    # SECURITY: prefer the DASHSCOPE_API_KEY env var; the hard-coded key is
    # kept only as a backward-compatible fallback and should be rotated out.
    key = os.getenv("DASHSCOPE_API_KEY", "sk-6434b554122c4b049ceb805d703f695b")
    embeddings = initialize_tongyi_embeddings(key)
    return embeddings.embed_query(question)
def chuli_es(
    question: str,
    top_k: int = 5,
    hosts: Optional[List[str]] = None,
    index_name: str = "langchain_rag_docs",
) -> List[Dict[str, Any]]:
    """BM25 keyword retrieval for *question* via elasticsearch-dsl.

    Args:
        question: Query text.
        top_k: Maximum number of hits to return (at least 1 is requested).
        hosts: Optional explicit ES hosts.
        index_name: Index to search; created with the RAG mapping if missing.

    Returns:
        A list of dicts with id, score, content, source, page and metadata.
    """
    client = _connect_es(hosts)
    _ensure_index(index_name)

    # Match only text fields; lenient=True prevents errors when the query
    # cannot be parsed against numeric fields (e.g. longs under metadata.*).
    search = Search(using=client, index=index_name).query(
        "multi_match",
        query=question,
        fields=["content^3", "source"],
        type="best_fields",
        operator="or",
        fuzziness="AUTO",
        lenient=True
    )
    response = search[:max(1, top_k)].execute()

    results: List[Dict[str, Any]] = []
    for hit in response:
        body = hit.to_dict()
        results.append({
            "id": hit.meta.id,
            "score": float(hit.meta.score) if hasattr(hit.meta, "score") else None,
            "content": body.get("content"),
            "source": body.get("source"),
            "page": body.get("page"),
            "metadata": body.get("metadata"),
        })

    return results

def _fingerprint_record(text: Optional[str], source: Optional[str], page: Optional[int]) -> str:
    base = (text or "").strip()
    mix = f"{base}\u0001{source or ''}\u0001{page if page is not None else ''}"
    return hashlib.md5(mix.encode("utf-8")).hexdigest()


def quchong(
    question: str,
    es_top_k: int = 5,
    vec_top_k: int = 5,
    db_path: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fuse ES keyword hits with Chroma semantic hits; dedupe and rank.

    Hits are deduplicated by a content/source/page fingerprint. ES scores are
    normalized against the batch maximum; vector scores are used as-is.
    Results are sorted by semantic score first, ES score second (descending).
    """
    es_hits = chuli_es(question, top_k=es_top_k)
    print(f"🔍 ES 检索结果: {len(es_hits)}个")
    vec_hits = yuyijiansuo(question, top_k=vec_top_k, db_path=db_path)
    print(f"🔍 Chroma 语义检索结果: {len(vec_hits)}个")

    # Normalize ES scores relative to this batch's maximum.
    raw_scores = [h.get("score") for h in es_hits if h.get("score") is not None]
    es_max = max(raw_scores) if raw_scores else 0.0

    merged: Dict[str, Dict[str, Any]] = {}

    def _normalize(origin: str, score: Optional[float]) -> float:
        """Map a raw score to the fused scale for its origin."""
        if score is None:
            return 0.0
        if origin == "es":
            return float(score) / es_max if es_max else 0.0
        return float(score)

    def _absorb(record: Dict[str, Any], origin: str, score: Optional[float]) -> None:
        """Insert a hit or merge it into an existing fingerprint entry."""
        key = _fingerprint_record(record.get("content"), record.get("source"), record.get("page"))
        value = _normalize(origin, score)
        entry = merged.get(key)
        if entry is None:
            merged[key] = {
                "content": record.get("content"),
                "metadata": record.get("metadata"),
                "source": record.get("source"),
                "page": record.get("page"),
                "score_es": value if origin == "es" else 0.0,
                "score_vec": value if origin == "vec" else 0.0,
                "origins": [origin],
            }
            return
        if origin not in entry["origins"]:
            entry["origins"].append(origin)
        field = "score_es" if origin == "es" else "score_vec"
        entry[field] = max(entry.get(field, 0.0), value)

    for hit in es_hits:
        _absorb(hit, "es", hit.get("score"))
    for hit in vec_hits:
        _absorb(hit, "vec", hit.get("score"))

    # Rank: semantic score first, then ES score.
    return sorted(
        merged.values(),
        key=lambda r: (r.get("score_vec", 0.0), r.get("score_es", 0.0)),
        reverse=True,
    )
def yuyijiansuo(
    question: str,
    top_k: int = 5,
    db_path: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Semantic retrieval against a persisted Chroma vector DB.

    Args:
        question: Query text (embedded via Tongyi text-embedding-v4).
        top_k: Number of nearest chunks to return.
        db_path: Directory of the persisted Chroma DB. Defaults to
            ``./tongyi_chroma_db`` for backward compatibility.

    Returns:
        List of dicts with ``content`` and ``metadata``; ``score`` is included
        when the scored search API is available.
    """
    # 1) Build the query vector (Tongyi text-embedding-v4).
    query_vec = chuli(question)

    # 2) Open the persisted Chroma DB (built earlier via build_chroma_vector_db).
    # BUG FIX: db_path was accepted but ignored — honor it, keeping the old default.
    persist_dir = db_path or './tongyi_chroma_db'
    db = Chroma(embedding_function=None, persist_directory=persist_dir)

    # 3) Search with the raw vector (no embedding_function needed).
    try:
        # Prefer the scored API when this Chroma version provides it.
        results_with_scores = db.similarity_search_by_vector_with_relevance_scores(query_vec, k=top_k)  # type: ignore[attr-defined]
        docs: List[Dict[str, Any]] = []
        for doc, score in results_with_scores:
            docs.append({
                "content": doc.page_content,
                "metadata": doc.metadata,
                "score": float(score) if score is not None else None,
            })
        return docs
    except Exception:
        # Best-effort fallback to the unscored API (deliberately broad: any
        # failure of the scored path degrades to score-less results).
        docs_only = db.similarity_search_by_vector(query_vec, k=top_k)
        return [{
            "content": d.page_content,
            "metadata": d.metadata,
        } for d in docs_only]


def _build_rag_context(search_text) -> str:
    """Format retrieved items (top 5) into a numbered context string.

    Non-list inputs are stringified as-is.
    """
    if not isinstance(search_text, list):
        return str(search_text)
    context_parts = []
    # Cap at 5 fragments to keep the prompt within a reasonable length.
    for i, item in enumerate(search_text[:5], start=1):
        content = (item.get("content") or "").strip()
        source = item.get("source")
        page = item.get("page")
        prefix = f"【片段{i}】"
        if source is not None:
            prefix += f" 来源: {source}"
        if page is not None:
            prefix += f" 页码: {page}"
        context_parts.append(f"{prefix}\n{content}")
    return "\n\n".join(context_parts)


def llm_response(question, search_text):
    """Assemble question + retrieved context into a prompt and query Qwen.

    Args:
        question: User question.
        search_text: Either fused retrieval results (list of dicts with
            content/source/page) or any value to stringify as context.

    Returns:
        The model's answer text (empty string if no choices returned).

    Raises:
        ValueError: If no API key can be resolved.
    """
    # 1) Build the (length-limited) context block.
    context = _build_rag_context(search_text)

    system_prompt = (
        "你是一个检索增强问答助手。请严格依据提供的参考内容回答用户问题。"
        "如果参考内容中没有答案，请明确说明找不到相关信息，不要编造。"
    )

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"参考内容如下：\n{context}\n\n请根据以上内容回答问题：{question}"},
    ]

    print(f"提示词:{messages}")

    # 2) Call Qwen via the OpenAI-compatible endpoint.
    # SECURITY FIX: prefer the env var; the hard-coded key previously made the
    # `if not api_key` check dead code. Fallback kept for backward compatibility.
    api_key = os.getenv("DASHSCOPE_API_KEY", "sk-6434b554122c4b049ceb805d703f695b")
    if not api_key:
        raise ValueError("未找到通义千问 API Key，请设置 DASHSCOPE_API_KEY/DASH_SCOPE_API_KEY/TONGYI_API_KEY 环境变量")

    client = OpenAI(
        api_key=api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    completion = client.chat.completions.create(
        model="qwen-plus",
        messages=messages,
        temperature=0.3,  # low temperature for faithful, low-variance answers
    )

    answer = completion.choices[0].message.content if completion.choices else ""
    return answer


if __name__ == '__main__':
    # End-to-end demo: fuse ES + Chroma retrieval for a question, then ask the
    # LLM to answer from the retrieved context. The commented lines below are
    # earlier pipeline stages (indexing, embedding, raw retrieval) kept as
    # runnable examples.
    # documents = load_document("平安保险用户手册.pdf")
    # print(documents)
    # document_data_es(documents,hosts=["http://localhost:9200"])
    #question = "人为什么要睡觉"
    #print(chuli(question))
    question1 = "如何报销住院费用"
    #print(chuli_es(question1))
    #print(yuyijiansuo(question1))
    #print(quchong(question1))
    print(llm_response(question1,quchong(question1)))


