from typing import Dict, List

from django.db.models import QuerySet, Q


def fetch_jobs_via_orm(limit: int = 1000, city: str | None = None, tag: str | None = None) -> List[Dict[str, str]]:
    """
    Minimal version: fetch job fields via the ORM, using description as the
    responsibilities text and leaving requirements empty.

    Returns: [{id, title, company, city, tags, responsibilities, requirements}]
    """
    # Deferred import so the model is not loaded before Django is initialized.
    from home.models import Jobposting

    queryset = Jobposting.objects.select_related("company", "city").all()
    # OR combination: when both city and tag are present, match (city LIKE OR tag LIKE).
    if city and tag:
        queryset = queryset.filter(Q(city__name__icontains=city) | Q(label__icontains=tag))
    elif city:
        queryset = queryset.filter(city__name__icontains=city)
    elif tag:
        queryset = queryset.filter(label__icontains=tag)

    jobs: List[Dict[str, str]] = []
    for posting in queryset[:limit]:
        company = getattr(posting.company, "name", "") if hasattr(posting, "company") else ""
        location = getattr(posting.city, "name", "") if hasattr(posting, "city") else ""
        jobs.append(
            {
                "id": str(posting.id),
                "title": posting.title or "",
                "company": company,
                "city": location,
                "tags": posting.label or "",
                "responsibilities": (posting.description or "").strip(),
                "requirements": "",
            }
        )
    return jobs


def build_searchable_text_from_job(job) -> str:
    """
    Build the searchable_text field from a Jobposting instance (minimal version).

    Joins title, description, label, education and jobcate.name (when present)
    with newlines, skipping empty values after stripping whitespace.
    """
    raw_fields = (
        getattr(job, "title", "") or "",
        getattr(job, "description", "") or "",
        getattr(job, "label", "") or "",
        getattr(job, "education", "") or "",
        str(getattr(getattr(job, "jobcate", None), "name", "")),
    )
    return "\n".join(piece for piece in (f.strip() for f in raw_fields) if piece)


def fetch_jobs_with_searchable(limit: int = 1000, city: str | None = None, tag: str | None = None) -> List[Dict[str, str]]:
    """
    Return a batch of jobs including searchable_text (minimal version).

    Returns: [
      {id, title, company, city, tags, responsibilities, requirements, searchable_text}
    ]
    """
    # Deferred import so the model is not loaded before Django is initialized.
    from home.models import Jobposting
    qs = Jobposting.objects.select_related("company", "city", "jobcate").all()
    # OR combination: when both city and tag are present, match (city LIKE OR tag LIKE).
    # Q is already imported at module level; the redundant local re-import was removed.
    if city and tag:
        qs = qs.filter(Q(city__name__icontains=city) | Q(label__icontains=tag))
    elif city:
        qs = qs.filter(city__name__icontains=city)
    elif tag:
        qs = qs.filter(label__icontains=tag)
    qs = qs[:limit]

    rows: List[Dict[str, str]] = []
    for job in qs:
        company_name = getattr(job.company, "name", "") if hasattr(job, "company") else ""
        city_name = getattr(job.city, "name", "") if hasattr(job, "city") else ""
        rows.append({
            "id": str(job.id),
            "title": job.title or "",
            "company": company_name,
            "city": city_name,
            "tags": job.label or "",
            "responsibilities": (job.description or "").strip(),
            "requirements": "",
            "searchable_text": build_searchable_text_from_job(job),
        })
    return rows


def ensure_es_index(es_host: str, index_name: str) -> None:
    """
    Minimal version: create the index with a basic mapping covering the fields
    we write, unless it already exists.
    """
    from elasticsearch import Elasticsearch

    client = Elasticsearch(es_host)
    if client.indices.exists(index=index_name):
        return
    # Field name -> ES field type for everything this module may write.
    field_types = {
        "title": "text",
        "company": "keyword",
        "city": "keyword",
        "tags": "text",
        "education": "keyword",
        "job_type": "keyword",
        "working_years": "keyword",
        "jobcate": "keyword",
        "searchable_text": "text",
    }
    mappings = {"properties": {name: {"type": ftype} for name, ftype in field_types.items()}}
    client.indices.create(index=index_name, mappings=mappings)


def sync_jobs_to_es(es_host: str, index_name: str, limit: int = 1000, city: str | None = None, tag: str | None = None) -> int:
    """
    Write ORM-fetched jobs (with searchable_text) into Elasticsearch.

    Returns the number of documents written.
    """
    from elasticsearch import Elasticsearch

    ensure_es_index(es_host, index_name)
    client = Elasticsearch(es_host)
    jobs = fetch_jobs_with_searchable(limit=limit, city=city, tag=tag)
    # Structured fields (education/job_type/working_years/jobcate) could be
    # added later if needed; kept minimal here.
    doc_fields = ("id", "title", "company", "city", "tags", "searchable_text")
    for job in jobs:
        document = {field: job[field] for field in doc_fields}
        client.index(index=index_name, id=job["id"], document=document, refresh=False)
    client.indices.refresh(index=index_name)
    return len(jobs)


def embed_jobs_with_text_embedding(rows: List[Dict[str, str]]) -> List[Dict[str, object]]:
    """
    Generate job vectors with the project's text-embedding-v4 model.

    Args:
        rows: items from fetch_jobs_with_searchable; each needs id and
            searchable_text (falls back to title, then empty string).
    Returns:
        [{id, embedding}] where embedding is a List[float].
    Requires the DASHSCOPE_API_KEY environment variable.
    """
    from utils.use_file.text_embedding import TextEmbedding

    payload = [(row.get("searchable_text") or row.get("title") or "").strip() for row in rows]
    vectors = TextEmbedding().generate_embeddings(payload)
    return [{"id": row.get("id"), "embedding": vector} for row, vector in zip(rows, vectors)]


def sync_jobs_to_chroma(collection_name: str, rows: List[Dict[str, str]], vectors: List[Dict[str, object]]) -> int:
    """
    Write job vectors into Chroma.

    Args:
        collection_name: Chroma collection name.
        rows: must contain id, searchable_text and title/company/city/tags metadata.
        vectors: output of embed_jobs_with_text_embedding, [{id, embedding}].
    Returns:
        Number of records written.
    """
    import chromadb

    embeddings_by_id = {str(v["id"]): v["embedding"] for v in vectors if v.get("id") is not None}

    ids: List[str] = []
    documents: List[str] = []
    metadatas: List[Dict[str, str]] = []
    for row in rows:
        row_id = str(row.get("id"))
        # Skip rows that have no embedding.
        if embeddings_by_id.get(row_id) is None:
            continue
        ids.append(row_id)
        documents.append(row.get("searchable_text") or row.get("title") or "")
        metadatas.append({key: row.get(key, "") for key in ("title", "company", "city", "tags")})

    client = chromadb.PersistentClient(path="./chroma_db")
    collection = client.get_or_create_collection(collection_name)
    if ids:
        collection.upsert(
            ids=ids,
            documents=documents,
            embeddings=[embeddings_by_id[i] for i in ids],
            metadatas=metadatas,
        )
    return len(ids)


def search_jobs_in_es(es_host: str, index_name: str, query: str, size: int = 10) -> List[Dict[str, object]]:
    """
    Search jobs in Elasticsearch: multi_match over searchable_text/title/tags.

    Returns: [{..._source, _es_score}]
    """
    from elasticsearch import Elasticsearch

    client = Elasticsearch(es_host)
    # Truncate the query text to avoid exceeding maxClauseCount.
    trimmed = query if len(query) <= 500 else query[:500]
    match_query = {
        "multi_match": {
            "query": trimmed,
            "fields": ["searchable_text^3", "title^2", "tags"],
            "type": "best_fields",
            "fuzziness": "AUTO",
        }
    }
    response = client.search(index=index_name, query=match_query, size=size)

    results: List[Dict[str, object]] = []
    for hit in response.get("hits", {}).get("hits", []):
        source = hit.get("_source", {})
        source["_es_score"] = hit.get("_score", 0.0)
        results.append(source)
    return results


def chroma_query_jobs(collection_name: str, query_embedding: List[float], top_k: int = 10) -> List[Dict[str, object]]:
    """
    Vector search against Chroma.

    Returns: [{id, title, company, city, tags, _vec_score}]
    Note: uses PersistentClient('./chroma_db'); must match the path used at
    write time.
    """
    import chromadb

    client = chromadb.PersistentClient(path="./chroma_db")
    collection = client.get_or_create_collection(collection_name)
    result = collection.query(
        query_embeddings=[query_embedding],
        n_results=top_k,
        include=["metadatas", "distances"],
    )

    hits: List[Dict[str, object]] = []
    for hit_id, meta, distance in zip(
        result.get("ids", [[]])[0],
        result.get("metadatas", [[]])[0],
        result.get("distances", [[]])[0],
    ):
        meta = meta or {}
        hits.append({
            # Restore integer ids where possible; Chroma stores ids as strings.
            "id": int(hit_id) if str(hit_id).isdigit() else hit_id,
            "title": meta.get("title", ""),
            "company": meta.get("company", ""),
            "city": meta.get("city", ""),
            "tags": meta.get("tags", ""),
            "_vec_score": float(distance),
        })
    return hits


def _minmax_norm(values: List[float]) -> Dict[int, float]:
    if not values:
        return {}
    mn, mx = min(values), max(values)
    if mx - mn < 1e-9:
        return {i: 1.0 for i, _ in enumerate(values)}
    return {i: (v - mn) / (mx - mn) for i, v in enumerate(values)}


def fuse_es_chroma_results(
    es_results: List[Dict[str, object]],
    vec_results: List[Dict[str, object]],
    weight_es: float = 0.6,
    weight_vec: float = 0.4,
    top_k: int = 10,
) -> List[Dict[str, object]]:
    """
    Fuse ES and Chroma results: 0-1 normalization + weighted ranking + dedup.

    ES `_es_score` is a relevance score (higher is better), while Chroma
    `_vec_score` is a distance (lower is better), so distances are negated
    before normalization to put both on a higher-is-better scale. (The
    previous version normalized raw distances, ranking the FARTHEST vectors
    highest.)

    Returns the top-k merged documents, preferring ES source fields when an
    id appears in both result sets.
    """
    es_by_id: Dict[str, Dict[str, object]] = {}
    vec_by_id: Dict[str, Dict[str, object]] = {}
    for r in es_results:
        rid = r.get("id") or r.get("_id") or r.get("text_block_id")  # tolerate differing shapes
        if rid is None:
            continue
        es_by_id[str(rid)] = r
    for r in vec_results:
        rid = r.get("id")
        if rid is None:
            continue
        vec_by_id[str(rid)] = r

    all_ids = list(set(es_by_id) | set(vec_by_id))

    es_scores = [r.get("_es_score", 0.0) for r in es_results]
    # Negate distances: smaller distance -> larger similarity score.
    vec_scores = [-float(r.get("_vec_score", 0.0)) for r in vec_results]
    es_norm = _minmax_norm(es_scores)
    vec_norm = _minmax_norm(vec_scores)

    # id -> positional index, to look up each id's normalized score.
    es_idx_map: Dict[str, int] = {}
    for i, r in enumerate(es_results):
        rid = r.get("id") or r.get("_id") or r.get("text_block_id")
        if rid is not None:
            es_idx_map[str(rid)] = i
    vec_idx_map = {str(r.get("id")): i for i, r in enumerate(vec_results) if r.get("id") is not None}

    # NOTE: `Tuple` was used here without an import; builtin generics (3.10+,
    # consistent with the `str | None` hints in this file) avoid that.
    merged: list[tuple[str, float]] = []
    for rid in all_ids:
        e_idx = es_idx_map.get(rid)
        v_idx = vec_idx_map.get(rid)
        e_score = es_norm.get(e_idx, 0.0) if e_idx is not None else 0.0
        v_score = vec_norm.get(v_idx, 0.0) if v_idx is not None else 0.0
        merged.append((rid, weight_es * e_score + weight_vec * v_score))

    merged.sort(key=lambda item: item[1], reverse=True)

    # Dedup by title+company, keeping the highest-scored entry.
    seen: set = set()
    deduped_ids: List[str] = []
    for rid, _score in merged:
        doc = es_by_id.get(rid) or vec_by_id.get(rid) or {"id": rid}
        key = f"{doc.get('title', '')}|{doc.get('company', '')}"
        if key not in seen:
            seen.add(key)
            deduped_ids.append(rid)
            if len(deduped_ids) >= top_k:
                break

    return [es_by_id.get(rid) or vec_by_id.get(rid) or {"id": rid} for rid in deduped_ids]


def hybrid_search_jobs(
    query: str,
    es_host: str = "http://localhost:9200",
    es_index: str = "job_posting",
    chroma_collection: str = "jobs",
    top_k: int = 10,
    weight_es: float = 0.6,
    weight_vec: float = 0.4,
) -> List[Dict[str, object]]:
    """
    Convenience hybrid search:
    - ES multi_match (searchable_text/title/tags)
    - query embedding + Chroma similarity search
    - 0-1 normalization + weighted fusion
    Returns the top-k results.
    """
    # 1) Keyword search in ES.
    keyword_hits = search_jobs_in_es(es_host=es_host, index_name=es_index, query=query, size=top_k)

    # 2) Embed the query and search Chroma.
    from utils.use_file.text_embedding import TextEmbedding
    query_vector = TextEmbedding().generate_embeddings([query])[0]
    vector_hits = chroma_query_jobs(collection_name=chroma_collection, query_embedding=query_vector, top_k=top_k)

    # 3) Fuse both result lists.
    return fuse_es_chroma_results(keyword_hits, vector_hits, weight_es=weight_es, weight_vec=weight_vec, top_k=top_k)

if __name__ == "__main__":
    # The CLI entry point was removed to avoid accidental runs in production.
    # Invoke the needed functions from Django AppConfig.ready() at startup instead.
    print("This module is intended to be used via Django AppConfig.ready().")


