from typing import Dict

import chromadb
from elasticsearch import Elasticsearch

from home.models import Jobposting
from rag.boss_file.text_embedding import TextEmbedding
from user.models import City


def fetch_job(limit: int = 200):
    """
    Fetch up to *limit* job postings from the DB as plain dicts.

    select_related('city', 'company') pulls the related rows in one query,
    avoiding the N+1 pattern noted in the original comment.
    """
    print("=====fetch_job=========")
    experience_labels = {
        "0": "经验不限",
        "1": "在校生",
        "2": "应届生",
        "3": "1-3年",
        "4": "3-5年",
        "5": "5年以上"
    }
    postings = Jobposting.objects.select_related('city', 'company').all()[:limit]
    # NOTE(review): "working" is looked up by job.education — this looks like it
    # should key off a working-years field instead; confirm against the model.
    return [
        {
            "id": str(posting.id),
            "title": posting.title,
            "company": posting.company.name,
            "city": posting.city.name,
            "description": posting.description,
            "label": posting.label,
            "education": posting.education,
            "working": experience_labels.get(posting.education)
        }
        for posting in postings
    ]


def build_searchable_text_from_job(job) -> str:
    """
    Build the searchable text (searchable_text) for one job record.

    Joins title / description / label / education / city / working experience
    into a single multi-line string used both for ES full-text indexing and
    for embedding generation.

    Args:
        job: a dict as produced by fetch_job() — NOT a Jobposting instance
            (the previous docstring claimed a model instance, which was wrong).

    Returns:
        str: the combined searchable text.
    """
    # BUGFIX: the docstring used to sit AFTER a print() call, so Python never
    # treated it as the function's docstring; it is now the first statement.
    print("=====build_searchable_text_from_job=========")
    title = job.get("title", "")
    desc = job.get("description", "")
    label = job.get("label", "")
    edu = job.get("education", "")
    city = job.get("city", "")
    # Accept either key spelling: fetch_job() emits "working", other parts of
    # the pipeline use "working_years".
    working = job.get("working", job.get("working_years", ""))
    job_str = f"""
    职位名称:{title},
    职位描述:{desc},
    职位标签:{label},
    职位学历:{edu},
    职位城市:{city},
    工作经验:{working}
    """

    return job_str


def job_es_index(index_name: str):
    """
    Create the ES index with its field mappings, if it does not exist yet.

    keyword fields are exact-match/filterable; text fields are analyzed for
    full-text search.
    """
    print("=====job_es_index=========")

    client = Elasticsearch("http://localhost:9200")
    if client.indices.exists(index=index_name):
        # Already created — creating again would raise, so bail out early.
        return

    keyword_fields = ("id", "title", "company", "city", "label", "education")
    text_fields = ("description", "job_str")

    properties = {field: {"type": "keyword"} for field in keyword_fields}
    properties.update({field: {"type": "text"} for field in text_fields})

    client.indices.create(index=index_name, body={"mappings": {"properties": properties}})


def job_es_add(index_name: str, job_list: list):
    """
    Bulk-index job documents into the given ES index.

    Args:
        index_name: target ES index name.
        job_list: job dicts as produced by fetch_job(); if empty/None the
            jobs are fetched from the database as a fallback.
    """
    print("=======job_es_add=============")

    es_client = Elasticsearch("http://localhost:9200")
    # BUGFIX: the caller's job_list used to be unconditionally overwritten by
    # fetch_job(); only fall back to the DB when no list was provided.
    if not job_list:
        job_list = fetch_job()
    for job in job_list:
        job_str = build_searchable_text_from_job(job)
        doc = {
            "id": job['id'],
            "title": job['title'],
            "company": job['company'],
            "city": job['city'],
            "description": job['description'],
            "label": job['label'],
            "education": job['education'],
            "job_str": job_str
        }
        # refresh=False per document; one explicit refresh after the loop.
        es_client.index(index=index_name, id=job['id'], document=doc, refresh=False)
    es_client.indices.refresh(index=index_name)


def job_with_text_embedding(job_list: list) -> list[Dict[str, object]]:
    """
    Convert each job dict into an embedding record.

    Args:
        job_list: job dicts as produced by fetch_job().

    Returns:
        list[dict]: one record per job of the form
            {"id": ..., "text_embedding": [...], "job_str": ..., <metadata>},
        where the metadata fields are kept for later use in ChromaDB.
    """
    # BUGFIX: the docstring used to sit after the print(), so it was not the
    # function's real docstring; it is now the first statement.
    print("=====job_with_text_embedding=========")
    # Create the embedding client once — it was previously re-instantiated on
    # every loop iteration.
    text_embedding = TextEmbedding()
    job_text_embedding_list = []
    for job in job_list:
        job_str = build_searchable_text_from_job(job)
        embedding = text_embedding.generate_embeddings(job_str)
        print("=====岗位向量=========", embedding)
        record = {
            "id": job['id'],
            # generate_embeddings returns a batch; take the first vector.
            "text_embedding": embedding[0],
            "job_str": job_str,
            # Keep every field needed later as ChromaDB metadata.
            "title": job.get('title', ''),
            "company": job.get('company', ''),
            "city": job.get('city', ''),
            "tags": job.get('tags', ''),
            "education": job.get('education', ''),
            "working_years": job.get('working_years', job.get('working', '')),
            "description": job.get('description', ''),
            "label": job.get('label', '')
        }
        print("=====岗位向量完整数据=========", record)
        job_text_embedding_list.append(record)

    return job_text_embedding_list


def sync_jobs_to_chroma(job_text_embedding_list: list[Dict[str, object]]):
    """
    Persist job embeddings (with metadata) into the local ChromaDB collection.

    Args:
        job_text_embedding_list: records from job_with_text_embedding();
            entries whose "text_embedding" is not a non-empty list are skipped.
    """
    print("=====sync_jobs_to_chroma=========")
    print("=====sync_jobs_to_chromajob_text_embedding_list=========", job_text_embedding_list)

    client = chromadb.PersistentClient(path="./chromadb_db")
    # embedding_function=None: we supply the precomputed vectors ourselves.
    col = client.get_or_create_collection(name="job_text_embedding_0927", embedding_function=None)

    embeddings = []
    texts = []
    ids = []
    metadatas = []

    for job in job_text_embedding_list:
        # Only keep records that carry a valid, non-empty embedding vector.
        if isinstance(job['text_embedding'], list) and len(job['text_embedding']) > 0:
            embeddings.append(job['text_embedding'])
            texts.append(job['job_str'])
            ids.append(job['id'])
            metadatas.append({
                "title": job.get('title', ''),
                "company": job.get('company', ''),
                "city": job.get('city', ''),
                "tags": job.get('tags', ''),
                "education": job.get('education', ''),
                "working_years": job.get('working_years', ''),
                "description": job.get('description', ''),
                "label": job.get('label', '')
            })

    # BUGFIX: skip the add() call entirely when nothing valid was collected,
    # and report the number actually stored (len(ids)) rather than the raw
    # input length, which over-counted skipped records.
    if ids:
        col.add(
            ids=ids,
            documents=texts,
            embeddings=embeddings,
            metadatas=metadatas
        )
    print(f"成功往向量数据库job_text_embedding_0927,存入{len(ids)}个向量")




def search_job_by_es(query: str, index_name: str = "job_list_0927", size: int = 20, 
                     city: str = None, company: str = None, sort_by: str = None) -> list:
    """
    Full-text job search via Elasticsearch.

    Args:
        query: search query string (truncated to 500 chars).
        index_name: ES index name, default "job_list_0927".
        size: number of results to return, default 20.
        city: optional exact-match filter on the "city" keyword field.
        company: optional exact-match filter on the "company" keyword field.
        sort_by: optional sort field; one of "browse_number",
            "collect_number", "deliver_number" (anything else is ignored).

    Returns:
        list: hit _source dicts, each augmented with "_es_score" and "_id".
        Returns [] when the index is missing or any error occurs.
    """
    print("=====search_job_by_es=========")
    
    try:
        es_client = Elasticsearch("http://localhost:9200")
        
        if not es_client.indices.exists(index=index_name):
            print(f"索引 {index_name} 不存在")
            return []
        
        # Keep the request small for very long query texts.
        query_short = query[:500]
        
        match_clause = {
            "multi_match": {
                "query": query_short,
                "fields": ["job_str^3", "title^2", "label"],
                "type": "best_fields",
                "fuzziness": "AUTO"
            }
        }
        
        # BUGFIX: city / company / sort_by were documented parameters but were
        # previously ignored entirely. Apply them as filters / sort here.
        filter_clauses = []
        if city:
            filter_clauses.append({"term": {"city": city}})
        if company:
            filter_clauses.append({"term": {"company": company}})
        
        if filter_clauses:
            es_query = {"bool": {"must": match_clause, "filter": filter_clauses}}
        else:
            es_query = match_clause
        
        body = {
            "query": es_query,
            "size": size,
            "_source": True
        }
        
        # Only the documented sort fields are honoured; unmapped_type keeps the
        # request from failing if the field is absent from the index mapping.
        if sort_by in ("browse_number", "collect_number", "deliver_number"):
            body["sort"] = [{sort_by: {"order": "desc", "unmapped_type": "long"}}]
        
        response = es_client.search(index=index_name, body=body)
        
        hits = response.get("hits", {}).get("hits", [])
        results = []
        
        for hit in hits:
            result = hit.get("_source", {})
            # _score is None when a sort is applied; coerce to 0.0 so the
            # downstream score normalization never sees None.
            result["_es_score"] = hit.get("_score") or 0.0
            result["_id"] = hit.get("_id")
            results.append(result)
        
        print(f"ES搜索完成，找到 {len(results)} 个结果")
        return results
        
    except Exception as e:
        print(f"ES搜索出错: {str(e)}")
        return []







def test_chroma_search():
    """
    Smoke-test the three ChromaDB retrieval paths: plain vector search,
    city-filtered vector search, and hybrid ES + vector search.
    """
    print("=====测试ChromaDB检索功能=====")

    query = "Python开发工程师"

    # 1) plain vector search
    print(f"\n1. ChromaDB向量检索: '{query}'")
    vector_hits = search_job_by_chroma(query, top_k=5)
    if not vector_hits:
        print("未找到相关岗位")
    else:
        print(f"找到 {len(vector_hits)} 个相关岗位:")
        for rank, hit in enumerate(vector_hits, 1):
            score = hit.get('_similarity', 0)
            print(f"  {rank}. {hit.get('title', 'N/A')} - {hit.get('company', 'N/A')} (相似度: {score:.3f})")

    # 2) vector search restricted to Beijing
    print(f"\n2. ChromaDB过滤检索: '{query}' + 北京")
    filtered_hits = search_job_by_chroma_with_filters(query, city="北京", top_k=3)
    if not filtered_hits:
        print("未找到北京相关岗位")
    else:
        print(f"找到 {len(filtered_hits)} 个北京相关岗位:")
        for rank, hit in enumerate(filtered_hits, 1):
            print(f"  {rank}. {hit.get('title', 'N/A')} - {hit.get('company', 'N/A')} - {hit.get('city', 'N/A')}")

    # 3) hybrid ES + vector retrieval
    print(f"\n3. 混合检索: '{query}'")
    hybrid_hits = hybrid_search_jobs(query, top_k=5)
    if not hybrid_hits:
        print("未找到相关岗位")
    else:
        print(f"找到 {len(hybrid_hits)} 个混合检索结果:")
        for rank, hit in enumerate(hybrid_hits, 1):
            fused = hit.get('_fused_score', 0)
            print(f"  {rank}. {hit.get('title', 'N/A')} - {hit.get('company', 'N/A')} (融合分数: {fused:.3f})")


# NOTE(review): this __main__ guard executes BEFORE search_job_by_chroma /
# search_job_by_chroma_with_filters / hybrid_search_jobs are defined further
# down the file, so test_chroma_search() raises NameError at runtime (only
# partially masked where callees catch Exception). Move this block to the
# very end of the file.
if __name__ == '__main__':
    # Smoke-test data fetching.
    job_list = fetch_job(limit=10)
    print(f"获取到 {len(job_list)} 个岗位数据")
    
    # Smoke-test the ChromaDB retrieval paths.
    test_chroma_search()


def search_job_by_chroma(query_text: str, collection_name: str = "job_list_0927".replace("job_list_0927", "job_text_embedding_0927"), 
                         top_k: int = 20, where_filter: dict = None) -> list:
    """
    Vector search for jobs via ChromaDB.

    Args:
        query_text: query text to embed and search with.
        collection_name: ChromaDB collection name,
            default "job_text_embedding_0927".
        top_k: number of results to return, default 20.
        where_filter: optional metadata filter dict passed through to
            collection.query(where=...).

    Returns:
        list: dicts with id / title / company / city / tags / education /
        job_str plus "_vec_score" (raw distance) and "_similarity"
        (1 - distance). Returns [] on any error.
    """
    print("=====search_job_by_chroma=========")
    
    try:
        # NOTE: chromadb and TextEmbedding are the module-level imports; the
        # previous redundant function-local imports were removed.
        client = chromadb.PersistentClient(path="./chromadb_db")
        
        try:
            collection = client.get_collection(name=collection_name)
        except Exception:
            print(f"集合 {collection_name} 不存在")
            return []
        
        # Embed the query (generate_embeddings takes a batch; take vector 0).
        text_embedding = TextEmbedding()
        query_embedding = text_embedding.generate_embeddings([query_text])[0]
        
        query_params = {
            "query_embeddings": [query_embedding],
            "n_results": top_k,
            "include": ["metadatas", "documents", "distances"]
        }
        if where_filter:
            query_params["where"] = where_filter
        
        results = collection.query(**query_params)
        
        job_results = []
        print(f"ChromaDB查询结果: {type(results)}")
        
        if results and isinstance(results, dict):
            print(f"结果键: {list(results.keys())}")
            
            # query() returns per-query nested lists; we sent one query, so
            # take element [0] of each result field.
            ids = results.get("ids", [])
            if ids and len(ids) > 0 and ids[0]:
                ids = ids[0]
                print(f"找到 {len(ids)} 个ID")
                
                metadatas = results.get("metadatas", [[]])
                documents = results.get("documents", [[]])
                distances = results.get("distances", [[]])
                
                if len(metadatas) > 0:
                    metadatas = metadatas[0] if isinstance(metadatas[0], list) else metadatas
                if len(documents) > 0:
                    documents = documents[0] if isinstance(documents[0], list) else documents
                if len(distances) > 0:
                    distances = distances[0] if isinstance(distances[0], list) else distances
                
                print(f"元数据数量: {len(metadatas)}")
                print(f"文档数量: {len(documents)}")
                print(f"距离数量: {len(distances)}")
                
                for i, job_id in enumerate(ids):
                    try:
                        # Defensive indexing: Chroma's parallel lists can be
                        # shorter than ids in degenerate cases.
                        metadata = metadatas[i] if i < len(metadatas) and metadatas[i] else {}
                        document = documents[i] if i < len(documents) else ""
                        distance = distances[i] if i < len(distances) else 0.0
                        
                        # metadata always defaults to {}, so plain .get() is
                        # safe (the old "if metadata else" ternaries were
                        # redundant).
                        job_results.append({
                            "id": job_id,
                            "title": metadata.get("title", ""),
                            "company": metadata.get("company", ""),
                            "city": metadata.get("city", ""),
                            "tags": metadata.get("tags", ""),
                            "education": metadata.get("education", ""),
                            "job_str": document,
                            "_vec_score": float(distance),
                            "_similarity": 1.0 - float(distance)  # similarity score
                        })
                    except Exception as e:
                        print(f"处理第{i}个结果时出错: {str(e)}")
                        continue
            else:
                print("ChromaDB查询结果中没有找到有效的IDs")
        else:
            print("ChromaDB查询结果为空或格式不正确")
        
        print(f"ChromaDB检索完成，找到 {len(job_results)} 个结果")
        return job_results
        
    except Exception as e:
        print(f"ChromaDB检索出错: {str(e)}")
        return []


def search_job_by_chroma_with_filters(query_text: str, collection_name: str = "job_text_embedding_0927", 
                                     top_k: int = 20, city: str = None, company: str = None) -> list:
    """
    Vector search for jobs via ChromaDB with optional metadata filters.

    Args:
        query_text: query text.
        collection_name: ChromaDB collection name.
        top_k: number of results to return.
        city: optional exact-match filter on the "city" metadata field.
        company: optional exact-match filter on the "company" metadata field.

    Returns:
        list: results from search_job_by_chroma().
    """
    print("=====search_job_by_chroma_with_filters=========")
    
    # BUGFIX: ChromaDB's `where` filter has no "$regex" operator (supported
    # operators are $eq/$ne/$gt/$gte/$lt/$lte/$in/$nin), so the previous
    # {"$regex": ...} company filter made every such query fail. Exact match
    # is used instead. Also, recent Chroma versions require multiple field
    # conditions to be combined with an explicit "$and".
    conditions = []
    if city:
        conditions.append({"city": city})
    if company:
        conditions.append({"company": company})
    
    if not conditions:
        where_filter = None
    elif len(conditions) == 1:
        where_filter = conditions[0]
    else:
        where_filter = {"$and": conditions}
    
    return search_job_by_chroma(query_text, collection_name, top_k, where_filter)


def hybrid_search_jobs(query_text: str, es_index: str = "job_list_0927", 
                      chroma_collection: str = "job_text_embedding_0927", 
                      top_k: int = 20, weight_es: float = 0.6, weight_chroma: float = 0.4) -> list:
    """
    Hybrid retrieval: fuse ES keyword search with ChromaDB vector search.

    Args:
        query_text: query text.
        es_index: ES index name.
        chroma_collection: ChromaDB collection name.
        top_k: number of fused results to return.
        weight_es: weight for the (normalized) ES score, default 0.6.
        weight_chroma: weight for the (normalized) vector score, default 0.4.

    Returns:
        list: job dicts sorted by "_fused_score" (descending), each carrying
        normalized "_es_score", "_chroma_score" and "_fused_score".
        Returns [] on any error.
    """
    print("=====hybrid_search_jobs=========")
    
    def _normalize(raw: dict) -> dict:
        """Min-max normalize {job_id: score}; all-zero/empty input maps to 0.0."""
        if not raw or max(raw.values()) <= 0:
            return {job_id: 0.0 for job_id in raw}
        lo = min(raw.values())
        span = max(raw.values()) - lo
        if span > 1e-9:
            return {job_id: (score - lo) / span for job_id, score in raw.items()}
        # All positive scores identical: positives map to 1.0, the rest to 0.0.
        return {job_id: (1.0 if score > 0 else 0.0) for job_id, score in raw.items()}
    
    try:
        # 1. ES keyword retrieval.
        es_results = search_job_by_es(query_text, es_index, top_k)
        
        # 2. ChromaDB vector retrieval (best effort — fall back to ES only).
        try:
            chroma_results = search_job_by_chroma(query_text, chroma_collection, top_k)
        except Exception as e:
            print(f"ChromaDB检索失败，仅使用ES结果: {str(e)}")
            chroma_results = []
        
        # 3. Merge both result sets by job id, collecting raw scores.
        all_jobs = {}
        for job in es_results:
            job_id = str(job.get("id", ""))
            if job_id not in all_jobs:
                all_jobs[job_id] = job.copy()
            all_jobs[job_id]["_es_score"] = job.get("_es_score", 0.0)
            all_jobs[job_id]["_chroma_score"] = 0.0  # default until seen in chroma
        for job in chroma_results:
            job_id = str(job.get("id", ""))
            if job_id not in all_jobs:
                all_jobs[job_id] = job.copy()
                all_jobs[job_id]["_es_score"] = 0.0  # default: not found by ES
            all_jobs[job_id]["_chroma_score"] = job.get("_similarity", 0.0)
        
        # 4. Normalize each score family independently.
        # BUGFIX: the old normalizer returned a single {} for empty input while
        # the caller unpacked two values (ValueError), and it rebuilt
        # list(keys()).index(...) inside a loop (O(n^2)). Both fixed here.
        es_normalized = _normalize({jid: j.get("_es_score", 0.0) for jid, j in all_jobs.items()})
        chroma_normalized = _normalize({jid: j.get("_chroma_score", 0.0) for jid, j in all_jobs.items()})
        
        # 5. Weighted fusion of the normalized scores.
        fused_results = []
        for job_id, job in all_jobs.items():
            es_score = es_normalized.get(job_id, 0.0)
            chroma_score = chroma_normalized.get(job_id, 0.0)
            job["_es_score"] = es_score
            job["_chroma_score"] = chroma_score
            job["_fused_score"] = weight_es * es_score + weight_chroma * chroma_score
            fused_results.append(job)
        
        # 6. Sort by fused score, best first.
        fused_results.sort(key=lambda x: x.get("_fused_score", 0.0), reverse=True)
        
        # 7. Return the top-k.
        final_results = fused_results[:top_k]
        
        print(f"混合检索完成，ES找到{len(es_results)}个，ChromaDB找到{len(chroma_results)}个，融合后返回{len(final_results)}个")
        return final_results
        
    except Exception as e:
        print(f"混合检索出错: {str(e)}")
        return []
