import heapq
import logging
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List

from elasticsearch import Elasticsearch
from openai import OpenAI
from sentence_transformers import SentenceTransformer

from config.settings import (
    ES_HOST, INDEX_NAME, VEC_FIELD, EMBED_MODEL_PATH, DEVICE,
    LLM_KEY, LLM_BASE, LLM_MODEL, TOP_N, RRF_K
)

logger = logging.getLogger(__name__)

class PatentSearchService:
    """Hybrid patent retrieval over Elasticsearch.

    Runs two retrieval channels in parallel — BM25 full-text and dense
    vector (kNN) search — fuses their rankings with Reciprocal Rank Fusion
    (RRF), and returns only patents that appear in BOTH channels AND in the
    top of the fused ranking (strict intersection).
    """

    # Fields fetched for each hit; shared by every search channel.
    _SOURCE_FIELDS = [
        "_id", "invention_title", "abstract", "inventor_name",
        "applicant_name", "applicant_address", "publication_date",
    ]

    def __init__(self):
        self.es = Elasticsearch(
            ES_HOST,
            request_timeout=120
        )
        self.embedding_model = SentenceTransformer(EMBED_MODEL_PATH, device=DEVICE)
        self.llm = OpenAI(api_key=LLM_KEY, base_url=LLM_BASE)
        # Restrict every query to publication numbers ending in "B"
        # (presumably granted patents — confirm against the index data).
        self.filter_clause = {"wildcard": {"publication_number": "*B"}}
        # Matches the <think>/</think> markers some reasoning LLMs wrap
        # around their chain-of-thought output.
        self._think_re = re.compile(r'</?think>', flags=re.I)

    def _clean_llm_output(self, text: str) -> str:
        """Strip any <think>...</think> reasoning prefix; keep the final answer."""
        parts = self._think_re.split(text)
        return parts[-1].strip()

    def _generate_keywords(self, text: str) -> str:
        """Ask the LLM to turn a demand summary into a boolean keyword query.

        Returns the LLM's one-line query string. On any LLM failure, falls
        back to a simple OR-group over the first 10 whitespace-separated
        tokens. (NOTE(review): whitespace tokenization is a weak fallback
        for Chinese text — consider a segmenter.)
        """
        prompt = (
            "从下面需求摘要抽取 6-10 个中文关键词，"
            "两组用 OR 连接，最后格式如: (词1 OR 词2 ...) AND (词n ...)，只输出这一行。"
        )
        try:
            response = self.llm.chat.completions.create(
                model=LLM_MODEL,
                timeout=30,
                messages=[
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": text}
                ]
            )
            raw_output = response.choices[0].message.content
            keywords = self._clean_llm_output(raw_output)
            logger.info("Generated keywords: %s", keywords)
            return keywords
        except Exception as e:
            logger.warning("LLM keywords generation failed: %s", e)
            tokens = text.split()[:10]
            return "(" + " OR ".join(tokens) + ")"

    def _bm25_full_search(self, query: str) -> List[Dict[str, Any]]:
        """BM25 full-text search over title (boosted 3x) and abstract."""
        body = {
            "size": TOP_N,
            "query": {
                "bool": {
                    "must": [
                        {"multi_match": {"query": query, "fields": ["invention_title^3", "abstract"]}},
                        self.filter_clause
                    ]
                }
            },
            "_source": self._SOURCE_FIELDS,
        }
        result = self.es.search(index=INDEX_NAME, body=body)
        return result["hits"]["hits"]

    def _vector_search(self, query: str) -> List[Dict[str, Any]]:
        """Dense retrieval: kNN over the embedding field with the encoded query."""
        query_vector = self.embedding_model.encode(query, normalize_embeddings=True).tolist()
        body = {
            "size": TOP_N,
            "query": {
                "bool": {
                    "must": [
                        {"knn": {
                            "field": VEC_FIELD,
                            "query_vector": query_vector,
                            "k": TOP_N,
                            "num_candidates": TOP_N * 2
                        }},
                        self.filter_clause
                    ]
                }
            },
            "_source": self._SOURCE_FIELDS,
        }
        result = self.es.search(index=INDEX_NAME, body=body)
        return result["hits"]["hits"]

    def _rrf_fusion(self, result_lists: List[List[Dict]], k: int = TOP_N) -> List[str]:
        """Reciprocal Rank Fusion across several ranked hit lists.

        Each document scores 1 / (RRF_K + rank) per list it appears in
        (rank is 1-based); returns the ids of the k highest-scoring docs.
        """
        scores: Dict[str, float] = defaultdict(float)
        for result_list in result_lists:
            for rank, doc in enumerate(result_list, 1):
                scores[doc["_id"]] += 1 / (RRF_K + rank)

        return [doc_id for doc_id, _ in heapq.nlargest(k, scores.items(), key=lambda x: x[1])]

    def search_demand_patents(self, demand_text: str, top_n: int = TOP_N) -> List[Dict[str, Any]]:
        """Main entry point: find patents relevant to a demand summary.

        Args:
            demand_text: free-text demand description used as the query.
            top_n: how many fused ids to keep before intersecting.

        Returns:
            De-duplicated list of patent ``_source`` dicts (plus ``_id``)
            present in both retrieval channels and the RRF top-n.
        """
        logger.info("开始搜索需求相关专利: %s...", demand_text[:100])

        # Both channels are I/O-bound (ES round-trips), so run them in parallel.
        with ThreadPoolExecutor(max_workers=2) as executor:
            future_full = executor.submit(self._bm25_full_search, demand_text)
            future_vec = executor.submit(self._vector_search, demand_text)

            full_results = future_full.result()
            vec_results = future_vec.result()

        full_ids = {hit["_id"] for hit in full_results}
        vec_ids = {hit["_id"] for hit in vec_results}

        # Keep only patents every signal agrees on: both channels AND the
        # top-n of the RRF-fused ranking.
        rrf_ids = set(self._rrf_fusion([full_results, vec_results], top_n))
        intersection_ids = full_ids & vec_ids & rrf_ids

        # Build id -> payload, first occurrence wins (de-duplicates hits
        # returned by both channels).
        id_to_patent: Dict[str, Dict[str, Any]] = {}
        for hit in full_results + vec_results:
            doc_id = hit["_id"]
            if doc_id in intersection_ids and doc_id not in id_to_patent:
                patent_data = hit["_source"].copy()
                patent_data["_id"] = doc_id
                id_to_patent[doc_id] = patent_data

        final_patents = list(id_to_patent.values())
        logger.info("搜索完成，找到 %d 个相关专利", len(final_patents))

        return final_patents
