import re
import numpy as np
import json
from pymilvus import (
    connections,
    FieldSchema, CollectionSchema, DataType,
    Collection,
    utility
)
from sentence_transformers import SentenceTransformer
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from rake_nltk import Rake
from typing import List, Dict, Tuple, Any

# ------------------- Local resource paths (verify they match your actual environment) -------------------
# Tokenizer used only for token counting when sizing chunks in _chunk_document.
LOCAL_TOKENIZER_PATH = r"C:\Users\Windows10\Desktop\rag\tokenizers\bert-base-uncased"
# SentenceTransformer checkpoint used to embed both document chunks and queries.
LOCAL_EMBEDDING_MODEL_PATH = r"C:\Users\Windows10\Desktop\rag\models\all-MiniLM-L6-v2"
# NOTE(review): path suggests an SST-2 sentiment checkpoint; _classify_intent maps its
# POSITIVE/NEGATIVE labels onto intents — confirm this proxy is acceptable.
LOCAL_INTENT_MODEL_PATH = r"C:\Users\Windows10\Desktop\rag\models\distilbert-sst-2"

# Loaded eagerly at import time; raises if the local path is missing or invalid.
text_tokenizer = AutoTokenizer.from_pretrained(LOCAL_TOKENIZER_PATH)


class MilvusRAGSystem:
    """Hybrid (vector + keyword) RAG retrieval system backed by Milvus.

    Documents are split into sentence-based, token-budgeted chunks, embedded
    with a local SentenceTransformer, tagged with RAKE key phrases, and stored
    in a Milvus collection.  At query time the system classifies intent,
    expands the query with keyword combinations, runs a vector search and a
    keyword search, and merges the two rankings with Reciprocal Rank Fusion.
    """

    def __init__(self, milvus_host: str, milvus_port: str):
        """Connect to Milvus, load local models and (re)build the collection.

        Args:
            milvus_host: Milvus server host.
            milvus_port: Milvus server port.

        Raises:
            RuntimeError: if the connection or any model load fails.
        """
        self._connect_milvus(milvus_host, milvus_port)
        self.embedding_model = self._load_local_embedding_model()
        self.intent_classifier = self._load_local_intent_classifier()
        self.rake_extractor = Rake()
        # Recreated from scratch on every construction and loaded into memory;
        # any previously stored data is dropped (see _create_or_load_collection).
        self.collection = self._create_or_load_collection()

    def _connect_milvus(self, host: str, port: str):
        """Open the Milvus connection; wrap any failure in RuntimeError."""
        try:
            connections.connect(host=host, port=port)
            print("✅ Milvus服务器连接成功")
        except Exception as e:
            raise RuntimeError(f"Milvus连接失败：{str(e)}") from e

    def _load_local_embedding_model(self):
        """Load the local SentenceTransformer embedding model.

        Returns:
            The loaded SentenceTransformer instance.

        Raises:
            RuntimeError: if loading fails (e.g. bad local path).
        """
        try:
            model = SentenceTransformer(LOCAL_EMBEDDING_MODEL_PATH)
            print("✅ 本地嵌入模型加载成功")
            return model
        except Exception as e:
            raise RuntimeError(f"嵌入模型加载失败：{str(e)}") from e

    def _load_local_intent_classifier(self):
        """Load the local text-classification pipeline used for intent.

        Returns:
            A transformers text-classification pipeline.

        Raises:
            RuntimeError: if loading fails.
        """
        try:
            tokenizer = AutoTokenizer.from_pretrained(LOCAL_INTENT_MODEL_PATH)
            model = AutoModelForSequenceClassification.from_pretrained(LOCAL_INTENT_MODEL_PATH)
            classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
            print("✅ 本地意图分类模型加载成功")
            return classifier
        except Exception as e:
            raise RuntimeError(f"意图模型加载失败：{str(e)}") from e

    def _create_or_load_collection(self, collection_name: str = "rag_collection"):
        """Drop any same-named collection, then create, index and load a new one.

        WARNING: destructive — existing data in ``collection_name`` is deleted
        every time a MilvusRAGSystem is constructed.

        Args:
            collection_name: name of the Milvus collection to (re)create.

        Returns:
            The loaded pymilvus Collection, ready for insert/search.
        """
        if utility.has_collection(collection_name):
            utility.drop_collection(collection_name)
            print(f"⚠️  已删除旧集合{collection_name}，重建新集合")

        # Field order matters: add_document() passes column lists in this exact
        # order (the auto_id primary key is generated by Milvus itself).
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=2000),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR,
                        dim=self.embedding_model.get_sentence_embedding_dimension()),
            FieldSchema(name="keywords", dtype=DataType.VARCHAR, max_length=2000),
            FieldSchema(name="metadata", dtype=DataType.VARCHAR, max_length=1000)
        ]
        schema = CollectionSchema(fields, "RAG集合（已修复加载问题）")
        collection = Collection(collection_name, schema)

        index_params = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 128}}
        collection.create_index("vector", index_params)
        # A Milvus collection must be loaded into memory before search() works.
        collection.load()
        print(f"✅ 新集合创建并加载完成：{collection_name}（已加载到内存）")
        return collection

    def _chunk_document(self, document: str, chunk_size: int = 300, chunk_overlap: int = 50) -> List[str]:
        """Split a document into token-budgeted chunks of whole sentences.

        Sentences are split on Chinese/ASCII sentence punctuation followed by
        whitespace (NOTE: Chinese prose usually has no space after 。, so such
        text may remain a single "sentence").  Sentences accumulate until the
        tokenized length would exceed ``chunk_size``; the last
        ``chunk_overlap`` tokens of a finished chunk seed the next chunk for
        context continuity.

        Args:
            document: raw text to split.
            chunk_size: max tokens (per the module-level tokenizer) per chunk.
            chunk_overlap: number of overlap tokens carried into the next chunk.

        Returns:
            Non-empty list of chunk strings.
        """
        sentences = re.split(r'(?<=[。！？；.!?;])\s+', document.strip())
        if not sentences:
            return [document]

        chunks = []
        current_chunk_sentences = []
        current_word_count = 0

        for sentence in sentences:
            sentence_word_count = len(text_tokenizer.encode(sentence, add_special_tokens=False))

            if current_word_count + sentence_word_count > chunk_size and current_chunk_sentences:
                # Current chunk is full: emit it, then start the next chunk
                # from its token tail plus the new sentence.
                finished = ' '.join(current_chunk_sentences)
                chunks.append(finished)
                if chunk_overlap > 0:
                    chunk_tokens = text_tokenizer.encode(finished, add_special_tokens=False)
                    # [-k:] already returns the whole list when len < k.
                    overlap_text = text_tokenizer.decode(chunk_tokens[-chunk_overlap:])
                    current_chunk_sentences = [overlap_text, sentence]
                    current_word_count = len(
                        text_tokenizer.encode(' '.join(current_chunk_sentences), add_special_tokens=False))
                else:
                    current_chunk_sentences = [sentence]
                    current_word_count = sentence_word_count
            else:
                current_chunk_sentences.append(sentence)
                current_word_count += sentence_word_count

        if current_chunk_sentences:
            chunks.append(' '.join(current_chunk_sentences))
        return chunks

    def _extract_keywords(self, text: str) -> List[str]:
        """Return up to 8 lower-cased RAKE key phrases longer than 2 chars.

        NOTE(review): rake_nltk targets English (stopword-based); quality on
        Chinese text is likely poor — confirm against the target corpus.
        """
        self.rake_extractor.extract_keywords_from_text(text)
        top_phrases = self.rake_extractor.get_ranked_phrases()[:8]
        return [phrase.lower() for phrase in top_phrases if len(phrase.strip()) > 2]

    def _expand_query(self, query: str) -> List[str]:
        """Expand a query with its keywords and all keyword pairs.

        Returns:
            At most 5 deduplicated queries, always led by the original query.

        Fixed: the previous ``list(set(expanded))[:5]`` made ordering — and
        therefore which 5 expansions survive — nondeterministic and could even
        drop the original query; ``dict.fromkeys`` dedupes in insertion order.
        """
        query_keywords = self._extract_keywords(query)
        if not query_keywords:
            return [query]

        expanded = [query]
        if len(query_keywords) >= 2:
            expanded.extend(query_keywords)
            # Every unordered keyword pair joined as "kw_i kw_j".
            for i in range(len(query_keywords)):
                for j in range(i + 1, len(query_keywords)):
                    expanded.append(f"{query_keywords[i]} {query_keywords[j]}")
        return list(dict.fromkeys(expanded))[:5]

    def _classify_intent(self, query: str) -> Dict[str, float]:
        """Map the classifier's POSITIVE/NEGATIVE label to a coarse intent.

        NOTE(review): the underlying checkpoint appears to be a sentiment
        model, not a true intent model — this mapping is a heuristic.

        Returns:
            Dict with "intent" (str) and "confidence" (float, 4 decimals).
        """
        result = self.intent_classifier(query)[0]
        intent_map = {"POSITIVE": "寻求信息", "NEGATIVE": "质疑或否定"}
        return {
            "intent": intent_map.get(result["label"], "其他"),
            "confidence": round(float(result["score"]), 4)
        }

    def add_document(self, document: str, metadata: Dict[str, Any] = None) -> None:
        """Chunk, embed, keyword-tag and insert a document into Milvus.

        Args:
            document: raw document text.
            metadata: optional JSON-serializable dict stored with every chunk.
        """
        metadata = metadata or {}
        chunks = self._chunk_document(document)
        if not chunks:
            print("⚠️ 文档为空，未添加")
            return

        # Batch-encode every chunk in one model call — same vectors as the
        # old per-chunk loop, far fewer forward passes.
        vectors = self.embedding_model.encode(chunks).tolist()
        # Serialize once; ensure_ascii=False keeps non-ASCII metadata compact
        # (the metadata column is VARCHAR(1000)).
        metadata_json = json.dumps(metadata, ensure_ascii=False)

        texts, keywords_list, metadatas = [], [], []
        for chunk in chunks:
            texts.append(chunk)
            keywords_list.append(','.join(self._extract_keywords(chunk)))
            metadatas.append(metadata_json)

        # Column order must match the schema (id is auto-generated).
        self.collection.insert(data=[texts, vectors, keywords_list, metadatas])
        self.collection.flush()
        print(f"✅ 文档添加完成，分块数量：{len(chunks)}")

    def _vector_search(self, query: str, limit: int = 10) -> List[Tuple[Any, float]]:
        """ANN search on the vector field.

        Returns:
            (hit, pseudo_score) pairs.  ``1 - distance / 2`` is a heuristic
            (L2 distance is unbounded, so it can go negative); it is purely
            informational — retrieve() fuses results by rank, not score.
        """
        query_vec = self.embedding_model.encode(query).tolist()
        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        results = self.collection.search(
            data=[query_vec], anns_field="vector", param=search_params,
            limit=limit, output_fields=["id", "text", "keywords", "metadata"]
        )
        return [(hit, 1 - hit.distance / 2) for hit in results[0]]

    def _keyword_search(self, query: str, limit: int = 10) -> List[Tuple[Any, float]]:
        """Substring keyword match against the stored ``keywords`` column.

        Args:
            query: query text to extract keywords from.
            limit: max rows fetched from Milvus.

        Returns:
            (row_dict, score) pairs sorted by descending score, where score
            is the fraction of query keywords found in the row's keyword
            string.
        """
        query_kw = self._extract_keywords(query)
        if not query_kw:
            return []

        # Escape single quotes so a keyword cannot terminate the quoted
        # literal and inject into the boolean expression.
        expr = " or ".join(
            "keywords like '%{}%'".format(kw.replace("'", "\\'")) for kw in query_kw
        )
        results = self.collection.query(
            expr=expr, output_fields=["id", "text", "keywords", "metadata"], limit=limit
        )

        scored_results = []
        for res in results:
            kw_string = res["keywords"] or ""
            # Substring match, consistent with the `like '%kw%'` predicate
            # above (the old exact-list membership scored substring hits 0).
            match_count = sum(1 for kw in query_kw if kw in kw_string)
            scored_results.append((res, match_count / len(query_kw)))

        return sorted(scored_results, key=lambda x: x[1], reverse=True)

    def _rrf_fusion(self, results_list: List[List[Tuple[Any, float]]], k: int = 60) -> List[Tuple[Any, float]]:
        """Merge ranked result lists with Reciprocal Rank Fusion.

        Each document scores sum(1 / (k + rank)) over every list it appears
        in (rank is 1-based); the incoming per-list scores are ignored.

        NOTE(review): vector hits are pymilvus Hit objects while keyword rows
        are plain dicts; both must support ``res["id"]`` / ``"id" in res`` for
        cross-list dedup to work — confirm with the installed pymilvus version.

        Args:
            results_list: ranked (doc, score) lists to fuse.
            k: RRF damping constant (60 is the conventional default).

        Returns:
            (doc, rrf_score) pairs sorted by descending fused score.
        """
        def _doc_id(res) -> Any:
            # Fall back to the string form when no "id" field is available.
            return res["id"] if "id" in res else str(res)

        rrf_scores = {}
        doc_map = {}
        for ranked in results_list:
            for rank, (res, _) in enumerate(ranked, 1):
                doc_id = _doc_id(res)
                rrf_scores[doc_id] = rrf_scores.get(doc_id, 0) + 1 / (k + rank)
                doc_map[doc_id] = res

        fused = [(doc_map[doc_id], score) for doc_id, score in rrf_scores.items()]
        return sorted(fused, key=lambda x: x[1], reverse=True)

    def retrieve(self, query: str, limit: int = 10) -> Dict[str, Any]:
        """Full hybrid retrieval: intent + expansion + vector/keyword + RRF.

        Args:
            query: user query text.
            limit: max number of fused results returned.

        Returns:
            Dict with keys "query", "intent", "expanded_queries", "results";
            each result carries "text", "score" (RRF), "keywords", "metadata".
        """
        intent = self._classify_intent(query)
        expanded_queries = self._expand_query(query)

        vector_res = self._vector_search(query, limit)
        keyword_res = []
        for eq in expanded_queries:
            keyword_res.extend(self._keyword_search(eq, limit))

        # Deduplicate keyword hits across the expanded queries, keeping the
        # first occurrence of each id, capped at `limit`.
        unique_keyword_res = []
        seen_ids = set()
        for res, score in keyword_res:
            doc_id = res["id"] if "id" in res else str(res)
            if doc_id not in seen_ids:
                seen_ids.add(doc_id)
                unique_keyword_res.append((res, score))
                if len(unique_keyword_res) >= limit:
                    break

        fused_res = self._rrf_fusion([vector_res, unique_keyword_res], k=60)

        # NOTE(review): res["text"] etc. assumes pymilvus Hit supports
        # dict-style field access; older pymilvus needs hit.entity.get().
        formatted_results = []
        for res, score in fused_res[:limit]:
            formatted_results.append({
                "text": res["text"],
                "score": round(float(score), 4),
                "keywords": res["keywords"].split(',') if res["keywords"] else [],
                "metadata": json.loads(res["metadata"]) if res["metadata"] else {}
            })
        return {
            "query": query,
            "intent": intent,
            "expanded_queries": expanded_queries,
            "results": formatted_results
        }


# ------------------- Script entry point -------------------
if __name__ == "__main__":
    MILVUS_HOST = "192.168.52.129"
    MILVUS_PORT = "19530"

    # Build the system; any connection/model failure surfaces as RuntimeError.
    try:
        rag_system = MilvusRAGSystem(milvus_host=MILVUS_HOST, milvus_port=MILVUS_PORT)
    except RuntimeError as e:
        print(f"❌ 系统初始化失败：{e}")
        # raise SystemExit(1) instead of exit(): exit() is an interactive-only
        # helper, and a failure should return a nonzero status code.
        raise SystemExit(1)

    sample_document = """
    机器学习是人工智能的核心分支，通过算法让计算机从数据中学习规律，无需人工编写规则。
    核心步骤：数据收集与清洗→特征工程→模型训练→评估优化。
    常见算法分类：
    1. 监督学习：线性回归（预测房价）、逻辑回归（垃圾邮件分类）、决策树（客户流失预测）
    2. 无监督学习：K-Means（用户分群）、PCA（数据降维）、关联规则（购物篮分析）
    3. 强化学习：Q-Learning（游戏AI）、PPO（机器人控制）

    学习基础：Python编程（Pandas/Numpy）、数学（线性代数/概率论）、框架（PyTorch/Scikit-learn）。
    应用场景：金融（风控欺诈检测）、医疗（疾病影像识别）、零售（个性化推荐）、自动驾驶（物体检测）。
    """

    # Ingest the sample document with simple provenance metadata.
    try:
        rag_system.add_document(
            document=sample_document,
            metadata={"来源": "机器学习入门手册", "类型": "技术文档", "更新时间": "2024-10-28"}
        )
    except Exception as e:
        print(f"❌ 文档添加失败：{e}")
        raise SystemExit(1)

    user_query = "学习机器学习需要哪些基础？常用算法有哪些分类？"

    # Run the full hybrid retrieval pipeline for the sample query.
    try:
        results = rag_system.retrieve(query=user_query, limit=3)
    except Exception as e:
        print(f"❌ 检索失败：{e}")
        raise SystemExit(1)

    # Pretty-print the retrieval report.
    print("\n" + "=" * 60)
    print(f"📌 查询内容：{results['query']}")
    print(f"🎯 意图识别：{results['intent']['intent']}（置信度：{results['intent']['confidence']}）")
    print(f"🔍 扩展查询：{results['expanded_queries']}")
    print("\n📊 检索结果（按相关性排序）：")
    for i, res in enumerate(results["results"], 1):
        print(f"\n【结果{i}】相关性分数：{res['score']}")
        print(f"📄 文本内容：{res['text']}")
        print(f"🔑 关键词：{res['keywords']}")
        print(f"📋 元数据：{res['metadata']}")
    print("=" * 60)