from langchain_text_splitters import RecursiveCharacterTextSplitter
from pathlib import Path
import json
import os
import torch
import numpy as np
from pymilvus import (
    connections,
    utility,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
    AnnSearchRequest,
    WeightedRanker
)

from pymilvus.model.hybrid import BGEM3EmbeddingFunction
from typing import List, Dict

import re


def chunk_image_folders(base_path: str, chunk_size: int = 1000, chunk_overlap: int = 150) -> list:
    """Scan *base_path* for folders holding an ``info.md`` (and an expected
    ``thumbnail.jpg``) and turn each into indexable text chunks.

    Args:
        base_path: Root directory containing the image folders.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Overlap in characters between consecutive chunks.

    Returns:
        List of dicts with keys ``text``, ``chunk_id``, ``total_chunks``,
        ``source`` and ``title``.
    """
    splitter = RecursiveCharacterTextSplitter(
        separators=[
            "\n\n", "\n", "。", "！", "？", ". ", "! ", "? ", "；", "，", " ", ""
        ],
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
        is_separator_regex=False,
    )

    # Markdown cleanup: [text](url) -> text, then strip bare URLs.
    md_link_re = re.compile(r'\[([^\]]+)\]\([^)]+\)')
    bare_url_re = re.compile(r'https?://[^\s]+')

    docs = []

    for root, _dirs, files in os.walk(base_path):
        # Only folders that carry an info.md are image folders.
        if "info.md" not in files:
            continue

        info_path = os.path.join(root, "info.md")
        folder_name = os.path.basename(root)
        # Expected on-disk location of the thumbnail image.
        image_path = os.path.join(root, "thumbnail.jpg")

        with open(info_path, "r", encoding="utf-8") as fh:
            raw_description = fh.read()

        cleaned = bare_url_re.sub('', md_link_re.sub(r'\1', raw_description))

        # Compose the text to embed; the trailing URL line carries the
        # thumbnail path so it can be recovered at search time.
        image_text = (
            f"图像标识: {folder_name}\n"
            f"主要描述: {cleaned}\n"
            f"URL: {image_path}\n"
        )

        pieces = splitter.split_text(image_text)

        for idx, piece in enumerate(pieces):
            stripped = piece.strip()
            # Drop fragments too short to carry useful signal.
            if len(stripped) > 30:
                docs.append({
                    "text": stripped,
                    "chunk_id": len(docs),
                    "total_chunks": len(pieces),
                    "source": info_path,
                    "title": f"{folder_name}_图像_{idx + 1}",
                })

    return docs


class MilvusImageVectorStore:
    """Store image-description chunks in Milvus for hybrid retrieval.

    Embeds text with BGE-M3 (dense + sparse vectors) and supports
    weighted hybrid search over both vector fields.
    """

    def __init__(self, milvus_uri: str = "http://localhost:19530",
                 collection_name: str = "img_info",
                 bge_m3_path: str = None):
        self.milvus_uri = milvus_uri
        self.collection_name = collection_name
        self.bge_m3_path = bge_m3_path or '/mnt/Mars_data/rag/bge_m3'

        # Connect to Milvus (registers the "default" connection alias).
        connections.connect("default", uri=self.milvus_uri)

        # Initialize the embedding model; fp16 is enabled only on GPU.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.embedding_model = BGEM3EmbeddingFunction(
            model_name=self.bge_m3_path,
            use_fp16=True if self.device == 'cuda' else False,
            device=self.device,
            batch_size=8
        )

        # Bound lazily by create_collection() or on first insert/search.
        self.collection = None

    def create_collection(self, dense_dim: int = 1024):
        """Create a collection that supports hybrid (sparse + dense) search.

        Any existing collection with the same name is dropped first.

        Args:
            dense_dim: Dimensionality of the dense vector field.
        """
        print(f"创建集合: {self.collection_name}")

        if utility.has_collection(self.collection_name):
            Collection(self.collection_name).drop()

        fields = [
            # Primary keys are caller-assigned (auto_id=False); see insert_documents().
            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=False),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="title", dtype=DataType.VARCHAR, max_length=200),
            FieldSchema(name="chunk_id", dtype=DataType.INT64),
            FieldSchema(name="sparse_vector", dtype=DataType.SPARSE_FLOAT_VECTOR),
            FieldSchema(name="dense_vector", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),
        ]

        schema = CollectionSchema(fields=fields, description="图像信息混合检索")
        self.collection = Collection(
            name=self.collection_name,
            schema=schema,
            consistency_level="Strong"
        )

        print("      创建向量索引...")
        # Inner-product metric on both fields so hybrid scores are comparable.
        sparse_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP"}
        self.collection.create_index("sparse_vector", sparse_index)

        dense_index = {"index_type": "AUTOINDEX", "metric_type": "IP"}
        self.collection.create_index("dense_vector", dense_index)

        self.collection.load()
        print("      集合创建完成\n")

    def _convert_to_float32(self, embeddings: Dict) -> Dict:
        """Coerce dense embeddings to float32 numpy arrays.

        Sparse embeddings are passed through unchanged — Milvus accepts
        the sparse matrix format produced by BGE-M3 directly.
        """
        result = {}

        if 'dense' in embeddings:
            dense = embeddings['dense']
            if isinstance(dense, list):
                result['dense'] = [
                    np.array(vec, dtype=np.float32) if not isinstance(vec, np.ndarray)
                    else vec.astype(np.float32)
                    for vec in dense
                ]
            else:
                result['dense'] = np.array(dense, dtype=np.float32)

        if 'sparse' in embeddings:
            result['sparse'] = embeddings['sparse']

        return result

    def insert_documents(self, documents: List[Dict], batch_size: int = 1000):
        """Embed and insert document chunks into Milvus in batches.

        Args:
            documents: Chunks as produced by chunk_image_folders(); each
                must carry "text", "source", "title" and "chunk_id" keys.
            batch_size: Number of chunks embedded/inserted per batch.

        NOTE(review): primary keys restart at 0 on every call, so calling
        this twice against the same collection produces duplicate pks —
        confirm whether repeated inserts are ever needed.
        """
        if self.collection is None:
            self.collection = Collection(self.collection_name)
            self.collection.load()

        print(f"向量化并插入 {len(documents)} 个文档块...")

        # Process in batches to bound embedding memory usage.
        for i in range(0, len(documents), batch_size):
            batch_docs = documents[i:i + batch_size]
            print(f"处理批次 {i // batch_size + 1}/{(len(documents) - 1) // batch_size + 1}")

            texts = [doc["text"] for doc in batch_docs]
            sources = [doc["source"] for doc in batch_docs]
            titles = [doc["title"] for doc in batch_docs]
            chunk_ids = [doc["chunk_id"] for doc in batch_docs]
            # Primary keys: global position of each doc within this call.
            pks = list(range(i, i + len(batch_docs)))

            # Embed the batch (no autograd needed for inference).
            with torch.no_grad():
                embeddings = self.embedding_model(texts)

            embeddings = self._convert_to_float32(embeddings)
            dense_vecs = embeddings['dense']
            sparse_vecs = embeddings['sparse']

            # Column-ordered entities; order must match the schema field
            # order defined in create_collection().
            entities = [
                pks, texts, sources, titles, chunk_ids, sparse_vecs, dense_vecs
            ]

            # Insert; failures are logged and the next batch continues.
            try:
                self.collection.insert(entities)
                self.collection.flush()
                print(f"成功插入批次 {i // batch_size + 1}，共 {len(batch_docs)} 条记录")
            except Exception as e:
                print(f"插入批次 {i // batch_size + 1} 失败: {e}")

    def search_with_description_and_url(self, query: str, top_k: int = 3) -> List[Dict]:
        """Hybrid-search the collection and return description/url pairs.

        Args:
            query: Free-text query string.
            top_k: Number of results to return.

        Returns:
            List of {"description": str, "url": str} dicts; "url" is empty
            when a hit's chunk text contains no "URL:" line.
        """
        if self.collection is None:
            self.collection = Collection(self.collection_name)
            self.collection.load()

        # Embed the query (dense and sparse representations at once).
        with torch.no_grad():
            query_embeddings = self.embedding_model([query])

        query_embeddings = self._convert_to_float32(query_embeddings)
        query_dense = query_embeddings['dense'][0]
        # [[0]] keeps the sparse row 2-D, as AnnSearchRequest expects.
        query_sparse = query_embeddings['sparse'][[0]]

        # Dense ANN request.
        dense_search_params = {"metric_type": "IP", "params": {}}
        dense_req = AnnSearchRequest(
            [query_dense], "dense_vector", dense_search_params, limit=top_k
        )

        # Sparse ANN request.
        sparse_search_params = {"metric_type": "IP", "params": {}}
        sparse_req = AnnSearchRequest(
            query_sparse, "sparse_vector", sparse_search_params, limit=top_k
        )

        # Hybrid search with equal weighting of sparse and dense scores.
        rerank = WeightedRanker(0.5, 0.5)
        results = self.collection.hybrid_search(
            [sparse_req, dense_req],
            rerank=rerank,
            limit=top_k,
            output_fields=["text", "title", "source"]
        )[0]

        search_results = []
        # Base directory used to resolve thumbnail paths in stored chunks.
        base_path = "/mnt/Mars_data/kepu_rag/mars_data/mars_data"

        for res in results:
            # Pull stored fields from the hit.
            text = res.entity.get("text")
            title = res.entity.get("title")
            source = res.entity.get("source")

            # Split the chunk text back into the URL line and description.
            url = ""
            description_lines = []

            lines = text.split('\n')
            for line in lines:
                if line.startswith('URL:'):
                    url = line.replace('URL:', '').strip()
                    # Normalize to an absolute path rooted at base_path.
                    if url:
                        # Relative path: join onto the base directory.
                        if not url.startswith('/'):
                            url = os.path.join(base_path, url)
                        # Absolute path outside base_path: re-root it there.
                        elif base_path not in url:
                            url = os.path.join(base_path, url.lstrip('/'))
                else:
                    description_lines.append(line)

            description = '\n'.join(description_lines).strip()

            search_results.append({
                "description": description,
                "url": url,
            })

        return search_results


# Usage example
if __name__ == "__main__":
    # 1. Chunk the image folders into indexable documents.
    base_path = r"/mnt/Mars_data/kepu_rag/mars_data/mars_data"  # e.g. r"mars_data"
    chunks = chunk_image_folders(base_path)

    print(f"共生成 {len(chunks)} 个文本块:")
    for i, chunk in enumerate(chunks[:5]):  # preview the first 5 chunks
        print(f"\n块 {i + 1}:")
        print(f"  标题: {chunk['title']}")
        print(f"  内容: {chunk['text'][:100]}...")

    # 2. Embed and store in Milvus.
    vector_store = MilvusImageVectorStore(collection_name="img_info")

    # Create the collection sized to the model's dense dimensionality.
    dense_dim = vector_store.embedding_model.dim["dense"]
    vector_store.create_collection(dense_dim)

    # Insert the chunked documents.
    vector_store.insert_documents(chunks)

    # 3. Smoke-test retrieval.
    query_text = "火星探测器"
    # BUG FIX: the class defines search_with_description_and_url(); there is
    # no search_urls() method, so the original call raised AttributeError.
    results = vector_store.search_with_description_and_url(query_text, top_k=3)

    print(f"\n查询: {query_text}")
    print("检索结果:")
    for result in results:
        print(f"  {result}")
