# MinIO和Milvus入库实现（含分片、向量化）
import logging
from typing import List

from minio import Minio
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
import numpy as np
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
class DataInserter:
    """Ingestion helper: uploads source files to MinIO object storage and
    stores chunked, embedded documents in a Milvus vector collection.
    """

    def __init__(self):
        # MinIO client for a local, non-TLS deployment.
        self.minio_client = Minio('localhost:9100',
                                  access_key='minioadmin',
                                  secret_key='minioadmin',
                                  secure=False)
        # Embedding model served by a local Ollama instance.
        self.embeddings = OllamaEmbeddings(model="bge-m3:latest", base_url="http://localhost:11434")

    def upload_to_minio(self, file_path: str, bucket_name: str, object_name: str) -> bool:
        """Upload a local file to MinIO.

        Returns True on success; on failure prints the error and returns
        False (best-effort semantics — nothing is raised to the caller).
        """
        try:
            self.minio_client.fput_object(
                bucket_name=bucket_name,
                object_name=object_name,
                file_path=file_path
            )
            print(f"文件已上传到 {bucket_name}/{object_name}")
            return True
        except Exception as e:
            print(f"MinIO上传失败：{str(e)}")
            return False

    def _split_document(self, doc_path: str) -> list[str]:
        """Read a UTF-8 text file and split it into overlapping chunks.

        Chunks shorter than 100 characters (after stripping) are dropped so
        near-empty fragments are never embedded or stored.
        """
        with open(doc_path, 'r', encoding='utf-8') as f:
            content = f.read()

        splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            chunk_overlap=100,
            # Prefer markdown headings, then paragraphs/lines, then Chinese punctuation.
            separators=["\n## ", "\n### ", "\n\n", "\n", "。", "，"],
            length_function=len
        )
        return [c for c in splitter.split_text(content) if len(c.strip()) >= 100]

    def _embed_text(self, texts: list[str]) -> list[list[float]]:
        """Embed each text, returning one vector per input.

        Uses the batch `embed_documents` API rather than one `embed_query`
        round-trip per chunk.
        """
        return self.embeddings.embed_documents(texts)

    def add_to_milvus(self, collection_name, doc_path, metadata=None) -> bool:
        """Chunk a document, embed the chunks and insert them into Milvus.

        Args:
            collection_name: target Milvus collection (created if absent).
            doc_path: path of the UTF-8 text/markdown document.
            metadata: optional per-chunk VARCHAR payload. A single-element
                list is broadcast to every chunk; None stores empty strings.

        Returns:
            True on success, False on any failure (the error is printed).
        """
        try:
            # 1. Split the document into chunks.
            chunks = self._split_document(doc_path)
            logging.info(f"文档分片完成：共{len(chunks)}个分片")
            if not chunks:
                # Nothing to index (empty file, or every chunk was filtered
                # out); bail out before vectors[0] below would raise.
                print("Milvus入库失败：文档无有效分片")
                return False

            # 2. Embed.
            vectors = self._embed_text(chunks)
            logging.info(f"向量化完成：共{len(vectors)}个向量")

            # 3. Connect to Milvus.
            connections.connect(host='127.0.0.1', port='19530')

            # 4. Collection schema. The primary key is auto-generated
            # (auto_id=True), so ids must NOT be supplied at insert time.
            fields = [
                FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
                FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=len(vectors[0])),
                FieldSchema(name="metadata", dtype=DataType.VARCHAR, max_length=500)
            ]
            schema = CollectionSchema(fields, description="NRS2002知识库")
            collection = Collection(name=collection_name, schema=schema)

            # 5. Vector index for similarity search.
            index_params = {
                "index_type": "IVF_FLAT",
                "metric_type": "L2",
                "params": {"nlist": 100}
            }
            collection.create_index("embedding", index_params)

            # 6. Insert. The schema declares "metadata" as a required VARCHAR
            # column, so a value must be provided for every row; a single
            # metadata entry is broadcast across all chunks.
            if not metadata:
                row_meta = [""] * len(vectors)
            elif len(metadata) == 1:
                row_meta = list(metadata) * len(vectors)
            else:
                row_meta = list(metadata)

            collection.insert([vectors, row_meta])
            collection.flush()
            print(f"已上传 {len(vectors)} 条向量数据到 {collection_name}")
            return True

        except Exception as e:
            print(f"Milvus入库失败：{str(e)}")
            return False


# Usage example
def main() -> None:
    """Demo ingestion pipeline: upload the source file to MinIO, then chunk,
    embed and index it in Milvus."""
    inserter = DataInserter()

    # Push the raw document into MinIO object storage.
    # NOTE(review): bucket name "string" looks like a placeholder — confirm.
    inserter.upload_to_minio("app/data/nrs2002_full.md", "string", "rules/nrs2002_rules.md")

    # Chunk, vectorize and persist the document in Milvus.
    inserter.add_to_milvus("nrs_rules_collection", "app/data/nrs2002_full.md", ["type=rule"])


if __name__ == "__main__":
    main()
