import json
from pathlib import Path
from pymilvus import (
    connections,
    FieldSchema, CollectionSchema, DataType,
    Collection,
    utility
)
from batch_article_processor import load_large_xml_data_to_batch_json
from single_article_processor import load_large_xml_data_to_json
from sentence_transformers import SentenceTransformer

# Load the sentence-embedding model from a local directory. all-MiniLM-L6-v2
# produces 384-dim vectors, which must match the "embedding" field dim used
# when the Milvus collection is created below.
print("正在加载模型...")
model = SentenceTransformer('./model/all-MiniLM-L6-v2')
print("模型已加载")

"""初始化Milvus连接"""
# Connect to a remote Milvus instance on the default alias.
# NOTE(review): host/port are hard-coded to a LAN address — consider moving
# them to configuration or environment variables.
print("正在连接到远程Milvus向量数据库...")
connections.connect("default", host="192.168.31.250", port="19530")
print("已连接到Milvus服务器")


def get_or_create_collection():
    """Return the "pubmed_abstracts" Milvus collection, creating it if absent.

    The schema holds the PubMed id, publication year, the journal/article
    titles and a 384-dim FLOAT_VECTOR embedding (matching the output size of
    all-MiniLM-L6-v2). Missing indexes are created on the embedding field
    (IVF_FLAT / COSINE) and on both VARCHAR fields (Trie).

    Returns:
        Collection: a pymilvus handle to the (possibly new) collection.
    """
    collection_name = "pubmed_abstracts"
    existing_collections = utility.list_collections()
    print("现有集合:", existing_collections)

    if collection_name in existing_collections:
        print("集合已存在")
        collection = Collection(name=collection_name)
    else:
        print("正在创建集合...")
        # NOTE: Milvus does not enforce uniqueness on scalar fields, so the
        # former `unique=True` kwarg on pmid was a no-op and has been removed;
        # deduplication must be handled by the application before insert.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="pmid", dtype=DataType.INT64),
            FieldSchema(name="year", dtype=DataType.INT32),
            FieldSchema(name="title", dtype=DataType.VARCHAR, max_length=1000),
            FieldSchema(name="abstract_title", dtype=DataType.VARCHAR, max_length=10000),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=384)
        ]
        schema = CollectionSchema(fields=fields, description="PubMed摘要集合")
        collection = Collection(name=collection_name, schema=schema)
        print("集合已创建")

    # Only build indexes for fields that do not have one yet: re-issuing
    # create_index on every call is wasteful and can raise if an index with
    # different parameters already exists on the field.
    indexed_fields = {index.field_name for index in collection.indexes}
    if "embedding" not in indexed_fields:
        collection.create_index("embedding", {
            "index_type": "IVF_FLAT",
            "metric_type": "COSINE",
            "params": {"nlist": 128}
        })
    if "title" not in indexed_fields:
        collection.create_index("title", {"index_type": "Trie"})
    if "abstract_title" not in indexed_fields:
        collection.create_index("abstract_title", {"index_type": "Trie"})
    print("向量索引创建成功。")
    return collection


# 获取文件夹中的所有XML文件
def process_pubmed_folder(folder_path: str):
    """Run both XML-to-JSON converters over every *.xml file in *folder_path*."""
    for xml_path in Path(folder_path).glob("*.xml"):
        print(f"正在处理文件: {xml_path}")
        path_text = str(xml_path)
        load_large_xml_data_to_batch_json(path_text)
        load_large_xml_data_to_json(path_text)


def _empty_batch():
    """Return a fresh, empty column-oriented insert batch."""
    return {
        "pmid": [],
        "year": [],
        "title": [],
        "abstract_title": [],
        "embedding": []
    }


def _insert_batch(collection, batch_data):
    """Insert one column-oriented batch into *collection*.

    Returns True on success, False on failure (the error is printed, not
    re-raised, so a single bad batch does not abort the whole ingest run).
    """
    try:
        collection.insert([batch_data["pmid"], batch_data["year"], batch_data["title"],
                           batch_data["abstract_title"], batch_data["embedding"]])
        print(f"已插入 {len(batch_data['pmid'])} 条记录")
        return True
    except Exception as e:
        print(f"插入失败: {e}")
        return False


def process_json_files(folder_path: str):
    """Embed abstracts from every *.json file in *folder_path* and insert into Milvus.

    Each JSON file is expected to contain a list of article dicts with keys
    PMID, Year, Title, ArticleTitle and AbstractText — TODO confirm against
    the converter output. Articles with an empty abstract or a non-numeric
    PMID/Year are skipped with a warning. Rows are inserted in batches of
    1000, with one final partial batch at the end.
    """
    batch_size = 1000
    batch_data = _empty_batch()

    collection = get_or_create_collection()

    for json_file in Path(folder_path).glob("*.json"):
        print(f"正在处理文件: {json_file}")
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
        for article in data:
            pmid_str = article.get("PMID")
            year_str = article.get("Year")
            # Milvus VARCHAR fields cannot take None — default missing titles
            # to empty strings instead of failing the whole batch at insert.
            title = article.get("Title") or ""
            abstract_title = article.get("ArticleTitle") or ""
            abstract_text = article.get("AbstractText")

            # Skip articles with no abstract: there is nothing to embed.
            if abstract_text is None or abstract_text.strip() == "":
                print(f"警告: 文章 PMID {pmid_str} 的 abstract_Text 为空，跳过处理")
                continue

            # Validate and convert the numeric fields; skip malformed rows.
            try:
                pmid = int(pmid_str)
                year = int(year_str)
            except (ValueError, TypeError):
                print(f"警告: 文章 PMID {pmid_str} 的 PMID 或 Year 格式不正确，跳过处理")
                continue

            embedding = model.encode(abstract_text, device="cpu", show_progress_bar=False)

            batch_data["pmid"].append(pmid)
            batch_data["year"].append(year)
            batch_data["title"].append(title)
            batch_data["abstract_title"].append(abstract_title)
            batch_data["embedding"].append(embedding)

            # Flush a full batch to Milvus and start a fresh one.
            if len(batch_data["pmid"]) >= batch_size:
                _insert_batch(collection, batch_data)
                batch_data = _empty_batch()

    # Insert the remaining partial batch, if any.
    if batch_data["pmid"]:
        _insert_batch(collection, batch_data)

    # Make the inserted rows durable; without a flush the tail of the data
    # may remain buffered on the Milvus side.
    collection.flush()


if __name__ == "__main__":
    # Entry point: ingest the pre-converted JSON files from this Windows path.
    # NOTE(review): "bath" in the folder name looks like a typo for "batch" —
    # confirm the actual directory name before changing it.
    process_json_files("C:/local_pubmed_bath_json")
