import os
import PyPDF2
from transformers import AutoTokenizer, AutoModel
import torch
import numpy as np
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility, db
import nltk
from nltk.tokenize import sent_tokenize

# Download the NLTK resources required by sent_tokenize.
nltk.download('punkt')
nltk.download('punkt_tab')

# Initialize the embedding model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract")
model = AutoModel.from_pretrained("microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract")

# Configuration
DATABASE_NAME = "my_medical_db"  # Milvus database to create/use
COLLECTION_NAME = "new_medical_docs"  # Milvus collection holding document chunks
DIMENSION = 1024  # vector dimension stored in Milvus; must match the model's embedding size
CHUNK_SIZE = 512  # max tokens per text chunk (also the tokenizer truncation length)
WENDANG_DIR = "wendang"  # directory scanned for input PDF files


# Connect to Milvus and switch to the target database, creating it on first run.
def connect_milvus():
    """Open a Milvus connection and select DATABASE_NAME, creating it if absent."""
    connections.connect(host='localhost', port='19530')
    existing = db.list_database()
    if DATABASE_NAME not in existing:
        db.create_database(DATABASE_NAME)
    db.using_database(DATABASE_NAME)


# Build (or rebuild) the Milvus collection that stores the document chunks.
def create_collection():
    """Drop any existing COLLECTION_NAME and create a fresh one.

    Schema: VARCHAR primary key "id", FLOAT_VECTOR "embedding" of DIMENSION
    dims, and VARCHAR "chunk_text" holding the raw chunk. An IVF_FLAT/L2
    index is created on the vector field before returning the collection.
    """
    if utility.has_collection(COLLECTION_NAME):
        utility.drop_collection(COLLECTION_NAME)

    id_field = FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=100)
    vector_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=DIMENSION)
    # Extra field so the original chunk text can be retrieved alongside its vector.
    text_field = FieldSchema(name="chunk_text", dtype=DataType.VARCHAR, max_length=65535)

    schema = CollectionSchema(
        fields=[id_field, vector_field, text_field],
        description="New medical documents collection",
    )
    collection = Collection(COLLECTION_NAME, schema)

    collection.create_index(
        "embedding",
        {
            "metric_type": "L2",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 16384},
        },
    )
    return collection


# Read a PDF and split its text into sentence-aligned chunks of <= CHUNK_SIZE tokens.
def process_pdf(pdf_path):
    """Extract the text of *pdf_path* and return a list of chunk strings.

    Page texts are joined with a newline so the last word of one page does
    not fuse with the first word of the next before sentence tokenization.
    Sentences are packed greedily into chunks until adding the next sentence
    would exceed CHUNK_SIZE tokens (special tokens excluded).

    NOTE: a single sentence longer than CHUNK_SIZE tokens still becomes its
    own over-long chunk; it is truncated downstream by the tokenizer call in
    generate_embeddings.
    """
    with open(pdf_path, 'rb') as file:
        pdf_reader = PyPDF2.PdfReader(file)
        # extract_text() may return None for image-only pages; treat as empty
        # instead of crashing on string concatenation.
        page_texts = [page.extract_text() or "" for page in pdf_reader.pages]
    full_text = "\n".join(page_texts)

    sentences = sent_tokenize(full_text)
    chunks = []
    current_chunk = ""
    current_tokens = 0

    for sentence in sentences:
        tokens = tokenizer.encode(sentence, add_special_tokens=False)
        if current_tokens + len(tokens) <= CHUNK_SIZE:
            current_chunk += " " + sentence
            current_tokens += len(tokens)
        else:
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = sentence
            current_tokens = len(tokens)

    # Flush the final partial chunk.
    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks


# Embed each text chunk using the model's [CLS]-position hidden state.
def generate_embeddings(text_chunks):
    """Return one 1-D numpy embedding (position-0 hidden state) per chunk."""
    vectors = []
    # Gradients are never needed for inference; hoist no_grad around the loop.
    with torch.no_grad():
        for text in text_chunks:
            encoded = tokenizer(
                text,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=CHUNK_SIZE,
            )
            hidden = model(**encoded).last_hidden_state
            # Token 0 ([CLS]) serves as the chunk-level representation.
            vectors.append(hidden[:, 0, :].squeeze().numpy())
    return vectors


# Persist chunk embeddings and their source texts into the Milvus collection.
def store_to_milvus(collection, embeddings, chunks, article_id):
    """Insert one row per chunk, keyed "<article_id>_<index>"; return the ids."""
    row_ids = [f"{article_id}_{idx}" for idx in range(len(embeddings))]
    vector_rows = [vec.tolist() for vec in embeddings]

    # Column order must match the collection schema: id, embedding, chunk_text.
    collection.insert([row_ids, vector_rows, chunks])
    collection.flush()
    return row_ids


# Collect the PDF paths directly inside *directory* (non-recursive).
def get_pdf_files(directory):
    """Return paths of entries in *directory* whose name ends in .pdf (any case)."""
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.lower().endswith('.pdf')
    ]


# End-to-end pipeline: connect, (re)create the collection, ingest every PDF.
def main():
    """Ingest every PDF under WENDANG_DIR: chunk, embed, and store in Milvus."""
    connect_milvus()
    print("Milvus连接成功，当前数据库: ", DATABASE_NAME)

    collection = create_collection()
    print("Milvus集合创建成功: ", COLLECTION_NAME)

    pdf_files = get_pdf_files(WENDANG_DIR)
    if not pdf_files:
        print(f"目录 {WENDANG_DIR} 下未找到PDF文件")
        return

    def ingest(pdf_path):
        # The bare file name (without extension) doubles as the article id.
        article_id, _ = os.path.splitext(os.path.basename(pdf_path))
        print(f"\n处理文件: {pdf_path}")

        chunks = process_pdf(pdf_path)
        print(f"切片数量: {len(chunks)}")

        embeddings = generate_embeddings(chunks)
        print(f"生成嵌入数量: {len(embeddings)}")

        stored_ids = store_to_milvus(collection, embeddings, chunks, article_id)
        print(f"存储的ID (前5个): {stored_ids[:5]}...")

    for path in pdf_files:
        ingest(path)


# Script entry point.
if __name__ == "__main__":
    main()