import requests
from bs4 import BeautifulSoup
import re
from pymilvus import connections, Collection, FieldSchema, DataType, CollectionSchema
import time

# Xinference server configuration (local deployment defaults).
XINFERENCE_HOST = "http://localhost:9997"
EMBEDDING_MODEL_NAME = "bge-small-en-v1.5"
EMBEDDING_MODEL_TYPE = "embedding"
MILVUS_HOST = 'localhost'
MILVUS_PORT = '19530'

# Target URL to scrape (a PubMed record page).
TARGET_URL = "https://pubmed.ncbi.nlm.nih.gov/35036878/"

# Name of the Milvus collection the scraped data is written into.
NEW_COLLECTION_NAME = "ncbi_book_collection"

def load_xinference_model():
    """Launch the embedding model on the Xinference server.

    POSTs to the Xinference ``/v1/models`` endpoint asking it to launch
    ``EMBEDDING_MODEL_NAME`` of type ``EMBEDDING_MODEL_TYPE``.

    Returns:
        str | None: The ``model_uid`` assigned by Xinference, or ``None``
        if the server is unreachable, returns an error status, or returns
        a payload without a ``model_uid`` key.
    """
    model_url = f"{XINFERENCE_HOST}/v1/models"
    payload = {"model_name": EMBEDDING_MODEL_NAME, "model_type": EMBEDDING_MODEL_TYPE}
    try:
        # Timeout so the script cannot hang forever on an unresponsive server;
        # model loading can be slow, hence the generous value.
        response = requests.post(model_url, json=payload, timeout=60)
        response.raise_for_status()
        model_info = response.json()
        if "model_uid" in model_info:
            return model_info["model_uid"]
        print(f"Failed to load model: {response.text}")
        return None
    except requests.exceptions.RequestException as e:
        print(f"Error connecting to Xinference: {e}")
        return None
    except ValueError as e:
        # response.json() raises ValueError when the body is not valid JSON.
        print(f"Error parsing Xinference response: {e}")
        return None

def generate_embeddings(texts, model_uid):
    """Generate one embedding vector per text via the Xinference API.

    Args:
        texts: Iterable of strings to embed.
        model_uid: UID returned by :func:`load_xinference_model`; if falsy,
            no requests are made and an empty list is returned.

    Returns:
        list: One entry per input text, in order. Each entry is the
        embedding (list of floats) or ``None`` if that text failed.
    """
    if not model_uid:
        print("Xinference model not loaded. Cannot generate embeddings.")
        return []
    embed_url = f"{XINFERENCE_HOST}/v1/embeddings"
    embeddings = []
    for text in texts:
        payload = {"model": model_uid, "input": text}
        try:
            # Per-request timeout so one stuck call cannot stall the whole batch.
            response = requests.post(embed_url, json=payload, timeout=60)
            response.raise_for_status()
            embedding = response.json()["data"][0]["embedding"]
            embeddings.append(embedding)
        except requests.exceptions.RequestException as e:
            print(f"Error generating embedding for '{text[:50]}...': {e}")
            # Keep positional alignment with `texts`; caller filters Nones.
            embeddings.append(None)
        except (KeyError, IndexError) as e:
            print(f"Error parsing embedding response for '{text[:50]}...': {e} - Response: {response.text}")
            embeddings.append(None)
    return embeddings

def scrape_ncbi_book(url):
    """Scrape heading/paragraph content from an NCBI page.

    Walks the direct children of the main content div, tracking the most
    recent heading and emitting one record per non-empty paragraph.

    Args:
        url: Page URL to fetch.

    Returns:
        list[dict]: Records with ``url``, ``title`` (nearest preceding
        heading, may be empty) and whitespace-normalized ``content``.
        Empty list on any fetch or parse error.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'}
    try:
        # Timeout so a stalled connection cannot hang the pipeline.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        data = []

        # NOTE(review): id='content' is a guessed selector — verify against
        # the actual HTML structure of the target page.
        main_content = soup.find('div', id='content')
        if main_content:
            current_title = ""
            # Only direct children are walked; nested headings/paragraphs
            # inside wrapper divs are not visited.
            for tag in main_content.children:
                # NavigableString children have tag.name == None and fall through.
                if tag.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
                    current_title = tag.get_text(strip=True)
                elif tag.name == 'p':
                    text_content = tag.get_text(strip=True)
                    if text_content:
                        data.append({
                            "url": url,
                            "title": current_title,
                            # Collapse internal whitespace runs to single spaces.
                            "content": re.sub(r'\s+', ' ', text_content).strip()
                        })
                # More structured fields could be extracted here if needed.

        return data
    except requests.exceptions.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return []
    except Exception as e:
        # Broad catch is deliberate: any parsing failure degrades to "no data".
        print(f"Error parsing {url}: {e}")
        return []

def create_milvus_collection(collection_name, dim=384):
    """Connect to Milvus and create a collection for the scraped data.

    Args:
        collection_name: Name of the collection to create.
        dim: Dimensionality of the embedding vectors. Defaults to 384,
            which matches bge-small-en-v1.5.

    Returns:
        Collection | None: The created collection, or ``None`` if the
        connection or the creation fails.
    """
    try:
        connections.connect(host=MILVUS_HOST, port=MILVUS_PORT)
        print(f"Connected to Milvus at {MILVUS_HOST}:{MILVUS_PORT}")
    except Exception as e:
        print(f"Failed to connect to Milvus: {e}")
        return None

    fields = [
        # IDs are supplied by the caller (no auto_id).
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, description="Unique identifier for each document/chunk"),
        FieldSchema(name="url", dtype=DataType.VARCHAR, max_length=2048, description="URL of the source page or section"),
        FieldSchema(name="title", dtype=DataType.VARCHAR, max_length=512, description="Title of the section or document"),
        FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=65535, description="Text content of the section"),
        FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=dim, description="Vector embedding of the content")
    ]
    schema = CollectionSchema(fields=fields, description="Schema for crawled NCBI book data")

    try:
        collection = Collection(name=collection_name, schema=schema)
        print(f"Created collection '{collection_name}'")
        return collection
    except Exception as e:
        print(f"Error creating collection '{collection_name}': {e}")
        return None

def insert_data_to_milvus(collection, data, embeddings):
    """Insert scraped rows and their embeddings into a Milvus collection.

    Rows whose embedding failed to generate (``None``) are dropped with a
    warning; the remaining valid rows are still inserted, instead of
    aborting the whole batch.

    Args:
        collection: Target pymilvus ``Collection``.
        data: List of dicts with ``url``/``title``/``content`` keys.
        embeddings: Vectors aligned 1:1 with ``data``; entries may be
            ``None`` for texts whose embedding generation failed.

    Returns:
        None. Errors are reported via ``print``.
    """
    if not collection or not data or not embeddings:
        print("Cannot insert data. Collection or data/embeddings is None.")
        return

    if len(data) != len(embeddings):
        # A mismatch means pairing by position would be wrong — bail out.
        print(f"Warning: data ({len(data)}) and embeddings ({len(embeddings)}) counts differ. Skipping insertion.")
        return

    # Pair rows with their embeddings and keep only the valid pairs so every
    # column list stays aligned.
    rows = [(item, emb) for item, emb in zip(data, embeddings) if emb is not None]
    dropped = len(data) - len(rows)
    if dropped:
        print(f"Warning: dropping {dropped} row(s) without a valid embedding.")
    if not rows:
        print("No rows with valid embeddings to insert.")
        return

    ids = list(range(1, len(rows) + 1))
    urls = [item['url'] for item, _ in rows]
    titles = [item['title'] for item, _ in rows]
    contents = [item['content'] for item, _ in rows]
    vectors = [emb for _, emb in rows]

    try:
        collection.insert([ids, urls, titles, contents, vectors])
        # Flush so num_entities reflects the rows just inserted.
        collection.flush()
        print(f"Inserted {collection.num_entities} entities into '{collection.name}'.")
        collection.create_index("embedding", {"metric_type": "COSINE", "index_type": "IVF_FLAT", "params": {"nlist": 1024}})
        collection.load()
        print(f"Collection '{collection.name}' loaded.")
    except Exception as e:
        print(f"Error inserting data or creating index: {e}")

def main():
    """Run the pipeline: scrape the target page, embed, store in Milvus."""
    # Launch the embedding model on Xinference (None if the server is down).
    model_uid = load_xinference_model()

    # Small courtesy delay before hitting the remote site.
    time.sleep(1)
    scraped_data = scrape_ncbi_book(TARGET_URL)

    if not scraped_data:
        print("No data scraped.")
        return

    # Filter the item list ONCE and use it for both embedding and insertion,
    # so rows and embeddings stay 1:1 aligned (previously the unfiltered
    # list was inserted while only filtered texts were embedded).
    items = [item for item in scraped_data if item['content']]
    if not items:
        print("No text content found to embed.")
        return

    embeddings = generate_embeddings([item['content'] for item in items], model_uid)
    if not embeddings:
        print("No embeddings generated.")
        return

    ncbi_collection = create_milvus_collection(NEW_COLLECTION_NAME)
    if not ncbi_collection:
        print(f"Failed to create Milvus collection: {NEW_COLLECTION_NAME}")
        return

    insert_data_to_milvus(ncbi_collection, items, embeddings)


if __name__ == "__main__":
    main()
    # Final status message is printed unconditionally, matching the
    # original script's behavior.
    print("\n完成处理。新的数据已存入 Milvus 集合:", NEW_COLLECTION_NAME)