#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pymilvus import MilvusClient
import pandas as pd
from tqdm import tqdm
import logging
import os
import sys
import requests
from typing import List, Optional
import numpy as np
from dotenv import load_dotenv
load_dotenv()

# Add the parent directory to sys.path so that `utils` can be imported
# when this script is run directly from its own folder.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.embedding_config import EmbeddingProvider, EmbeddingConfig

# Configure root logging for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Xinference embedding client
class XinferenceEmbeddings:
    """Embed texts via an Xinference server's OpenAI-compatible /v1/embeddings API.

    Any request or parse failure is converted into zero-vector fallbacks so a
    batch ingestion pipeline can continue instead of aborting mid-run.
    """

    # Dimension used for zero-vector fallbacks when the server response is
    # missing or malformed. NOTE(review): assumes a 1024-d model such as the
    # bge-large family — confirm if another model is configured.
    FALLBACK_DIM = 1024

    def __init__(self, model_name: str, xinference_url: Optional[str] = None,
                 timeout: float = 60.0):
        """Initialize the Xinference embedding handler.

        Args:
            model_name: Embedding model name; used directly as the model id.
            xinference_url: Base URL of the Xinference server. If None, falls
                back to the XINFERENCE_SERVER_URL environment variable, then a
                hard-coded default.
            timeout: Per-request timeout in seconds (new keyword, defaulted for
                backward compatibility).
        """
        self.model_name = model_name
        # The server addresses models directly by their name.
        self.model_id = model_name
        self.timeout = timeout

        # Resolve the server URL: explicit argument > environment > default.
        if xinference_url is None:
            self.base_url = os.environ.get("XINFERENCE_SERVER_URL", "http://10.128.10.186:9997")
        else:
            self.base_url = xinference_url

        # Normalize so endpoint paths can be appended directly.
        if not self.base_url.endswith('/'):
            self.base_url += '/'

        logging.info(f"初始化Xinference嵌入，模型: {model_name}, 使用ID: {self.model_id}, 服务URL: {self.base_url}")

    def __call__(self, texts: List[str]) -> List[List[float]]:
        """Vectorize a list of texts; compatible with embedding-function interfaces."""
        return self.embed_documents(texts)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed multiple documents, returning one vector per input text.

        On any failure, logs the error and returns FALLBACK_DIM-sized zero
        vectors (one per input) rather than raising to the caller.
        """
        if not self.model_id:
            raise ValueError("模型ID未初始化")

        try:
            endpoint = f"{self.base_url}v1/embeddings"
            payload = {
                "model": self.model_id,
                "input": texts
            }

            # BUG FIX: the original request had no timeout and could hang
            # indefinitely on an unresponsive server.
            response = requests.post(endpoint, json=payload, timeout=self.timeout)
            response.raise_for_status()
            result = response.json()

            # Extract one embedding per response item.
            if isinstance(result, dict) and "data" in result and len(result["data"]) > 0:
                embeddings = []
                for item in result["data"]:
                    if isinstance(item, dict) and "embedding" in item:
                        embeddings.append(item["embedding"])
                    else:
                        logging.warning(f"嵌入结果项格式不正确: {item}")
                        # Substitute a zero vector so batch alignment is kept.
                        embeddings.append([0.0] * self.FALLBACK_DIM)

                if embeddings:
                    return embeddings
                raise ValueError("未能从响应中提取任何有效的嵌入向量")

            logging.error(f"嵌入结果中没有有效数据: {result}")
            raise ValueError(f"嵌入结果中没有有效数据: {result}")

        except Exception as e:
            logging.error(f"Xinference嵌入处理失败: {str(e)}")
            # Best-effort fallback: zero vectors of the expected size so the
            # caller's ingestion loop can continue.
            if len(texts) > 0:
                return [[0.0] * self.FALLBACK_DIM for _ in range(len(texts))]
            return []

def main():
    """Build a local Milvus Lite database of SNOMED-CT concepts.

    Reads data/SNOMED_5000.csv, embeds each concept name via an Xinference
    bge-large model, and inserts the vectors plus concept metadata into the
    `concepts_only_name` collection, then runs one sanity-check search.
    """
    # Embedding backend: Xinference server hosting a bge-large model.
    xinference_url = os.environ.get("XINFERENCE_SERVER_URL", "http://10.128.10.186:9997")
    logging.info(f"使用Xinference服务URL: {xinference_url}")
    embedding_function = XinferenceEmbeddings(model_name="bge-large", xinference_url=xinference_url)

    # Paths relative to the project root (parent of this script's directory).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir = os.path.dirname(current_dir)
    file_path = os.path.join(parent_dir, "data", "SNOMED_5000.csv")
    db_path = os.path.join(parent_dir, "db", "snomed_bge_m3.db")

    logging.info(f"使用数据文件: {file_path}")
    logging.info(f"创建数据库: {db_path}")

    # Abort early if the input CSV is missing.
    if not os.path.exists(file_path):
        logging.error(f"数据文件不存在: {file_path}")
        sys.exit(1)

    # Connect to Milvus Lite (file-backed database).
    try:
        client = MilvusClient(db_path)
    except Exception as e:
        logging.error(f"连接到Milvus数据库失败: {str(e)}")
        # If the database file exists but is corrupt, delete it and retry once.
        if os.path.exists(db_path):
            try:
                os.remove(db_path)
                logging.info(f"已删除损坏的数据库文件: {db_path}")
                client = MilvusClient(db_path)
            except Exception as e2:
                logging.error(f"删除并重新创建数据库失败: {str(e2)}")
                sys.exit(1)
        else:
            # BUG FIX: the original fell through here with `client` unbound,
            # raising NameError on the next use; abort explicitly instead.
            sys.exit(1)

    collection_name = "concepts_only_name"

    # Load the concept table; everything is kept as strings, NaN -> "NA".
    logging.info("Loading data from CSV")
    df = pd.read_csv(file_path,
                     dtype=str,
                     low_memory=False,
                     ).fillna("NA")

    # Probe the embedding dimension with a sample document.
    sample_doc = "Sample Text"
    sample_embedding = embedding_function([sample_doc])[0]
    vector_dim = len(sample_embedding)
    logging.info(f"向量维度: {vector_dim}")

    # Build the collection schema (imported lazily; pymilvus is already loaded).
    from pymilvus import DataType, FieldSchema, CollectionSchema

    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),  # embedding vector
        FieldSchema(name="concept_id", dtype=DataType.VARCHAR, max_length=50),
        FieldSchema(name="concept_name", dtype=DataType.VARCHAR, max_length=200),
        FieldSchema(name="domain_id", dtype=DataType.VARCHAR, max_length=20),
        FieldSchema(name="vocabulary_id", dtype=DataType.VARCHAR, max_length=20),
        FieldSchema(name="concept_class_id", dtype=DataType.VARCHAR, max_length=20),
        FieldSchema(name="standard_concept", dtype=DataType.VARCHAR, max_length=1),
        FieldSchema(name="concept_code", dtype=DataType.VARCHAR, max_length=50),
        FieldSchema(name="valid_start_date", dtype=DataType.VARCHAR, max_length=10),
        FieldSchema(name="valid_end_date", dtype=DataType.VARCHAR, max_length=10),
        FieldSchema(name="input_file", dtype=DataType.VARCHAR, max_length=500),
    ]
    schema = CollectionSchema(fields,
                              "SNOMED-CT Concepts",
                              enable_dynamic_field=True)

    # Drop any pre-existing collection so the build starts clean.
    try:
        if client.has_collection(collection_name):
            client.drop_collection(collection_name)
            logging.info(f"Dropped existing collection: {collection_name}")
    except Exception as e:
        logging.warning(f"尝试删除现有集合时出错: {str(e)}")

    # Create the collection.
    client.create_collection(
        collection_name=collection_name,
        schema=schema,
    )
    logging.info(f"Created new collection: {collection_name}")

    # Create a vector index on the embedding field.
    index_params = client.prepare_index_params()
    index_params.add_index(
        field_name="vector",        # index the embedding field
        index_type="AUTOINDEX",     # let Milvus pick the best index type
        metric_type="COSINE",       # cosine similarity as the distance metric
        params={"nlist": 1024}      # cluster count: higher = more accurate, slower
    )

    client.create_index(
        collection_name=collection_name,
        index_params=index_params
    )
    logging.info(f"Created index for collection: {collection_name}")

    # Insert in small batches to keep memory usage modest.
    batch_size = 256
    total_processed = 0

    for start_idx in tqdm(range(0, len(df), batch_size), desc="Processing batches"):
        end_idx = min(start_idx + batch_size, len(df))
        batch_df = df.iloc[start_idx:end_idx]
        batch_no = start_idx // batch_size + 1

        # Documents to embed: the concept name only.
        docs = batch_df['concept_name'].tolist()

        # Generate embeddings; skip the batch (not the run) on failure.
        try:
            embeddings = embedding_function(docs)
            logging.info(f"Generated embeddings for batch {batch_no}")
        except Exception as e:
            logging.error(f"Error generating embeddings for batch {batch_no}: {e}")
            continue

        # Pair each row with its embedding to build insertable records.
        data = [
            {
                "vector": embeddings[idx],
                "concept_id": str(row['concept_id']),
                "concept_name": str(row['concept_name']),
                "domain_id": str(row['domain_id']),
                "vocabulary_id": str(row['vocabulary_id']),
                "concept_class_id": str(row['concept_class_id']),
                "standard_concept": str(row['standard_concept']),
                "concept_code": str(row['concept_code']),
                "valid_start_date": str(row['valid_start_date']),
                "valid_end_date": str(row['valid_end_date']),
                "input_file": file_path
            } for idx, (_, row) in enumerate(batch_df.iterrows())
        ]

        # Insert the batch; failures are logged but do not stop the run.
        try:
            res = client.insert(
                collection_name=collection_name,
                data=data
            )
            total_processed += len(data)
            logging.info(f"Inserted batch {batch_no}, inserted {len(data)} records, total processed: {total_processed}")
        except Exception as e:
            logging.error(f"Error inserting batch {batch_no}: {e}")

    logging.info("Insert process completed.")
    logging.info(f"Total records inserted: {total_processed} out of {len(df)}")

    # Smoke-test the finished database with one sample query.
    try:
        query = "SOB"
        logging.info(f"Testing search with query: '{query}'")
        query_embeddings = embedding_function([query])

        # Top-5 hits by cosine similarity.
        search_result = client.search(
            collection_name=collection_name,
            data=[query_embeddings[0]],
            limit=5,
            output_fields=["concept_name", "concept_class_id"]
        )
        logging.info(f"Search result for '{query}':")
        for item in search_result[0]:
            logging.info(f"- {item['entity']['concept_name']} ({item['entity']['concept_class_id']}): {item['distance']}")

        logging.info("数据库创建完成并已测试，可以正常使用了！")
    except Exception as e:
        logging.error(f"测试查询时出错: {e}")
        logging.info("数据库创建完成，但测试查询失败，请检查日志。")
# Script entry point: build the database when executed directly.
if __name__ == "__main__":
    main()