import json
import math
import os
import sys
from typing import List, Dict

from config.LoadConfig import get_config
from config.ServerApp import app

try:
    from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
    from openai import OpenAI
except ImportError:
    # Degrade gracefully when the optional dependencies are absent.
    # IMPORTANT: define *every* name the module references later — the
    # original only nulled four of them, leaving FieldSchema /
    # CollectionSchema / DataType unbound (NameError if ever reached).
    print("Warning: pymilvus or openai not installed, Milvus initialization will be skipped")
    connections = None
    FieldSchema = None
    CollectionSchema = None
    DataType = None
    Collection = None
    utility = None
    OpenAI = None


def check_and_init_milvus():
    """Check and initialize the Milvus category database.

    If the configured collection does not exist or is empty, create it and
    populate it with category data loaded from disk.

    Returns:
        bool: True when the collection is ready (pre-existing with data, or
        freshly created and populated); False when initialization was
        skipped (missing dependencies / configuration) or failed.
    """
    # Bail out early when the optional dependencies could not be imported.
    if not connections or not Collection or not utility or not OpenAI:
        print("Warning: pymilvus or openai not installed, Milvus initialization will be skipped")
        return False

    try:
        # Load configuration sections.
        milvus_conf = get_config("milvus")
        sys_conf = get_config("system")

        collection_name = milvus_conf.get("collection", "categories_zh_v1")
        milvus_host = milvus_conf.get("host", "127.0.0.1")
        milvus_port = int(milvus_conf.get("port", 19530))

        # Prefer embeddings settings from the "embadding_models" section
        # (sic — key spelling follows the config file); fall back to the
        # legacy keys in the "system" section.
        embeddings_conf = get_config("embadding_models")
        embed_base_url = (embeddings_conf.get("embedding_server_url") or sys_conf.get("embeddings_vllm_server_url", "")).rstrip("/")
        embed_api_key = embeddings_conf.get("embedding_api_key") or sys_conf.get("embeddings_api_key", "EMPTY")
        embed_model = embeddings_conf.get("embedding_model_name") or sys_conf.get("embeddings_model_name", "models/bge-large-zh-v1.5")

        if not embed_base_url:
            print("⚠️  embeddings_vllm_server_url not configured, skipping Milvus initialization")
            return False

        # Connect to Milvus.
        try:
            connections.connect("default", host=milvus_host, port=milvus_port)
            print(f"✅ Connected to Milvus at {milvus_host}:{milvus_port}")
        except Exception as e:
            print(f"❌ Failed to connect to Milvus: {e}")
            return False

        # If the collection already holds data there is nothing to do.
        if utility.has_collection(collection_name):
            coll = Collection(collection_name)
            coll.load()
            if coll.num_entities > 0:
                print(f"✅ Milvus collection '{collection_name}' already exists with {coll.num_entities} entities")
                return True
            print(f"⚠️  Milvus collection '{collection_name}' exists but is empty, will populate data")
        else:
            print(f"📝 Milvus collection '{collection_name}' does not exist, will create and populate")

        # Embeddings client (OpenAI-compatible endpoint).
        # embed_api_key already defaults to "EMPTY" above, so the former
        # `embed_api_key or "EMPTY"` re-check was redundant.
        client = OpenAI(api_key=embed_api_key, base_url=embed_base_url + "/v1")

        # Load category data from disk.
        categories_data = load_categories_data()
        if not categories_data:
            app.ctx.logger.error("No categories data found, skipping Milvus initialization")
            return False

        # (Re)create the collection and populate it.
        coll = create_collection(collection_name, categories_data, client, embed_model)
        insert_categories_data(coll, categories_data, client, embed_model)

        print(f"🎉 Successfully initialized Milvus collection '{collection_name}' with {len(categories_data)} categories")
        return True

    except Exception as e:
        print(f"❌ Failed to initialize Milvus: {e}")
        return False


def load_categories_data() -> List[Dict[str, str]]:
    """Load category data from disk.

    Prefers ``categories_aliases.json``; falls back to ``categories.json``
    when the aliases file is absent. Returns an empty list when neither
    file exists.
    """
    aliases_file = "categories_aliases.json"
    categories_file = "categories.json"

    if os.path.exists(aliases_file):
        app.ctx.logger.info(f"Loading categories from {aliases_file}")
        return load_from_aliases_file(aliases_file)

    if os.path.exists(categories_file):
        app.ctx.logger.info(f"Loading categories from {categories_file}")
        return load_from_categories_file(categories_file)

    app.ctx.logger.error(f"Neither {aliases_file} nor {categories_file} found")
    return []


def load_from_aliases_file(file_path: str) -> List[Dict]:
    """Load category items from a categories_aliases.json-style file.

    Each entry becomes a dict with keys "name", "desc", "l1_name" and
    "aliases"; entries with an empty name are skipped. "desc" falls back to
    "description", then "desc", then the stripped name itself.

    Note: the return annotation is List[Dict] (not Dict[str, str]) because
    the "aliases" value is a list, not a string.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    items = []
    for item in data:
        name = (item.get("name") or "").strip()
        if not name:
            # Nameless entries carry no usable category.
            continue
        desc = (item.get("description") or item.get("desc") or name).strip()
        items.append({
            "name": name,
            "desc": desc,
            "l1_name": (item.get("l1_name") or "").strip(),
            "aliases": item.get("aliases", []),
        })
    return items


def load_from_categories_file(file_path: str) -> List[Dict]:
    """Load category items from a categories.json-style two-level tree.

    Child categories are emitted with their parent's name in "l1_name"; a
    top-level category without children becomes a standalone item with an
    empty "l1_name". Entries with empty names are skipped.

    Note: the return annotation is List[Dict] (not Dict[str, str]) because
    the "aliases" value is a list, not a string.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    items = []
    for l1 in data:
        l1_name = (l1.get("name") or "").strip()
        if not l1_name:
            continue

        children = l1.get("children", []) or []
        if not children:
            # Childless top-level category becomes its own leaf item.
            items.append({"name": l1_name, "desc": l1_name, "l1_name": "", "aliases": []})
            continue

        for child in children:
            child_name = (child.get("name") or "").strip()
            if child_name:
                items.append({
                    "name": child_name,
                    "desc": child_name,
                    "l1_name": l1_name,
                    "aliases": child.get("aliases", []),
                })
    return items


def embed_texts(client: OpenAI, model: str, texts: List[str]) -> List[List[float]]:
    """Request embeddings for *texts* and return them L2-normalized.

    One vector per input text, each scaled to unit length; a zero vector
    is returned unchanged (the norm falls back to 1.0).
    """
    response = client.embeddings.create(input=texts, model=model)

    normalized: List[List[float]] = []
    for record in response.data:
        vec = record.embedding
        length = math.sqrt(sum(component * component for component in vec)) or 1.0
        normalized.append([component / length for component in vec])
    return normalized


def create_collection(collection_name: str, categories_data: List[Dict], client: OpenAI, embed_model: str) -> Collection:
    """Create the Milvus collection used for multi-embedding retrieval.

    Probes the embedding endpoint with the first category's text to
    determine the vector dimension, drops any pre-existing collection of
    the same name (callers only reach here when it is missing or empty),
    then creates the schema and an HNSW/COSINE index per vector field.
    """
    # Probe the embedding dimension with one representative text.
    probe_text = categories_data[0].get("name", "").strip() or categories_data[0].get("desc", "")
    dim = len(embed_texts(client, embed_model, [probe_text])[0])

    # Drop the stale (empty) collection so the new schema applies cleanly.
    if utility.has_collection(collection_name):
        utility.drop_collection(collection_name)

    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
        FieldSchema(name="l1_name", dtype=DataType.VARCHAR, max_length=128),
        FieldSchema(name="name", dtype=DataType.VARCHAR, max_length=128),
        FieldSchema(name="desc", dtype=DataType.VARCHAR, max_length=256),
        FieldSchema(name="vector_name", dtype=DataType.FLOAT_VECTOR, dim=dim),
        FieldSchema(name="vector_aliases", dtype=DataType.FLOAT_VECTOR, dim=dim),
        FieldSchema(name="vector_desc", dtype=DataType.FLOAT_VECTOR, dim=dim),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim),
    ]

    schema = CollectionSchema(fields, description="Chinese categories for retrieval (multi-embedding)")
    coll = Collection(name=collection_name, schema=schema)

    # One HNSW/COSINE index per vector field. Index creation remains
    # best-effort, but failures are now reported instead of silently
    # swallowed (previously a bare `except Exception: pass`).
    index_params = {
        "index_type": "HNSW",
        "metric_type": "COSINE",
        "params": {"M": 16, "efConstruction": 200},
    }
    for field_name in ("vector_name", "vector_aliases", "vector_desc", "vector"):
        try:
            coll.create_index(field_name=field_name, index_params=index_params)
        except Exception as e:
            print(f"⚠️  Failed to create index on '{field_name}': {e}")

    return coll


def insert_categories_data(coll: Collection, categories_data: List[Dict], client: OpenAI, embed_model: str):
    """Embed category texts and insert them into the Milvus collection.

    For each category, three embeddings are generated (name, joined
    aliases, description) plus a fused vector: an L2-normalized weighted
    sum of the three. Rows are inserted in batches, then the collection is
    flushed and loaded, and the total insert count is logged.
    """
    # Prepare the three text views of every category.
    names_texts = [(item.get("name") or "").strip() for item in categories_data]
    aliases_texts = [join_aliases(item) for item in categories_data]
    desc_texts = [(item.get("desc") or "").strip() for item in categories_data]

    # Generate the per-view embeddings.
    vec_name = embed_texts(client, embed_model, names_texts)
    vec_aliases = embed_texts(client, embed_model, aliases_texts)
    vec_desc = embed_texts(client, embed_model, desc_texts)

    # Fusion weights (name dominates, then aliases, then description) —
    # hoisted out of the loop; they were previously re-bound per iteration.
    w_name, w_alias, w_desc = 0.5, 0.3, 0.2
    vectors = []
    for vn, va, vd in zip(vec_name, vec_aliases, vec_desc):
        fused = [w_name * a + w_alias * b + w_desc * c for a, b, c in zip(vn, va, vd)]
        norm = math.sqrt(sum(x * x for x in fused)) or 1.0
        vectors.append([x / norm for x in fused])

    # Assemble the rows for insertion.
    rows = [{
        "l1_name": categories_data[i].get("l1_name", ""),
        "name": names_texts[i],
        "desc": desc_texts[i],
        "vector_name": vec_name[i],
        "vector_aliases": vec_aliases[i],
        "vector_desc": vec_desc[i],
        "vector": vectors[i],
    } for i in range(len(categories_data))]

    # Insert in batches to keep individual requests bounded.
    batch_size = 1000
    total = 0
    for start in range(0, len(rows), batch_size):
        result = coll.insert(rows[start:start + batch_size])
        total += result.insert_count

    coll.flush()
    coll.load()

    app.ctx.logger.info(f"Inserted {total} categories into Milvus collection")


def join_aliases(item: Dict) -> str:
    """Join an item's aliases into a single "、"-separated string.

    Returns "" when *item* is not a dict, has no non-empty "aliases" list,
    or when joining fails for any reason.
    """
    aliases = item.get("aliases") if isinstance(item, dict) else None
    if not isinstance(aliases, list) or not aliases:
        return ""
    try:
        parts = [str(alias).strip() for alias in aliases if str(alias).strip()]
        return "、".join(parts)
    except Exception:
        return ""
