#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
准备企业知识库到 Milvus 的一体化脚本
- 生成模拟企业文档（公司政策、技术文档、FAQ、产品文档）
- 文本分块（适配向量检索）
- 生成向量嵌入（复用现有 OpenAIEmbeddings）
- 创建/写入 Milvus 新集合：knowledge_base
- 输出统计报告到 knowledge_base/knowledge_stats.json

运行:
  python knowledge_base/prepare_knowledge_base.py
需要环境变量:
  OPENAI_API_KEY, OPENAI_BASE_URL, OPENAI_EMBEDDING_MODEL
  MILVUS_HOST, MILVUS_PORT
"""

import os
import json
import math
import time
import uuid
import logging
from typing import List, Dict, Any

import dotenv
from langchain_community.embeddings import OpenAIEmbeddings
from pymilvus import MilvusClient, DataType, FieldSchema, CollectionSchema

# Load environment variables from a local .env file, if one is present.
dotenv.load_dotenv()

# Logging: timestamped INFO-level logger for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Connection/configuration values pulled from the environment.
# All of these may be None here; they are validated in ingest_documents().
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL")
OPENAI_EMBEDDING_MODEL = os.getenv("OPENAI_EMBEDDING_MODEL")
MILVUS_HOST = os.getenv("MILVUS_HOST")
MILVUS_PORT = os.getenv("MILVUS_PORT")

# Target Milvus collection and the path of the JSON stats report (written
# next to this script).
COLLECTION_NAME = "knowledge_base"
STATS_PATH = os.path.join(os.path.dirname(__file__), "knowledge_stats.json")

# -------------------------
# 1) 准备模拟企业文档
# -------------------------

def build_virtual_documents() -> List[Dict[str, Any]]:
    """Build the simulated enterprise corpus used to seed the knowledge base.

    Returns a list of document dicts, each carrying four keys:
    - category: one of "policy" / "tech" / "faq" / "product"
    - title:    human-readable document title
    - source:   pseudo file path identifying where the doc would live
    - content:  full document text, paragraphs separated by blank lines
    """
    # --- company policies ---
    expense_policy: Dict[str, Any] = {
        "category": "policy",
        "title": "报销流程与规范（2025版）",
        "source": "internal/policies/expense_2025.md",
        "content": (
            "一、总则\n"
            "本制度适用于全体正式员工与签约外包员工。\n\n"
            "二、费用类型\n"
            "1. 差旅费用：交通、住宿、餐饮、城市间交通等；\n"
            "2. 业务招待费：需提供事前审批与事后报备；\n"
            "3. 办公采购：固定资产需走采购流程。\n\n"
            "三、流程\n"
            "1. 费用发生后3个工作日内在系统中提交申请；\n"
            "2. 上传完整发票及佐证材料（电子发票优先）；\n"
            "3. 直属上级审批，超5,000元需部门负责人复核；\n"
            "4. 财务二次审核，10个工作日内完成报销打款。\n\n"
            "四、注意事项\n"
            "1. 出差前须完成出差申请；\n"
            "2. 高铁二等座/机票经济舱为默认标准；\n"
            "3. 紧急情况请在备注说明并补充证明材料。\n"
        ),
    }

    leave_policy: Dict[str, Any] = {
        "category": "policy",
        "title": "请假制度与考勤规定（要点）",
        "source": "internal/policies/leave_attendance.md",
        "content": (
            "一、请假类别\n年假、调休、事假、病假、婚假、产假、护理假等。\n\n"
            "二、流程\n1. 在钉钉发起请假申请；2. 直属上级审批；3. 人事备案。\n\n"
            "三、考勤\n1. 标准工作制：9:30-18:30，午休1小时；\n2. 迟到/早退按照公司制度相应处理；\n3. 出差/外勤需在系统中如实标注。\n"
        ),
    }

    # --- technical documentation ---
    api_guide: Dict[str, Any] = {
        "category": "tech",
        "title": "统一API使用指南（鉴权、限流、错误码）",
        "source": "docs/api/api_guide.md",
        "content": (
            "概述：本指南介绍统一API的鉴权方式、限流策略和通用错误码。\n\n"
            "鉴权：采用Bearer Token；客户端需在Header中携带Authorization: Bearer <token>。\n\n"
            "限流：默认100 QPS/应用；超过阈值返回429；可申请白名单提升配额。\n\n"
            "错误码：\n- 400 参数错误；\n- 401 未授权；\n- 403 权限不足；\n- 429 频率受限；\n- 5xx 服务异常。\n\n"
            "重试策略：建议指数退避，最大重试3次。\n"
        ),
    }

    ops_config: Dict[str, Any] = {
        "category": "tech",
        "title": "系统配置说明（环境变量、日志、连接池）",
        "source": "docs/ops/configuration.md",
        "content": (
            "环境变量：\nOPENAI_API_KEY、OPENAI_BASE_URL、OPENAI_EMBEDDING_MODEL、MILVUS_HOST、MILVUS_PORT等。\n\n"
            "日志：INFO级别默认启用，生产环境建议WARNING以上；\n\n"
            "连接池：数据库连接池最小值5，最大值20；Redis连接池需分场景配置。\n"
        ),
    }

    # --- FAQ ---
    general_faq: Dict[str, Any] = {
        "category": "faq",
        "title": "常见问题与解答（报销/请假/门禁/邮箱）",
        "source": "internal/faq/general.md",
        "content": (
            "Q: 报销多久能到账？\nA: 审批通过后10个工作日内完成打款。\n\n"
            "Q: 我忘记打卡怎么办？\nA: 及时在系统中提交补卡申请，并说明原因。\n\n"
            "Q: 邮箱容量不够？\nA: 联系IT申请扩容或清理历史附件。\n"
        ),
    }

    # --- product documentation ---
    product_guide: Dict[str, Any] = {
        "category": "product",
        "title": "企业知识助手 - 功能说明与使用教程",
        "source": "product/knowledge_assistant/guide.md",
        "content": (
            "功能：\n- 知识问答：基于企业知识库的问答能力；\n- 文档检索：按关键词/语义检索相关片段；\n- 上下文理解：结合查询上下文进行回答。\n\n"
            "使用：\n1. 输入问题；\n2. 系统自动检索并生成答案；\n3. 如未命中，尝试调整关键词或提供上下文。\n"
        ),
    }

    return [expense_policy, leave_policy, api_guide, ops_config, general_faq, product_guide]

# -------------------------
# 2) 文本分块
# -------------------------

def chunk_text(text: str, chunk_size: int = 700, overlap: int = 120) -> List[str]:
    """Split *text* into retrieval-friendly chunks.

    Paragraphs (separated by blank lines) are greedily merged until adding
    another would push the chunk past *chunk_size* characters. When a chunk
    is flushed, its last *overlap* characters are carried over as leading
    context for the next chunk. As a safety net, any chunk still longer than
    chunk_size + 200 (e.g. one huge paragraph) is hard-sliced into
    chunk_size-character pieces.
    """
    pieces: List[str] = []
    current = ""

    for paragraph in (p.strip() for p in text.split("\n\n")):
        if not paragraph:
            continue
        if not current:
            current = paragraph
            continue
        # "+ 2" accounts for the "\n\n" separator that would join them.
        if len(current) + 2 + len(paragraph) <= chunk_size:
            current = f"{current}\n\n{paragraph}"
            continue
        pieces.append(current)
        # Carry trailing context forward, provided there is enough text.
        if 0 < overlap < len(current):
            current = f"{current[-overlap:]}\n\n{paragraph}"
        else:
            current = paragraph
    if current:
        pieces.append(current)

    # Safety net: crude fixed-width slicing of anything still far too long.
    limit = chunk_size + 200
    result: List[str] = []
    for piece in pieces:
        if len(piece) <= limit:
            result.append(piece)
        else:
            result.extend(piece[i:i + chunk_size] for i in range(0, len(piece), chunk_size))
    return result

# -------------------------
# 3) 写入 Milvus
# -------------------------

def ensure_collection(client: MilvusClient, embedding_fn: OpenAIEmbeddings) -> int:
    """Ensure the knowledge_base collection exists; return the vector dim.

    The dimension is discovered by embedding a throwaway probe string, so the
    schema automatically tracks whichever embedding model is configured.
    The IVF_FLAT/COSINE index on "vector" is declared on every run —
    presumably idempotent on the target Milvus version; verify if upgrading.
    """
    dim = len(embedding_fn.embed_query("dimension_probe"))

    schema = CollectionSchema(
        [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="doc_id", dtype=DataType.VARCHAR, max_length=64),
            FieldSchema(name="chunk_id", dtype=DataType.INT64),
            FieldSchema(name="category", dtype=DataType.VARCHAR, max_length=64),
            FieldSchema(name="title", dtype=DataType.VARCHAR, max_length=200),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=256),
            FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=4000),
        ],
        description="Enterprise Knowledge Base",
        enable_dynamic_field=False,
    )

    if client.has_collection(COLLECTION_NAME):
        logger.info(f"集合已存在: {COLLECTION_NAME}")
    else:
        client.create_collection(collection_name=COLLECTION_NAME, schema=schema)
        logger.info(f"创建集合: {COLLECTION_NAME}")

    # Declare the vector index used for similarity search.
    index_params = client.prepare_index_params()
    index_params.add_index(field_name="vector", index_type="IVF_FLAT", metric_type="COSINE", params={"nlist": 1024})
    client.create_index(collection_name=COLLECTION_NAME, index_params=index_params)

    return dim


def ingest_documents() -> Dict[str, Any]:
    """Run the full ingestion pipeline: build, chunk, embed, insert, report.

    Steps: validate environment, initialise the embedding function and the
    Milvus client, ensure the target collection exists, chunk the simulated
    corpus, embed every chunk, insert the records, and write a JSON stats
    report to STATS_PATH.

    Returns the statistics dict (also persisted to disk).
    Raises RuntimeError when any required environment variable is missing.
    """
    # 1) Validate configuration and initialise clients.
    required = (OPENAI_API_KEY, OPENAI_BASE_URL, OPENAI_EMBEDDING_MODEL, MILVUS_HOST, MILVUS_PORT)
    if not all(required):
        raise RuntimeError("缺少必要环境变量：OPENAI_API_KEY/OPENAI_BASE_URL/OPENAI_EMBEDDING_MODEL/MILVUS_HOST/MILVUS_PORT")

    embedding_fn = OpenAIEmbeddings(
        api_key=OPENAI_API_KEY,
        base_url=OPENAI_BASE_URL,
        model=OPENAI_EMBEDDING_MODEL,
    )
    logger.info("嵌入函数初始化成功")

    client = MilvusClient(host=MILVUS_HOST, port=MILVUS_PORT)
    logger.info("Milvus客户端初始化成功")

    vector_dim = ensure_collection(client, embedding_fn)

    # 2) Build the corpus and chunk every document.
    docs = build_virtual_documents()

    stats: Dict[str, Any] = {
        "collection": COLLECTION_NAME,
        "vector_dim": vector_dim,
        "doc_count": len(docs),
        "categories": {},
        "total_chunks": 0,
        "per_doc_chunks": {},
    }

    records: List[Dict[str, Any]] = []
    texts_for_embed: List[str] = []

    for doc in docs:
        doc_id = uuid.uuid4().hex[:16]
        chunks = chunk_text(doc["content"], chunk_size=700, overlap=120)

        stats["per_doc_chunks"][doc["title"]] = len(chunks)
        stats["total_chunks"] += len(chunks)
        stats["categories"][doc["category"]] = stats["categories"].get(doc["category"], 0) + len(chunks)

        for idx, chunk in enumerate(chunks):
            texts_for_embed.append(chunk)
            records.append({
                "doc_id": doc_id,
                "chunk_id": idx,
                "category": doc["category"],
                "title": doc["title"],
                "source": doc["source"],
                # Truncated defensively to stay within the VARCHAR(4000) field.
                "content": chunk[:3990],
            })

    logger.info(f"准备生成嵌入，chunk总数: {len(texts_for_embed)}")

    # 3) Embed every chunk in one batch call.
    started = time.time()
    embeddings = embedding_fn.embed_documents(texts_for_embed)
    elapsed = time.time() - started
    logger.info(f"向量嵌入生成完成: {len(embeddings)} 条，用时 {elapsed:.2f}s")

    # 4) Attach each vector to its record (records and embeddings are aligned).
    for record, embedding in zip(records, embeddings):
        record["vector"] = embedding

    # 5) Insert into Milvus.
    res = client.insert(collection_name=COLLECTION_NAME, data=records)
    logger.info(f"成功插入 {len(records)} 条记录到 {COLLECTION_NAME}")
    logger.info(f"插入结果: {res}")

    # 6) Persist the stats report.
    with open(STATS_PATH, "w", encoding="utf-8") as f:
        json.dump(stats, f, ensure_ascii=False, indent=2)
    logger.info(f"统计报告已保存: {STATS_PATH}")

    return stats


def main():
    """Build the knowledge base and log a summary of the resulting stats."""
    summary = ingest_documents()
    logger.info("知识库构建完成。统计摘要：")
    logger.info(json.dumps(summary, ensure_ascii=False, indent=2))


# Script entry point: run the full ingestion pipeline when executed directly.
if __name__ == "__main__":
    main()

