from tqdm import tqdm
from typing import List
from transformers import AutoTokenizer
import requests
import os
import numpy as np
import chromadb

# ---- Configuration: service endpoint, data locations, chunking parameters ----
EMBEDDING_URL="http://0.0.0.0:10001/v1/embeddings"  # OpenAI-compatible /v1/embeddings endpoint
MODEL_NAME="chat"  # model name sent in the embedding request payload
DATA_DIR="/data/dataset/Chinese_Law"  # directory holding the .txt law files
CHROMA_DIR="./embed_database"  # on-disk location for the persistent ChromaDB store
MAX_TOKENS = 768  # tokens per chunk window
STRIDE = 512  # step between window starts; < MAX_TOKENS, so consecutive chunks overlap
TOKENIZER_PATH="/data/model/Qwen3-Embedding-0.6B"  # local HF tokenizer used only for chunking

# ========== Initialize the ChromaDB client ==========
client = chromadb.PersistentClient(path=CHROMA_DIR)
collection = client.get_or_create_collection(name="law_rag")


# ========== Step 1: load all .txt law files ==========
def read_laws(data_dir: str):
    """Read every non-empty line of each ``.txt`` file under *data_dir*.

    Files are visited in sorted filename order so that downstream document
    IDs (built from a running counter) are reproducible across runs —
    ``os.listdir`` order is otherwise filesystem-dependent.

    Args:
        data_dir: Directory containing the law text files.

    Returns:
        List of ``{"file": <filename>, "content": <stripped line>}`` dicts.
    """
    laws = []
    # sorted() fixes the nondeterministic ordering of os.listdir().
    for fname in sorted(os.listdir(data_dir)):
        if fname.endswith(".txt"):
            path = os.path.join(data_dir, fname)
            with open(path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:  # skip blank lines
                        laws.append({"file": fname, "content": line})
    return laws


# ========== Step 2: sliding-window chunking ==========
def chunk_text(text: str, max_tokens: int, stride: int, tokenizer: "AutoTokenizer") -> List[str]:
    """Split *text* into overlapping token windows and decode each to a string.

    Args:
        text: Raw text to split.
        max_tokens: Window size in tokens.
        stride: Step between window starts; ``stride < max_tokens`` yields
            overlapping chunks.
        tokenizer: Any object with HuggingFace-style ``encode``/``decode``
            (annotation is quoted so the heavy import is not required at
            definition time).

    Returns:
        Non-empty decoded chunk strings covering the whole token sequence.
    """
    tokens = tokenizer.encode(text, add_special_tokens=False)
    chunks: List[str] = []
    for start in range(0, len(tokens), stride):
        # range() guarantees start < len(tokens), so the window is never empty
        # (the original's `if len(window) == 0` guard was unreachable).
        window = tokens[start : start + max_tokens]
        # Renamed from `chunk_text`, which shadowed the function's own name.
        decoded = tokenizer.decode(window, skip_special_tokens=True)
        if decoded.strip():  # drop chunks that decode to pure whitespace
            chunks.append(decoded)
        if start + max_tokens >= len(tokens):
            break  # this window reached the end; avoid duplicate tail chunks
    return chunks


# ========== Step 3: fetch embedding vector ==========
def get_embedding(text: str, embedding_url=None, model_name=None, timeout: float = 30.0):
    """Fetch an embedding for *text* from an OpenAI-compatible endpoint.

    Args:
        text: Input text to embed.
        embedding_url: Endpoint URL; defaults to ``EMBEDDING_URL``.
        model_name: Model identifier; defaults to ``MODEL_NAME``.
        timeout: Request timeout in seconds. The original had none, so a hung
            server would block the whole build forever.

    Returns:
        1-D ``np.float32`` array with the embedding vector.

    Raises:
        requests.HTTPError: On non-2xx responses — previously an HTTP error
            surfaced as a confusing ``KeyError`` on ``data["data"]``.
        requests.Timeout: If the server does not respond within *timeout*.
    """
    embedding_url = embedding_url or EMBEDDING_URL
    model_name = model_name or MODEL_NAME
    response = requests.post(
        embedding_url,
        json={"input": text, "model": model_name},
        timeout=timeout,
    )
    response.raise_for_status()  # fail fast with a clear error
    data = response.json()
    return np.array(data["data"][0]["embedding"], dtype=np.float32)


# ========== Step 4: build the vector database ==========
def build_chroma_db():
    """Chunk every law line, embed each chunk, and persist all of it to ChromaDB.

    Rows are buffered and bulk-inserted in batches of ``max_batch_size`` to
    keep ``collection.add`` calls large and infrequent.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        TOKENIZER_PATH,
        trust_remote_code=True  # required for Qwen tokenizers
    )
    laws = read_laws(DATA_DIR)

    print(f"📚 开始处理 {len(laws)} 条法律条文...")
    batch_docs, batch_metas, batch_ids, batch_vecs = [], [], [], []
    counter = 0
    max_batch_size = 2000  # flush threshold for one collection.add call

    def _flush_batch():
        """Bulk-insert the buffered rows into ChromaDB, then reset the buffers."""
        nonlocal batch_docs, batch_metas, batch_ids, batch_vecs, counter
        if not batch_docs:
            return
        try:
            collection.add(
                documents=batch_docs,
                embeddings=batch_vecs,
                metadatas=batch_metas,
                ids=batch_ids
            )
            print(f"  ➕ 已插入 {len(batch_docs)} 条，累计 {counter} 条")
        except Exception as e:
            # Best-effort: report the failure and drop this batch (a retry
            # could be added here).
            print(f"❌ 批量插入失败: {e}")
        # Rebind fresh lists rather than clearing in place.
        batch_docs, batch_metas, batch_ids, batch_vecs = [], [], [], []

    for item in tqdm(laws):
        try:
            pieces = chunk_text(item["content"], MAX_TOKENS, STRIDE, tokenizer)
        except Exception as e:
            print(f"❌ 分块失败: {e}")
            continue
        for piece in pieces:
            # Embed first, so a failed request leaves the buffers consistent.
            vec = get_embedding(piece)
            batch_docs.append(piece)
            batch_metas.append({"file": item["file"]})
            batch_ids.append(f"id_{counter}")
            batch_vecs.append(vec)
            counter += 1
            if len(batch_docs) >= max_batch_size:
                _flush_batch()

    _flush_batch()  # insert whatever is left in the buffers
    print(f"数据已存入 ChromaDB: 共 {counter} 条")


if __name__ == "__main__":
    # Build the vector store only on first run; afterwards the persisted
    # collection is reused as-is.
    if collection.count() > 0:
        print("ChromaDB 已存在，跳过构建步骤。")
    else:
        build_chroma_db()
        print("ChromaDB 构建完成！")