# -*- coding: utf-8 -*-
# time: 2025/3/27 16:56
# file: chromadb_ollama_ARG.py
# author: hanson
"""
通过脑筋急转弯的json数据 RAG + qwen2.5:1.5b , 最好使用 qwen2.5:7b
"""
import json
import pandas as pd
from sentence_transformers import SentenceTransformer
import chromadb
from transformers import AutoModelForCausalLM, AutoTokenizer


# ----------------------
# 1. 加载数据集并预处理（适配JSON数组格式）
# ----------------------
def load_data(file_path):
    """Read a JSON-array dataset from *file_path* and return a DataFrame.

    The file must contain a top-level JSON array of objects, e.g.
    ``[{"instruction": "...", "input": "...", "output": "..."}]``;
    each object becomes one row.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        records = json.load(fh)  # parse the whole array at once
    return pd.DataFrame(records)


# Expected record shape: {"instruction": "...", "input": "...", "output": "..."}
df = load_data(r"C:\Users\Administrator\Downloads\thinking.json")  # point this at your own dataset

# Retrieval text: the instruction followed by the (possibly empty) input,
# joined with a single space.
df["query_text"] = df["instruction"] + " " + df["input"]

# ----------------------
# 2. Build the vector store (Chroma)
# ----------------------
# Embedding model that maps retrieval text to dense vectors
# (multilingual, so it handles the Chinese riddle text).
embedding_model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")

# Persistent client keeps the index on disk between runs.
chroma_client = chromadb.PersistentClient(path="rag_chroma_db")
# FIX: create_collection() raises on the second run because the collection
# already exists in the persistent store; get_or_create_collection() is
# idempotent and returns the existing collection instead.
collection = chroma_client.get_or_create_collection(name="rag_demo")

# One document per QA pair; the answer rides along as metadata.
documents = df["query_text"].tolist()
metadata = [{"答案": output} for output in df["output"].tolist()]
ids = [f"id_{i}" for i in range(len(documents))]

# Encode once in a batch, then write to the store.
embeddings = embedding_model.encode(documents).tolist()
# FIX: upsert() instead of add() so re-running the script refreshes the
# existing ids rather than erroring/warning on duplicate ids.
collection.upsert(
    embeddings=embeddings,
    documents=documents,
    metadatas=metadata,
    ids=ids
)

# ----------------------
# 3. Retrieval-augmented generation (RAG)
# ----------------------
# Load the local Qwen2.5-1.5B-Instruct checkpoint and its tokenizer.
# NOTE(review): the path is machine-specific; device_map="auto" asks
# transformers to place the weights automatically (GPU if one is
# available — requires the `accelerate` package, verify it is installed).
model_name = r"E:\soft\model\qwen\Qwen\Qwen2___5-1___5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")



# ----------------------
# 4. 测试案例
# ----------------------


