import os

from llama_index.core import SimpleDirectoryReader,VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from nltk.corpus.reader import documents
from sentence_transformers import SentenceTransformer
from qdrant_client import QdrantClient
from qdrant_client.models import Distance,VectorParams
from unstructured.partition.md import partition_md
from unstructured.staging.base import elements_to_json
import logging
import uuid
from transformers import AutoTokenizer, AutoModelForCausalLM


logging.basicConfig(level=logging.DEBUG)

# --- Configuration ---
MD_DIR = "./data"
COLLECTION_NAME = "md_docs"
QDRANT_URL = "http://localhost:6333"

# --- Initialize the local embedding model ---
try:
    model = SentenceTransformer(r"D:\self\python\AIModel\all-MiniLM-L6-v2", device="cpu")
    print("✅ 模型加载成功")
    print(f"模型信息：{model}")
    print(f"向量维度：{model.get_sentence_embedding_dimension()}")
except Exception as e:
    print(f"❌ 模型加载失败：{str(e)}")
    print("请检查：1. 路径是否正确 2. 模型文件是否完整")
    exit()

# --- Load the local Qwen chat model used for answer generation ---
tokenizer = AutoTokenizer.from_pretrained(r"D:\self\python\AIModel\Qwen2.5-0.5B-Instruct")
qwen_model = AutoModelForCausalLM.from_pretrained(r"D:\self\python\AIModel\Qwen2.5-0.5B-Instruct")

# --- Connect to Qdrant ---
try:
    # Consistency fix: use the QDRANT_URL constant instead of a duplicated literal.
    client = QdrantClient(url=QDRANT_URL, timeout=30)
    print("服务状态:", client.get_collections())
except Exception as e:
    print("连接失败:", str(e))
    print("请检查：1. 服务是否运行 2. 防火墙设置 3. 端口是否正确")
    # BUG FIX: the original fell through here, so the code below then hit an
    # undefined `client` and died with a NameError instead of this clear
    # message. Exit explicitly, matching the model-loading branch above.
    exit()

# --- Create the collection on first run ---
if not client.collection_exists(collection_name=COLLECTION_NAME):
    # Vector size must match the embedding model's output dimension.
    size = model.get_sentence_embedding_dimension()
    print(f"{size}")
    client.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=size, distance=Distance.COSINE),
        timeout=300,
    )

def generate_uuid(filename: str, idx: int) -> str:
    """Return a deterministic version-5 UUID for chunk *idx* of *filename*.

    Deterministic ids let re-indexing the same file update existing Qdrant
    points instead of duplicating them.

    BUG FIX: the original built the UUID name from a hard-coded placeholder
    instead of *filename*, so chunks with the same index in different files
    collided on the same id and silently overwrote each other on upsert.
    """
    return str(uuid.uuid5(uuid.NAMESPACE_URL, f"{filename}_{idx}"))

def process_md_files():
    """Chunk every ``.md`` file under ``MD_DIR``, embed the chunks, and
    upsert them into the Qdrant collection.

    Point ids are deterministic UUIDv5 values derived from the file name and
    chunk index (see ``generate_uuid``), so re-running this function updates
    existing points rather than duplicating them.
    """
    for filename in os.listdir(MD_DIR):
        if not filename.endswith(".md"):
            continue
        filepath = os.path.join(MD_DIR, filename)
        with open(filepath, "r", encoding="utf-8") as file:
            content = file.read()

        # Smart chunking via unstructured. Robustness: drop empty or
        # whitespace-only elements so blank strings are never embedded.
        elements = partition_md(text=content)
        chunks = [el.text for el in elements if el.text and el.text.strip()]
        if not chunks:
            # Nothing to index for this file (e.g. an empty document);
            # skip rather than upsert an empty point list.
            continue

        # Embed all chunks in one batch.
        embeddings = model.encode(chunks, show_progress_bar=True)

        # Assemble the Qdrant points.
        records = []
        for i, (chunk, embedding) in enumerate(zip(chunks, embeddings)):
            records.append({
                "id": generate_uuid(filename, i),
                "vector": embedding.tolist(),
                "payload": {
                    "text": chunk,
                    "source": filepath,
                    "chunk_index": i,
                    "last_modified": os.path.getmtime(filepath),  # staleness timestamp
                },
            })

        # Bulk upload.
        client.upsert(
            collection_name=COLLECTION_NAME,
            points=records,
        )

def query_data(query_text):
    """Embed *query_text*, fetch the top-3 nearest chunks from Qdrant,
    print each hit for debugging, and return the hit texts joined into a
    single context string for the LLM."""
    vector = model.encode(query_text).tolist()

    hits = client.search(
        collection_name=COLLECTION_NAME,
        query_vector=vector,
        limit=3,
    )

    # Debug dump of every retrieved chunk.
    for hit in hits:
        print(f"Score: {hit.score:.4f}")
        print(f"Source: {hit.payload['source']}")
        print(f"Content: {hit.payload['text'][:200]}...\n")
        print(f"hit:{hit.dict()}..\n")

    # Number the hits and assemble the context block.
    numbered = [f"文档 {i + 1}: {hit.payload['text']}" for i, hit in enumerate(hits)]
    return "\n".join(numbered)

def query_llm(query, context):
    """Answer *query* with the local Qwen model, grounded in *context*.

    Returns only the newly generated answer text — the prompt tokens are
    sliced off before decoding.
    """
    # ChatML prompt. BUG FIX: the original f-string embedded the source
    # code's indentation into the template, putting stray spaces around the
    # <|im_start|>/<|im_end|> markers and corrupting the chat format.
    prompt = (
        "<|im_start|>system\n"
        f"你是一个专业助手，请根据以下上下文回答问题：\n{context}<|im_end|>\n"
        "<|im_start|>user\n"
        f"{query}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = qwen_model.generate(
        inputs.input_ids,
        max_new_tokens=500,
        temperature=0.7,
        do_sample=True,
    )

    # BUG FIX: decode only the generated continuation. The original decoded
    # the full sequence with skip_special_tokens=True, which strips
    # <|im_end|> — so the caller's split('<|im_end|>') could never isolate
    # the answer and the entire prompt leaked into the returned text.
    new_tokens = outputs[0][inputs.input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

if __name__ == "__main__":
    # process_md_files()
    # 执行搜索
    query = "微调技巧？"
    response = query_llm(query,query_data(query))
    print(f"问题：{query}\n回答：{response.split('<|im_end|>')[-1].strip()}")