import os
import re
import json
import torch
import numpy as np
import requests
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection

# -------------------------
# 1) Connect to Milvus (with progress prints)
# -------------------------
def connect_milvus(host: str = "127.0.0.1", port: str = "19530", alias: str = "default"):
    """Open a Milvus connection under *alias*; re-raises on any failure."""
    print(f"[STEP] 1. 连接 Milvus -> {host}:{port} (alias='{alias}') ...")
    try:
        connections.connect(alias=alias, host=host, port=port)
    except Exception as e:
        # Surface the failure to the log, then propagate to the caller.
        print("  ❌ 连接 Milvus 失败：", e)
        raise
    print("  ✅ 已连接到 Milvus")

# -------------------------------------
# 2) Create the collection (optionally drop/reuse an existing one)
# -------------------------------------
def create_collection(collection_name: str, dim: int = 768, drop_if_exists: bool = True):
    """Return a Collection named *collection_name*, creating it if needed.

    If the collection already exists it is dropped and recreated when
    drop_if_exists is True; otherwise the existing one is reused as-is.
    """
    print(f"[STEP] 2. 准备 collection '{collection_name}' (dim={dim}) ...")
    if utility.has_collection(collection_name):
        print(f"  -> collection '{collection_name}' 已存在")
        if not drop_if_exists:
            print("  -> 复用已有集合")
            return Collection(collection_name)
        print(f"  -> drop_if_exists=True，删除旧集合 '{collection_name}' ...")
        utility.drop_collection(collection_name)
        print("  -> 已删除旧集合")

    # Schema: auto-id primary key, the vector field, and the raw chunk text.
    schema = CollectionSchema(
        [
            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
        ],
        description="document chunks collection",
    )
    new_collection = Collection(name=collection_name, schema=schema)
    print(f"  ✅ 创建集合 '{collection_name}' 成功")
    return new_collection

# -------------------------
# 3) Text chunking — fixed-length slices with overlap
# -------------------------
def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 200) -> list:
    """Split *text* into overlapping fixed-size chunks.

    Whitespace runs are collapsed to single spaces first.  Consecutive
    chunks share *overlap* characters; the stride is clamped to >= 1 so a
    misconfigured overlap >= chunk_size cannot loop forever.

    Fix vs. the original: slices that strip down to the empty string
    (possible with small chunk sizes when a slice lands on whitespace)
    are no longer appended as empty chunks.

    Returns a list of non-empty chunk strings (empty list for blank input).
    """
    print(f"[STEP] 3. 开始分片：chunk_size={chunk_size}, overlap={overlap}")
    text = re.sub(r'\s+', ' ', text).strip()
    step = max(1, chunk_size - overlap)
    chunks = []
    for start in range(0, len(text), step):
        piece = text[start:start + chunk_size].strip()
        if piece:  # drop degenerate all-whitespace slices
            chunks.append(piece)

    print(f"  ✅ 分片完成，总段数：{len(chunks)}")
    return chunks

# -------------------------
# 4) Generate embeddings with GPT-2
# -------------------------
def generate_embeddings(text: str, model_name: str = "gpt2") -> np.ndarray:
    """Embed *text* as the mean of GPT-2's final-layer hidden states.

    Fixes vs. the original:
      * ``GPT2LMHeadModel`` outputs have no ``last_hidden_state`` attribute
        (that lives on the base ``GPT2Model``) — request
        ``output_hidden_states=True`` and take ``hidden_states[-1]`` instead;
      * GPT-2 ships without a pad token, so ``padding=True`` would raise —
        reuse EOS as the pad token;
      * ``model.eval()`` disables dropout so embeddings are deterministic.

    Returns a 1-D float array of size 768 (GPT-2's hidden width, matching
    the collection's dim=768).
    """
    print(f"[STEP] 4. 生成 Embedding 使用 GPT-2 模型: {model_name} ...")
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no native pad token
    model = GPT2LMHeadModel.from_pretrained(model_name)
    model.eval()

    # Tokenize (truncated to the model's max length) and run a forward pass.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)

    # Mean-pool the last transformer layer over the sequence dimension.
    last_hidden = outputs.hidden_states[-1]
    embeddings = last_hidden.mean(dim=1).squeeze().numpy()
    print(f"  ✅ Embedding 生成完成，形状 {embeddings.shape}")
    return embeddings

# -------------------------
# 5) Insert into Milvus
# -------------------------
def insert_into_milvus(collection: Collection, embeddings: np.ndarray, chunks: list, insert_batch_size: int = 500):
    """Insert (embedding, text) rows into *collection* in fixed-size batches."""
    print(f"[STEP] 5. 插入到 Milvus collection '{collection.name}' (batch_size={insert_batch_size}) ...")
    for batch_no, lo in enumerate(range(0, len(chunks), insert_batch_size), start=1):
        texts = chunks[lo:lo + insert_batch_size]
        vectors = embeddings[lo:lo + insert_batch_size].tolist()
        print(f"  -> 插入批次 {batch_no}, size={len(texts)} ...")
        # Column order must match the schema: [embedding, text] (pk is auto).
        result = collection.insert([vectors, texts])
        print(f"     已插入 {len(result.primary_keys)} 条")
    print("  ✅ 所有数据已插入 Milvus")

# -------------------------
# 6) Create the index and load the collection
# -------------------------
def create_index_and_load(collection: Collection, index_type: str = "IVF_FLAT", metric_type: str = "COSINE", nlist: int = 1024):
    """Build a vector index on the 'embedding' field, then load the collection into memory."""
    print(f"[STEP] 6. 创建索引 index_type={index_type} metric={metric_type} nlist={nlist} ...")
    collection.create_index(
        field_name="embedding",
        index_params={
            "index_type": index_type,
            "metric_type": metric_type,
            "params": {"nlist": nlist},
        },
    )
    print("  -> 索引已创建")
    collection.load()
    print("  -> collection 已加载到内存")

# -------------------------
# 7) Search Milvus and answer with GPT-2 (RAG)
# -------------------------
def rag_query(collection: Collection, query: str, top_k: int = 3, model_name: str = "gpt2") -> str:
    """Retrieve the *top_k* most similar chunks for *query*, then let GPT-2 answer.

    Fixes vs. the original:
      * the query embedding is converted to a plain list — pymilvus'
        ``search`` expects list-of-lists, not a bare numpy array;
      * hit fields are read via ``hit.entity.get("text")`` — the subscript
        form is not supported across pymilvus versions.
    """
    print(f"[STEP] 7. 执行 RAG 查询：{query[:100]} ...")

    # Embed the query with the same model used for the corpus.
    query_embedding = generate_embeddings(query, model_name=model_name)

    # Metric must match the index built in create_index_and_load (COSINE).
    search_params = {"metric_type": "COSINE", "params": {"nprobe": 16}}
    results = collection.search(
        data=[query_embedding.tolist()],
        anns_field="embedding",
        param=search_params,
        limit=top_k,
        output_fields=["text"],
    )

    # Concatenate the retrieved chunk texts into one context string.
    context = "\n".join(hit.entity.get("text") for hit in results[0])
    print("  ✅ 检索到的上下文：")
    print(context[:500])

    # Feed retrieved context + question into GPT-2 for the final answer.
    input_text = f"问题：{query}\n\n相关内容：{context}\n\n回答："
    return generate_gpt2_answer(input_text, model_name)

# -------------------------
# 8) Generate the answer with GPT-2
# -------------------------
def generate_gpt2_answer(input_text: str, model_name: str = "gpt2") -> str:
    """Generate a continuation of *input_text* with GPT-2, returned as a string.

    Fixes vs. the original:
      * GPT-2 has no pad token, so ``padding=True`` would raise — reuse EOS;
      * ``max_length=500`` fails whenever the (truncated, up to 1024-token)
        prompt is already longer than 500 tokens — ``max_new_tokens`` bounds
        only the generated continuation instead;
      * the attention mask and ``pad_token_id`` are passed explicitly so
        ``generate`` handles padding correctly;
      * generation runs under ``torch.no_grad()`` — no autograd graph needed.
    """
    print(f"[STEP] 8. 使用 GPT-2 生成回答 ...")
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no native pad token
    model = GPT2LMHeadModel.from_pretrained(model_name)
    model.eval()

    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=200,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decoded output includes the prompt followed by the generated answer.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

# -------------------------
# End-to-end pipeline (wires the steps above together)
# -------------------------
def run_rag_pipeline(file_path: str, collection_name: str = "doc_chunks", model_name: str = "gpt2", top_k: int = 3):
    """Run the full ingest-and-query flow: connect, ingest *file_path*, answer one query."""
    # 1) Milvus connection
    connect_milvus()

    # 2) Target collection (recreated by default)
    collection = create_collection(collection_name)

    # 3) Load the document and split it into chunks
    with open(file_path, "r", encoding="utf-8") as source:
        document = source.read()
    pieces = chunk_text(document)

    # 4) One embedding row per chunk
    vectors = np.array([generate_embeddings(piece, model_name=model_name) for piece in pieces])

    # 5) Persist vectors + raw text
    insert_into_milvus(collection, vectors, pieces)

    # 6) Index and load for search
    create_index_and_load(collection)

    # 7) Interactive query (the Chinese string below is the input() prompt)
    query = input("我想知道端口里面有哪些参数")
    answer = rag_query(collection, query, top_k=top_k, model_name=model_name)
    print(f"最终回答：{answer}")


# -------------------------
# Script entry point
# -------------------------
file_path = r"D:\润和\点金需要的文档和存储\nginx配置解释.txt"

# Guarded so importing this module no longer triggers the full pipeline
# (the original ran run_rag_pipeline at import time as a side effect).
if __name__ == "__main__":
    run_rag_pipeline(file_path)
