import os
import re

import chromadb
from chromadb.utils import embedding_functions
from docx import Document
from openai import OpenAI


def load_employee_file(file_path):
    """
    1: Load a Word (.docx) file and return its text.

    Each paragraph's text is stripped of leading/trailing whitespace
    (basic data cleaning); the cleaned paragraphs are joined with
    newlines into a single string.
    """
    document = Document(file_path)
    # Strip each paragraph; blank paragraphs become empty lines, matching
    # the original document's paragraph layout.
    cleaned_paragraphs = [paragraph.text.strip() for paragraph in document.paragraphs]
    return "\n".join(cleaned_paragraphs)


def split_text(text, chunk_size=500):
    """
    2: Split text into consecutive fixed-size chunks.

    Args:
        text: the full document text.
        chunk_size: maximum number of characters per chunk (must be > 0).

    Returns:
        list[str]: non-overlapping chunks covering `text` in order; the
        last chunk may be shorter than `chunk_size`. Empty input yields [].

    Raises:
        ValueError: if chunk_size is not positive (previously this leaked
        an obscure "range() arg 3 must not be zero" error for 0).
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be a positive integer")
    # Slicing past the end of a string is safe in Python, so no special
    # handling is needed for the final (possibly short) chunk.
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]


def text_embeddings(texts):
    """
    Embed text(s) via DashScope's OpenAI-compatible embedding endpoint.

    Example:
        texts = ["王科宇", "陈治文", "张三"]
        embeddings = [[...1024 floats...], [...1024...], [...1024...]]

    Args:
        texts: a single string or a list of strings.

    Returns:
        list[list[float]]: one 1024-dimensional vector per input text,
        in the same order as the inputs.
    """
    # NOTE: the redundant function-local `import os` / `from openai import
    # OpenAI` were removed — both names are already imported at file level.
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),  # set your API key via this env var
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # DashScope (Bailian) base_url
    )
    # Accept a bare string for convenience (e.g. a single query).
    if isinstance(texts, str):
        texts = [texts]

    # Embed at most 10 texts per request to stay within the API batch limit.
    batch_size = 10
    embeddings = []
    for start in range(0, len(texts), batch_size):
        batch = texts[start:start + batch_size]
        completion = client.embeddings.create(
            model="text-embedding-v4",
            input=batch,
            dimensions=1024,  # only text-embedding-v3 / v4 support this parameter
            encoding_format="float",
        )
        embeddings.extend(item.embedding for item in completion.data)

    return embeddings


def store_in_chromadb(chunks):
    """
    3: Embed `chunks` and persist them in the local ChromaDB store.

    Args:
        chunks: list[str] of document chunks to index.

    Side effects:
        Writes vectors to the persistent collection "employee_manual"
        under ./chroma_db and prints a summary.
    """
    embeddings = text_embeddings(chunks)
    # Sanity check: each chunk must map to exactly one vector; best-effort
    # bail-out (kept as print + return, matching the script's style).
    if len(chunks) != len(embeddings):
        print("文本块数量和嵌入向量数量不一致")
        return
    client = chromadb.PersistentClient(path="./chroma_db")
    # embedding_function=None because we supply precomputed vectors ourselves.
    collection = client.get_or_create_collection(
        name="employee_manual",
        embedding_function=None
    )
    # upsert (instead of add) makes repeated runs idempotent: existing
    # "chunk_{i}" ids are overwritten rather than tripping a duplicate-ID
    # error against the persistent store.
    collection.upsert(
        documents=chunks,
        embeddings=embeddings,
        ids=[f"chunk_{i}" for i in range(len(chunks))]
    )

    print(f"成功往向量数据库employee_manual,存入{len(embeddings)}个向量")


def query_chroma(query_text):
    """
    4: Retrieve the 5 chunks most similar to `query_text`.

    The query is embedded with the same text_embeddings() helper used at
    ingest time (same model, dimensions=1024), so the query vector is
    guaranteed to match the dimensionality of the stored vectors.
    Chroma's OpenAIEmbeddingFunction was dropped because it cannot pass
    the `dimensions` parameter and could therefore produce query vectors
    that do not match the stored 1024-dim embeddings.

    Args:
        query_text: the user's question as a plain string.

    Returns:
        The Chroma query result dict (embeddings, documents, distances).
    """
    client = chromadb.PersistentClient(path="./chroma_db")
    # embedding_function=None: we pass precomputed query vectors below.
    collection = client.get_or_create_collection(
        name="employee_manual",
        embedding_function=None
    )

    # text_embeddings returns [[1024 floats]] for a single string input.
    query_embedding = text_embeddings(query_text)
    results = collection.query(
        query_embeddings=query_embedding,
        n_results=5,
        include=['embeddings', 'documents', 'distances']
    )

    return results


def rag_answer_question(results, question):
    """
    5: RAG — retrieve, aggregate, generate.

    Builds a context from the retrieved chunks and asks qwen-plus to
    answer `question` strictly from that context.

    Args:
        results: a Chroma query result dict (must contain "documents").
        question: the user's question.

    Returns:
        str: the model's answer text.
    """
    print("results:", results)
    # dict.fromkeys de-duplicates while PRESERVING the distance-ranked
    # retrieval order. The previous list(set(...)) scrambled the ranking
    # and made the context order nondeterministic across runs.
    retrieved_chunks = list(dict.fromkeys(results["documents"][0]))
    print(f"去重之后的结果列表:{retrieved_chunks}")
    context = "\n".join([f"-{chunk}" for chunk in retrieved_chunks])
    print(f"\n【检索到的参考内容】\n{context}\n")

    ai_client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    prompt = f"""
    你是一个专业的HR助理，请仔细阅读以下员工手册内容并准确回答用户问题。

    【员工手册内容】：
    {context}

    【用户问题】：
    {question}

    【回答要求】：
    1. 严格基于员工手册内容回答，不要编造或推测
    2. 如果手册中没有相关信息，请明确说明"手册中未提及此信息"
    3. 回答应简洁明了，条理清晰
    4. 如有多个要点，请分点列出
    5. 如果涉及具体条款，可引用相关内容
    """
    print(f"提示词:{prompt}")
    completion = ai_client.chat.completions.create(
        model="qwen-plus",
        messages=[
            {"role": "user", "content": f"{prompt}"},
        ],
        # Qwen3 models control the thinking process via enable_thinking
        # (open-source default True, commercial default False). When using
        # an open-source Qwen3 model without streaming, pass
        # extra_body={"enable_thinking": False} here or the call errors.
    )

    print(completion.choices[0].message.content)
    return completion.choices[0].message.content


if __name__ == '__main__':
    # 1. Load and clean the Word document.
    all_text = load_employee_file("employee_manual.docx")
    print(all_text)

    # 2. Split into fixed-size chunks.
    chunks = split_text(all_text)
    print(chunks)

    # 3. Embed + store. store_in_chromadb() computes the embeddings itself,
    #    so the previous extra text_embeddings(chunks) call here doubled the
    #    embedding API cost for no benefit and was removed.
    store_in_chromadb(chunks)

    # 4/5. Retrieval + generation (uncomment to run end-to-end):
    # query_text = "员工上班迟到15分钟,怎么处理"
    # results = query_chroma(query_text)
    # rag_answer_question(results, query_text)
