import re

import numpy as np
from dotenv import load_dotenv, find_dotenv
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from numpy import dot
from numpy.linalg import norm
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer
from openai import OpenAI

import os
# Load environment variables
from dotenv import load_dotenv, find_dotenv

from ZhiHu.MyVectorDBConnector import MyVectorDBConnector

_ = load_dotenv(find_dotenv())  # Read the local .env file, which defines OPENAI_API_KEY and friends

# Global vector-DB handle; populated later by initDB().
vector_db=None

# OpenAI client configured from env vars; base_url points at a proxy/alternate endpoint.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY_ZHIHU"] ,base_url=os.environ["OPENAI_API_BASE_ZHIHU"])
# NOTE(review): presumably lets the Chroma-backed vector store be reset — confirm in MyVectorDBConnector.
os.environ["ALLOW_RESET"] = "True"

def get_completion(prompt, model="gpt-3.5-turbo"):
    """Send a single-turn user prompt to the chat API and return the reply text."""
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # minimal randomness: deterministic-ish answers
    )
    return response.choices[0].message.content

def build_prompt(prompt_template, **kwargs):
    """Fill a prompt template by substituting __KEY__ placeholders.

    For each keyword argument, the placeholder ``__KEY__`` (upper-cased name)
    is replaced: string values are used verbatim, lists of strings are joined
    with newlines, and anything else goes through ``str()``.
    """
    def _render(value):
        # Normalize one template value to its string form.
        if isinstance(value, str):
            return value
        if isinstance(value, list) and all(isinstance(item, str) for item in value):
            return '\n'.join(value)
        return str(value)

    filled = prompt_template
    for key, value in kwargs.items():
        filled = filled.replace(f"__{key.upper()}__", _render(value))
    return filled


# RAG answer-generation template. __INFO__ and __QUERY__ are placeholders
# filled in by build_prompt(). The instructions are user-facing runtime text
# (they steer the LLM) and therefore stay in Chinese.
prompt_template = """
你是一个问答机器人。
你的任务是根据下述给定的已知信息回答用户问题。
确保你的回复完全依据下述已知信息。不要编造答案。
如果下述已知信息不足以回答用户的问题，请直接回复"我无法回答您的问题"。

已知信息:
__INFO__

用户问：
__QUERY__

请用中文回答用户问题。
"""

class RAG_Bot:
    """Minimal retrieval-augmented QA bot: retrieve, build prompt, ask the LLM."""

    def __init__(self, vector_db, llm_api, n_results=2):
        self.vector_db = vector_db  # connector exposing search(query, n)
        self.llm_api = llm_api      # callable: prompt text -> answer text
        self.n_results = n_results  # number of documents to retrieve per query

    def chat(self, user_query):
        """Answer user_query grounded in the top retrieved documents."""
        # Step 1: fetch the most relevant documents for the query.
        hits = self.vector_db.search(user_query, self.n_results)
        # Step 2: inject retrieved passages and the question into the template.
        prompt = build_prompt(
            prompt_template, info=hits['documents'][0], query=user_query)
        # Step 3: delegate answer generation to the injected LLM callable.
        return self.llm_api(prompt)


def get_embeddings(texts, model="text-embedding-ada-002", dimensions=None):
    """Embed a batch of texts with the OpenAI Embedding API.

    Args:
        texts: list of strings to embed.
        model: embedding model name.
        dimensions: optional output dimensionality; ignored for
            text-embedding-ada-002, which does not accept the parameter.

    Returns:
        A list of embedding vectors (list of floats), one per input text.
    """
    # ada-002 rejects the `dimensions` argument, so drop it for that model.
    if model == "text-embedding-ada-002":
        dimensions = None
    if dimensions:
        data = client.embeddings.create(
            input=texts, model=model, dimensions=dimensions).data
    else:
        data = client.embeddings.create(input=texts, model=model).data
    # Removed leftover debug print that dumped every full embedding vector
    # to stdout on each call (thousands of floats per text).
    return [x.embedding for x in data]

def extract_text_from_pdf(filename, page_numbers=None, max_number=None, min_line_length=1):
    """Extract text from a PDF (optionally restricted to given pages) as paragraphs.

    Args:
        filename: path to the PDF file.
        page_numbers: optional collection of 0-based page indices to keep.
        max_number: optional 0-based index of the last page to read (inclusive).
        min_line_length: lines shorter than this act as paragraph separators.

    Returns:
        A list of paragraph strings reassembled from the raw line stream.
    """
    paragraphs = []
    buffer = ''
    full_text = ''
    # Collect raw text from the selected pages.
    for i, page_layout in enumerate(extract_pages(filename)):
        if max_number is not None and i > max_number:
            break

        # If a page range was given, skip pages outside it.
        if page_numbers is not None and i not in page_numbers:
            continue

        for element in page_layout:
            if isinstance(element, LTTextContainer):
                full_text += element.get_text() + '\n'
    # Reassemble lines into paragraphs; short/empty lines mark boundaries.
    lines = full_text.split('\n')
    for text in lines:
        if len(text) >= min_line_length:
            # A trailing '-' marks a hyphenated line break: join without the
            # hyphen. BUG FIX: was text.strip('-'), which also removed
            # *leading* hyphens (e.g. bullet dashes) from the line.
            buffer += (' '+text) if not text.endswith('-') else text[:-1]
        elif buffer:
            paragraphs.append(buffer)
            buffer = ''
    if buffer:
        paragraphs.append(buffer)
    return paragraphs


def initDB():
    """Create the global vector-DB connector, backed by get_embeddings."""
    global vector_db
    vector_db = MyVectorDBConnector("rag_vec", get_embeddings)


def saveVectorDB():
    """Extract sample paragraphs from the llama2 paper and index them."""
    global vector_db

    # Pull a small slice of the paper: pages 2-3 only, dropping short lines.
    paragraphs = extract_text_from_pdf(
        "../data/llama2.pdf",
        page_numbers=[2, 3],
        max_number=3,
        min_line_length=10,
    )

    # Embed the paragraphs and store them in the vector database.
    vector_db.add_documents(paragraphs)


def doSearch():
    """Run a raw vector search and print the retrieved paragraphs."""
    global vector_db

    user_query = "Llama 2有多少参数"
    hits = vector_db.search(user_query, 2)
    for doc in hits['documents'][0]:
        print(doc + "\n")

def doSearch2():
    """Ask the RAG bot a question end-to-end and print its answer."""
    global vector_db

    # Wire the vector store and the chat-completion API into a bot.
    bot = RAG_Bot(vector_db, llm_api=get_completion)

    user_query = "llama 2有对话版吗？"
    answer = bot.chat(user_query)
    print(answer)

def cos_sim(a, b):
    """Cosine similarity -- larger means more similar."""
    numerator = dot(a, b)
    denominator = norm(a) * norm(b)
    return numerator / denominator

def l2(a, b):
    """Euclidean distance -- smaller means more similar."""
    return norm(np.subtract(a, b))


def testVectorLength():
    """Demo: embedding similarity works across languages (EN query, ZH docs)."""
    query = "global conflicts"

    documents = [
        "联合国就苏丹达尔富尔地区大规模暴力事件发出警告",
        "土耳其、芬兰、瑞典与北约代表将继续就瑞典“入约”问题进行谈判",
        "日本岐阜市陆上自卫队射击场内发生枪击事件 3人受伤",
        "国家游泳中心（水立方）：恢复游泳、嬉水乐园等水上项目运营",
        "我国首次在空间站开展舱外辐射生物学暴露实验",
    ]

    query_vec = get_embeddings([query])[0]
    doc_vecs = get_embeddings(documents)

    # Self-similarity first (should be maximal/minimal), then each document.
    print("Cosine distance:")
    print(cos_sim(query_vec, query_vec))
    for doc_vec in doc_vecs:
        print(cos_sim(query_vec, doc_vec))

    print("\nEuclidean distance:")
    print(l2(query_vec, query_vec))
    for doc_vec in doc_vecs:
        print(l2(query_vec, doc_vec))
def main():
    """Entry point: build the index, populate it, and run one RAG query."""
    initDB()
    saveVectorDB()
    doSearch2()

# Script entry guard: run the demo pipeline only when executed directly.
if __name__ == "__main__":
    main()


