
import os
os.environ["OPENAI_API_KEY"] = '50259a4a-4893-4e64-a530-2419e3e27741'
os.environ["OPENAI_BASE_URL"] = 'https://a0ai-api.zijieapi.com/api/llm/v1'
os.environ["ARK_BASE_URL"] = 'https://ark.cn-beijing.volces.com/api/v3'
os.environ["ARK_API_KEY"] = '50259a4a-4893-4e64-a530-2419e3e27741'
os.environ["EMBEDDING_MODELEND"] = 'Doubao-embedding'
os.environ["LLM_MODELEND"] = 'Doubao-pro-32k'
os.environ['HUGGINGFACEHUB_API_TOKEN'] = 'hf_rugMWSUucWdxJhEAOzHUaRrepDqMlYNy'

import logging

from volcenginesdkarkruntime import Ark
from langchain_openai import ChatOpenAI
# from langchain_openai.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel
from typing import Dict, List, Any

from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Qdrant
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.chains import RetrievalQA

from flask import Flask, request, render_template

# Read runtime configuration from the environment and fail fast when a
# required value is missing.
api_key = os.environ.get("ARK_API_KEY")
model = os.environ.get("LLM_MODELEND")
url = os.environ.get("ARK_BASE_URL")

if not api_key:
    # BUG FIX: the original message named OPENAI_API_KEY although the check
    # is on ARK_API_KEY.
    raise ValueError("ARK_API_KEY 环境变量未设置")

if not model:
    raise ValueError("LLM_MODELEND 环境变量未设置")

model_embedding = os.environ.get("EMBEDDING_MODELEND")
if not model_embedding:
    raise ValueError("EMBEDDING_MODELEND 环境变量未设置")

# --- Load and split the source documents -------------------------------------
base_dir = os.path.dirname(os.path.abspath(__file__))   # directory of this .py file
folder_path = os.path.join(base_dir, 'OneFlower')       # folder holding the corpus

# Suffix -> loader class dispatch table (checked in this order, like the
# original if/elif chain: pdf, docx, txt).
_LOADERS = {
    '.pdf': PyPDFLoader,
    '.docx': Docx2txtLoader,
    '.txt': TextLoader,
}

documents = []
for filename in os.listdir(folder_path):
    full_path = os.path.join(folder_path, filename)
    for suffix, loader_cls in _LOADERS.items():
        if filename.endswith(suffix):
            documents.extend(loader_cls(full_path).load())
            break

# Split documents into small overlapping chunks suitable for embedding.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=30)
chunk_docs = text_splitter.split_documents(documents)

# print(documents)


# Custom Doubao embedding adapter for LangChain.
class DoubaoEmbeddings(BaseModel, Embeddings):
    """LangChain ``Embeddings`` implementation backed by the Volcengine Ark
    (Doubao) embeddings API.

    Attributes:
        client: Ark SDK client, constructed in ``__init__``.
        api_key: Ark API key used to authenticate the client.
        model: Embedding model/endpoint name (required by pydantic).
    """

    client: Ark = None
    api_key: str = ""
    model: str

    def __init__(self, **data: Any) -> None:
        """Validate the pydantic fields, then build the Ark client.

        NOTE(review): the base URL comes from the module-level ``url``
        (ARK_BASE_URL) rather than a per-instance field — confirm intended.
        """
        super().__init__(**data)
        self.client = Ark(
            base_url=url,
            api_key=self.api_key,
        )

    def embed_query(self, text: str) -> List[float]:
        """Return the embedding for a single input text.

        :param text: Text to embed.
        :return: The embedding as a list of floats.
        """
        response = self.client.embeddings.create(model=self.model, input=text)
        return response.data[0].embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each document text; one embedding vector per input string."""
        return [self.embed_query(text) for text in texts]

    class Config:
        # Ark is not a pydantic-native type; allow it as a field type.
        arbitrary_types_allowed = True


# Embed the split chunks into an in-memory Qdrant vector store.
embedder = DoubaoEmbeddings(model=model_embedding, api_key=api_key)
vectorstore = Qdrant.from_documents(
    documents=chunk_docs,
    embedding=embedder,
    location=":memory:",
    collection_name="my_docs",
)

# --- Build the retrieval Q&A chain -------------------------------------------
# Enable INFO logging so MultiQueryRetriever's generated query variants are
# visible on the console.
logging.basicConfig()
logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO)

# Chat model, used both for query expansion and for answering.
llm = ChatOpenAI(
    model=model,
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=url,
)

# Wrap the plain vector-store retriever so the LLM generates multiple
# phrasings of each query before retrieval.
base_retriever = vectorstore.as_retriever()
retriever_llm = MultiQueryRetriever.from_llm(retriever=base_retriever, llm=llm)

# RetrievalQA chain: retrieve relevant chunks, then answer with the LLM.
retrieval_chain = RetrievalQA.from_chain_type(llm, retriever=retriever_llm)

# --- Web front end -----------------------------------------------------------
# Flask app serving the Q&A page; templates live in ./templates next to this file.
app = Flask(__name__, template_folder=os.path.join(base_dir, 'templates'))


@app.route('/', methods=['GET', 'POST'])
def homepage():
    """Render the Q&A page; on POST, answer the submitted question.

    GET: render the empty form.
    POST: read the ``question`` form field, run it through the RetrievalQA
    chain, and render the page with the chain's result dict.
    """
    if request.method == 'POST':
        question = request.form.get('question')
        # Robustness fix: the original passed a missing/blank question (None)
        # straight into the chain; re-render the form instead.
        if not question or not question.strip():
            return render_template('index.html')
        # Run the question through the RetrievalQA chain.
        result = retrieval_chain({'query': question})
        return render_template('index.html', result=result)
    return render_template('index.html')


if __name__ == "__main__":
    # Local development server only — not suitable for production.
    app.run(debug=True, host='127.0.0.1', port=5000)
