import logging
from langchain.chat_models import ChatTongyi
from langchain.retrievers import ContextualCompressionRetriever
from langchain.chains import RetrievalQA
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os

# 1. Load: import the document loaders
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain_community.llms import Tongyi

# DashScope (Alibaba Cloud) API key, read from the environment so the secret
# is not hard-coded in source. Will be None if the variable is unset.
DASHSCOPE_API_KEY = os.getenv('DASHSCOPE_API_KEY')   # your Alibaba Cloud DashScope API key

# print(f"API Key: {os.getenv('DASHSCOPE_API_KEY')}")

# Configure the logging level for the whole process
logging.basicConfig(level=logging.INFO)

# Instantiate the Tongyi Qianwen chat model, using the qwen-turbo variant
llm = ChatTongyi(model_name="qwen-turbo", api_key= DASHSCOPE_API_KEY)

# Load the knowledge-base documents. Each file in base_dir is dispatched to a
# loader class by its extension; unrecognized files are skipped silently.
base_dir =  './OneFlower'  # directory containing your source documents

# Map lower-cased file extension -> loader class. All three loaders expose the
# same .load() interface returning a list of Documents.
_LOADER_BY_EXT = {
    '.pdf': PyPDFLoader,
    '.docx': Docx2txtLoader,
    '.txt': TextLoader,
}

documents = []
for file in os.listdir(base_dir):
    # Build the full path to the file
    file_path = os.path.join(base_dir, file)
    # Lower-case the extension so files like "REPORT.PDF" are also picked up
    # (the original if/elif chain matched case-sensitively and skipped them).
    ext = os.path.splitext(file)[1].lower()
    loader_cls = _LOADER_BY_EXT.get(ext)
    if loader_cls is not None:
        documents.extend(loader_cls(file_path).load())

 # 2. Split the loaded Documents into overlapping text chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)

# 3. Create the vector store and retriever (top-5 nearest chunks per query)
embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key= DASHSCOPE_API_KEY)
vectorstore = FAISS.from_documents(texts, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})

# Build a document compressor with LLMChainExtractor: it uses the LLM to
# extract only the passages of each retrieved chunk relevant to the query
from langchain.retrievers.document_compressors import LLMChainExtractor     
compressor = LLMChainExtractor.from_llm(llm)        


# Create the contextual compression retriever: wraps the base retriever so
# every retrieved document is compressed before reaching the QA chain
compression_retriever = ContextualCompressionRetriever(
    base_retriever=retriever,
    base_compressor=compressor
)

# 4. Create the RetrievalQA chain ("stuff" = concatenate all retrieved
# chunks into a single prompt for the LLM)
qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=compression_retriever)

# 5. Output: minimal web UI for the QA system
from flask import Flask, request, render_template
app = Flask(__name__) # Flask application instance

@app.route('/', methods=['GET', 'POST'])
def home():
    """Render the QA page; on POST, answer the submitted question.

    GET  -> renders the empty form.
    POST -> reads the 'question' form field, runs it through the
            RetrievalQA chain, and renders the chain's result.
    """
    if request.method == 'POST':

        # The user's question, taken from the submitted form
        question = request.form.get('question')

        # Guard against a missing/empty field so we don't make a pointless
        # (and billable) LLM call; just re-render the blank page.
        if not question:
            return render_template('index.html')

        # RetrievalQA chain: feed in the question, get the answer back.
        # .invoke() replaces the deprecated Chain.__call__ style
        # (qa_chain({...}) emits a deprecation warning in LangChain >= 0.1).
        result = qa_chain.invoke({"query": question})

        # Hand the model's result to the template for rendering
        return render_template('index.html', result=result)

    return render_template('index.html')

if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # combined with host='0.0.0.0' (listen on all interfaces) this allows
    # remote code execution from the network — disable debug for anything
    # beyond local development.
    app.run(host='0.0.0.0',debug=True,port=5000)