'''
Author: kun 56216004@qq.com
Date: 2023-06-26 11:56:05
LastEditors: kun 56216004@qq.com
LastEditTime: 2023-06-27 17:34:47
FilePath: \langchain\docAsk.py
Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
'''

# LangChain入门指南_人工智能_故里_-DevPress官方社区  https://huaweidevelopers.csdn.net/648c32c655c3e102e65f925d.html#devmenu12
# 
# (langchain39)
# pip install langchain
# Collecting langchain
#   Downloading langchain-0.0.215-py3-none-any.whl (1.1 MB)
# pip install openai
# Collecting openai
#   Downloading openai-0.27.8-py3-none-any.whl
# pip install jieba
# Collecting jieba
#   Downloading jieba-0.42.1.tar.gz (19.2 MB)
# pip install unstructured

import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import TokenTextSplitter
from langchain.llms import OpenAI
from langchain.chains import ChatVectorDBChain
from langchain.document_loaders import DirectoryLoader
import jieba as jb
import openai
from pathlib import Path
# Ensure the output directory for segmented text exists.
# mkdir(exist_ok=True) is atomic and race-free, unlike check-then-makedirs.
cut_dir = Path("./data/cut")
cut_dir.mkdir(parents=True, exist_ok=True)

# Route OpenAI traffic through a third-party relay endpoint.
openai.api_base = "https://api.chatanywhere.com.cn/v1"
# Import only the key we actually use, instead of wildcard-importing config.
from config import api_key
openai.api_key = api_key


# Chinese source documents to index, located under ./data.
files = ['研发简要流程.txt', '产品经理.txt']
import time
start_time = time.time()  # wall-clock reference for the timing printouts below

from langchain.document_loaders import TextLoader

documents = []
for file in files:
    # Read each UTF-8 Chinese document from the data folder.
    doc_path = f"./data/{file}"  # distinct name: avoid shadowing cut dir path above
    loader = TextLoader(doc_path, encoding='utf8')
    # extend() keeps every document the loader yields, not only the first.
    documents.extend(loader.load())

# Split documents into ~1000-token chunks (no overlap) for embedding.
text_splitter = TokenTextSplitter(chunk_size=1000, chunk_overlap=0)
doc_texts = text_splitter.split_documents(documents)

# Expose the key via the environment AND pass it explicitly — avoids the
# obscure chained assignment `a = os.environ[...] = api_key`.
os.environ["OPENAI_API_KEY"] = api_key
embeddings = OpenAIEmbeddings(openai_api_key=api_key)

# Build a FAISS vector index over the embedded chunks (pip install faiss-cpu).
from langchain.vectorstores import FAISS
db = FAISS.from_documents(doc_texts, embeddings)

# Conversational retrieval chain backed by the FAISS index; also returns the
# source documents that supported each answer.
chain = ChatVectorDBChain.from_llm(
    OpenAI(temperature=0, model_name="gpt-3.5-turbo"),
    db,
    return_source_documents=True,
)


# Conversation memory shared across calls to get_answer().
chat_history = []


def get_answer(question):
    """Ask the retrieval chain a question and record it in the chat history.

    Parameters
    ----------
    question : str
        The user's question.

    Returns
    -------
    str
        The answer text produced by the chain.
    """
    result = chain({"question": question, "chat_history": chat_history})
    answer = result["answer"]
    # Append after answering so the next call sees this exchange as context.
    chat_history.append((question, answer))
    return answer

# Warm-up query: ask one question and report total elapsed time so far
# (measured from start_time, so it includes index construction above).
question = "产品经理的工作职责是什么？"
print(get_answer(question))
end_time = time.time()
run_time = end_time - start_time  # elapsed seconds since timing began
print(run_time)

# Interactive Q&A loop: read one question per line, print the answer and the
# time taken to answer it.
while True:
    try:
        question = input()
    except (EOFError, KeyboardInterrupt):
        # Exit cleanly on Ctrl-D / Ctrl-C instead of crashing with a traceback.
        break
    # Time only this question — the original subtracted the script-start
    # timestamp, so the printed "run time" grew forever and included the
    # time spent waiting for user input.
    t0 = time.time()
    print(get_answer(question))
    print(time.time() - t0)  # seconds spent answering this question