Update app.py
app.py
CHANGED
@@ -16,47 +16,42 @@ from langchain.llms import TextGen
 import os
 OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
 
-# Split the source document
-text_splitter = CharacterTextSplitter(
-    separator="\n",
-    chunk_size=1000,
-    chunk_overlap=200,
-    length_function=len
-)
-
-texts = text_splitter.split_text("./output_1.txt")
-
 # Embedding model
 #embeddings = OpenAIEmbeddings()
-embeddings = HuggingFaceEmbeddings(model_name="
+embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")
 
 # Load the data
-docsearch = FAISS.from_texts(texts, embeddings)
+#docsearch = FAISS.from_texts(texts, embeddings)
+docsearch = FAISS.load_local("./bge-large-en_faiss_index/faiss_index", embeddings)
 
-model_url = "http://36.103.234.50:5000"
+chain = load_qa_chain(OpenAI(), chain_type="stuff", verbose=True)
 
+prompt = "您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答“您提的问题不在本知识库内,请重新提问”,所有问题必需用中文回答"
 
 def predict(message, history):
-    for human,
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "system", "content": prompt})
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = openai.ChatCompletion.create(
+        model='gpt-3.5-turbo',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
 
     partial_message = ""
     for chunk in response:
-        if len(chunk) != 0:
-            partial_message = partial_message + chunk
-            yield partial_message
+        if len(chunk['choices'][0]['delta']) != 0:
+            partial_message = partial_message + chunk['choices'][0]['delta']['content']
+            yield partial_message
+
+gr.ChatInterface(predict,
+                 textbox=gr.Textbox(placeholder="请输入您的问题", container=False, scale=7),
+                 title="欢迎使用ANSYS软件AI机器人",
+                 examples=["你是谁?", "请介绍一下Fluent 软件的用户界面说明", "请用关于春天写一首100字的诗", "数学题:小红有3元钱,小红买了2斤香蕉,香蕉的价格是每斤1元。问小红一共花了多少钱?", "请用表格做一份学生课程表"],
+                 description="🦊请避免输入有违公序良俗的问题,模型可能无法回答不合适的问题🐇",
+                 ).queue().launch(auth=(USER, PASS))
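The commit removes the in-app splitting and indexing step (CharacterTextSplitter + FAISS.from_texts) and instead loads a prebuilt index from ./bge-large-en_faiss_index/faiss_index. The sketch below shows one way such an index could have been built offline; it reuses the splitter settings and the ./output_1.txt source from the removed code, but the build script itself is an assumption and is not part of this commit. Note that CharacterTextSplitter.split_text() expects the document text, so the file is read first (the removed line passed the path string directly).

# build_index.py -- hypothetical offline step, not part of this commit
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Read the source document (split_text takes text, not a file path)
with open("./output_1.txt", encoding="utf-8") as f:
    raw_text = f.read()

text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
)
texts = text_splitter.split_text(raw_text)

# Same embedding model that app.py uses at query time
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")

# Build the index once and save it to the folder app.py loads at startup
docsearch = FAISS.from_texts(texts, embeddings)
docsearch.save_local("./bge-large-en_faiss_index/faiss_index")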
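Within the lines shown, predict() streams a plain gpt-3.5-turbo completion and does not use docsearch or chain, which the hunk also defines. For reference, this is how a FAISS store and a load_qa_chain chain are usually combined in LangChain; the helper below is a sketch using the names from the diff, not code from this commit, and answer_with_context is a made-up name.

# Hypothetical wiring of the retrieval pieces defined in the hunk
def answer_with_context(query):
    # Fetch the most similar chunks from the FAISS index
    docs = docsearch.similarity_search(query, k=4)
    # The "stuff" chain packs the retrieved chunks into one prompt and answers
    return chain.run(input_documents=docs, question=query)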