peichao.dong committed
Commit • 8e967a7
Parent(s): 9bcc3e3

add feed back chat
Browse files
- .gitignore +1 -0
- app.py +61 -26
- documents/business_context.md +13 -0
- memories.py +61 -0
- promopts.py +28 -0
.gitignore ADDED
@@ -0,0 +1 @@
+__pycache__/*
app.py CHANGED
@@ -1,40 +1,75 @@
 import gradio as gr
-import
-import
-from langchain.
-from
-from
-from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
-
-
-entityMemory = ConversationEntityMemory(llm=llm)
-
-    llm=llm,
-
-def answer(question, history=[]):
-    history.append(question)
-    response = conversation.predict(input=question)
-    history.append(response)
-    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
-    return responses, history
-
-with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo:
-    chatbot = gr.Chatbot(elem_id="chatbot")
-    state = gr.State([])
-
+from langchain.chains import LLMChain
+from langchain.chat_models import ChatOpenAI
+from langchain.document_loaders import TextLoader
+from memories import HumenFeedbackBufferMemory
+from promopts import FEEDBACK, FEEDBACK_PROMPT
+
+llm = ChatOpenAI(temperature=0.1)
+
+memory = HumenFeedbackBufferMemory(
+    input_key="input", human_prefix="Answer", ai_prefix="AI")
+
+llmchain = LLMChain(
+    llm=llm,
+    prompt=FEEDBACK_PROMPT,
+    memory=memory,
+    verbose=True
+)
+
+"""Read documents/business_context.md and use its contents as the context."""
+context_path = "./documents/business_context.md"
+textloader = TextLoader(context_path)
+CONTEXT = textloader.load()[0].page_content
+
+
+def sendMessage(chatbot, input):
+    chatbot.append((
+        (None if len(input) == 0 else input), None))
+    return chatbot
+
+
+def clearMemory(chatbot):
+    chatbot.clear()
+    memory.clear()
+    return chatbot, ""
+
+
+def feedBack(context, story, chatbot=[], input=""):
+    if len(input) > 0:
+        context += (f"\n- {input}")
+        with open(context_path, 'w') as f:
+            f.write(context)
+
+    response = llmchain.run(
+        input=(input if len(input) == 0 else ("Answer: " + input)), context=context, story=story, stop="\nAnswer:")
+    chatbot[-1][1] = response
+    return chatbot, "", context
+
+
+with gr.Blocks() as demo:
     with gr.Row():
-
-    button.click(
+        with gr.Column():
+            chatbot = gr.Chatbot(elem_id="chatbot").style()
+
+            with gr.Row():
+                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(
+                    container=False)
+        with gr.Column():
+            with gr.Row():
+                context = gr.Textbox(show_label=True, label="Context", value=CONTEXT, placeholder="Enter Context").style(
+                    container=False)
+
+            with gr.Row():
+                story = gr.Textbox(show_label=True, label="Story", placeholder="Enter Story").style(
+                    container=False)
+
+            with gr.Row():
+                button = gr.Button("Generate Scenarios")
+
+    button.click(clearMemory, [chatbot], [chatbot, txt]).then(sendMessage, [chatbot, txt], [chatbot]).then(
+        feedBack, [context, story, chatbot], [chatbot, txt])
+    txt.submit(sendMessage, [chatbot, txt], [chatbot]).then(
+        feedBack, [context, story, chatbot, txt], [chatbot, txt, context])
 
 demo.launch()
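The heart of the new app.py is the human-in-the-loop cycle: llmchain.run(..., stop="\nAnswer:") cuts each generation at the exact point where the prompt's Thought/Question/Answer script expects the domain expert to reply, feedBack() re-injects the expert's reply prefixed with "Answer: ", and the custom memory replays the growing transcript on the next call. A minimal sketch of that mechanic, using a hypothetical canned stand-in for ChatOpenAI instead of the real chain:

# fake_llm and CANNED are illustrative stand-ins, not part of the repo.
CANNED = [
    "Thought: The sampling rule is unclear.\nQuestion: How are users sampled?\nAnswer:",
    "Thought: I know enough to explain the user story.\n"
    "Scenarios: Given two buckets, When a user is sampled, Then ...",
]

def fake_llm(prompt: str, step: int) -> str:
    # Return canned text cut at the stop sequence, which is exactly what
    # llmchain.run(..., stop="\nAnswer:") hands back to feedBack().
    return CANNED[step].split("\nAnswer:")[0]

history = ""  # what HumenFeedbackBufferMemory accumulates between calls
for step, expert_reply in enumerate(
        ["By the experiment's sampling percentage.", ""]):
    response = fake_llm(history, step)
    history += response + "\n"
    if expert_reply:  # feedBack() prefixes human replies with "Answer: "
        history += "Answer: " + expert_reply + "\n"
print(history)

Each round therefore ends with an open question, and the expert's next submission both answers it and (in feedBack) is appended to the persisted business context file.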
documents/business_context.md ADDED
@@ -0,0 +1,13 @@
+ABstract is an A/B testing platform that integrates configuration management and experiment management to give enterprises data-driven decision-making capability. For example, a user who wants to test two different versions of a web page to learn which one brings more sign-ups creates an experiment with two buckets, one per page version, and assigns different users to each bucket to compare the results.
+
+Each experiment can contain one or more buckets, and each bucket has a different configuration. Experiment information includes: the experiment name, a description, start and end times, the sampling percentage, a key that identifies the experiment, each bucket's name with its associated configuration and share of traffic, and the targeting rules that assign users to buckets.
+
+Experiments and buckets are configured together. The configurations in an experiment give users different interaction experiences. An experiment can create at most 10 buckets. A bucket's configuration is attached to the bucket as one of its configuration items.
+
+Experiments come in three types: canary release, A/B comparison, and multi-bucket. An experiment should contain: the experiment name, a description, start and end times, the sampling percentage, a key that identifies the experiment, each bucket's name with its associated configuration and share of traffic, and the targeting rules that assign users to buckets.
+
+The purpose of an experiment is to assign users randomly to different buckets, where they get different software interaction experiences. By collecting behavioral data from the users in each bucket, the effects of the different designs can be compared. An experiment contains at least 1 and at most 10 buckets.
+
+Users edit experiment and bucket information in a single editing interface and submit both together when they click save. Creating an experiment should include: the experiment name, the sampling percentage, the start time, the end time, and the buckets (name, key, traffic percentage). There are three experiment types: canary release, A/B comparison, and multi-bucket.
+- The goal of an experiment is to randomly split users into groups, collect each group's behavioral data, and compare the effects of the different designs to find the best one.
+- There are three kinds of experiments: 1. canary release (the experiment has a single bucket, and the sampling percentage controls the rollout of a new feature); 2. A/B comparison (the experiment has two buckets, and a sample of all users is used to compare data between the old and new groups); 3. multi-bucket (the experiment has several buckets, and a sample of all users is used to compare the effects of different designs).
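The entities described above map onto a small data model. A hypothetical sketch for orientation (class and field names are illustrative, not taken from the repository):

from dataclasses import dataclass, field
from datetime import datetime
from typing import List

@dataclass
class Bucket:
    name: str
    key: str
    traffic_percent: float  # share of sampled users routed to this bucket
    config: dict = field(default_factory=dict)  # the bucket's configuration item

@dataclass
class Experiment:
    name: str
    description: str
    start: datetime
    end: datetime
    sampling_percent: float   # share of all users pulled into the experiment
    key: str                  # identifies the experiment
    targeting_rules: List[str]  # rules that assign users to buckets
    buckets: List[Bucket]     # 1 = canary, 2 = A/B, 3+ = multi-bucket

    def __post_init__(self):
        # The context above fixes the bucket count at 1..10.
        assert 1 <= len(self.buckets) <= 10, "an experiment has 1 to 10 buckets"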
memories.py ADDED
@@ -0,0 +1,61 @@
+from typing import Any, Dict, List, Optional
+from langchain.memory.chat_memory import BaseChatMemory
+from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage, ChatMessage
+
+
+def get_buffer_string(
+    messages: List[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
+) -> str:
+    """Get buffer string of messages."""
+    string_messages = []
+    for m in messages:
+        if isinstance(m, HumanMessage):
+            print("HumanMessage: " + m.content)
+            role = human_prefix + ":"
+        elif isinstance(m, AIMessage):
+            print("AIMessage" + m.content)
+            role = ""
+        elif isinstance(m, SystemMessage):
+            print("SystemMessage")
+            role = "System:"
+        elif isinstance(m, ChatMessage):
+            print("ChatMessage")
+            role = m.role + ":"
+        else:
+            raise ValueError(f"Got unsupported message type: {m}")
+
+        string_messages.append(f"{m.content}")
+
+    return "\n".join(string_messages)
+
+
+class HumenFeedbackBufferMemory(BaseChatMemory):
+    """Buffer for storing conversation memory."""
+
+    human_prefix: str = "Human"
+    ai_prefix: str = "AI"
+    memory_key: str = "history"  #: :meta private:
+
+    @property
+    def buffer(self) -> Any:
+        """String buffer of memory."""
+        if self.return_messages:
+            return self.chat_memory.messages
+        else:
+            return get_buffer_string(
+                self.chat_memory.messages,
+                human_prefix=self.human_prefix,
+                ai_prefix=self.ai_prefix,
+            )
+
+    @property
+    def memory_variables(self) -> List[str]:
+        """Will always return list of memory variables.
+
+        :meta private:
+        """
+        return [self.memory_key]
+
+    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        """Return history buffer."""
+        return {self.memory_key: self.buffer}
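Note the quirk that sets this memory apart from LangChain's stock buffer: get_buffer_string computes a role prefix but never uses it, joining only the raw message contents, so the replayed history reads as one continuous Thought/Question/Answer script rather than a "Human:"/"AI:" dialogue. A usage sketch, assuming the LangChain 0.0.x API this commit builds against:

from memories import HumenFeedbackBufferMemory

memory = HumenFeedbackBufferMemory(
    input_key="input", human_prefix="Answer", ai_prefix="AI")

# save_context() stores a HumanMessage/AIMessage pair on chat_memory.
memory.save_context(
    {"input": "Answer: Users are sampled by a per-experiment percentage."},
    {"output": "Thought: I know enough to explain the user story."})

# The buffer is the message contents joined by newlines, with no role prefixes.
print(memory.load_memory_variables({})["history"])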
promopts.py ADDED
@@ -0,0 +1,28 @@
+from langchain.prompts import PromptTemplate
+
+FEEDBACK = """You are a business analyst who is familiar with specification by example. I'm the domain expert.
+
+===CONTEXT
+{context}
+===END OF CONTXT
+
+===USER STORY
+{story}
+===END OF USER STORY
+
+Explain the user story as scenarios. use the following format:
+
+Thought: you should always think about what is still uncertain about the user story. Ignore technical concerns.
+Question: the Question to ask to clarify the user story
+Answer: the answer I responded to the question
+... (this Thought/Question/Answer repeat at least 3 times, at most 10 times)
+Thought: I know enough to explain the user story
+Scenarios: List all possible scenarios with concrete example in Given/When/Then style
+
+
+{history}
+{input}"""
+
+FEEDBACK_PROMPT = PromptTemplate(
+    input_variables=["context", "story", "history", "input"], template=FEEDBACK,
+)
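A quick way to see exactly what the chain sends to the model is to format the template by hand; the values below are made up for illustration:

from promopts import FEEDBACK_PROMPT

# PromptTemplate.format fills the four input variables just as LLMChain does.
print(FEEDBACK_PROMPT.format(
    context="- Sampling is a per-experiment percentage.",
    story="As an operator, I create an A/B experiment with two buckets.",
    history="Thought: The sampling rule is unclear.\nQuestion: How are users sampled?",
    input="Answer: By the experiment's sampling percentage.",
))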