peichao.dong commited on
Commit
474437d
1 Parent(s): 0bd059f

add generate code agent

Browse files
.gitignore CHANGED
@@ -1 +1,2 @@
1
- __pycache__/*
 
 
1
+ __pycache__/*
2
+ .DS_Store
app.py CHANGED
@@ -2,22 +2,18 @@ import gradio as gr
2
  from langchain.chains import LLMChain
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.document_loaders import TextLoader
 
5
  from embedding import CustomEmbedding
6
  from memories import HumenFeedbackBufferMemory
7
  from langchain.memory import ConversationBufferMemory
8
  from promopts import FEEDBACK, FEEDBACK_PROMPT
 
9
 
10
- llm = ChatOpenAI(temperature=0.1)
11
 
12
- memory = HumenFeedbackBufferMemory(
13
- input_key="input", human_prefix="Answer", ai_prefix="AI")
14
-
15
- llmchain = LLMChain(
16
- llm=llm,
17
- prompt=FEEDBACK_PROMPT,
18
- memory=memory,
19
- verbose=True
20
- )
21
 
22
  """读取document/business_context.py文件内容作为context"""
23
  context_path = "./documents/bussiness_context/business_context.md"
@@ -33,18 +29,18 @@ def sendMessage(chatbot, input):
33
 
34
  def clearMemory(chatbot):
35
  chatbot.clear()
36
- memory.clear()
 
37
  return chatbot, ""
38
 
39
 
40
  def feedBack(context, story, chatbot=[], input=""):
41
  if len(input) > 0:
42
- context += (f"\n- {input}")
43
  with open(context_path, 'w') as f:
44
  f.write(context)
45
-
46
- response = llmchain.run(
47
- input=(input if len(input) == 0 else ("Answer: " + input)), context=context, story=story, stop="\nAnswer:")
48
  chatbot[-1][1] = response
49
  return chatbot, "", context
50
 
@@ -53,7 +49,7 @@ customerEmbedding = CustomEmbedding()
53
 
54
  faqChain = customerEmbedding.getFAQChain()
55
 
56
-
57
  def faqFromLocal(input, chatbot=[]):
58
  response = faqChain.run(input)
59
  chatbot.append((input, response))
@@ -65,6 +61,16 @@ def generateEmbeddings():
65
  customerEmbedding.calculateNotionEmbedding()
66
 
67
 
 
 
 
 
 
 
 
 
 
 
68
  with gr.Blocks() as demo:
69
  with gr.Row():
70
  with gr.Column():
@@ -74,7 +80,10 @@ with gr.Blocks() as demo:
74
  txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(
75
  container=False)
76
  with gr.Row():
77
- faq = gr.Textbox(show_label=False, placeholder="A&Q from local context").style(
 
 
 
78
  container=False)
79
  with gr.Column():
80
  with gr.Row():
@@ -96,6 +105,10 @@ with gr.Blocks() as demo:
96
  feedBack, [context, story, chatbot], [chatbot, txt])
97
  txt.submit(sendMessage, [chatbot, txt], [chatbot]).then(
98
  feedBack, [context, story, chatbot, txt], [chatbot, txt, context])
 
 
 
 
99
  faq.submit(faqFromLocal, [faq, chatbot], [chatbot, faq])
100
  generateEmbedding.click(generateEmbeddings)
101
 
 
2
  from langchain.chains import LLMChain
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.document_loaders import TextLoader
5
+ from chains import HumanFeedBackChain
6
  from embedding import CustomEmbedding
7
  from memories import HumenFeedbackBufferMemory
8
  from langchain.memory import ConversationBufferMemory
9
  from promopts import FEEDBACK, FEEDBACK_PROMPT
10
+ from code_generate import code_agent_executor
11
 
12
+ # llm = ChatOpenAI(temperature=0.1)
13
 
14
+ baMemory = HumenFeedbackBufferMemory(
15
+ input_key="input", human_prefix="Answer", ai_prefix="AI")
16
+ baChain = HumanFeedBackChain(verbose=True, memory=baMemory)
 
 
 
 
 
 
17
 
18
  """读取document/business_context.py文件内容作为context"""
19
  context_path = "./documents/bussiness_context/business_context.md"
 
29
 
30
  def clearMemory(chatbot):
31
  chatbot.clear()
32
+ if baMemory != None:
33
+ baMemory.clear()
34
  return chatbot, ""
35
 
36
 
37
  def feedBack(context, story, chatbot=[], input=""):
38
  if len(input) > 0:
39
+ context += (f"\n\n {input}")
40
  with open(context_path, 'w') as f:
41
  f.write(context)
42
+ response = baChain.run(
43
+ input=(input if len(input) == 0 else input), context=context, story=story, stop="\nAnswer:")
 
44
  chatbot[-1][1] = response
45
  return chatbot, "", context
46
 
 
49
 
50
  faqChain = customerEmbedding.getFAQChain()
51
 
52
+ code_agent_executor = code_agent_executor()
53
  def faqFromLocal(input, chatbot=[]):
54
  response = faqChain.run(input)
55
  chatbot.append((input, response))
 
61
  customerEmbedding.calculateNotionEmbedding()
62
 
63
 
64
+ def generateCode(input: str, chatbot=[]):
65
+ if len(input) <=0:
66
+ chatbot[-1][1] = None
67
+ return chatbot, ""
68
+ response = code_agent_executor.run(
69
+ input=(input if len(input) == 0 else input))
70
+ chatbot[-1][1] = response
71
+ return chatbot, ""
72
+
73
+
74
  with gr.Blocks() as demo:
75
  with gr.Row():
76
  with gr.Column():
 
80
  txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(
81
  container=False)
82
  with gr.Row():
83
+ code = gr.Textbox(show_label=True, label="Code Generate", placeholder="Enter text and press enter").style(
84
+ container=False)
85
+ with gr.Row():
86
+ faq = gr.Textbox(show_label=False, placeholder="Q&A from local context").style(
87
  container=False)
88
  with gr.Column():
89
  with gr.Row():
 
105
  feedBack, [context, story, chatbot], [chatbot, txt])
106
  txt.submit(sendMessage, [chatbot, txt], [chatbot]).then(
107
  feedBack, [context, story, chatbot, txt], [chatbot, txt, context])
108
+
109
+ code.submit(sendMessage, [chatbot, code], [chatbot]).then(
110
+ generateCode, [code, chatbot], [chatbot, code])
111
+
112
  faq.submit(faqFromLocal, [faq, chatbot], [chatbot, faq])
113
  generateEmbedding.click(generateEmbeddings)
114
 
chains.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Optional
2
+ from langchain.chains import LLMChain
3
+ from langchain.base_language import BaseLanguageModel
4
+ from langchain.schema import LLMResult, PromptValue
5
+ from langchain.prompts import PromptTemplate
6
+ from langchain.memory.chat_memory import BaseMemory
7
+ from langchain.chat_models import ChatOpenAI
8
+
9
+ from promopts import FEEDBACK_PROMPT
10
+
11
+
12
+ class HumanFeedBackChain(LLMChain):
13
+ """Chain to run queries against LLMs."""
14
+
15
+ memory: Optional[BaseMemory] = None
16
+
17
+ def __init__(self, verbose=True, llm: BaseLanguageModel = ChatOpenAI(temperature=0.7), memory: Optional[BaseMemory] = None, prompt: PromptTemplate = FEEDBACK_PROMPT):
18
+ super().__init__(llm=llm, prompt=prompt, memory=memory, verbose=verbose)
19
+
20
+ def run(self, *args: Any, **kwargs: Any) -> str:
21
+ """Run the chain as text in, text out or multiple variables, text out."""
22
+ if len(self.output_keys) != 1:
23
+ raise ValueError(
24
+ f"`run` not supported when there is not exactly "
25
+ f"one output key. Got {self.output_keys}."
26
+ )
27
+
28
+ if args and not kwargs:
29
+ if len(args) != 1:
30
+ raise ValueError(
31
+ "`run` supports only one positional argument.")
32
+ return self("Answer:" + args[0])[self.output_keys[0]]
33
+
34
+ if kwargs and not args:
35
+ return self(kwargs)[self.output_keys[0]]
36
+
37
+ raise ValueError(
38
+ f"`run` supported with either positional arguments or keyword arguments"
39
+ f" but not both. Got args: {args} and kwargs: {kwargs}."
40
+ )
41
+
42
+
code_generate.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from typing import List, Union
3
+ from langchain.chains import LLMChain
4
+ from langchain.chat_models import ChatOpenAI
5
+ from langchain.agents import tool, Tool, LLMSingleActionAgent, AgentExecutor, AgentOutputParser
6
+ from langchain.schema import AgentAction, AgentFinish
7
+ from langchain.agents import initialize_agent
8
+ from langchain.memory import ConversationBufferMemory
9
+ from langchain.prompts import StringPromptTemplate
10
+ from promopts import code_generate_agent_template
11
+
12
+
13
+
14
+
15
+ from promopts import API_LAYER_PROMPT, DOMAIN_LAYER_PROMPT, PERSISTENT_LAYER_PROMPT
16
+
17
+ domainLayerChain = LLMChain(llm = ChatOpenAI(temperature=0.1), prompt=DOMAIN_LAYER_PROMPT)
18
+
19
+ persistentChain = LLMChain(llm = ChatOpenAI(temperature=0.1), prompt=PERSISTENT_LAYER_PROMPT)
20
+
21
+ apiChain = LLMChain(llm = ChatOpenAI(temperature=0.1), prompt=API_LAYER_PROMPT)
22
+
23
+ @tool("Generate Domain Layer Code", return_direct=True)
24
+ def domainLayerCodeGenerator(input: str) -> str:
25
+ '''useful for when you need to generate domain layer code'''
26
+ response = domainLayerChain.run(input)
27
+ return response
28
+
29
+ @tool("Generate Persistent Layer Code", return_direct=True)
30
+ def persistentLayerCodeGenerator(input: str) -> str:
31
+ '''useful for when you need to generate persistent layer code'''
32
+ response = persistentChain.run(input)
33
+ return response
34
+
35
+ @tool("Generate API Layer Code", return_direct=True)
36
+ def apiLayerCodeGenerator(input: str) -> str:
37
+ '''useful for when you need to generate API layer code'''
38
+ response = apiChain.run(input)
39
+ return response
40
+
41
+
42
+
43
+ class CustomPromptTemplate(StringPromptTemplate):
44
+ # The template to use
45
+ template: str
46
+ # The list of tools available
47
+ tools: List[Tool]
48
+
49
+ def format(self, **kwargs) -> str:
50
+ # Get the intermediate steps (AgentAction, Observation tuples)
51
+ # Format them in a particular way
52
+ intermediate_steps = kwargs.pop("intermediate_steps")
53
+ thoughts = ""
54
+ for action, observation in intermediate_steps:
55
+ thoughts += action.log
56
+ thoughts += f"\nObservation: {observation}\nThought: "
57
+ # Set the agent_scratchpad variable to that value
58
+ kwargs["agent_scratchpad"] = thoughts
59
+ # Create a tools variable from the list of tools provided
60
+ kwargs["tools"] = "\n".join(
61
+ [f"{tool.name}: {tool.description}" for tool in self.tools])
62
+ # Create a list of tool names for the tools provided
63
+ kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
64
+ return self.template.format(**kwargs)
65
+
66
+
67
+ class CustomOutputParser(AgentOutputParser):
68
+
69
+ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
70
+ # Check if agent should finish
71
+ if "Final Answer:" in llm_output:
72
+ return AgentFinish(
73
+ # Return values is generally always a dictionary with a single `output` key
74
+ # It is not recommended to try anything else at the moment :)
75
+ return_values={"output": llm_output.split(
76
+ "Final Answer:")[-1].strip()},
77
+ log=llm_output,
78
+ )
79
+ # Parse out the action and action input
80
+ regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
81
+ match = re.search(regex, llm_output, re.DOTALL)
82
+ if not match:
83
+ raise ValueError(f"Could not parse LLM output: `{llm_output}`")
84
+ action = match.group(1).strip()
85
+ action_input = match.group(2)
86
+ # Return the action and action input
87
+ return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
88
+
89
+ # chatllm=ChatOpenAI(temperature=0)
90
+ # code_genenrate_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
91
+ # code_generate_agent = initialize_agent(tools, chatllm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, memory=memory, verbose=True)
92
+
93
+
94
+
95
+ # agent = initialize_agent(
96
+ # tools=tools, llm=llm_chain, template=AGENT_PROMPT, stop=["\nObservation:"], agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
97
+
98
+
99
+ def code_agent_executor() -> AgentExecutor:
100
+
101
+
102
+ tools = [domainLayerCodeGenerator, persistentLayerCodeGenerator, apiLayerCodeGenerator]
103
+
104
+ output_parser = CustomOutputParser()
105
+
106
+
107
+ AGENT_PROMPT = CustomPromptTemplate(
108
+ template=code_generate_agent_template,
109
+ tools=tools,
110
+ # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
111
+ # This includes the `intermediate_steps` variable because that is needed
112
+ input_variables=["input", "intermediate_steps"]
113
+ )
114
+
115
+ code_llm_chain = LLMChain(llm=ChatOpenAI(temperature=0.7), prompt=AGENT_PROMPT)
116
+
117
+ tool_names = [tool.name for tool in tools]
118
+ code_agent = LLMSingleActionAgent(
119
+ llm_chain=code_llm_chain,
120
+ output_parser=output_parser,
121
+ stop=["\nObservation:"],
122
+ allowed_tools=tool_names,
123
+ )
124
+
125
+ code_agent_executor = AgentExecutor.from_agent_and_tools(
126
+ agent=code_agent, tools=tools, verbose=True)
127
+ return code_agent_executor
128
+
129
+ # if __name__ == "__main__":
130
+ # response = domainLayerChain.run("""FeatureConfig用于配置某个Feature中控制前端展示效果的配置项
131
+ # FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间
132
+ # FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED)
133
+ # FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED
134
+ # 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作
135
+ # 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作
136
+ # 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作
137
+ # """)
138
+
139
+ # print(response)
140
+
141
+
142
+ # response = persistentChain.run("""
143
+ # Entity:
144
+ # ```
145
+ # public class FeatureConfig {
146
+ # private FeatureConfigId id;
147
+ # private FeatureConfigDescription description;
148
+
149
+ # public enum FeatureConfigStatus {
150
+ # DRAFT, PUBLISHED, DISABLED;
151
+ # }
152
+
153
+ # public record FeatureConfigId(String id) {}
154
+ # public record FeatureKey(String key) {}
155
+ # public record FeatureConfigData(String data) {}
156
+ # public record FeatureConfigSaData(String saData) {}
157
+
158
+ # @Builder
159
+ # public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description,
160
+ # FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {}
161
+
162
+ # public void update(FeatureConfigDescription description) {
163
+ # this.title = description.title();
164
+ # this.description = description.description();
165
+ # this.updateTime = LocalDateTime.now();
166
+ # }
167
+
168
+ # public void publish() {
169
+ # this.status = FeatureConfigStatus.PUBLISHED;
170
+ # this.updateTime = LocalDateTime.now();
171
+ # }
172
+
173
+ # public void disable() {
174
+ # this.status = FeatureConfigStatus.DISABLED;
175
+ # this.updateTime = LocalDateTime.now();
176
+ # }
177
+ # }
178
+ # ```
179
+
180
+ # Association:
181
+ # ```
182
+ # public interface FeatureConfigs {
183
+ # Flux<FeatureConfig> findAllByFeatureKey(String featureKey);
184
+ # Mono<FeatureConfig> findById(FeatureConfigId id);
185
+ # Mono<FeatureConfig> save(FeatureConfig featureConfig);
186
+ # }
187
+ # ```
188
+ # """)
189
+
190
+ # print(response)
191
+
192
+
193
+ # response = apiChain.run("""
194
+ # Entity:
195
+ # ```
196
+ # public class FeatureConfig {
197
+ # private FeatureConfigId id;
198
+ # private FeatureConfigDescription description;
199
+
200
+ # public enum FeatureConfigStatus {
201
+ # DRAFT, PUBLISHED, DISABLED;
202
+ # }
203
+
204
+ # public record FeatureConfigId(String id) {}
205
+ # public record FeatureKey(String key) {}
206
+ # public record FeatureConfigData(String data) {}
207
+ # public record FeatureConfigSaData(String saData) {}
208
+
209
+ # @Builder
210
+ # public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description,
211
+ # FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {}
212
+
213
+ # public void update(FeatureConfigDescription description) {
214
+ # this.title = description.title();
215
+ # this.description = description.description();
216
+ # this.updateTime = LocalDateTime.now();
217
+ # }
218
+
219
+ # public void publish() {
220
+ # this.status = FeatureConfigStatus.PUBLISHED;
221
+ # this.updateTime = LocalDateTime.now();
222
+ # }
223
+
224
+ # public void disable() {
225
+ # this.status = FeatureConfigStatus.DISABLED;
226
+ # this.updateTime = LocalDateTime.now();
227
+ # }
228
+ # }
229
+ # ```
230
+
231
+ # Association:
232
+ # ```
233
+ # public interface FeatureConfigs {
234
+ # Flux<FeatureConfig> findAllByFeatureKey(String featureKey);
235
+ # Mono<FeatureConfig> findById(FeatureConfigId id);
236
+ # Mono<FeatureConfig> save(FeatureConfig featureConfig);
237
+ # Mono<Void> update(FeatureConfigId id, FeatureConfigDescription description);
238
+ # Mono<Void> publish(FeatureConfigId id);
239
+ # Mono<Void> disable(FeatureConfigId id);
240
+ # }
241
+ # ```
242
+ # """)
243
+
244
+ # print(response)
245
+
246
+ # if __name__ == "code_generate":
247
+ # response = code_agent_executor.run("""
248
+ # 根据如下需求generate domain layer code:
249
+ # ---
250
+ # FeatureConfig用于配置某个Feature中控制前端展示效果的配置项
251
+ # FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间
252
+ # FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED)
253
+ # FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED
254
+ # 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作
255
+ # 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作
256
+ # 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作
257
+ # ---
258
+ # """)
259
+ # print(response)
documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Engineering Interviews 4be8039581d04456b0151f2cc4b22130/Questions ede8818b3a0e447f80145905690eb3f6.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Question Name,Difficulty,Skills
2
+ To Do List Design,Medium,"Architecture, Backend, Front end"
3
+ FizzBuzz,Easy,"Algorithms, Front end"
4
+ Alphabet Ordering,Hard,"Algorithms, Data Structures"
documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Getting Started 6bc871dcdd4a4554b5b22c0c40740841/Example sub-page 48f64d6186ec4428b2e4180475245a9c.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Example sub-page
2
+
3
+ Last edited time: March 31, 2023 1:49 PM
4
+ Owner: Anonymous
5
+ Tags: Testing
documents/bussiness_context/business_context.md CHANGED
@@ -1,13 +1,22 @@
1
- ABstract是一个AB测试平台,整合了配置管理和实验管理,为企业提供数据驱动的决策能力。例如,一个用户想要创建一个实验来测试两个不同版本的网页,以了解哪个版本能带来更多的服务注册。他们将创建一个实验,包含两个分组(bucket),每个分组对应一个网页版本,并将不同的用户分配到每个分组以比较结果。
 
 
 
2
 
3
- 每个实验可以包含1个或多个分组,每个分组都有一个不同的配置。实验信息包括:实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
 
 
 
 
4
 
5
- 实验和分组可以一起配置。实验中的配置用于为用户提供不同的交互体验。一个实验最多可以创建10个分组。分组配置将作为分组的一个配置项与分组关联。
6
 
7
- 实验分为灰度发布、AB对比和多分组实验三种类型。一个实验应该包含:实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
8
 
9
- 实验的目的是将用户随机分配到不同的分组,不同分组中的用户获得不同的软件交互体验。通过收集不同分组中用户的行为数据,对比不同设计的效果。一个实验至少包含1个,最多10个分组。
10
 
11
- 用户在同一个编辑界面编辑实验和分组信息,点击保存按钮时一起提交。创建实验时应该包含:实验名称、分流比例、开始时间、截止时间、分组(名称、关键字、分流比例)。实验类型分为灰度发布、AB对比和多分组实验三种。
12
- - 实验的目标是通过将用户随机分组,收集不同组用户的行为数据,对比分析不同设计的效果,找出最有设计
13
- - 实验有三种: 1.灰度发布(一个实验只有一个分组,通过实验采样比例控制新特性的发布比例)2. A/B对比(一个实验有两个分组,在用户总量中采样部分用户进行实验,对比新旧两组的数据)3. 多分组实验(一个实验有多个分组,在用户总量中采样部分用户进行实验,对比不同设计的效果))
 
 
 
1
+ AB测试系统中一个重要功能是用于控制某个feature的配置下发管理,配置管理中有两个主要业务概念FeatureFlag 和 FeatureConfig
2
+ FeatureFlag用于标识某个具体Feature
3
+ FeatureFlag主要属性包括: featureKey(feature标识)、名称、描述、enabled、创建时间、最后更新时间、template(作为FeatureConfig的模版用于生成后续FeatureConfig的配置界面组件,属性包括key、名称、描述、dataType、items)
4
+ template中dataType为枚举值,取值范围为(STRING、NUMBER、BOOLEAN、OBJECT、ARRAY)
5
 
6
+ FeatureConfig用于配置某个Feature中控制前端展示效果的配置项
7
+ FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间
8
+ FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED)
9
+ FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED
10
+ 一个FeatureFlag中可以包含多个FeatureConfig, FeatureFlag和FeatureConfig通过featureKey字段进行关联
11
 
12
+ FeatureConfig对应feature控制的不同版本配置内容,添加FeatureConfig目的是控制featureConfig消费方的某个行为
13
 
14
+ 添加FeatureConfig应该包含featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述信息
15
 
16
+ 新创建的FeatureConfig状态为DRAFT
17
 
18
+ 添加FeatureConfig应该包含:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述信息
19
+
20
+ 新创建的FeatureConfig状态为DRAFT
21
+
22
+ 一个FeatureFlag中可以包含多个FeatureConfig
documents/bussiness_context/business_context.md.bak ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ABstract是一个AB测试平台,整合了配置管理和实验管理,为企业提供数据驱动的决策能力。例如,一个用户想要创建一个实验来测试两个不同版本的网页,以了解哪个版本能带来更多的服务注册。他们将创建一个实验,包含两个分组(bucket),每个分组对应一个网页版本,并将不同的用户分配到每个分组以比较结果。
2
+
3
+ 每个实验可以包含1个或多个分组,每个分组都有一个不同的配置。实验信息包括:实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
4
+
5
+ 实验和分组可以一起配置。实验中的配置用于为用户提供不同的交互体验。一个实验最多可以创建10个分组。分组配置将作为分组的一个配置项与分组关联。
6
+
7
+ 实验分为灰度发布、AB对比和多分组实验三种类型。一个实验应该包含:实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
8
+
9
+ 实验的目的是将用户随机分配到不同的分组,不同分组中的用户获得不同的软件交互体验。通过收集不同分组中用户的行为数据,对比不同设计的效果。一个实验至少包含1个,最多10个分组。
10
+
11
+ 用户在同一个编辑界面编辑实验和分组信息,点击保存按钮时一起提交。创建实验时应该包含:实验名称、分流比例、开始时间、截止时间、分组(名称、关键字、分流比例)。实验类型分为灰度发布、AB对比和多分组实验三种。
12
+ - 实验的目标是通过将用户随机分组,收集不同组用户的行为数据,对比分析不同设计的效果,找出最有设计
13
+ - 实验有三种: 1.灰度发布(一个实验只有一个分组,通过实验采样比例控制新特性的发布比例)2. A/B对比(一个实验有两个分组,在用户总量中采样部分用户进行实验,对比新旧两组的数据)3. 多分组实验(一个实验有多个分组,在用户总量中采样部分用户进行实验,对比不同设计的效果))
14
+ - 向实验列表增加一个实验记录
15
+ - 实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
16
+
17
+ 例如,一个用户想要创建一个实验来测试两个不同版本的网页,以了解哪个版本能带来更多的服务注册。他们将创建一个实验,包含两个分组(bucket),每个分组对应一个网页版本,并将不同的用户分配到每个分组以比较结果。
18
+
19
+ 例如,一个用户想要创建一个实验来测试两个不同版本的网页,以了解哪个版本能带来更多的服务注册。他们将创建一个实验,包含两个分组(bucket),每个分组对应一个网页版本,并将不同的用户分配到每个分组以比较结果。
20
+
21
+ 实验信息包括:实验名称、实验描述、开始和结束时间、抽样百分比、识别实验的关键字、每个分组的名称及其关联的配置和分配给每个分组的流量百分比,以及将用户分配到分组的目标规则。
memories.py CHANGED
@@ -11,20 +11,20 @@ def get_buffer_string(
11
  for m in messages:
12
  if isinstance(m, HumanMessage):
13
  print("HumanMessage: " + m.content)
14
- role = human_prefix + ":"
15
  elif isinstance(m, AIMessage):
16
  print("AIMessage" + m.content)
17
  role = ""
18
  elif isinstance(m, SystemMessage):
19
  print("SystemMessage")
20
- role = "System:"
21
  elif isinstance(m, ChatMessage):
22
  print("ChatMessage")
23
- role = m.role + ":"
24
  else:
25
  raise ValueError(f"Got unsupported message type: {m}")
26
 
27
- string_messages.append(f"{m.content}")
28
 
29
  return "\n".join(string_messages)
30
 
 
11
  for m in messages:
12
  if isinstance(m, HumanMessage):
13
  print("HumanMessage: " + m.content)
14
+ role = human_prefix + ": "
15
  elif isinstance(m, AIMessage):
16
  print("AIMessage" + m.content)
17
  role = ""
18
  elif isinstance(m, SystemMessage):
19
  print("SystemMessage")
20
+ role = "System: "
21
  elif isinstance(m, ChatMessage):
22
  print("ChatMessage")
23
+ role = m.role + ": "
24
  else:
25
  raise ValueError(f"Got unsupported message type: {m}")
26
 
27
+ string_messages.append(f"{role + m.content}")
28
 
29
  return "\n".join(string_messages)
30
 
promopts.py CHANGED
@@ -21,8 +21,325 @@ Scenarios: List all possible scenarios with concrete example in Given/When/Then
21
 
22
 
23
  {history}
24
- {input}"""
25
 
26
  FEEDBACK_PROMPT = PromptTemplate(
27
  input_variables=["context", "story", "history", "input"], template=FEEDBACK,
28
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
 
23
  {history}
24
+ Answer:{input}"""
25
 
26
  FEEDBACK_PROMPT = PromptTemplate(
27
  input_variables=["context", "story", "history", "input"], template=FEEDBACK,
28
  )
29
+
30
+
31
+ agent_template = """You are a business analyst who is familiar with specification by example. Your main task is to explain the user story as scenarios.
32
+ You have access to the following tools:
33
+
34
+ {tools}
35
+
36
+ Use the following format:
37
+
38
+ Story: the story that you need to explain
39
+ Thought: you should always think about what is still uncertain about the user story to Explain the user story. Ignore technical concerns.
40
+ Action: the action to take, should be one of [{tool_names}]
41
+ Action Input: the input to the action
42
+ Observation: the result of the action
43
+ ... (this Thought/Action/Action Input/Observation can repeat 10 times)
44
+ Thought: I now know the final answer
45
+ Final Answer: List all possible scenarios with concrete example in Given/When/Then style
46
+
47
+ Begin!
48
+
49
+ Story: {input}
50
+ {agent_scratchpad}"""
51
+
52
+
53
+ DOMAIN_LAYER = """You are a software developer. Your task is to generate the domain layer tests and product code.
54
+
55
+ ===TechStack
56
+ Java17、reactor、lombok、Junit5、reactor test、Mockito
57
+ ===END OF TechStack
58
+
59
+ ===Architecture
60
+ the domain layer inclue 2 componets:
61
+ * Entity: This component is use to represents business concepts and encapsulates business rules.
62
+ ---eaxmple code:
63
+ public class Feature {{
64
+ private FeatureId id;
65
+ private FeatureDescription description;
66
+
67
+ public record FeatureId(String featureKey) {{
68
+
69
+ }}
70
+
71
+ @Builder
72
+ public record FeatureDescription(String name, String description, Boolean isEnable))) {{
73
+
74
+ }}
75
+ }}
76
+ ---end of eaxmple code
77
+ * Association: This component is use to define association between entities, which can represents the concept of a set of entity.
78
+ ---eaxmple code:
79
+ public interface Features {{
80
+ Flux<Feature> findAll();
81
+
82
+ Mono<Feature> findById(FeatureId id);
83
+
84
+ Mono<Feature> save(Feature feature);
85
+
86
+ Mono<Void> update(FeatureId id, FeatureDescription description);
87
+
88
+ Mono<Void> delete(FeatureId id);
89
+
90
+ Mono<Void> publish(FeatureId id);
91
+
92
+ Mono<Void> disable(FeatureId id);
93
+ }}
94
+ ---end of eaxmple code
95
+ ===END OF Architecture
96
+
97
+ ===TestStrategy
98
+ For the Entity, we can write unit test to ensure that the business rules related to Merchandise are correctly encapsulated.
99
+ For the Association,do not write tests because it is has no impletation.
100
+ ===END OF TestStrategy
101
+
102
+ Use the following format:
103
+ request: the request that you need to fulfill
104
+
105
+ the code just include Enity Association and tests for Entity that you write to fulfill the request, follow TechStack Architecture TestStrategy to fulfill request, you should cover more test case as posible
106
+
107
+
108
+ request: {input}"""
109
+
110
+
111
+ DOMAIN_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=DOMAIN_LAYER,)
112
+
113
+
114
+ PERSISTENT_LAYER = """You are a software developer. Your task is to generate the persistent layer tests and product code.
115
+
116
+ ===TechStack
117
+ Java17、reactor、lombok、Junit5、reactor test、Mockito、 Spring Data Reactive Couchbase、Testcontainers、Couchbase、WebClient
118
+ ===END OF TechStack
119
+
120
+ ===Architecture
121
+ the persistent layer inclue 3 componets:
122
+ * DbEntity: This component is use to define data structure that save to DB.
123
+ ---eaxmple code:
124
+ @Document
125
+ public class FeatureDb {{
126
+ @Version
127
+ private long version;
128
+
129
+ @Id
130
+ @GeneratedValue(strategy = GenerationStrategy.UNIQUE)
131
+ private String id;
132
+
133
+ private String featureKey;
134
+
135
+ private Feature.FeatureDescription description;
136
+ }}
137
+ ---end of eaxmple code
138
+ * Repository: This component is use to define the interface to access DB.
139
+ ---eaxmple code:
140
+ public interface FeatureDbRepository extends ReactiveCrudRepository<FeatureDb, String> {{
141
+ Mono<FeatureDb> findByFeatureKey(String featureKey);
142
+ }}
143
+ ---end of eaxmple code
144
+ * Association Impletation: This component provide implementation of Association of domain layer, rely on Repository.
145
+ ---eaxmple code:
146
+ @Component
147
+ @RequiredArgsConstructor
148
+ public class FeaturesImpl implements Features{{
149
+ private final FeatureDbRepository featureDbRepository;
150
+
151
+ Flux<Feature> findAll() {{
152
+ return featureDbRepository.findAll().map(FeatureDb::toFeature);
153
+ }}
154
+
155
+ Mono<Feature> save(Feature feature) {{
156
+ return featureDbRepository.save(FeatureDb.fromFeature(feature)).map(FeatureDb::toFeature);
157
+ }}
158
+ }}
159
+ ---end of eaxmple code
160
+ ===END OF Architecture
161
+
162
+ ===TestStrategy
163
+ For the DbEntity And Repository, we can write component test to test the actual implementation of database operations, test class should extends RepositoryTestBase to use Testcontainers ability.
164
+ ---eaxmple code:
165
+ class FeatureDbRepositoryTest extends RepositoryTestBase {{
166
+ @Autowired
167
+ FeatureDbRepository repository;
168
+
169
+ @BeforeEach
170
+ void setUp() {{
171
+ repository.deleteAll().block();
172
+ }}
173
+
174
+ @AfterEach
175
+ void tearDown() {{
176
+ repository.deleteAll().block();
177
+ }}
178
+
179
+ @Test
180
+ void should_save_Feature_success() {{
181
+ var featureKey = "featureKey1";
182
+ repository.save(FeatureTestUtil.createFeatureDb(featureKey))
183
+ .as(StepVerifier::create)
184
+ .expectNextCount(1)
185
+ .verifyComplete();
186
+ }}
187
+
188
+ @Test
189
+ void should_add_same_featureKey_fail() {{
190
+ var featureKey = "featureKey1";
191
+ repository.save(FeatureTestUtil.createFeatureDb(featureKey)).block();
192
+
193
+ repository.save(FeatureTestUtil.createFeatureDb(featureKey))
194
+ .as(StepVerifier::create)
195
+ .expectError()
196
+ .verify();
197
+ }}
198
+ }}
199
+ ---end of eaxmple code
200
+ For Association Impletation,we writ unit test, and stub repository method with Mockito.
201
+ ---eaxmple code:
202
+ @ExtendWith(MockitoExtension.class)
203
+ class FeatureImplTest {{
204
+ @Mock
205
+ FeatureDbRepository repository;
206
+
207
+ Features features;
208
+
209
+ @BeforeEach
210
+ void setUp() {{
211
+ features = new FeaturesImpl(repository);
212
+ }}
213
+
214
+ @Test
215
+ void should_add_success() {{
216
+ when(repository.save(any(FeatureDb.class))).thenAnswer(invocation -> {{
217
+ FeatureDb featureDb = invocation.getArgument(0);
218
+ return Mono.just(featureDb);
219
+ }});
220
+
221
+ features.add(createFeature("featureKey1"))
222
+ .as(StepVerifier::create)
223
+ .expectNextMatches(config -> config.getId().featureKey().equals("featureKey1")
224
+ && config.getDescription().updatedAt() != null
225
+ && config.getDescription().createdAt() != null
226
+ )
227
+ .verifyComplete();
228
+ }}
229
+
230
+ @Test
231
+ void should_add_return_error_when_repository_save_error() {{
232
+ Feature feature = createFeature("featureKey1");
233
+
234
+ when(repository.save(any(FeatureDb.class))).thenReturn(Mono.error(new DuplicateKeyException("save error")));
235
+
236
+ features.add(feature)
237
+ .as(StepVerifier::create)
238
+ .expectError()
239
+ .verify();
240
+ }}
241
+ }}
242
+
243
+
244
+ ===END OF TestStrategy
245
+
246
+ Use the following format:
247
+ request: the request that you need to fulfill include Entity and Association of domain layer
248
+
249
+ the code just include DbEnity、Repository、Association Implement and tests code that you write to fulfill the request, follow TechStack Architecture TestStrategy to fulfill request, you should cover more test case as posible
250
+
251
+
252
+ request: {input}"""
253
+
254
+ PERSISTENT_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=PERSISTENT_LAYER,)
255
+
256
+
257
+
258
+ API_LAYER = """You are a software developer. Your task is to generate the api layer tests and product code.
259
+
260
+ ===TechStack
261
+ Java17、reactor、lombok、Junit5、reactor test、Mockito、 Spring WebFlux、Spring Boot Test
262
+ ===END OF TechStack
263
+
264
+ ===Architecture
265
+ the api layer inclue 2 componets:
266
+ * DTO: This component is use to define data structure that api request and response.
267
+ * Controller: This component is use to define the interface to access api.
268
+ ---eaxmple code:
269
+ @RestController
270
+ @RequiredArgsConstructor
271
+ @RequestMapping("/features")
272
+ public class FeatureController {{
273
+ private final Features features;
274
+
275
+ @GetMapping()
276
+ public Flux<Feature> findAll() {{
277
+ return features.getAll();
278
+ }}
279
+
280
+ @PostMapping()
281
+ public Mono<Feature> add(@RequestBody Feature feature) {{
282
+ return features.add(feature);
283
+ }}
284
+ }}
285
+ ---end of eaxmple code
286
+ ===END OF Architecture
287
+
288
+ ===TestStrategy
289
+ For the Controller and DTO, we can write component test to test the actual implementation of api operations, test class rely on Association interface use WebFluxTest and WebTestClient ability.
290
+ ---eaxmple code:
291
+ @ExtendWith(SpringExtension.class)
292
+ @WebFluxTest(value = FeatureFlagApi.class, properties = "spring.main.lazy-initialization=true")
293
+ @ContextConfiguration(classes = TestConfiguration.class)
294
+ class FeatureControllerTest extends ControllerTestBase {{
295
+ @Autowired
296
+ WebTestClient webClient;
297
+
298
+ @MockBean
299
+ Features features;
300
+
301
+ @Test
302
+ void should_getAll_success_when_no_records() {{
303
+ when(features.getAll(Mockito.any())).thenReturn(Flux.empty());
304
+
305
+ webClient.get()
306
+ .uri("/features")
307
+ .exchange()
308
+ .expectStatus()
309
+ .isOk()
310
+ .expectBodyList(FeatureFlagResponse.class)
311
+ .hasSize(0);
312
+ }}
313
+ }}
314
+ ---end of eaxmple code
315
+ ===END OF TestStrategy
316
+
317
+ Use the following format:
318
+ request: the request that you need to fulfill include Entity and Association of domain layer
319
+
320
+ the code just include Controller、DTO and tests code that you write to fulfill the request, follow TechStack Architecture TestStrategy to fulfill request, you should cover as more test case as posible
321
+
322
+ request: {input}"""
323
+
324
+ API_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=API_LAYER,)
325
+
326
+
327
+ code_generate_agent_template = """You are a tool picker.
328
+ You should pick a tool of following tools to ansser the question.:
329
+
330
+ {tools}
331
+
332
+ Use the following format:
333
+
334
+ Request: the request
335
+ Thought: which tool should used to fufill this request and pass the original content of Request to it.
336
+ Action: the action to take, should be one of [{tool_names}]
337
+ Action Input: the original content of Request
338
+ Observation: the result of the action
339
+ ... (this Thought/Action/Action Input/Observation can repeat 1 times)
340
+ Final Answer: the result of the action
341
+
342
+ Begin!
343
+
344
+ Request: {input}
345
+ {agent_scratchpad}"""