srush (HF staff) committed
Commit 8200c4e
1 Parent(s): 32b9dda

Upload with huggingface_hub

Files changed (29)
  1. #agent.pmpt.txt# +35 -0
  2. #chat.py# +34 -34
  3. #ner.py# +29 -54
  4. agent.pmpt.tpl +37 -0
  5. agent.pmpt.tpl~ +38 -0
  6. agent.pmpt.txt +2 -3
  7. agent.pmpt.txt~ +7 -5
  8. agent.py +65 -34
  9. agent.py~ +75 -0
  10. app.py +11 -7
  11. app.py~ +5 -7
  12. bash.py +2 -2
  13. chat.py +16 -6
  14. fake_agent.py +59 -0
  15. fake_agent.py~ +75 -0
  16. gatsby.py +9 -7
  17. gradio_example.py +13 -13
  18. math_demo.py +7 -5
  19. mycache. +0 -0
  20. ner.py +8 -3
  21. pal.py +6 -9
  22. qa.py +10 -8
  23. requirements.gradio_example.txt +2 -0
  24. selfask.py +17 -10
  25. stats.py +41 -7
  26. table.py +13 -16
  27. temp +0 -0
  28. temp.log +0 -0
  29. type_prompt.pmpt.tpl +26 -0
#agent.pmpt.txt# ADDED
@@ -0,0 +1,35 @@
+ Assistant is a large language model trained by OpenAI.
+
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
+
+ TOOLS:
+ ------
+
+ Assistant has access to the following tools:
+
+ {{tools_names}}
+
+ To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{{tool_names}}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ AI: [your response here]
+ ```
+
+ Begin!
+
+ New input: {{input}}
+ {{agent_scratchpad}}
#chat.py# CHANGED
@@ -1,23 +1,20 @@
  # + tags=["hide_inp"]
  desc = """
- # ChatGPT
-
- "ChatGPT" like examples. Adapted from
- [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s
- version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).
+ ### Chat
+
+ A chat-like example for multi-turn chat with state. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/chat.ipynb)
+
+ (Adapted from [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).)
  """
  # -
 
- import warnings
- from dataclasses import dataclass
- from typing import List, Tuple
- import minichain
-
- # + tags=["hide_inp"]
- warnings.filterwarnings("ignore")
- # -
+ # $
+
+ from dataclasses import dataclass, replace
+ from typing import List, Tuple
+ from minichain import OpenAI, prompt, show, transform, Mock
 
  # Generic stateful Memory
 
@@ -32,26 +29,28 @@ class State:
          memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
          return State(memory + [(self.human_input, response)])
 
+     def __str__(self):
+         return self.memory[-1][-1]
+
  # Chat prompt with memory
 
- class ChatPrompt(minichain.TemplatePrompt):
-     template_file = "chatgpt.pmpt.tpl"
-     def parse(self, out: str, inp: State) -> State:
-         result = out.split("Assistant:")[-1]
-         return inp.push(result)
-
- # class Human(minichain.Prompt):
- #     def parse(self, out: str, inp: State) -> State:
- #         return inp.human_input = out
-
- with minichain.start_chain("chat") as backend:
-     prompt = ChatPrompt(backend.OpenAI())
-     state = State([])
+ @prompt(OpenAI(), template_file="chat.pmpt.tpl")
+ def chat_response(model, state: State) -> State:
+     return model.stream(state)
+
+ @transform()
+ def update(state, chat_output):
+     result = chat_output.split("Assistant:")[-1]
+     return state.push(result)
+
+ def chat(command, state):
+     state = replace(state, human_input=command)
+     return update(state, chat_response(state))
+
+ # $
 
  examples = [
-     "I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.",
      "ls ~",
      "cd ~",
      "{Please make a file jokes.txt inside and put some jokes inside}",
@@ -61,16 +60,17 @@ examples = [
      "nvidia-smi"
  ]
 
- gradio = prompt.to_gradio(fields= ["human_input"],
-                           initial_state= state,
-                           examples=examples,
-                           out_type="json",
-                           description=desc
+ print(chat("ls", State([])).run())
+
+ gradio = show(chat,
+               initial_state=State([]),
+               subprompts=[chat_response],
+               examples=examples,
+               out_type="json",
+               description=desc,
+               code=open("chat.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
-
- # for i in range(len(fake_human)):
- #     human.chain(prompt)
+     gradio.queue().launch()
#ner.py# CHANGED
@@ -1,71 +1,46 @@
  # + tags=["hide_inp"]
 
  desc = """
- # NER
-
- Notebook implementation of named entity recognition.
- Adapted from [promptify](https://github.com/promptslab/Promptify/blob/main/promptify/prompts/nlp/templates/ner.jinja).
+ ### Named Entity Recognition
+
+ Chain that does named entity recognition with arbitrary labels. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/ner.ipynb)
+
+ (Adapted from [promptify](https://github.com/promptslab/Promptify/blob/main/promptify/prompts/nlp/templates/ner.jinja)).
  """
  # -
 
- import json
-
- import minichain
-
- # Prompt to extract NER tags as json
-
- class NERPrompt(minichain.TemplatePrompt):
-     template_file = "ner.pmpt.tpl"
-
-     def parse(self, response, inp):
-         return json.loads(response)
-
- # Use NER to ask a simple queston.
-
- class TeamPrompt(minichain.Prompt):
-     def prompt(self, inp):
-         return "Can you describe these basketball teams? " + \
-             " ".join([i["E"] for i in inp if i["T"] =="Team"])
-
-     def parse(self, response, inp):
-         return response
-
- # Run the system.
-
- with minichain.start_chain("ner") as backend:
-     ner_prompt = NERPrompt(backend.OpenAI())
-     team_prompt = TeamPrompt(backend.OpenAI())
-     prompt = ner_prompt.chain(team_prompt)
-     # results = prompt(
-     #     {"text_input": "An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.",
-     #      "labels" : ["Team", "Date"],
-     #      "domain": "Sports"
-     #     }
-     # )
-     # print(results)
-
- gradio = prompt.to_gradio(fields =["text_input", "labels", "domain"],
-                           examples=[["An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.", "Team, Date", "Sports"]],
-                           description=desc)
+ # $
+
+ from minichain import prompt, transform, show, OpenAI
+ import json
+
+ @prompt(OpenAI(), template_file = "ner.pmpt.tpl")
+ def ner_extract(model, kwargs):
+     return model(kwargs)
+
+ @transform()
+ def to_json(chat_output):
+     return json.loads(chat_output)
+
+ @prompt(OpenAI())
+ def team_describe(model, inp):
+     query = "Can you describe these basketball teams? " + \
+         " ".join([i["E"] for i in inp if i["T"] =="Team"])
+     return model(query)
+
+ def ner(text_input, labels, domain):
+     extract = to_json(ner_extract(dict(text_input=text_input, labels=labels, domain=domain)))
+     return team_describe(extract)
+
+ # $
+
+ gradio = show(ner,
+               examples=[["An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.", "Team, Date", "Sports"]],
+               description=desc,
+               subprompts=[ner_extract, team_describe],
+               code=open("ner.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               )
 
  if __name__ == "__main__":
-     gradio.launch()
-
-
- # View prompt examples.
-
- # + tags=["hide_inp"]
- # NERPrompt().show(
- #     {
- #         "input": "I went to New York",
- #         "domain": "Travel",
- #         "labels": ["City"]
- #     },
- #     '[{"T": "City", "E": "New York"}]',
- # )
- # # -
-
- # # View log.
-
- # minichain.show_log("ner.log")
+     gradio.queue().launch()
agent.pmpt.tpl ADDED
@@ -0,0 +1,37 @@
+ Assistant is a large language model trained by OpenAI.
+
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
+
+ TOOLS:
+ ------
+
+ Assistant has access to the following tools:
+
+ {% for tool in tools%}
+ {{tool[0]}}: {{tool[1]}}
+ {% endfor %}
+
+ To use a tool, you MUST use exactly the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{% for tool in tools%}{{tool[0]}}, {% endfor %}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ AI: [your response here]
+ ```
+
+ Do NOT output in any other format. Begin!
+
+ New input: {{input}}
+ {{agent_scratchpad}}
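
For reference, the template above renders with plain jinja2; a minimal sketch of what the agent sees (the two tool name/description pairs here are invented placeholders, not part of this commit):

```
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("."))
template = env.get_template("agent.pmpt.tpl")

# Each entry is a (name, description) pair, matching tool[0] / tool[1] above.
tools = [("StableDiffusionTool", "generate an image from a text prompt"),
         ("ImageCaptioningTool", "describe the contents of an image")]

print(template.render(tools=tools,
                      input="Draw a cat",
                      agent_scratchpad=""))
```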
agent.pmpt.tpl~ ADDED
@@ -0,0 +1,38 @@
+ Assistant is a large language model trained by OpenAI.
+
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
+
+ TOOLS:
+ ------
+
+ Assistant has access to the following tools:
+
+ {{tools_names}}
+
+ To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{{tool_names}}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ AI: [your response here]
+ ```
+
+ Begin!
+
+ Previous conversation history:
+ {{chat_history}}
+
+ New input: {{input}}
+ {{agent_scratchpad}}
agent.pmpt.txt CHANGED
@@ -11,6 +11,8 @@ TOOLS:
 
  Assistant has access to the following tools:
 
+ {{tools_names}}
+
  To use a tool, please use the following format:
 
  ```
@@ -29,8 +31,5 @@ AI: [your response here]
 
  Begin!
 
- Previous conversation history:
- {{chat_history}}
-
  New input: {{input}}
  {{agent_scratchpad}}
agent.pmpt.txt~ CHANGED
@@ -11,6 +11,8 @@ TOOLS:
 
  Assistant has access to the following tools:
 
+ {{tools_names}}
+
  To use a tool, please use the following format:
 
  ```
@@ -24,13 +26,13 @@ When you have a response to say to the Human, or if you do not need to use a too
 
  ```
  Thought: Do I need to use a tool? No
- {{ai_prefix}}: [your response here]
- ```"""
+ AI: [your response here]
+ ```
 
  Begin!
 
  Previous conversation history:
- {chat_history}
+ {{chat_history}}
 
- New input: {input}
- {agent_scratchpad}
+ New input: {{input}}
+ {{agent_scratchpad}}
agent.py CHANGED
@@ -1,51 +1,82 @@
  # + tags=["hide_inp"]
 
  desc = """
- ### Gradio Tool
+ ### Agent
 
- Chain that ask for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+ Chain that executes different tools based on model decisions. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
 
- (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ (Adapted from LangChain )
  """
  # -
 
  # $
 
- from minichain import show, prompt, OpenAI
- from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
-
- class Tool1:
-     description: str = "Tool 1"
-     def run(self, prompt):
-         return prompt
-
- class Tool2:
-     description: str = "Tool 2"
-     def run(self, prompt):
-         return prompt
-
-
- @prompt(StableDiffusionTool())
- def gen(model, query):
-     return model(query)
-
- @prompt(ImageCaptioningTool())
- def caption(model, img_src):
-     return model(img_src)
-
-
- def gradio_example(query):
-     return caption(gen(query))
-
+ from minichain import Id, prompt, OpenAI, show, transform, Mock, Break
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool, ImageToMusicTool
+
+
+ # class ImageCaptioningTool:
+ #     def run(self, inp):
+ #         return "This is a picture of a smiling huggingface logo."
+
+ #     description = "Image Captioning"
+
+ tools = [StableDiffusionTool(), ImageCaptioningTool(), ImageToMusicTool()]
+
+
+ @prompt(OpenAI(stop=["Observation:"]),
+         template_file="agent.pmpt.tpl")
+ def agent(model, query, history):
+     return model(dict(tools=[(str(tool.__class__.__name__), tool.description)
+                              for tool in tools],
+                       input=query,
+                       agent_scratchpad=history
+                       ))
+ @transform()
+ def tool_parse(out):
+     lines = out.split("\n")
+     if lines[0].split("?")[-1].strip() == "Yes":
+         tool = lines[1].split(":", 1)[-1].strip()
+         command = lines[2].split(":", 1)[-1].strip()
+         return tool, command
+     else:
+         return Break()
+
+ @prompt(tools)
+ def tool_use(model, usage):
+     selector, command = usage
+     for i, tool in enumerate(tools):
+         if selector == tool.__class__.__name__:
+             return model(command, tool_num=i)
+     return ("",)
+
+ @transform()
+ def append(history, new, observation):
+     return history + "\n" + new + "Observation: " + observation
+
+ def run(query):
+     history = ""
+     observations = []
+     for i in range(3):
+         select_input = agent(query, history)
+         observations.append(tool_use(tool_parse(select_input)))
+         history = append(history, select_input, observations[i])
+
+     return observations[-1]
 
  # $
 
- gradio = show(gradio_example,
-               subprompts=[caption],
-               examples=['/home/srush/Projects/MiniChain/examples/63dd90c7-9b8d-4ba4-bc07-a378fd932304/tmph3xi9ylr.jpg', 'Make me a flower'],
+ gradio = show(run,
+               subprompts=[agent, tool_use] * 3,
+               examples=[
+                   "I would please like a photo of a dog riding a skateboard. "
+                   "Please caption this image and create a song for it.",
+                   'Use an image generator tool to draw a cat.',
+                   'Caption the image https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png from the internet'],
                out_type="markdown",
-               description=desc
+               description=desc,
+               show_advanced=False
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
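
The Thought/Action parse in tool_parse can be checked with no model in the loop; a standalone sketch (the sample completion is invented, and a plain None stands in for minichain's Break sentinel):

```
def tool_parse(out):
    # Mirrors the @transform above: line 0 carries the Thought,
    # line 1 the Action (tool name), line 2 the Action Input.
    lines = out.split("\n")
    if lines[0].split("?")[-1].strip() == "Yes":
        tool = lines[1].split(":", 1)[-1].strip()
        command = lines[2].split(":", 1)[-1].strip()
        return tool, command
    return None  # stands in for minichain's Break()

completion = ("Thought: Do I need to use a tool? Yes\n"
              "Action: StableDiffusionTool\n"
              "Action Input: a dog riding a skateboard")
print(tool_parse(completion))
# ('StableDiffusionTool', 'a dog riding a skateboard')
```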
 
agent.py~ ADDED
@@ -0,0 +1,75 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that ask for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import Id, prompt, OpenAIStream
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+
+
+ @prompt(StableDiffusionTool())
+ def gen(model, query):
+     return model(query)
+
+ @prompt(ImageCaptioningTool())
+ def caption(model, img_src):
+     return model(img_src)
+
+ tools = [gen, caption]
+
+ @prompt(Id(),
+         #OpenAIStream(), stream=True,
+         template_file="agent.pmpt.tpl")
+ def agent(model, query):
+     print(model(dict(tools=[(str(tool.backend.__class__), tool.backend.description)
+                             for tool in tools],
+                      input=query
+                      )))
+     return ("StableDiffusionTool", "Draw a flower")
+     # out = ""
+     # for t in model.stream(dict(tools=[(str(tool.backend.__class__), tool.backend.description)
+     #                                   for tool in tools],
+     #                            input=query
+     #                            )):
+     #     out += t
+     #     yield out
+     # lines = out.split("\n")
+     # response = lines[0].split("?")[1].strip()
+     # if response == "Yes":
+     #     tool = lines[1].split(":")[1].strip()
+     #     yield tool
+
+ @prompt(dynamic=tools)
+ def selector(model, input):
+     selector, input = input
+     if selector == "StableDiffusionTool":
+         return model.tool(input, tool_num=0)
+     else:
+         return model.tool(input, tool_num=1)
+
+
+ def run(query):
+     select_input = agent(query)
+     return selector(select_input)
+
+ run("make a pic").run()
+ # $
+
+ gradio = show(run,
+               subprompts=[agent, selector],
+               examples=['Draw me a flower'],
+               out_type="markdown",
+               description=desc
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
app.py CHANGED
@@ -8,22 +8,26 @@ from gatsby import gradio as gatsby
  from qa import gradio as qa
  from stats import gradio as stats
  from selfask import gradio as selfask
- from backtrack import gradio as backtrack
  from table import gradio as table
+ from agent import gradio as agent
  from gradio_example import gradio as gradio_example
 
  CSS = """
- #clean div.form {border: 0px}
- #response {border: 0px; background: #ffeec6}
- #prompt {border: 0px;background: aliceblue}
- div.gradio-container {color: black}
+ #clean div.form {border: 0px}
+ #response {border: 0px; background: #ffeec6}
+ #prompt {border: 0px;background: aliceblue}
+ #json {border: 0px}
  span.head {font-size: 60pt; font-family: cursive;}
+ div.gradio-container {color: black}
+ div.form {background: inherit}
+ div.form div.block {padding: 0px; background: #fcfcfc}
  body {
      --text-sm: 12px;
      --text-md: 16px;
      --text-lg: 18px;
      --input-text-size: 16px;
      --section-text-size: 16px;
+     --input-background: --neutral-50;
  }
  """
 
@@ -32,8 +36,8 @@ body {
  with gr.Blocks(css=CSS, theme=gr.themes.Monochrome()) as demo:
      gr.HTML("<center style='background:#B6B7BA'> <span class='head'>Mini</span><img src='https://user-images.githubusercontent.com/35882/227017900-0cacdfb7-37e2-47b1-9347-a233810d3544.png' width='20%' style='display:inline'><span class='head'>Chain</span></center><center> <br><a href='https://github.com/srush/minichain'>[library]</a> </center>")
 
-     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, table, gradio_example, stats, selfask, backtrack],
-                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Table", "Gradio", "Stats", "SelfAsk", "Backtrack"],
+     gr.TabbedInterface([math_demo, qa, agent, chat, gatsby, ner, bash, pal, table, gradio_example, stats, selfask],
+                        ["Math", "QA", "Agent", "Chat", "Book", "NER", "Bash", "PAL", "Table", "Gradio", "Stats", "SelfAsk"],
                         css = CSS)
 
  demo.queue().launch()
app.py~ CHANGED
@@ -9,15 +9,13 @@ from qa import gradio as qa
  from stats import gradio as stats
  from selfask import gradio as selfask
  from backtrack import gradio as backtrack
+ from table import gradio as table
+ from gradio_example import gradio as gradio_example
 
  CSS = """
  #clean div.form {border: 0px}
  #response {border: 0px; background: #ffeec6}
  #prompt {border: 0px;background: aliceblue}
- #json {border: 0px}
- #result {border: 0px; background: #c5e0e5}
- #inner {margin: 10px; padding: 10px; font-size: 20px; }
- #inner textarea {border: 0px}
  div.gradio-container {color: black}
  span.head {font-size: 60pt; font-family: cursive;}
  body {
@@ -34,9 +32,9 @@ body {
  with gr.Blocks(css=CSS, theme=gr.themes.Monochrome()) as demo:
      gr.HTML("<center style='background:#B6B7BA'> <span class='head'>Mini</span><img src='https://user-images.githubusercontent.com/35882/227017900-0cacdfb7-37e2-47b1-9347-a233810d3544.png' width='20%' style='display:inline'><span class='head'>Chain</span></center><center> <br><a href='https://github.com/srush/minichain'>[library]</a> </center>")
 
-     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, stats, selfask, backtrack],
-                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Stats", "SelfAsk", "Backtrack"],
+     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, table, gradio_example, stats, selfask, backtrack],
+                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Table", "Gradio", "Stats", "SelfAsk", "Backtrack"],
                         css = CSS)
 
- demo.launch()
+ demo.queue().launch()
 
bash.py CHANGED
@@ -16,11 +16,11 @@ from minichain import show, prompt, OpenAI, Bash
 
  @prompt(OpenAI(), template_file = "bash.pmpt.tpl")
  def cli_prompt(model, query):
-     x = model(dict(question=query))
-     return "\n".join(x.strip().split("\n")[1:-1])
+     return model(dict(question=query))
 
  @prompt(Bash())
  def bash_run(model, x):
+     x = "\n".join(x.strip().split("\n")[1:-1])
      return model(x)
 
  def bash(query):
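
This change moves the code-fence stripping from the LLM step into the Bash step; the one-liner itself is easy to sanity-check (the sample output string is invented):

```
def strip_fence(x):
    # Drop the opening ``` line and the closing ``` line,
    # keeping only the command body in between.
    return "\n".join(x.strip().split("\n")[1:-1])

sample = "```bash\nls -l | wc -l\n```"
print(strip_fence(sample))  # ls -l | wc -l
```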
chat.py CHANGED
@@ -14,7 +14,7 @@ A chat-like example for multi-turn chat with state. [![Open In Colab](https://co
 
  from dataclasses import dataclass, replace
  from typing import List, Tuple
- from minichain import OpenAI, prompt, show
+ from minichain import OpenAI, prompt, show, transform, Mock
 
  # Generic stateful Memory
 
@@ -35,11 +35,19 @@ class State:
  # Chat prompt with memory
 
  @prompt(OpenAI(), template_file="chat.pmpt.tpl")
- def chat_prompt(model, state: State) -> State:
-     out = model(state)
-     result = out.split("Assistant:")[-1]
+ def chat_response(model, state: State) -> State:
+     return model.stream(state)
+
+ @transform()
+ def update(state, chat_output):
+     result = chat_output.split("Assistant:")[-1]
      return state.push(result)
 
+
+ def chat(command, state):
+     state = replace(state, human_input=command)
+     return update(state, chat_response(state))
+
  # $
 
  examples = [
@@ -52,9 +60,11 @@ examples = [
      "nvidia-smi"
  ]
 
- gradio = show(lambda command, state: chat_prompt(replace(state, human_input=command)),
+ print(chat("ls", State([])).run())
+
+ gradio = show(chat,
                initial_state=State([]),
-               subprompts=[chat_prompt],
+               subprompts=[chat_response],
                examples=examples,
                out_type="json",
                description=desc,
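
The State dataclass this diff builds on is plain Python and its rolling-memory behaviour can be exercised directly; a minimal sketch (the dataclass body mirrors the file, but the MEMORY window size and the sample turns are assumptions, since the constant lives outside the shown hunks):

```
from dataclasses import dataclass, replace
from typing import List, Tuple

MEMORY = 2  # assumed window size; not visible in the diff above

@dataclass
class State:
    memory: List[Tuple[str, str]]
    human_input: str = ""

    def push(self, response: str) -> "State":
        # Keep at most MEMORY past turns, dropping the oldest.
        memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
        return State(memory + [(self.human_input, response)])

s = State([])
s = replace(s, human_input="ls ~")
s = s.push("Desktop  Documents")
print(s.memory)  # [('ls ~', 'Desktop  Documents')]
```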
fake_agent.py ADDED
@@ -0,0 +1,59 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that ask for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import Mock, prompt, OpenAIStream, show
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+
+ @prompt(Mock(["Tool1"]))
+ def tool1(model, query):
+     return model(query)
+
+ @prompt(Mock(["Tool2"]))
+ def tool2(model, img_src):
+     return model(img_src)
+
+ tools = [tool1, tool2]
+
+ @prompt(Mock(["Tool1: draw a flower", "Tool2: call mom"]))
+ def agent(model, query):
+     return model(query).split(":")
+
+ @prompt(dynamic=tools)
+ def selector(model, input):
+     selector, input = input
+     if selector == "Tool1":
+         return model.tool(input, tool_num=0)
+     else:
+         return model.tool(input, tool_num=1)
+
+
+ def run(query):
+     select_input = agent(query)
+     out = selector(select_input)
+     select_input = agent(out)
+     return selector(select_input)
+
+ run("make a pic").run()
+ # $
+
+ gradio = show(run,
+               subprompts=[agent, selector, agent, selector],
+               examples=['Draw me a flower'],
+               out_type="markdown",
+               description=desc,
+               show_advanced=False
+               )
+ if __name__ == "__main__":
+     gradio.queue().launch()
+
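
fake_agent.py drives the same chain shape with canned Mock outputs instead of live models; the testing pattern can be illustrated with a plain stand-in class (this is not minichain's Mock, just a sketch assuming Mock replays its list of outputs in order):

```
class FakeModel:
    # Replays a fixed list of outputs, one per call.
    def __init__(self, outputs):
        self.outputs = list(outputs)

    def __call__(self, _query):
        return self.outputs.pop(0)

agent = FakeModel(["Tool1: draw a flower", "Tool2: call mom"])
for query in ["make a pic", "next step"]:
    tool, command = agent(query).split(":")
    print(tool, "->", command.strip())
```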
fake_agent.py~ ADDED
@@ -0,0 +1,75 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that ask for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import Id, prompt, OpenAIStream
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+
+
+ @prompt(StableDiffusionTool())
+ def gen(model, query):
+     return model(query)
+
+ @prompt(ImageCaptioningTool())
+ def caption(model, img_src):
+     return model(img_src)
+
+ tools = [gen, caption]
+
+ @prompt(Id(),
+         #OpenAIStream(), stream=True,
+         template_file="agent.pmpt.tpl")
+ def agent(model, query):
+     print(model(dict(tools=[(str(tool.backend.__class__), tool.backend.description)
+                             for tool in tools],
+                      input=query
+                      )))
+     return ("StableDiffusionTool", "Draw a flower")
+     # out = ""
+     # for t in model.stream(dict(tools=[(str(tool.backend.__class__), tool.backend.description)
+     #                                   for tool in tools],
+     #                            input=query
+     #                            )):
+     #     out += t
+     #     yield out
+     # lines = out.split("\n")
+     # response = lines[0].split("?")[1].strip()
+     # if response == "Yes":
+     #     tool = lines[1].split(":")[1].strip()
+     #     yield tool
+
+ @prompt(dynamic=tools)
+ def selector(model, input):
+     selector, input = input
+     if selector == "StableDiffusionTool":
+         return model.tool(input, tool_num=0)
+     else:
+         return model.tool(input, tool_num=1)
+
+
+ def run(query):
+     select_input = agent(query)
+     return selector(select_input)
+
+ run("make a pic").run()
+ # $
+
+ gradio = show(run,
+               subprompts=[agent, selector],
+               examples=['Draw me a flower'],
+               out_type="markdown",
+               description=desc
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
gatsby.py CHANGED
@@ -12,7 +12,7 @@ Chain that does question answering with Hugging Face embeddings. [![Open In Cola
 
  import datasets
  import numpy as np
- from minichain import prompt, show, HuggingFaceEmbed, OpenAI
+ from minichain import prompt, show, HuggingFaceEmbed, OpenAI, transform
 
  # Load data with embeddings (computed beforehand)
 
@@ -22,18 +22,20 @@ gatsby.add_faiss_index("embeddings")
  # Fast KNN retrieval prompt
 
  @prompt(HuggingFaceEmbed("sentence-transformers/all-mpnet-base-v2"))
- def get_neighbors(model, inp, k=1):
-     embedding = model(inp)
+ def embed(model, inp):
+     return model(inp)
+
+ @transform()
+ def get_neighbors(embedding, k=1):
      res = gatsby.get_nearest_examples("embeddings", np.array(embedding), k)
      return res.examples["passages"]
 
- @prompt(OpenAI(),
-         template_file="gatsby.pmpt.tpl")
+ @prompt(OpenAI(), template_file="gatsby.pmpt.tpl")
  def ask(model, query, neighbors):
      return model(dict(question=query, docs=neighbors))
 
  def gatsby_q(query):
-     n = get_neighbors(query)
+     n = get_neighbors(embed(query))
      return ask(query, n)
 
 
@@ -41,7 +43,7 @@ def gatsby_q(query):
 
 
  gradio = show(gatsby_q,
-               subprompts=[get_neighbors, ask],
+               subprompts=[ask],
                examples=["What did Gatsby do before he met Daisy?",
                          "What did the narrator do after getting back to Chicago?"],
                keys={"HF_KEY"},
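
The refactor isolates the nearest-neighbor lookup as a pure transform over an embedding vector; the lookup contract can be sketched without FAISS, using a brute-force inner-product search over toy vectors (all data here is invented, the real index holds precomputed passage embeddings):

```
import numpy as np

# Toy stand-in for the indexed passages: rows are precomputed embeddings.
passages = ["passage A", "passage B", "passage C"]
embeddings = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])

def get_neighbors(embedding, k=1):
    # Same contract as the @transform above, with brute force
    # standing in for gatsby.get_nearest_examples.
    scores = embeddings @ np.array(embedding)
    top = np.argsort(-scores)[:k]
    return [passages[i] for i in top]

print(get_neighbors([0.9, 0.1]))  # ['passage A']
```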
gradio_example.py CHANGED
@@ -10,24 +10,25 @@ Examples using the gradio tool [![Open In Colab](https://colab.research.google.c
 
  # $
 
- from minichain import show, prompt, OpenAI, OpenAIStream
+ from minichain import show, prompt, OpenAI, GradioConf
  import gradio as gr
  from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
 
- @prompt(OpenAIStream(), stream=True)
+ @prompt(OpenAI())
  def picture(model, query):
-     out = ""
-     for r in model.stream(query):
-         out += r
-     yield out
+     return model(query)
 
- @prompt(StableDiffusionTool(), stream=True, block_input=lambda: gr.Textbox(label=""))
+ @prompt(StableDiffusionTool(),
+         gradio_conf=GradioConf(
+             block_output= lambda: gr.Image(),
+             block_input= lambda: gr.Textbox(show_label=False)))
  def gen(model, query):
-     for r in model.stream(query):
-         yield "https://htmlcolorcodes.com/assets/images/colors/baby-blue-color-solid-background-1920x1080.png"
-     yield r
+     return model(query)
 
- @prompt(ImageCaptioningTool(), block_output=lambda: gr.Textbox(label=""))
+ @prompt(ImageCaptioningTool(),
+         gradio_conf=GradioConf(
+             block_input= lambda: gr.Image(),
+             block_output=lambda: gr.Textbox(show_label=False)))
  def caption(model, img_src):
      return model(img_src)
 
@@ -43,8 +44,7 @@ gradio = show(gradio_example,
                'Describe a one-sentence scene happening on the moon.'],
                out_type="markdown",
                description=desc,
-               css="#advanced {display: none}"
-
+               show_advanced=False
  )
  if __name__ == "__main__":
      gradio.queue().launch()
math_demo.py CHANGED
@@ -10,11 +10,14 @@ Chain that solves a math word problem by first generating and then running Pytho
 
  # $
 
- from minichain import show, prompt, OpenAI, Python
+ from minichain import show, prompt, OpenAI, Python, GradioConf
+ import gradio as gr
 
 
- @prompt(OpenAI(), template_file="math.pmpt.tpl")
- def math_prompt(model, question):
+ @prompt(OpenAI(), template_file="math.pmpt.tpl",
+         gradio_conf=GradioConf(block_input=gr.Markdown))
+ def math_prompt(model, question
+                 ):
      "Prompt to call GPT with a Jinja template"
      return model(dict(question=question))
 
@@ -22,7 +25,7 @@ def math_prompt(model, question):
  def python(model, code):
      "Prompt to call Python interpreter"
      code = "\n".join(code.strip().split("\n")[1:-1])
-     return int(model(dict(code=code)))
+     return model(dict(code=code))
 
  def math_demo(question):
      "Chain them together"
@@ -36,7 +39,6 @@ gradio = show(math_demo,
                "What is the sum of the 10 first positive integers?",],
                # "Carla is downloading a 200 GB file. She can download 2 GB/minute, but 40% of the way through the download, the download fails. Then Carla has to restart the download from the beginning. How load did it take her to download the file in minutes?"],
                subprompts=[math_prompt, python],
-               out_type="json",
                description=desc,
                code=open("math_demo.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
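
The python() step strips the Markdown fence before handing the block to the Python backend; a self-contained sketch of that strip-and-run step (the sample model output is invented, and exec stands in for minichain's Python tool):

```
import io, contextlib

def run_generated(code_block):
    # Strip the ``` fence lines, then execute and capture stdout,
    # roughly what the Python backend does with the cleaned code.
    code = "\n".join(code_block.strip().split("\n")[1:-1])
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        exec(code, {})
    return buf.getvalue().strip()

sample = "```python\nprint(sum(range(1, 11)))\n```"
print(run_generated(sample))  # 55
```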
mycache. ADDED
Binary file (24.6 kB)
 
ner.py CHANGED
@@ -11,12 +11,17 @@ Chain that does named entity recognition with arbitrary labels. [![Open In Colab
 
  # $
 
- from minichain import prompt, show, OpenAI
+ from minichain import prompt, transform, show, OpenAI
+ import json
 
- @prompt(OpenAI(), template_file = "ner.pmpt.tpl", parser="json")
+ @prompt(OpenAI(), template_file = "ner.pmpt.tpl")
  def ner_extract(model, kwargs):
      return model(kwargs)
 
+ @transform()
+ def to_json(chat_output):
+     return json.loads(chat_output)
+
  @prompt(OpenAI())
  def team_describe(model, inp):
      query = "Can you describe these basketball teams? " + \
@@ -25,7 +30,7 @@ def team_describe(model, inp):
 
 
  def ner(text_input, labels, domain):
-     extract = ner_extract(dict(text_input=text_input, labels=labels, domain=domain))
+     extract = to_json(ner_extract(dict(text_input=text_input, labels=labels, domain=domain)))
      return team_describe(extract)
 
 
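The new to_json transform decouples parsing from the extraction prompt; the downstream query construction is plain Python and can be exercised directly (the JSON string is an invented example of the template's output format):

```
import json

raw = '[{"T": "Team", "E": "76ers"}, {"T": "Team", "E": "Miami Heat"}, {"T": "Date", "E": "Monday"}]'
extract = json.loads(raw)  # what to_json produces

# Same query construction as team_describe above.
query = "Can you describe these basketball teams? " + \
    " ".join([i["E"] for i in extract if i["T"] == "Team"])
print(query)  # Can you describe these basketball teams? 76ers Miami Heat
```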
 
pal.py CHANGED
@@ -8,20 +8,17 @@ Chain for answering complex problems by code generation and execution. [![Open I
 
  # $
 
- from minichain import prompt, show, OpenAIStream, Python
+ from minichain import prompt, show, GradioConf, OpenAI, Python
  import gradio as gr
 
- @prompt(OpenAIStream(), template_file="pal.pmpt.tpl", stream=True)
+ @prompt(OpenAI(), template_file="pal.pmpt.tpl")
  def pal_prompt(model, question):
-     out = ""
-     for t in model.stream(dict(question=question)):
-         out += t
-         yield out
+     return model(dict(question=question))
 
- @prompt(Python(), block_input=lambda: gr.Code(language="python"))
+ @prompt(Python(),
+         gradio_conf=GradioConf(block_input = lambda: gr.Code(language="python")))
  def python(model, inp):
-     print(inp)
-     return float(model(inp + "\nprint(solution())"))
+     return model(inp + "\nprint(solution())")
 
  def pal(question):
      return python(pal_prompt(question))
qa.py CHANGED
@@ -12,7 +12,7 @@ Chain that answers questions with embeedding based retrieval. [![Open In Colab](
 
  import datasets
  import numpy as np
- from minichain import prompt, show, OpenAIEmbed, OpenAI
+ from minichain import prompt, transform, show, OpenAIEmbed, OpenAI
  from manifest import Manifest
 
  # We use Hugging Face Datasets as the database by assigning
@@ -25,18 +25,20 @@ olympics.add_faiss_index("embeddings")
  # Fast KNN retieval prompt
 
  @prompt(OpenAIEmbed())
- def get_neighbors(model, inp, k):
-     embedding = model(inp)
-     res = olympics.get_nearest_examples("embeddings", np.array(embedding), k)
+ def embed(model, inp):
+     return model(inp)
+
+ @transform()
+ def get_neighbors(inp, k):
+     res = olympics.get_nearest_examples("embeddings", np.array(inp), k)
      return res.examples["content"]
 
- @prompt(OpenAI(),
-         template_file="qa.pmpt.tpl")
+ @prompt(OpenAI(), template_file="qa.pmpt.tpl")
  def get_result(model, query, neighbors):
      return model(dict(question=query, docs=neighbors))
 
  def qa(query):
-     n = get_neighbors(query, 3)
+     n = get_neighbors(embed(query), 3)
      return get_result(query, n)
 
  # $
@@ -50,7 +52,7 @@ questions = ["Who won the 2020 Summer Olympics men's high jump?",
 
  gradio = show(qa,
                examples=questions,
-               subprompts=[get_neighbors, get_result],
+               subprompts=[embed, get_result],
                description=desc,
                code=open("qa.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
requirements.gradio_example.txt ADDED
@@ -0,0 +1,2 @@
+ minichain
+ git+https://github.com/freddyaboulton/gradio-tools/
selfask.py CHANGED
@@ -11,7 +11,7 @@ desc = """
 
  from dataclasses import dataclass, replace
  from typing import Optional
- from minichain import prompt, show, OpenAI, Google
+ from minichain import prompt, show, OpenAI, Google, transform
 
 
  @dataclass
@@ -22,12 +22,14 @@ class State:
      final_answer: Optional[str] = None
 
 
- @prompt(OpenAI(),
-         template_file = "selfask.pmpt.tpl",
-         stop_template = "\nIntermediate answer:")
+ @prompt(OpenAI(stop="\nIntermediate answer:"),
+         template_file = "selfask.pmpt.tpl")
  def self_ask(model, state):
-     out = model(state)
-     res = out.split(":", 1)[1]
+     return model(state)
+
+ @transform()
+ def next_step(ask):
+     res = ask.split(":", 1)[1]
      if out.startswith("Follow up:"):
          return replace(state, next_query=res)
      elif out.startswith("So the final answer is:"):
@@ -36,17 +38,22 @@ def self_ask(model, state):
  @prompt(Google())
  def google(model, state):
      if state.next_query is None:
-         return state
+         return ""
 
-     result = model(state.next_query)
+     return model(state.next_query)
+
+ @transform()
+ def update(state, result):
+     if not result:
+         return state
      return State(state.question,
                   state.history + "\nIntermediate answer: " + result + "\n")
 
  def selfask(question):
      state = State(question)
      for i in range(3):
-         state = self_ask(state)
-         state = google(state)
+         state = next_step(self_ask(state))
+         state = update(google(state))
      return state
 
  # $
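
Note that the committed next_step still refers to out and state from the replaced closure; a standalone sketch of the intended step, routing on the two self-ask markers (the sample model output is invented):

```
from dataclasses import dataclass, replace
from typing import Optional

@dataclass
class State:
    question: str
    history: str = ""
    next_query: Optional[str] = None
    final_answer: Optional[str] = None

def next_step(state, out):
    # Intended parse: route on the self-ask marker, keep the payload.
    res = out.split(":", 1)[1].strip()
    if out.startswith("Follow up:"):
        return replace(state, next_query=res)
    elif out.startswith("So the final answer is:"):
        return replace(state, final_answer=res)
    return state

s = next_step(State("Who won?"), "Follow up: Who was the champion in 2020?")
print(s.next_query)  # Who was the champion in 2020?
```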
stats.py CHANGED
@@ -9,10 +9,41 @@ Information extraction that is automatically generated from a typed specificatio
 
  # $
 
- from minichain import prompt, show, type_to_prompt, OpenAI
- from dataclasses import dataclass
- from typing import List
+ from minichain import prompt, show, OpenAI, transform
+ from dataclasses import dataclass, is_dataclass, fields
+ from typing import List, Type, Dict, Any, get_origin, get_args
  from enum import Enum
+ from jinja2 import select_autoescape, FileSystemLoader, Environment
+ import json
+
+ def enum(x: Type[Enum]) -> Dict[str, int]:
+     d = {e.name: e.value for e in x}
+     return d
+
+
+ def walk(x: Any) -> Any:
+     if issubclass(x if get_origin(x) is None else get_origin(x), List):
+         return {"_t_": "list", "t": walk(get_args(x)[0])}
+     if issubclass(x, Enum):
+         return enum(x)
+
+     if is_dataclass(x):
+         return {y.name: walk(y.type) for y in fields(x)}
+     return x.__name__
+
+
+ def type_to_prompt(out: type) -> str:
+     tmp = env.get_template("type_prompt.pmpt.tpl")
+     d = walk(out)
+     return tmp.render({"typ": d})
+
+ env = Environment(
+     loader=FileSystemLoader("."),
+     autoescape=select_autoescape(),
+     extensions=["jinja2_highlight.HighlightExtension"],
+ )
+
 
  # Data specification
 
@@ -34,15 +65,18 @@ class Player:
  # -
 
 
- @prompt(OpenAI(), template_file="stats.pmpt.tpl", parser="json")
+ @prompt(OpenAI(), template_file="stats.pmpt.tpl")
  def stats(model, passage):
-     out = model(dict(passage=passage, typ=type_to_prompt(Player)))
-     return [Player(**j) for j in out]
+     return model.stream(dict(passage=passage, typ=type_to_prompt(Player)))
+
+ @transform()
+ def to_data(s:str):
+     return [Player(**j) for j in json.loads(s)]
 
  # $
 
  article = open("sixers.txt").read()
- gradio = show(lambda passage: stats(passage),
+ gradio = show(lambda passage: to_data(stats(passage)),
                examples=[article],
                subprompts=[stats],
                out_type="json",
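
The inlined walk helper turns a typed spec into a plain dict before templating; its behaviour on a toy spec (the Player fields here are invented, the real ones live in the unchanged part of the file):

```
from dataclasses import dataclass, is_dataclass, fields
from typing import List, Type, Dict, Any, get_origin, get_args
from enum import Enum

def enum(x: Type[Enum]) -> Dict[str, int]:
    return {e.name: e.value for e in x}

def walk(x: Any) -> Any:
    # Same traversal as above: lists and enums get a dict encoding,
    # dataclasses recurse field by field, leaves keep their type name.
    if issubclass(x if get_origin(x) is None else get_origin(x), List):
        return {"_t_": "list", "t": walk(get_args(x)[0])}
    if issubclass(x, Enum):
        return enum(x)
    if is_dataclass(x):
        return {y.name: walk(y.type) for y in fields(x)}
    return x.__name__

class Position(Enum):
    GUARD = 1
    FORWARD = 2

@dataclass
class Player:
    name: str
    position: Position

print(walk(List[Player]))
# {'_t_': 'list', 't': {'name': 'str', 'position': {'GUARD': 1, 'FORWARD': 2}}}
```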
table.py CHANGED
@@ -9,12 +9,13 @@ Example of extracting tables from a textual document. [![Open In Colab](https://
 
  # $
  import pandas as pd
- from minichain import prompt, Mock, show, OpenAIStream
+ from minichain import prompt, Mock, show, OpenAI, GradioConf
  import minichain
  import json
  import gradio as gr
+ import requests
 
- rotowire = json.load(open("data.json"))
+ rotowire = requests.get("https://raw.githubusercontent.com/srush/text2table/main/data.json").json()
  names = {
      '3-pointer percentage': 'FG3_PCT',
      '3-pointers attempted': 'FG3A',
@@ -52,30 +53,26 @@ for i in range(few_shot_examples):
      examples.append({"input": rotowire[i][1],
                       "output": to_df(rotowire[i][0][1]).transpose().set_index("player").to_csv(sep="\t")})
 
- @prompt(OpenAIStream(),
-         template_file="table.pmpt.txt",
-         block_output=gr.HTML,
-         stream=True)
- def extract(model, passage, typ):
-     state = []
-     out = ""
-     for token in model.stream(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ)):
-         out += token
-         html = "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></td></table>"
-         yield html
-     yield html
-
+ def make_html(out):
+     return "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></td></table>"
+
+ @prompt(OpenAI(), template_file="table.pmpt.txt",
+         gradio_conf=GradioConf(block_output=gr.HTML,
+                                postprocess_output = make_html)
+         )
+ def extract(model, passage, typ):
+     return model(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ))
 
  def run(query):
      return extract(query, "Player")
 
  # $
 
+ import os
  gradio = show(run,
                examples = [rotowire[i][1] for i in range(50, 55)],
                subprompts=[extract],
-               code=open("table.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               code=open("table.py" if os.path.exists("table.py") else "app.py", "r").read().split("$")[1].strip().strip("#").strip(),
                out_type="markdown"
  )
 
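make_html is now a pure string transform from the model's TSV output to an HTML table, so it can be checked in isolation (the sample TSV is invented):

```
def make_html(out):
    # TSV -> HTML: tabs become cell boundaries, newlines row boundaries.
    return "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></td></table>"

print(make_html("player\tPTS\nEmbiid\t35"))
# <table><tr><td>player</td><td>PTS</td></tr><tr><td>Embiid</td><td>35</td></td></table>
```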
 
temp CHANGED
Binary files a/temp and b/temp differ
 
temp.log CHANGED
Binary files a/temp.log and b/temp.log differ
 
type_prompt.pmpt.tpl ADDED
@@ -0,0 +1,26 @@
+ You are a highly intelligent and accurate information extraction system. You take passage as input and your task is to find parts of the passage to answer questions.
+
+ {% macro describe(typ) -%}
+ {% for key, val in typ.items() %}
+ You need to classify in to the following types for key: "{{key}}":
+ {% if val == "str" %}String
+ {% elif val == "int" %}Int {% else %}
+ {% if val.get("_t_") == "list" %}List{{describe(val["t"])}}{% else %}
+
+ {% for k, v in val.items() %}{{k}}
+ {% endfor %}
+
+ Only select from the above list.
+ {% endif %}
+ {%endif%}
+ {% endfor %}
+ {% endmacro -%}
+ {{describe(typ)}}
+ {% macro json(typ) -%}{% for key, val in typ.items() %}{% if val in ["str", "int"] or val.get("_t_") != "list" %}"{{key}}" : "{{key}}" {% else %} "{{key}}" : [{ {{json(val["t"])}} }] {% endif %}{{"" if loop.last else ", "}} {% endfor %}{% endmacro -%}
+
+ [{ {{json(typ)}} }, ...]
+
+
+
+ Make sure every output is exactly seen in the document. Find as many as you can.
+ You need to output only JSON.