srush (HF staff) committed on
Commit 5b30d27
1 Parent(s): be4ddb6

Upload with huggingface_hub

Files changed (28)
  1. #agent.py# +46 -0
  2. #base.sh# +2 -0
  3. agent.pmpt.txt +36 -0
  4. agent.pmpt.txt~ +36 -0
  5. agent.py +51 -0
  6. app.py +5 -7
  7. app.py~ +27 -6
  8. backtrack.py +8 -4
  9. backtrack.py~ +24 -20
  10. base.sh +2 -0
  11. bash.py +1 -1
  12. chat.py +4 -1
  13. data.json +0 -0
  14. gatsby.py +4 -4
  15. gradio_example.py +51 -0
  16. gradio_example.py~ +41 -0
  17. gradio_tool.py~ +43 -0
  18. math_demo.py +1 -1
  19. ner.py +1 -1
  20. pal.py +10 -5
  21. qa.py +1 -1
  22. selfask.py +1 -1
  23. stats.py +1 -1
  24. table.pmpt.txt +31 -0
  25. table.py +83 -0
  26. table.py~ +76 -0
  27. temp +0 -0
  28. temp.log +0 -0
#agent.py# ADDED
@@ -0,0 +1,46 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that asks for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import show, prompt, OpenAI
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+ class Tool1:
+     description: str = "Tool 1"
+     def run(self, prompt):
+         return prompt
+
+ class Tool2:
+     description: str = "Tool 2"
+     def run(self, prompt):
+         return prompt
+
+
+ @prompt(OpenAI())
+ def gen(model, query):
+     return model(query)
+
+ def gradio_example(query):
+     return gen(query)
+
+
+ # $
+
+ gradio = show(gradio_example,
+               subprompts=[gen],
+               examples=['/home/srush/Projects/MiniChain/examples/63dd90c7-9b8d-4ba4-bc07-a378fd932304/tmph3xi9ylr.jpg', 'Make me a flower'],
+               out_type="markdown",
+               description=desc
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
#base.sh# ADDED
@@ -0,0 +1,2 @@
+ export OPENAI_API_KEY="sk-REDACTED"
+ export SERP_KEY="REDACTED"
agent.pmpt.txt ADDED
@@ -0,0 +1,36 @@
+ Assistant is a large language model trained by OpenAI.
+
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
+
+ TOOLS:
+ ------
+
+ Assistant has access to the following tools:
+
+ To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{{tool_names}}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ AI: [your response here]
+ ```
+
+ Begin!
+
+ Previous conversation history:
+ {{chat_history}}
+
+ New input: {{input}}
+ {{agent_scratchpad}}
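The `{{...}}` slots are Jinja-style placeholders. A minimal sketch of how such a template is filled in, using jinja2 directly with made-up values (MiniChain's `template_file=` argument does the equivalent internally):

```python
# Sketch: render agent.pmpt.txt with jinja2. The variable values here are
# hypothetical stand-ins, not part of the commit.
from jinja2 import Template

tmpl = Template(open("agent.pmpt.txt").read())
print(tmpl.render(
    tool_names="StableDiffusion, ImageCaptioning",  # hypothetical tool list
    chat_history="",
    input="Draw me a flower.",
    agent_scratchpad="",
))
```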
agent.pmpt.txt~ ADDED
@@ -0,0 +1,36 @@
+ Assistant is a large language model trained by OpenAI.
+
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
+
+ TOOLS:
+ ------
+
+ Assistant has access to the following tools:
+
+ To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{{tool_names}}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ {{ai_prefix}}: [your response here]
+ ```"""
+
+ Begin!
+
+ Previous conversation history:
+ {chat_history}
+
+ New input: {input}
+ {agent_scratchpad}
agent.py ADDED
@@ -0,0 +1,51 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that asks for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import show, prompt, OpenAI
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+ class Tool1:
+     description: str = "Tool 1"
+     def run(self, prompt):
+         return prompt
+
+ class Tool2:
+     description: str = "Tool 2"
+     def run(self, prompt):
+         return prompt
+
+
+ @prompt(StableDiffusionTool())
+ def gen(model, query):
+     return model(query)
+
+ @prompt(ImageCaptioningTool())
+ def caption(model, img_src):
+     return model(img_src)
+
+
+ def gradio_example(query):
+     return caption(gen(query))
+
+
+ # $
+
+ gradio = show(gradio_example,
+               subprompts=[caption],
+               examples=['/home/srush/Projects/MiniChain/examples/63dd90c7-9b8d-4ba4-bc07-a378fd932304/tmph3xi9ylr.jpg', 'Make me a flower'],
+               out_type="markdown",
+               description=desc
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
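The file chains two `@prompt` functions. For a version that runs without keys or a GPU, here is a minimal sketch using the repo's `Mock` backend; the canned strings and `show()` arguments are illustrative assumptions, not part of the commit:

```python
from minichain import prompt, show, Mock

# Each Mock returns its canned string; these stand in for the Gradio tools above.
@prompt(Mock(["a field of flowers"]))
def gen(model, query):
    return model(query)

@prompt(Mock(["A painting of a field of flowers."]))
def caption(model, img_src):
    return model(img_src)

def pipeline(query):
    return caption(gen(query))

gradio = show(pipeline, examples=["Make me a flower"], subprompts=[gen, caption])
if __name__ == "__main__":
    gradio.launch()
```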
app.py CHANGED
@@ -9,15 +9,13 @@ from qa import gradio as qa
  from stats import gradio as stats
  from selfask import gradio as selfask
  from backtrack import gradio as backtrack
+ from table import gradio as table
+ from gradio_example import gradio as gradio_example

  CSS = """
  #clean div.form {border: 0px}
  #response {border: 0px; background: #ffeec6}
  #prompt {border: 0px;background: aliceblue}
- #json {border: 0px}
- #result {border: 0px; background: #c5e0e5}
- #inner {margin: 10px; padding: 10px; font-size: 20px; }
- #inner textarea {border: 0px}
  div.gradio-container {color: black}
  span.head {font-size: 60pt; font-family: cursive;}
  body {
@@ -34,9 +32,9 @@ body {
  with gr.Blocks(css=CSS, theme=gr.themes.Monochrome()) as demo:
      gr.HTML("<center style='background:#B6B7BA'> <span class='head'>Mini</span><img src='https://user-images.githubusercontent.com/35882/227017900-0cacdfb7-37e2-47b1-9347-a233810d3544.png' width='20%' style='display:inline'><span class='head'>Chain</span></center><center> <br><a href='https://github.com/srush/minichain'>[library]</a> </center>")

-     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, stats, selfask, backtrack],
-                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Stats", "SelfAsk", "Backtrack"],
+     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, table, gradio_example, stats, selfask, backtrack],
+                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Table", "Gradio", "Stats", "SelfAsk", "Backtrack"],
                         css = CSS)

-     demo.launch()
+     demo.queue().launch()

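A note on the recurring `.launch()` to `.queue().launch()` change in this commit: Gradio requires the event queue for generator (streaming) handlers like the ones added here. A standalone toy sketch, not from the commit:

```python
import time
import gradio as gr

def stream_numbers():
    out = ""
    for i in range(5):
        out += f"{i} "
        time.sleep(0.2)
        yield out  # re-emit the growing prefix so the UI updates in place

demo = gr.Interface(fn=stream_numbers, inputs=None, outputs="text")

if __name__ == "__main__":
    demo.queue().launch()  # generator handlers need the queue enabled
```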
app.py~ CHANGED
@@ -7,15 +7,36 @@ from pal import gradio as pal
  from gatsby import gradio as gatsby
  from qa import gradio as qa
  from stats import gradio as stats
+ from selfask import gradio as selfask
+ from backtrack import gradio as backtrack

- css = "#clean div.form {border: 0px} #response {border: 0px; background: #ffeec6} #prompt {border: 0px;background: aliceblue} #json {border: 0px} #result {border: 0px; background: #c5e0e5} #inner {padding: 20px} #inner textarea {border: 0px} .tabs div.tabitem {border: 0px}"
+ CSS = """
+ #clean div.form {border: 0px}
+ #response {border: 0px; background: #ffeec6}
+ #prompt {border: 0px;background: aliceblue}
+ #json {border: 0px}
+ #result {border: 0px; background: #c5e0e5}
+ #inner {margin: 10px; padding: 10px; font-size: 20px; }
+ #inner textarea {border: 0px}
+ div.gradio-container {color: black}
+ span.head {font-size: 60pt; font-family: cursive;}
+ body {
+     --text-sm: 12px;
+     --text-md: 16px;
+     --text-lg: 18px;
+     --input-text-size: 16px;
+     --section-text-size: 16px;
+ }
+ """


- with gr.Blocks(css=css) as demo:
-     gr.HTML("<center> <img width='10%' style='display:inline; padding: 5px' src='https://user-images.githubusercontent.com/35882/218286642-67985b6f-d483-49be-825b-f62b72c469cd.png'> <h1 style='display:inline'> Mini-Chain </h1> <img width='10%' style='display:inline;padding: 5px' src='https://avatars.githubusercontent.com/u/25720743?s=200&v=4'> </center><br><center><a href='https://github.com/srush/minichain'>[code]</a> <a href='https://user-images.githubusercontent.com/35882/218286642-67985b6f-d483-49be-825b-f62b72c469cd.png'>[docs]</a></center>")
-
-     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, stats],
-                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Stats"],
-                        css = css)
+
+ with gr.Blocks(css=CSS, theme=gr.themes.Monochrome()) as demo:
+     gr.HTML("<center style='background:#B6B7BA'> <span class='head'>Mini</span><img src='https://user-images.githubusercontent.com/35882/227017900-0cacdfb7-37e2-47b1-9347-a233810d3544.png' width='20%' style='display:inline'><span class='head'>Chain</span></center><center> <br><a href='https://github.com/srush/minichain'>[library]</a> </center>")
+
+     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, stats, selfask, backtrack],
+                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Stats", "SelfAsk", "Backtrack"],
+                        css = CSS)

  demo.launch()

backtrack.py CHANGED
@@ -11,11 +11,15 @@ Chain that backtracks on failure. [![Open In Colab](https://colab.research.googl
  from minichain import prompt, Mock, show, OpenAI
  import minichain

- @prompt(Mock(["dog", "blue", "cat"]))
+ @prompt(Mock(["dog", "blue", "cat"]), stream=True)
  def prompt_generation(model):
-     return model("")
+     out = ""
+     for token in model.stream(""):
+         out += token
+         yield out
+     yield out

- @prompt(OpenAI(), template="Answer 'yes' is {{query}} is a color. Answer:")
+ @prompt(Mock(["No", "Yes"]), template="Answer 'yes' is {{query}} is a color. Answer:", stream=False)
  def prompt_validation(model, x):
      out = model(dict(query=x))
      if out.strip().lower().startswith("yes"):
@@ -35,4 +39,4 @@ gradio = show(run,
  )

  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
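The control flow under test, restated in plain Python: keep resampling the generator until the validator accepts, which is roughly what `model.fail(1)` requests from MiniChain (rewind one prompt and retry). The canned values mirror the `Mock` list above:

```python
# Hypothetical stand-ins for the Mock outputs above.
candidates = iter(["dog", "blue", "cat"])

def prompt_generation():
    return next(candidates)

def prompt_validation(x):
    # Stand-in for the "is it a color?" LLM check.
    return x if x == "blue" else None

result = None
while result is None:
    result = prompt_validation(prompt_generation())
print(result)  # -> blue
```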
backtrack.py~ CHANGED
@@ -2,33 +2,37 @@
  desc = """
  ### Backtrack on Failure

- Chain that backtracks on failure. [[Code](https://github.com/srush/MiniChain/blob/main/examples/backtrack.py)]
+ Chain that backtracks on failure. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/backtrack.ipynb)

  """
  # -

-
- from minichain import prompt, Mock, show
+ # $
+ from minichain import prompt, Mock, show, OpenAI
  import minichain

- @prompt(Mock(["red", "blue"]))
- def prompt_function1(model, x):
-     return model(x)
+ @prompt(Mock(["dog", "blue", "cat"]))
+ def prompt_generation(model):
+     return model("")

- @prompt(Mock(["b"]), template_file="test.pmpt.tpl")
- def prompt_function2(model, x):
-     if x == "red":
-         return model.fail(1)
-     return model(dict(x=x))
+ @prompt(OpenAI(), template="Answer 'yes' is {{query}} is a color. Answer:")
+ def prompt_validation(model, x):
+     out = model(dict(query=x))
+     if out.strip().lower().startswith("yes"):
+         return x
+     return model.fail(1)

- def run(query):
-     x = prompt_function1(query)
-     return prompt_function2(prompt_function2(x))
-
-
- demo = show(run,
-             examples=["a"],
-             subprompts=[prompt_function1, prompt_function2, prompt_function2])
+ def run():
+     x = prompt_generation()
+     return prompt_validation(x)
+ # $
+
+ gradio = show(run,
+               examples = [],
+               subprompts=[prompt_generation, prompt_validation],
+               code=open("backtrack.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               out_type="markdown"
+               )

  if __name__ == "__main__":
-     demo.launch()
+     gradio.launch()
base.sh ADDED
@@ -0,0 +1,2 @@
+ export OPENAI_API_KEY="sk-REDACTED"
+ export SERP_KEY="REDACTED"
bash.py CHANGED
@@ -39,5 +39,5 @@ gradio = show(bash,
      code=open("bash.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()

chat.py CHANGED
@@ -29,6 +29,9 @@ class State:
          memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
          return State(memory + [(self.human_input, response)])

+     def __str__(self):
+         return self.memory[-1][-1]
+
  # Chat prompt with memory

  @prompt(OpenAI(), template_file="chat.pmpt.tpl")
@@ -58,6 +61,6 @@ gradio = show(lambda command, state: chat_prompt(replace(state, human_input=comm
      code=open("chat.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()


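The new `__str__` makes a `State` render as its latest assistant reply. A self-contained sketch of the rolling-memory pattern, with the dataclass shape reconstructed from the context lines above and `MEMORY = 2` as an assumed value:

```python
from dataclasses import dataclass, field
from typing import List, Tuple

MEMORY = 2  # assumed cap on remembered exchanges

@dataclass
class State:
    memory: List[Tuple[str, str]] = field(default_factory=list)
    human_input: str = ""

    def push(self, response: str) -> "State":
        # Drop the oldest exchange once the window is full.
        memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
        return State(memory + [(self.human_input, response)])

    def __str__(self):
        return self.memory[-1][-1]

s = State(human_input="hi").push("hello!")
print(s)  # -> hello!
```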
data.json ADDED
The diff for this file is too large to render. See raw diff
 
gatsby.py CHANGED
@@ -19,7 +19,7 @@ from minichain import prompt, show, HuggingFaceEmbed, OpenAI
  gatsby = datasets.load_from_disk("gatsby")
  gatsby.add_faiss_index("embeddings")

- # Fast KNN retieval prompt
+ # Fast KNN retrieval prompt

  @prompt(HuggingFaceEmbed("sentence-transformers/all-mpnet-base-v2"))
  def get_neighbors(model, inp, k=1):
@@ -32,7 +32,7 @@ def get_neighbors(model, inp, k=1):
  def ask(model, query, neighbors):
      return model(dict(question=query, docs=neighbors))

- def gatsby(query):
+ def gatsby_q(query):
      n = get_neighbors(query)
      return ask(query, n)

@@ -40,7 +40,7 @@ def gatsby(query):
  # $


- gradio = show(gatsby,
+ gradio = show(gatsby_q,
                subprompts=[get_neighbors, ask],
                examples=["What did Gatsby do before he met Daisy?",
                          "What did the narrator do after getting back to Chicago?"],
@@ -49,4 +49,4 @@ gradio = show(gatsby,
      code=open("gatsby.py", "r").read().split("$")[1].strip().strip("#").strip()
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
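For orientation, the k-NN lookup that `get_neighbors` wraps can be reproduced with the datasets library alone. A sketch with toy 2-d vectors standing in for sentence-transformers embeddings (assumes faiss is installed):

```python
import numpy as np
import datasets

ds = datasets.Dataset.from_dict({
    "text": ["chapter one", "chapter two", "chapter three"],
    "embeddings": [[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]],
})
ds.add_faiss_index("embeddings")

query = np.array([0.6, 0.8], dtype=np.float32)
scores, neighbors = ds.get_nearest_examples("embeddings", query, k=1)
print(neighbors["text"])  # nearest passage(s) to the query vector
```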
gradio_example.py ADDED
@@ -0,0 +1,51 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Examples using the gradio tool [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/gradio_example.ipynb)
+
+ """
+ # -
+
+ # $
+
+ from minichain import show, prompt, OpenAI, OpenAIStream
+ import gradio as gr
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+ @prompt(OpenAIStream(), stream=True)
+ def picture(model, query):
+     out = ""
+     for r in model.stream(query):
+         out += r
+         yield out
+
+ @prompt(StableDiffusionTool(), stream=True, block_input=lambda: gr.Textbox(label=""))
+ def gen(model, query):
+     for r in model.stream(query):
+         yield "https://htmlcolorcodes.com/assets/images/colors/baby-blue-color-solid-background-1920x1080.png"
+     yield r
+
+ @prompt(ImageCaptioningTool(), block_output=lambda: gr.Textbox(label=""))
+ def caption(model, img_src):
+     return model(img_src)
+
+ def gradio_example(query):
+     return caption(gen(picture(query)))
+
+
+ # $
+
+ gradio = show(gradio_example,
+               subprompts=[picture, gen, caption],
+               examples=['Describe a one-sentence fantasy scene.',
+                         'Describe a one-sentence scene happening on the moon.'],
+               out_type="markdown",
+               description=desc,
+               css="#advanced {display: none}"
+
+               )
+ if __name__ == "__main__":
+     gradio.queue().launch()
+
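The accumulate-and-yield idiom used by `picture()` above, shown standalone with a fake token stream:

```python
def fake_stream(tokens):
    for t in tokens:
        yield t

def picture(tokens):
    out = ""
    for r in fake_stream(tokens):
        out += r
        yield out  # each yield carries the full prefix so far

for partial in picture(["A ", "quiet ", "moonlit ", "valley."]):
    print(partial)
# Final line printed: "A quiet moonlit valley."
```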
gradio_example.py~ ADDED
@@ -0,0 +1,41 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that asks for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import show, prompt, OpenAI
+ from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool
+
+
+ @prompt(StableDiffusionTool())
+ def gen(model, query):
+     return model(query)
+
+ @prompt(ImageCaptioningTool())
+ def caption(model, img_src):
+     return model(img_src)
+
+
+ def gradio_example(query):
+     return caption(gen(query))
+
+
+ # $
+
+ gradio = show(gradio_example,
+               subprompts=[caption],
+               examples=['/home/srush/Projects/MiniChain/examples/63dd90c7-9b8d-4ba4-bc07-a378fd932304/tmph3xi9ylr.jpg', 'Make me a flower'],
+               out_type="markdown",
+               description=desc
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
gradio_tool.py~ ADDED
@@ -0,0 +1,43 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ ### Gradio Tool
+
+ Chain that asks for a command-line question and then runs the bash command. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb)
+
+ (Adapted from LangChain [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html))
+ """
+ # -
+
+ # $
+
+ from minichain import show, prompt, OpenAI, Bash
+
+
+ @prompt(OpenAI(), template_file = "bash.pmpt.tpl")
+ def cli_prompt(model, query):
+     x = model(dict(question=query))
+     return "\n".join(x.strip().split("\n")[1:-1])
+
+ @prompt(Bash())
+ def bash_run(model, x):
+     return model(x)
+
+ def gradio_tool(query):
+     return bash_run(cli_prompt(query))
+
+
+ # $
+
+ gradio = show(gradio_tool,
+               subprompts=[cli_prompt, bash_run],
+               examples=['Go up one directory, and then into the minichain directory,'
+                         'and list the files in the directory',
+                         "Please write a bash script that prints 'Hello World' to the console."],
+               out_type="markdown",
+               description=desc,
+               code=open("bash.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               )
+ if __name__ == "__main__":
+     gradio.launch()
+
math_demo.py CHANGED
@@ -41,6 +41,6 @@ gradio = show(math_demo,
      code=open("math_demo.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
  # -

ner.py CHANGED
@@ -39,4 +39,4 @@ gradio = show(ner,
  )

  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
pal.py CHANGED
@@ -8,14 +8,19 @@ Chain for answering complex problems by code generation and execution. [![Open I

  # $

- from minichain import prompt, show, OpenAI, Python
+ from minichain import prompt, show, OpenAIStream, Python
+ import gradio as gr

- @prompt(OpenAI(), template_file="pal.pmpt.tpl")
+ @prompt(OpenAIStream(), template_file="pal.pmpt.tpl", stream=True)
  def pal_prompt(model, question):
-     return model(dict(question=question))
+     out = ""
+     for t in model.stream(dict(question=question)):
+         out += t
+         yield out

- @prompt(Python())
+ @prompt(Python(), block_input=lambda: gr.Code(language="python"))
  def python(model, inp):
+     print(inp)
      return float(model(inp + "\nprint(solution())"))

  def pal(question):
@@ -37,4 +42,4 @@ gradio = show(pal,
  )

  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()
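The PAL pattern in miniature: the model writes a `solution()` program and the `Python()` backend runs it, as the `inp + "\nprint(solution())"` line indicates. A standalone sketch with a hard-coded stand-in for the generated program (executing model output is unsafe outside a sandbox):

```python
# Hypothetical "generated" program; a real run would get this from the LLM.
generated = """
def solution():
    # 23 apples, 20 used, 6 bought
    return 23 - 20 + 6
"""

namespace = {}
exec(generated, namespace)               # define solution() from the generated code
answer = float(namespace["solution"]())
print(answer)  # -> 9.0
```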
qa.py CHANGED
@@ -55,5 +55,5 @@ gradio = show(qa,
      code=open("qa.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()

selfask.py CHANGED
@@ -59,6 +59,6 @@ gradio = show(selfask,
      out_type="json"
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()


stats.py CHANGED
@@ -50,7 +50,7 @@ gradio = show(lambda passage: stats(passage),
      code=open("stats.py", "r").read().split("$")[1].strip().strip("#").strip(),
  )
  if __name__ == "__main__":
-     gradio.launch()
+     gradio.queue().launch()


  # ExtractionPrompt().show({"passage": "Harden had 10 rebounds."},
table.pmpt.txt ADDED
@@ -0,0 +1,31 @@
+ You are a utility built to extract structured information from documents. You are returning a TSV table. Here are the headers.
+
+ ----
+ {{type}} {% for k in player_keys %}{{k[0]}}{{"\t" if not loop.last}}{% endfor %}
+ ----
+
+ Return the rest of the table in TSV format. Here are some examples.
+
+ {% for example in examples %}
+ Example
+ ---
+ {{example.input}}
+ ---
+
+ Output
+ ---
+ {{example.output}}
+ ---
+ {% endfor %}
+
+ Article:
+ ----
+ {{passage}}
+ ----
+
+ All other values should be numbers or _.
+ Only include numbers that appear explicitly in the passage above.
+ If you cannot find the value in the table, output _. Most cells will be _.
+
+ OK, here is the correct, valid TSV with headers and nothing else. Remember to only include values that are directly written in the article. Do not guess or combine rows.
+
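How the header line of this template expands, checked with jinja2 directly; the two-column `player_keys` value mirrors the `names.items()` call in table.py:

```python
from jinja2 import Template

header = Template(
    '{{type}} {% for k in player_keys %}{{k[0]}}{{"\t" if not loop.last}}{% endfor %}'
)
print(header.render(type="Player",
                    player_keys=[("Points", "PTS"), ("Assists", "AST")]))
# -> "Player Points\tAssists" (tab-separated; the trailing tab is skipped on the last key)
```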
table.py ADDED
@@ -0,0 +1,83 @@
+ # + tags=["hide_inp"]
+ desc = """
+ ### Table
+
+ Example of extracting tables from a textual document. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/table.ipynb)
+
+ """
+ # -
+
+ # $
+ import pandas as pd
+ from minichain import prompt, Mock, show, OpenAIStream
+ import minichain
+ import json
+ import gradio as gr
+
+ rotowire = json.load(open("data.json"))
+ names = {
+     '3-pointer percentage': 'FG3_PCT',
+     '3-pointers attempted': 'FG3A',
+     '3-pointers made': 'FG3M',
+     'Assists': 'AST',
+     'Blocks': 'BLK',
+     'Field goal percentage': 'FG_PCT',
+     'Field goals attempted': 'FGA',
+     'Field goals made': 'FGM',
+     'Free throw percentage': 'FT_PCT',
+     'Free throws attempted': 'FTA',
+     'Free throws made': 'FTM',
+     'Minutes played': 'MIN',
+     'Personal fouls': 'PF',
+     'Points': 'PTS',
+     'Rebounds': 'REB',
+     'Rebounds (Defensive)': 'DREB',
+     'Rebounds (Offensive)': 'OREB',
+     'Steals': 'STL',
+     'Turnovers': 'TO'
+ }
+ # Convert an example to a dataframe
+ def to_df(d):
+     players = {player for v in d.values() if v is not None for player, _ in v.items()}
+     lookup = {k: {a: b for a, b in v.items()} for k, v in d.items()}
+     rows = [{"player": p} | {k: "_" if p not in lookup.get(k, []) else lookup[k][p] for k in names.keys()}
+             for p in players]
+     return pd.DataFrame.from_dict(rows).astype("str").sort_values(axis=0, by="player", ignore_index=True).transpose()
+
+
+ # Make few-shot examples
+ few_shot_examples = 2
+ examples = []
+ for i in range(few_shot_examples):
+     examples.append({"input": rotowire[i][1],
+                      "output": to_df(rotowire[i][0][1]).transpose().set_index("player").to_csv(sep="\t")})
+
+ @prompt(OpenAIStream(),
+         template_file="table.pmpt.txt",
+         block_output=gr.HTML,
+         stream=True)
+ def extract(model, passage, typ):
+     state = []
+     out = ""
+     for token in model.stream(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ)):
+         out += token
+         html = "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></tr></table>"
+         yield html
+     yield html
+
+
+
+ def run(query):
+     return extract(query, "Player")
+
+ # $
+
+ gradio = show(run,
+               examples = [rotowire[i][1] for i in range(50, 55)],
+               subprompts=[extract],
+               code=open("table.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               out_type="markdown"
+               )
+
+ if __name__ == "__main__":
+     gradio.queue().launch()
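The streamed TSV-to-HTML replace chain from `extract()`, pulled out as a plain function; note each row closes with `</td></tr>`:

```python
def tsv_to_html(tsv: str) -> str:
    # Tabs become cell boundaries, newlines become row boundaries.
    return ("<table><tr><td>"
            + tsv.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>")
            + "</td></tr></table>")

print(tsv_to_html("player\tPTS\nHarden\t31"))
# -> <table><tr><td>player</td><td>PTS</td></tr><tr><td>Harden</td><td>31</td></tr></table>
```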
table.py~ ADDED
@@ -0,0 +1,76 @@
+ # + tags=["hide_inp"]
+ desc = """
+ ### Table
+
+ """
+ # -
+
+ # $
+ import pandas as pd
+ from minichain import prompt, Mock, show, OpenAIStream
+ import minichain
+ import json
+ rotowire = json.load(open("data.json"))
+ names = {
+     '3-pointer percentage': 'FG3_PCT',
+     '3-pointers attempted': 'FG3A',
+     '3-pointers made': 'FG3M',
+     'Assists': 'AST',
+     'Blocks': 'BLK',
+     'Field goal percentage': 'FG_PCT',
+     'Field goals attempted': 'FGA',
+     'Field goals made': 'FGM',
+     'Free throw percentage': 'FT_PCT',
+     'Free throws attempted': 'FTA',
+     'Free throws made': 'FTM',
+     'Minutes played': 'MIN',
+     'Personal fouls': 'PF',
+     'Points': 'PTS',
+     'Rebounds': 'REB',
+     'Rebounds (Defensive)': 'DREB',
+     'Rebounds (Offensive)': 'OREB',
+     'Steals': 'STL',
+     'Turnovers': 'TO'
+ }
+ # Convert an example to a dataframe
+ def to_df(d):
+     players = {player for v in d.values() if v is not None for player, _ in v.items()}
+     lookup = {k: {a: b for a, b in v.items()} for k, v in d.items()}
+     rows = [{"player": p} | {k: "_" if p not in lookup.get(k, []) else lookup[k][p] for k in names.keys()}
+             for p in players]
+     return pd.DataFrame.from_dict(rows).astype("str").sort_values(axis=0, by="player", ignore_index=True).transpose()
+
+
+ # Make few-shot examples
+ few_shot_examples = 2
+ examples = []
+ for i in range(few_shot_examples):
+     examples.append({"input": rotowire[i][1],
+                      "output": to_df(rotowire[i][0][1]).transpose().set_index("player").to_csv(sep="\t")})
+
+ @prompt(OpenAIStream(), template_file="table.pmpt.txt", stream=True)
+ def extract(model, passage, typ):
+     state = []
+     out = ""
+     for token in model(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ)):
+         out += token
+         html = "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></tr></table>"
+         yield html
+     yield html
+
+
+
+ def run(query):
+     return extract(query, "Player")
+
+ # $
+
+ gradio = show(run,
+               examples=[rotowire[i][1] for i in range(50, 55)],
+               subprompts=[extract],
+               code=open("table.py", "r").read().split("$")[1].strip().strip("#").strip(),
+               out_type="markdown"
+               )
+
+ if __name__ == "__main__":
+     gradio.queue().launch()
temp CHANGED
Binary files a/temp and b/temp differ
 
temp.log CHANGED
Binary files a/temp.log and b/temp.log differ