import gradio as gr
# 'huggingface/facebook/opt-13b' and 'huggingface/EleutherAI/gpt-neox-20b'
# are excluded because hosted inference is not supported for them
name_list = ['huggingface/bigscience/T0pp', 'huggingface/EleutherAI/gpt-j-6B', 'huggingface/gpt2-xl', 'huggingface/EleutherAI/gpt-neo-2.7B']
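# Note (assumption about the loading mechanism): the 'huggingface/<repo_id>' prefix
# tells gr.Interface.load (Gradio 3.x) to serve the model through the Hugging Face
# Inference API rather than downloading it locally.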
# examples from Figure 1 of the paper
examples = [
    # zero-shot
    ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: The answer (arabic numerals) is "],
    # zero-shot-CoT
    ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step."],
    # few-shot
    ["Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\nA: The answer is 11.\nQ: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA:"],
    # few-shot-CoT
    ["Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\nA: Roger started with 5 balls. 2 cans of 3 tennis balls each is 6 tennis balls. 5 + 6 = 11. The answer is 11.\nQ: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA:"],
]
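# For reference, the arithmetic behind the prompts above: the juggler riddle
# resolves to 16 / 2 = 8 golf balls and 8 / 2 = 4 blue golf balls, and the
# few-shot exemplar's answer is 5 + 2 * 3 = 11 tennis balls.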
def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2
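# Quick sanity check for the helper above (numbers taken from the example prompts):
#   calculator(5, "add", 6)      # -> 11, Roger's tennis balls
#   calculator(16, "divide", 2)  # -> 8.0, golf balls in the riddle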
secrets = ["API_KEY1", "API_KEY2", "API_KEY3", "API_KEY4", "API_KEY5", "API_KEY6"]
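# Assumption: the strings above are placeholder secret names, not real tokens.
# In a deployed Space they would typically be resolved from environment variables
# before use, e.g. `secrets = [os.getenv(name) for name in secrets]` (needs `import os`).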
def complete_with_gpt(text):
    # try each API key in turn; fall through to the next one if loading fails
    for secret in secrets:
        try:
            interfaces = [gr.Interface.load(name, api_key=secret) for name in name_list]
            return [interface(text) for interface in interfaces]
        except Exception:
            print("Error: API key is not valid")
    return ["Error: no valid API key"] * len(name_list)
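# Usage sketch (assuming a valid API key): complete_with_gpt(examples[1][0])
# yields one completion string per model in name_list, in the same order as
# the output textboxes created below.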
def set_example(example: list) -> dict:
    return gr.Textbox.update(value=example[0])
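# gr.Textbox.update(...) returns an update dict that Blocks applies to the targeted
# component; here it copies the clicked Dataset sample into the riddle textbox.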
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Is "Let’s think step by step" all you need?
        """
    )
    with gr.Box():
        with gr.Row():
            with gr.Column():
                input_text = gr.Textbox(label="Write your riddle here", placeholder="Type the riddle here to see if the LM can solve it", lines=4)
                with gr.Row():
                    btn = gr.Button("Language model think brrr ...")
                gr.Markdown("Note: due to the high number of visitors, the inference API rate limit is sometimes exceeded, which results in errors. We are looking for a workaround, thanks for understanding 🤗")
                example_text = gr.Dataset(components=[input_text], samples=examples)
                example_text.click(fn=set_example,
                                   inputs=example_text,
                                   outputs=example_text.components)
            with gr.Column():
                gr.Markdown("Let's see how different LMs multiply matrices / think 💭")
                btn.click(complete_with_gpt, inputs=input_text, outputs=[gr.Textbox(label=name, lines=4) for name in name_list])
            with gr.Column():
                gr.Markdown("In case you need to count to verify the answer, you can use the calculator below 😉")
                num1 = gr.Number(label="First number")
                num2 = gr.Number(label="Second number")
                operation = gr.Dropdown(["add", "subtract", "multiply", "divide"], label="Operation")
                with gr.Row():
                    calculate = gr.Button("Calculate")
                with gr.Column():
                    calculate.click(calculator, inputs=[num1, operation, num2], outputs=gr.Textbox(label="Result", lines=1))
    gr.Markdown(
        """
        <p style='text-align: center'><a href='https://arxiv.org/abs/2205.11916' target='_blank'>Large Language Models are Zero-Shot Reasoners</a> | <a href='https://github.com/kojima-takeshi188/zero_shot_cot' target='_blank'>Github Repo</a></p>
        """
    )
    with gr.Row():
        gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_zero-and-few-shot-reasoning)")
demo.launch(enable_queue=True, debug=True)