darragh commited on
Commit
9d3f78d
1 Parent(s): d4a9acc
README.md CHANGED
@@ -1,13 +1,14 @@
1
  ---
2
- title: Bloom Demo Long
3
- emoji: 👁
4
- colorFrom: gray
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 3.1.7
8
  app_file: app.py
9
  pinned: false
10
- license: apache-2.0
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Bloom Demo
3
+ emoji: 🌸
4
+ colorFrom: pink
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 3.0.25
8
  app_file: app.py
9
  pinned: false
10
+ models:
11
+ - bigscience/bloom
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import json
4
+ import os
5
+ from screenshot import (
6
+ before_prompt,
7
+ prompt_to_generation,
8
+ after_generation,
9
+ js_save,
10
+ js_load_script,
11
+ )
12
+ from spaces_info import description, examples, initial_prompt_value
13
+
14
+ API_URL = os.getenv("API_URL")
15
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
16
+
17
+
18
def query(payload):
    """POST *payload* to the hosted inference API and return the parsed JSON reply.

    Reads the module-level ``API_URL`` and ``HF_API_TOKEN`` (taken from the
    environment at import time). Raises ``requests.RequestException`` on
    network failure or timeout; a non-JSON body raises ``ValueError``.
    """
    print(payload)  # NOTE(review): debug logging; consider the logging module
    # requests.post is the idiomatic shorthand for requests.request("POST", ...);
    # a timeout keeps a stalled inference server from hanging the UI forever.
    response = requests.post(
        API_URL,
        json=payload,
        headers={"Authorization": f"Bearer {HF_API_TOKEN}"},
        timeout=60,
    )
    print(response)
    # Response.json() decodes using the declared charset — equivalent to the
    # previous json.loads(response.content.decode("utf-8")) but more robust.
    return response.json()
23
+
24
+
25
def inference(input_sentence, max_length, sample_or_greedy, seed=42):
    """Generate a BLOOM completion for *input_sentence* and format it for the UI.

    Parameters
    ----------
    input_sentence : str
        Prompt to complete.
    max_length : int
        Number of new tokens to request from the API.
    sample_or_greedy : str
        ``"Sample"`` enables nucleus sampling (top_p=0.9); any other value
        decodes greedily.
    seed : int
        Sampling seed. The UI wires the selected sample index (0-4) here.

    Returns
    -------
    tuple
        ``(html, raw_text, error_markdown)`` — the screenshot HTML, the raw
        generated text, and an empty error string. On an API error the first
        two entries are ``None`` and the third carries the message.
    """
    # Both decoding modes share these keys; only the sampling knobs differ,
    # so build the dict once instead of duplicating it per branch.
    parameters = {
        "max_new_tokens": max_length,
        "seed": seed,
        "early_stopping": False,
        "length_penalty": 0.0,
        "eos_token_id": None,
    }
    if sample_or_greedy == "Sample":
        parameters["top_p"] = 0.9
        parameters["do_sample"] = True
    else:
        parameters["do_sample"] = False

    payload = {
        "inputs": input_sentence,
        "parameters": parameters,
        "options": {"use_cache": False},
    }

    data = query(payload)

    if "error" in data:
        return (None, None, f"<span style='color:red'>ERROR: {data['error']} </span>")

    # The API echoes the prompt; keep only the continuation after its first
    # occurrence. partition() avoids the IndexError the previous
    # split(...)[1] raised when the echo was missing from the reply.
    _, found, generation = data[0]["generated_text"].partition(input_sentence)
    if not found:
        generation = data[0]["generated_text"]

    return (
        before_prompt
        + input_sentence
        + prompt_to_generation
        + generation
        + after_generation,
        data[0]["generated_text"],
        "",
    )
63
+
64
+
65
if __name__ == "__main__":
    # Build the Gradio Blocks UI: inputs (prompt, token count, decoding mode,
    # sample selector) on the left, outputs (log, raw text, screenshot HTML)
    # on the right.
    demo = gr.Blocks()
    with demo:
        with gr.Row():
            gr.Markdown(value=description)
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(
                    label="Input",
                    value=" ",  # should be set to " " when plugged into a real API
                )
                tokens = gr.Slider(1, 64, value=32, step=1, label="Tokens to generate")
                sampling = gr.Radio(
                    ["Sample", "Greedy"], label="Sample or greedy", value="Sample"
                )
                # type="index" makes this radio pass its 0-based index, which
                # inference() receives as the `seed` argument.
                sampling2 = gr.Radio(
                    ["Sample 1", "Sample 2", "Sample 3", "Sample 4", "Sample 5"],
                    value="Sample 1",
                    label="Sample other generations (only work in 'Sample' mode)",
                    type="index",
                )

                with gr.Row():
                    submit = gr.Button("Submit")
                    load_image = gr.Button("Generate Image")
            with gr.Column():
                text_error = gr.Markdown(label="Log information")
                text_out = gr.Textbox(label="Output")
                display_out = gr.HTML(label="Image")
                # NOTE(review): set_event_trigger is an internal Gradio 3.0.x
                # API; it appears to inject the html2canvas <script> tag on
                # page load so js_save can screenshot later — confirm against
                # the pinned gradio version before upgrading.
                display_out.set_event_trigger(
                    "load",
                    fn=None,
                    inputs=None,
                    outputs=None,
                    no_target=True,
                    js=js_load_script,
                )
        with gr.Row():
            gr.Examples(examples=examples, inputs=[text, tokens, sampling, sampling2])

        # Submit runs the model; outputs feed the HTML widget, the raw text
        # box, and the error log in that order (matching inference's return).
        submit.click(
            inference,
            inputs=[text, tokens, sampling, sampling2],
            outputs=[display_out, text_out, text_error],
        )

        # Client-side only: _js (Gradio 3.0.x keyword) runs js_save in the
        # browser to render the #capture div to an image via html2canvas.
        load_image.click(fn=None, inputs=None, outputs=None, _js=js_save)

    demo.launch()
assets/image_1.png ADDED

Git LFS Details

  • SHA256: f9b425e85f6a1594c9352c383bc8a0eb9a3b8ba253481ef388fbcb841e8d69c8
  • Pointer size: 132 Bytes
  • Size of remote file: 1.64 MB
assets/image_2.png ADDED

Git LFS Details

  • SHA256: 1cd7402fcd59ab77b330eb3478872e93e7f2ff8a0e4961c4c5b96444ff5139dd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.86 MB
assets/image_3.png ADDED

Git LFS Details

  • SHA256: ace8f3d3ac0822ed9936413f8721f908f0d13dac23767dce74f5dcdd48acea45
  • Pointer size: 132 Bytes
  • Size of remote file: 6.08 MB
assets/image_4.png ADDED

Git LFS Details

  • SHA256: 056c04bbdd562ba281893ab7fb9e9e346cb0e211e763ff521960ff25949ee272
  • Pointer size: 131 Bytes
  • Size of remote file: 172 kB
assets/image_5.png ADDED

Git LFS Details

  • SHA256: c1dd02adca8591cb48f487ae774386a01afb3cc8bd73c871daa8aacddf9adec5
  • Pointer size: 130 Bytes
  • Size of remote file: 98.1 kB
assets/image_6.png ADDED

Git LFS Details

  • SHA256: 0dca2e143678d214a826f9805150d4a79fe025bae5638561ffb61d45b8d5c39f
  • Pointer size: 130 Bytes
  • Size of remote file: 11.3 kB
assets/image_7.png ADDED

Git LFS Details

  • SHA256: ed0103421150ba101de34c1a6f63d0e92d118466029e366a113601d82932a9b2
  • Pointer size: 130 Bytes
  • Size of remote file: 11.1 kB
bg.jpg ADDED
screenshot.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## HTML and JS code to give Gradio HTML
# HTML scaffold for the shareable screenshot. app.py assembles the final
# markup as: before_prompt + prompt + prompt_to_generation + generation
# + after_generation. #capture stays hidden (display:none) until the save
# script clones it for rendering; the canvas lands in #img_placeholder.
before_prompt = """
<div id = "img_placeholder">
</div>
<div class="relative" id="capture" align="justify" style="display:none;">
<div class="absolute font-semibold" style="left:7%; right:7%; bottom:32%; top:7%; font-size: 8rem; line-height: 1; padding: 1rem; font-family:-apple-system, BlinkMacSystemFont, 'Arial', sans-serif;" id="text_box">
<p class="text" style="color:white; white-space:pre-wrap;" dir="auto" id = "prompt">"""
# Closes the white prompt paragraph and opens the pink generation paragraph.
prompt_to_generation = """</p>
<p class="text" style="color:#FE57A0; white-space:pre-wrap;" dir="auto" id="generation">"""
# Closes the generation paragraph and adds the background image.
after_generation = """</p>
</div>
<img src="https://huggingface.co/spaces/huggingface/bloom_demo/raw/main/bg.jpg" class="w-full" />
</div>
"""

# Browser-side callback (passed via _js in app.py): clones the hidden
# #capture div, shrinks the font until the text fits its box, renders it
# with html2canvas, and prepends the resulting canvas to #img_placeholder.
js_save = """() => {
/*might need to add .root to launch locally */
var gradioapp = document.body.getElementsByTagName('gradio-app')[0];

/* Save image */
capture = gradioapp.querySelector('#capture')
img_placeholder = gradioapp.querySelector('#img_placeholder')
html2canvas(capture, {
useCORS: true,
onclone: function (clonedDoc) {
clonedDoc.querySelector('#capture').style.display = 'block';

/*Fits text to box*/
var text_box = clonedDoc.querySelector('#text_box');
var prompt = clonedDoc.querySelector('#prompt');
var generation = clonedDoc.querySelector('#generation');
console.log(text_box, generation, prompt)
cur_font_size = getComputedStyle(text_box).getPropertyValue("font-size")
while( (text_box.clientHeight < text_box.scrollHeight || text_box.clientWidth < text_box.scrollWidth) & parseInt(cur_font_size) > 10) {
console.log(cur_font_size, text_box.clientHeight, text_box.scrollHeight, text_box.clientWidth, text_box.scrollWidth)
cur_font_size = 0.98 * parseInt(cur_font_size) + "px"
cur_line_height = 1.1 * parseInt(cur_font_size) + "px"
text_box.style.fontSize = cur_font_size
prompt.style.fontSize = cur_font_size
generation.style.fontSize = cur_font_size
text_box.style.lineHeight = cur_line_height
prompt.style.lineHeight = cur_line_height
generation.style.lineHeight = cur_line_height
}
}
}).then((canvas)=>{
img_placeholder.prepend(canvas);
})
}"""


# Injected on page load (via set_event_trigger in app.py) so html2canvas is
# available by the time js_save runs.
js_load_script="""() => {
var script = document.createElement('script');
script.src = "https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js";
document.head.appendChild(script);
}"""
spaces_info.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Markdown shown at the top of the demo UI (rendered by gr.Markdown in app.py).
description = """Gradio Demo for BLOOM. To use it, simply add your text, or click one of the examples to load them.
Tips:
- Do NOT talk to BLOOM as an entity, it's not a chatbot but a webpage/blog/article completion model.
- For the best results: MIMIC a few sentences of a webpage similar to the content you want to generate.
Start a paragraph as if YOU were writing a blog, webpage, math post, coding article and BLOOM will generate a coherent follow-up. Longer prompts usually give more interesting results.
- Content: Please see our [content disclaimer](https://hf.co/spaces/bigscience/bloom-book) before using the model, as it may sometimes behave in unexpected ways.

Options:
- sampling: imaginative completions (may be not super accurate e.g. math/history)
- greedy: accurate completions (may be more boring or have repetitions)
"""

# Alternate header text for the work-in-progress JAX/Flax variant of the demo.
# NOTE(review): not referenced by the visible app.py code — kept for other
# entry points; confirm before removing.
wip_description = """JAX / Flax Gradio Demo for BLOOM. The 176B BLOOM model running on a TPU v3-256 pod, with 2D model parallelism and custom mesh axes.
Note:
1. For this WIP demo, only **sampling** is supported.
2. Rendering of the screenshot is currently not optimised. To experience the true speed of JAX / Flax, tick 'just output raw text'.
"""

# gr.Examples rows: each entry matches the UI inputs in order —
# [prompt text, tokens to generate, "Sample"/"Greedy", sample-selector label].
examples = [
    [
        'A "whatpu" is a small, furry animal native to Tanzania. An example of a sentence that uses the word whatpu is: We were traveling in Africa and we saw these very cute whatpus. To do a "farduddle" means to jump up and down really fast. An example of a sentence that uses the word farduddle is:',
        32,
        "Sample",
        "Sample 1",
    ],
    [
        "A poem about the beauty of science by Alfred Edgar Brittle\nTitle: The Magic Craft\nIn the old times",
        50,
        "Sample",
        "Sample 1",
    ],
    ["استخراج العدد العاملي في لغة بايثون:", 30, "Greedy", "Sample 1"],
    ["Pour déguster un ortolan, il faut tout d'abord", 32, "Sample", "Sample 1"],
    [
        "Traduce español de España a español de Argentina\nEl coche es rojo - el auto es rojo\nEl ordenador es nuevo - la computadora es nueva\nel boligrafo es negro -",
        16,
        "Sample",
        "Sample 1",
    ],
    [
        "Estos ejemplos quitan vocales de las palabras\nEjemplos:\nhola - hl\nmanzana - mnzn\npapas - pps\nalacran - lcrn\npapa -",
        16,
        "Sample",
        "Sample 1",
    ],
    [
        "Question: If I put cheese into the fridge, will it melt?\nAnswer:",
        32,
        "Sample",
        "Sample 1",
    ],
    ["Math exercise - answers:\n34+10=44\n54+20=", 16, "Greedy", "Sample 1"],
    [
        "Question: Where does the Greek Goddess Persephone spend half of the year when she is not with her mother?\nAnswer:",
        24,
        "Greedy",
        "Sample 1",
    ],
    [
        "spelling test answers.\nWhat are the letters in « language »?\nAnswer: l-a-n-g-u-a-g-e\nWhat are the letters in « Romanian »?\nAnswer:",
        24,
        "Greedy",
        "Sample 1",
    ],
]

# Default prompt pre-filled in the input textbox (Arabic: "computing the
# factorial in Python" followed by example code).
initial_prompt_value = """استخراج العدد العاملي في لغة بايثون :
def factorial(n):
    if n == 0:
        return 1
    else:
        result = 1
        for i in range(1, n + 1) :
            result *= i
        return result
print(factorial(5))"""