m3th0d committed
Commit c8dd43b
1 Parent(s): 92748dc

Update app.py

Files changed (1)
  1. app.py +16 -88
app.py CHANGED
@@ -1,88 +1,16 @@
- import gradio as gr
- import requests
- import os
-
- ##Bloom Inference API
- API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
- HF_TOKEN = os.environ["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"]
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
-
- def text_generate(prompt, generated_txt):
-     #Prints to debug the code
-     print(f"*****Inside text_generate - Prompt is :{prompt}")
-     json_ = {"inputs": prompt,
-              "parameters":
-              {
-                  "top_p": 0.9,
-                  "temperature": 1.1,
-                  #"max_new_tokens": 64,
-                  "return_full_text": True,
-                  "do_sample":True,
-              },
-              "options":
-              {"use_cache": True,
-               "wait_for_model": True,
-              },}
-     response = requests.post(API_URL, headers=headers, json=json_)
-     print(f"Response is : {response}")
-     output = response.json()
-     print(f"output is : {output}")
-     output_tmp = output[0]['generated_text']
-     print(f"output_tmp is: {output_tmp}")
-     solution = output_tmp.split("\nQ:")[0]
-     print(f"Final response after splits is: {solution}")
-     if '\nOutput:' in solution:
-         final_solution = solution.split("\nOutput:")[0]
-         print(f"Response after removing output is: {final_solution}")
-     elif '\n\n' in solution:
-         final_solution = solution.split("\n\n")[0]
-         print(f"Response after removing new line entries is: {final_solution}")
-     else:
-         final_solution = solution
-
-
-     if len(generated_txt) == 0 :
-         display_output = final_solution
-     else:
-         display_output = generated_txt[:-len(prompt)] + final_solution
-     new_prompt = final_solution[len(prompt):]
-     print(f"new prompt for next cycle is : {new_prompt}")
-     print(f"display_output for printing on screen is : {display_output}")
-     if len(new_prompt) == 0:
-         temp_text = display_output[::-1]
-         print(f"What is the last character of sentence? : {temp_text[0]}")
-         if temp_text[1] == '.':
-             first_period_loc = temp_text[2:].find('.') + 1
-             print(f"Location of last Period is: {first_period_loc}")
-             new_prompt = display_output[-first_period_loc:-1]
-             print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
-         else:
-             print("HERE")
-             first_period_loc = temp_text.find('.')
-             print(f"Location of last Period is : {first_period_loc}")
-             new_prompt = display_output[-first_period_loc:-1]
-             print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
-         display_output = display_output[:-1]
-
-     return display_output, new_prompt
-
-
- demo = gr.Blocks()
-
- with demo:
-     gr.Markdown("<h1><center>Write Stories Using Bloom</center></h1>")
-     gr.Markdown(
-         """Bloom is a model by [HuggingFace](https://huggingface.co/bigscience/bloom) and a team of more than 1000 researchers coming together as [BigScienceW Bloom](https://twitter.com/BigscienceW).\n\nLarge language models have demonstrated a capability of producing coherent sentences and given a context we can pretty much decide the *theme* of generated text.\n\nHow to Use this App: Use the sample text given as prompt or type in a new prompt as a starting point of your awesome story! Just keep pressing the 'Generate Text' Button and go crazy!\n\nHow this App works: This app operates by feeding back the text generated by Bloom to itself as a Prompt for next generation round and so on. Currently, due to size-limits on Prompt and Token generation, we are only able to feed very limited-length text as Prompt and are getting very few tokens generated in-turn. This makes it difficult to keep a tab on theme of text generation, so please bear with that. In summary, I believe it is a nice little fun App which you can play with for a while.\n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) for EuroPython 2022 Demo."""
-     )
-     with gr.Row():
-         input_prompt = gr.Textbox(label="Write some text to get started...", lines=3, value="Dear human philosophers, I read your comments on my abilities and limitations with great interest.")
-
-     with gr.Row():
-         generated_txt = gr.Textbox(lines=7, visible = True)
-
-     b1 = gr.Button("Generate Your Story")
-
-     b1.click(text_generate, inputs=[input_prompt, generated_txt], outputs=[generated_txt, input_prompt])
-
- demo.launch(enable_queue=True, debug=True)
 
+ async function query(data) {
+     const response = await fetch(
+         "https://api-inference.huggingface.co/models/bigscience/bloom",
+         {
+             headers: { Authorization: "Bhf_RryCjUQHSqwobWXNyvzUWdEYdHxizZDLZlxxxxxx" },
+             method: "POST",
+             body: JSON.stringify(data),
+         }
+     );
+     const result = await response.json();
+     return result;
+ }
+
+ query({"inputs": "Can you please let us know more details about your "}).then((response) => {
+     console.log(JSON.stringify(response));
+ });