ceckenrode committed
Commit 6d915de
1 Parent(s): b728392

Upload app.py

Files changed (1)
app.py +179 -0
app.py ADDED
@@ -0,0 +1,179 @@
+ import gradio as gr
+ import requests
+ import os
+
+ ## Bloom Inference API
+
+ API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"  # Models on HF offer an Inference API, which allows direct calls through an easy interface
+
+ HF_TOKEN = os.environ["HF_TOKEN"]  # Create an access token named HF_TOKEN under your profile's Settings > Access Tokens, then add it as a repository secret in this Space's settings panel; os.environ reads it from there.
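+ # os.environ["HF_TOKEN"] raises a KeyError at startup if the secret is
+ # missing, which fails fast instead of sending unauthenticated requests.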
+
+ # For the headers, the bearer token needs to include your HF_TOKEN value.
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+ # Improved text generation function
+ def text_generate(prompt, generated_txt):
+     # Initialize the Thoughts variable to aggregate debug text
+     Thoughts = ""
+
+     # Debug: display the prompt
+     Thoughts += f"Prompt: {prompt}\n"
+
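+     # Request payload for the Inference API: "parameters" is forwarded to the
+     # model's generation call (nucleus sampling via top_p, extra randomness via
+     # temperature), while "options" controls the API itself (wait_for_model
+     # queues the request until BLOOM is loaded instead of returning an error).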
+     json_ = {
+         "inputs": prompt,
+         "parameters": {
+             "top_p": 0.9,
+             "temperature": 1.1,
+             "return_full_text": True,
+             "do_sample": True,
+         },
+         "options": {
+             "use_cache": True,
+             "wait_for_model": True,
+         },
+     }
+     response = requests.post(API_URL, headers=headers, json=json_)
+     output = response.json()
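+     # On success the API returns a list like [{"generated_text": "..."}];
+     # failures come back as a dict with an "error" key instead.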
+
+     # Debug: display the output
+     Thoughts += f"Output: {output}\n"
+     output_tmp = output[0]['generated_text']
+
+     # Debug: display the output_tmp
+     Thoughts += f"output_tmp is: {output_tmp}\n"
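+     # Anything the model generates past a fresh "\nQ:" marker is it inventing
+     # a follow-up question, so keep only the text before that marker.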
+     solution = output_tmp.split("\nQ:")[0]
+
+     # Debug: display the solution after splitting
+     Thoughts += f"Final response after splits is: {solution}\n"
+
+     if '\nOutput:' in solution:
+         final_solution = solution.split("\nOutput:")[0]
+         Thoughts += f"Response after removing output is: {final_solution}\n"
+     elif '\n\n' in solution:
+         final_solution = solution.split("\n\n")[0]
+         Thoughts += f"Response after removing new line entries is: {final_solution}\n"
+     else:
+         final_solution = solution
+
+     if len(generated_txt) == 0:
+         display_output = final_solution
+     else:
+         display_output = generated_txt[:-len(prompt)] + final_solution
+
+     new_prompt = final_solution[len(prompt):]
+
+     # Debug: display the new prompt for the next cycle
+     Thoughts += f"new prompt for next cycle is: {new_prompt}\n"
+     Thoughts += f"display_output for printing on screen is: {display_output}\n"
+
+     if len(new_prompt) == 0:
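+         # Nothing new survived the splits, so reseed the next cycle with the
+         # last full sentence on screen. Reversing the string lets find('.')
+         # locate the final period.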
+         temp_text = display_output[::-1]
+         Thoughts += f"What is the last character of the sentence?: {temp_text[0]}\n"
+
+         if temp_text[1] == '.':
+             first_period_loc = temp_text[2:].find('.') + 1
+             Thoughts += f"Location of last Period is: {first_period_loc}\n"
+             new_prompt = display_output[-first_period_loc:-1]
+             Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
+         else:
+             first_period_loc = temp_text.find('.')
+             Thoughts += f"Location of last Period is: {first_period_loc}\n"
+             new_prompt = display_output[-first_period_loc:-1]
+             Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
+
+         display_output = display_output[:-1]
+
+     return display_output, new_prompt, Thoughts
+
+
+
+
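+ # Note: text_generate_old below is an earlier version of the handler and is
+ # not wired into the UI; the Blocks demo further down calls text_generate.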
+ # Text generation
+ def text_generate_old(prompt, generated_txt):
+     # Prints to debug the code
+     print(f"*****Inside text_generate - Prompt is :{prompt}")
+     json_ = {
+         "inputs": prompt,
+         "parameters": {
+             "top_p": 0.9,
+             "temperature": 1.1,
+             # "max_new_tokens": 64,
+             "return_full_text": True,
+             "do_sample": True,
+         },
+         "options": {
+             "use_cache": True,
+             "wait_for_model": True,
+         },
+     }
+
+     response = requests.post(API_URL, headers=headers, json=json_)
+     print(f"Response is : {response}")
+     output = response.json()
+     print(f"output is : {output}")
+     output_tmp = output[0]['generated_text']
+     print(f"output_tmp is: {output_tmp}")
+     solution = output_tmp.split("\nQ:")[0]
+     print(f"Final response after splits is: {solution}")
+
+     if '\nOutput:' in solution:
+         final_solution = solution.split("\nOutput:")[0]
+         print(f"Response after removing output is: {final_solution}")
+     elif '\n\n' in solution:
+         final_solution = solution.split("\n\n")[0]
+         print(f"Response after removing new line entries is: {final_solution}")
+     else:
+         final_solution = solution
+
+     if len(generated_txt) == 0:
+         display_output = final_solution
+     else:
+         display_output = generated_txt[:-len(prompt)] + final_solution
+
+     new_prompt = final_solution[len(prompt):]
+     print(f"New prompt for next cycle: {new_prompt}")
+     print(f"Output final is : {display_output}")
+     if len(new_prompt) == 0:
+         temp_text = display_output[::-1]
+         print(f"Last character of sentence: {temp_text[0]}")
+         if temp_text[1] == '.':
+             first_period_loc = temp_text[2:].find('.') + 1
+             print(f"Location of last Period is: {first_period_loc}")
+             new_prompt = display_output[-first_period_loc:-1]
+             print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
+         else:
+             print("HERE")
+             first_period_loc = temp_text.find('.')
+             print(f"Last Period is : {first_period_loc}")
+             new_prompt = display_output[-first_period_loc:-1]
+             print(f"New prompt for next cycle is : {new_prompt}")
+         display_output = display_output[:-1]
+     return display_output, new_prompt
+
+ # An insightful and engaging self-care and health care demo
+ demo = gr.Blocks()
+
+ with demo:
+     with gr.Row():
+         input_prompt = gr.Textbox(
+             label="Write a self-care or health care related question to get started...",
+             lines=3,
+             value="Dear AI, please tell me about the importance of self-care and how it contributes to overall health and well-being.",
+         )
+
+     with gr.Row():
+         generated_txt = gr.Textbox(lines=5, visible=True)
+
+     with gr.Row():
+         Thoughts = gr.Textbox(lines=10, visible=True)
+
+     gen = gr.Button("Discover Health Insights")
+
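+     # Each click sends the current prompt plus the accumulated text to
+     # text_generate; the continuation refills the output box and the derived
+     # new_prompt replaces the input, so repeated clicks keep extending the text.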
+     gen.click(
+         text_generate,
+         inputs=[input_prompt, generated_txt],
+         outputs=[generated_txt, input_prompt, Thoughts],
+     )
+
+ demo.launch(enable_queue=True, debug=True)
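+ # enable_queue in launch() is accepted by the Gradio 3.x line; newer Gradio
+ # releases replace it with a demo.queue() call before launch().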