CobaltZvc committed on
Commit 12a115f · 1 Parent(s): 17dc578

Upload 2 files

Files changed (2)
  1. app.py +302 -0
  2. requirements.txt +12 -0
app.py ADDED
@@ -0,0 +1,302 @@
+ import os
+ import io
+ import warnings
+
+ import openai
+ import streamlit as st
+ import torch
+ from PIL import Image
+ from serpapi import GoogleSearch
+ from diffusers import StableDiffusionPipeline
+ from bokeh.models.widgets import Button
+ from bokeh.models import CustomJS
+ from streamlit_bokeh_events import streamlit_bokeh_events
+ from streamlit_player import st_player
+ from pytube import Search
+ from stability_sdk import client
+ import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
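+ # Note: openai.Completion and openai.Image below are the pre-1.0 openai SDK interface;
+ # they were removed in openai>=1.0, so this file assumes an older openai release.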
+
+ # Stability AI client used for text-to-image generation.
+ os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'
+ stability_api = client.StabilityInference(
+     key=os.environ['STABILITY_KEY'],  # API key reference; keep the key in the environment, not in source.
+     verbose=True,  # Print debug messages.
+     engine="stable-diffusion-v1-5",  # Engine to use for generation.
+     # Available engines: stable-diffusion-v1, stable-diffusion-v1-5, stable-diffusion-512-v2-0, stable-diffusion-768-v2-0,
+     # stable-diffusion-512-v2-1, stable-diffusion-768-v2-1, stable-inpainting-v1-0, stable-inpainting-512-v2-0
+ )
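+ # Assumes STABILITY_KEY (and the other keys read below) are exported before launch, e.g.:
+ #   STABILITY_KEY=<key> OPENAI_API_KEY=<key> SERPAPI_KEY=<key> streamlit run app.py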
+
+ def search_internet(question):
+     # Fetch Google results for the question via SerpApi, then have GPT-3 answer from the snippets.
+     params = {
+         "q": question,
+         "location": "Bengaluru, Karnataka, India",
+         "hl": "hi",
+         "gl": "in",
+         "google_domain": "google.co.in",
+         "api_key": os.environ["SERPAPI_KEY"],  # SerpApi key; keep it in the environment, not in source.
+     }
+
+     search = GoogleSearch(params)
+     results = search.get_dict()
+     organic_results = results["organic_results"]
+
+     # Number each snippet and append its source link.
+     snippets = ""
+     for counter, item in enumerate(organic_results, start=1):
+         snippets += str(counter) + ". " + item.get("snippet", "") + '\n' + item['about_this_result']['source']['source_info_link'] + '\n'
+
+     response = openai.Completion.create(
+         model="text-davinci-003",
+         prompt=f'''The following are snippets from a Google search. Using only these as your knowledge base, answer the question and print the reference link along with the answer.\n\n{snippets}\n\nQuestion - {question}\n\nAnswer -''',
+         temperature=0.49,
+         max_tokens=256,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0)
+
+     string_temp = response.choices[0].text
+
+     st.write(string_temp)
+     st.write(snippets)
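+ # Shape of the SerpApi fields consumed above (inferred from this code, not the full response schema):
+ # results["organic_results"][i] == {"snippet": "...", "about_this_result": {"source": {"source_info_link": "https://..."}}, ...}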
+
+ openai.api_key = os.environ["OPENAI_API_KEY"]  # OpenAI key; keep it in the environment, not in source.
+
+ def openai_response(PROMPT):
+     # Generate one 256x256 image with the OpenAI Image API and return its URL.
+     response = openai.Image.create(
+         prompt=PROMPT,
+         n=1,
+         size="256x256",
+     )
+     return response["data"][0]["url"]
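+ # This helper is not referenced elsewhere in this file; a hypothetical use would be
+ # st.image(openai_response("a watercolor fox")), since st.image accepts an image URL.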
+
+ # page_bg_img = """
+ # <style>
+ # [data-testid="stAppViewContainer"] {
+ # background-color: #ffffff;
+ # opacity: 0.8;
+ # background-image: repeating-radial-gradient( circle at 0 0, transparent 0, #ffffff 40px ), repeating-linear-gradient( #55a6f655, #55a6f6 );
+ # }
+ # </style>
+ # """
+
+ # st.markdown(page_bg_img, unsafe_allow_html=True)
+ st.title("Welcome to :red[_HyperChat_]!!🤖")
+ st.title("How can I help?")
+
+ Input_type = st.radio(
+     "**Input type:**",
+     ('TEXT', 'SPEECH')
+ )
+
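+ # The model reply below doubles as a router: literal tokens gen_draw("..."), vid_tube("..."), or an
+ # "i don't have access to internet" message select the image-generation, video, or web-search path.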
+ if Input_type == 'TEXT':
+     # page_bg_img2 = """
+     # <style>
+     # [data-testid="stAppViewContainer"] {
+     # background-color: #e5e5f7;
+     # opacity: 0.8;
+     # background-size: 20px 20px;
+     # background-image: repeating-linear-gradient(0deg, #32d947, #32d947 1px, #e5e5f7 1px, #e5e5f7);
+     # }
+     # </style>
+     # """
+     # st.markdown(page_bg_img2, unsafe_allow_html=True)
+     st.write('**You are now in Text input mode**')
+     mytext = st.text_input('**Go on! Ask me anything:**')
+     if st.button("SUBMIT"):
+         question = mytext
+         response = openai.Completion.create(
+             model="text-davinci-003",
+             prompt=f'''Your name is Alexa, your knowledge cutoff date is 2021-09, and you are not aware of any events after that time.
+             If the answer to the following question is not in your knowledge base, or the query (weather updates / stock updates / current news etc.) requires internet access, print: i don't have access to internet to answer your question.
+             If the question asks for an image, painting, or drawing to be generated, print the ipython-style output function gen_draw("detailed prompt of image to be generated").
+             If the question asks to play a song, video, or music by a singer, print the ipython-style output function vid_tube("relevant search query").
+             If the question is about operating home appliances, print the ipython-style output function home_app("action (ON/OFF), appliance (TV, Geyser, Fridge, Lights, Fans, AC)").
+             If the question is about sending mail or SMS, print the ipython-style output function messenger_app("message from us, messenger (email, sms)").
+             \nQuestion-{question}
+             \nAnswer -''',
+             temperature=0.49,
+             max_tokens=256,
+             top_p=1,
+             frequency_penalty=0,
+             presence_penalty=0
+         )
+         string_temp = response.choices[0].text
+
+         if "gen_draw" in string_temp:
+             try:
+                 # Set up the initial generation parameters.
+                 answers = stability_api.generate(
+                     prompt=mytext,
+                     seed=992446758,  # With a fixed seed the generated image is deterministic:
+                     # as long as all generation parameters stay the same, generating again recalls the same image.
+                     # Note: this isn't quite the case for CLIP-guided generations.
+                     steps=30,  # Number of inference steps performed. Defaults to 30.
+                     cfg_scale=8.0,  # How strongly the generation is guided to match the prompt.
+                     # Higher values follow the prompt more strictly. Defaults to 7.0 if not specified.
+                     width=512,  # Generation width, defaults to 512 if not included.
+                     height=512,  # Generation height, defaults to 512 if not included.
+                     samples=1,  # Number of images to generate, defaults to 1 if not included.
+                     sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used to denoise the generation.
+                     # Defaults to k_dpmpp_2m if not specified. CLIP guidance only supports ancestral samplers.
+                     # (Available samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+                 )
+
+                 # Warn if the adult-content classifier is tripped; otherwise display and save the generated images.
+                 for resp in answers:
+                     for artifact in resp.artifacts:
+                         if artifact.finish_reason == generation.FILTER:
+                             warnings.warn(
+                                 "Your request activated the API's safety filters and could not be processed. "
+                                 "Please modify the prompt and try again.")
+                         if artifact.type == generation.ARTIFACT_IMAGE:
+                             img = Image.open(io.BytesIO(artifact.binary))
+                             st.image(img)
+                             img.save(str(artifact.seed) + ".png")  # Save each image with its seed number as the filename.
+             except Exception:
+                 # Fall back to a local diffusers pipeline if the Stability API call fails.
+                 st.write('image is being generated, please wait...')
+
+                 def extract_image_description(input_string):
+                     return input_string.split('gen_draw("')[1].split('")')[0]
+
+                 prompt = extract_image_description(string_temp)
+                 model_id = 'runwayml/stable-diffusion-v1-5'
+                 device = "cuda" if torch.cuda.is_available() else "cpu"
+                 pipe = StableDiffusionPipeline.from_pretrained(
+                     model_id,
+                     torch_dtype=torch.float16 if device == "cuda" else torch.float32)
+                 pipe = pipe.to(device)
+
+                 image = pipe(prompt).images[0]
+                 image.save("generated_image.png")
+                 st.image(image)
+
+         elif "vid_tube" in string_temp:
+             s = Search(mytext)
+             first_vid = s.results[0]
+             video_id = first_vid.video_id  # pytube exposes the video id directly; no need to parse repr() strings.
+             OurURL = "https://www.youtube.com/watch?v=" + video_id
+             st.write(OurURL)
+             st_player(OurURL)
+
+         elif "don't" in string_temp or "internet" in string_temp:
+             st.write('searching internet...')
+             search_internet(question)
+
+         else:
+             st.write(string_temp)
+
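+ # SPEECH mode below: a Bokeh button starts the browser's webkitSpeechRecognition, and final transcripts
+ # are re-dispatched as a "GET_TEXT" DOM event that streamlit_bokeh_events surfaces back to Python.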
+ elif Input_type == 'SPEECH':
+     stt_button = Button(label="Speak", width=100)
+     stt_button.js_on_event("button_click", CustomJS(code="""
+         var recognition = new webkitSpeechRecognition();
+         recognition.continuous = true;
+         recognition.interimResults = true;
+         recognition.onresult = function (e) {
+             var value = "";
+             for (var i = e.resultIndex; i < e.results.length; ++i) {
+                 if (e.results[i].isFinal) {
+                     value += e.results[i][0].transcript;
+                 }
+             }
+             if (value != "") {
+                 document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+             }
+         };
+         recognition.start();
+     """))
+
+     result = streamlit_bokeh_events(
+         stt_button,
+         events="GET_TEXT",
+         key="listen",
+         refresh_on_update=False,
+         override_height=75,
+         debounce_time=0)
+
+     if result:
+         if "GET_TEXT" in result:
+             st.write(result.get("GET_TEXT"))
+             question = result.get("GET_TEXT")
+             response = openai.Completion.create(
+                 model="text-davinci-003",
+                 prompt=f'''Your knowledge cutoff date is 2021-09, and you are not aware of any events after that time.
+                 If the answer to the following question is not in your knowledge base, or the query (weather updates / stock updates / current news etc.) requires internet access, print: i don't have access to internet to answer your question.
+                 If the question asks for an image, painting, or drawing to be generated, print the ipython-style output function gen_draw("detailed prompt of image to be generated").
+                 If the question asks to play a song, video, or music by a singer, print the ipython-style output function vid_tube("relevant search query").
+                 \nQuestion-{question}
+                 \nAnswer -''',
+                 temperature=0.49,
+                 max_tokens=256,
+                 top_p=1,
+                 frequency_penalty=0,
+                 presence_penalty=0
+             )
+             string_temp = response.choices[0].text
+
+             if "gen_draw" in string_temp:
+                 st.write('*image is being generated, please wait...*')
+
+                 def extract_image_description(input_string):
+                     return input_string.split('gen_draw("')[1].split('")')[0]
+
+                 prompt = extract_image_description(string_temp)
+                 model_id = 'runwayml/stable-diffusion-v1-5'
+                 device = "cuda" if torch.cuda.is_available() else "cpu"
+                 pipe = StableDiffusionPipeline.from_pretrained(
+                     model_id,
+                     torch_dtype=torch.float16 if device == "cuda" else torch.float32)
+                 pipe = pipe.to(device)
+
+                 image = pipe(prompt).images[0]
+                 image.save("generated_image.png")
+                 st.image(image)
+
+             elif "vid_tube" in string_temp:
+                 s = Search(question)
+                 first_vid = s.results[0]
+                 video_id = first_vid.video_id  # pytube exposes the video id directly; no need to parse repr() strings.
+                 OurURL = "https://www.youtube.com/watch?v=" + video_id
+                 st.write(OurURL)
+                 st_player(OurURL)
+
+             elif "don't" in string_temp or "internet" in string_temp:
+                 st.write('*searching internet*')
+                 search_internet(question)
+             else:
+                 st.write(string_temp)
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ stability-sdk
+ pytube
+ openai
+ google-search-results
+ accelerate
+ streamlit
+ wget
+ streamlit-bokeh-events
+ streamlit-player
+ diffusers
+ transformers
+ scipy
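+ torch  # assumed addition: app.py imports torch directly, so it is listed here rather than relying on a transitive install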