Spaces:
Runtime error
Runtime error
Commit
•
195077e
1
Parent(s):
22545c2
Update app.py
Browse files
app.py
CHANGED
@@ -164,6 +164,15 @@ class VideoClassifier:
|
|
164 |
'News and Politics', 'Personal Finance', 'Pets', 'Pop Culture','Real Estate', 'Religion & Spirituality', 'Science', 'Shopping', 'Sports',
|
165 |
'Style & Fashion','Technology & Computing', 'Television', 'Travel', 'Video Gaming']
|
166 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
167 |
|
168 |
template1 = '''Given below are the different type of main video classes
|
169 |
{main_categories}
|
@@ -183,8 +192,13 @@ class VideoClassifier:
|
|
183 |
chain1 = LLMChain(llm=self.llm, prompt=prompt1)
|
184 |
main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
|
185 |
except:
|
186 |
-
|
187 |
-
|
|
|
|
|
|
|
|
|
|
|
188 |
|
189 |
print(main_class)
|
190 |
print("#######################################################")
|
@@ -252,10 +266,13 @@ class VideoClassifier:
|
|
252 |
chain2 = LLMChain(llm=self.llm, prompt=prompt2)
|
253 |
answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
|
254 |
except:
|
255 |
-
|
256 |
-
|
257 |
-
response
|
258 |
-
|
|
|
|
|
|
|
259 |
print("Preprocess Answer",answer)
|
260 |
|
261 |
|
|
|
164 |
'News and Politics', 'Personal Finance', 'Pets', 'Pop Culture','Real Estate', 'Religion & Spirituality', 'Science', 'Shopping', 'Sports',
|
165 |
'Style & Fashion','Technology & Computing', 'Television', 'Travel', 'Video Gaming']
|
166 |
|
167 |
+
generate_kwargs = {
|
168 |
+
"temperature": 0.9,
|
169 |
+
"max_new_tokens": 256,
|
170 |
+
"top_p": 0.95,
|
171 |
+
"repetition_penalty": 1.0,
|
172 |
+
"do_sample": True,
|
173 |
+
"seed": 42,
|
174 |
+
"return_full_text": False
|
175 |
+
}
|
176 |
|
177 |
template1 = '''Given below are the different type of main video classes
|
178 |
{main_categories}
|
|
|
192 |
chain1 = LLMChain(llm=self.llm, prompt=prompt1)
|
193 |
main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
|
194 |
except:
|
195 |
+
stream = self.client.text_generation(prompt1, **generate_kwargs, stream=True, details=True)
|
196 |
+
output = ""
|
197 |
+
for response in stream:
|
198 |
+
output += response.token.text
|
199 |
+
print("Streaming output:", output)
|
200 |
+
|
201 |
+
main_class = output.strip()
|
202 |
|
203 |
print(main_class)
|
204 |
print("#######################################################")
|
|
|
266 |
chain2 = LLMChain(llm=self.llm, prompt=prompt2)
|
267 |
answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
|
268 |
except:
|
269 |
+
stream = self.client.text_generation(prompt2, **generate_kwargs, stream=True, details=True)
|
270 |
+
output = ""
|
271 |
+
for response in stream:
|
272 |
+
output += response.token.text
|
273 |
+
print("Streaming output:", output)
|
274 |
+
|
275 |
+
answer = output.strip()
|
276 |
print("Preprocess Answer",answer)
|
277 |
|
278 |
|