Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -75,16 +75,19 @@ def synthesize(article_url,progress_audio=gr.Progress()):
     chat.append(
         {
             "role": "user",
-            "content": text+"""\n Convert the text
+            "content": text + """\n Convert the provided text into a short, informative podcast conversation between two experts. The tone should be professional and engaging. Please adhere to the following format and return the conversation in JSON:
+
             {
-
-
-
-
-            }
+                "conversation": [
+                    {"speaker": "", "text": ""},
+                    {"speaker": "", "text": ""}
+                ]
+            }
+            """,
         }
     )

+
     messages = tok.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
     model_inputs = tok([messages], return_tensors="pt").to(device)
     streamer = TextIteratorStreamer(
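For context, the edited prompt asks the model to return its answer as a JSON object with a "conversation" array of {"speaker", "text"} turns, and the context lines below it (apply_chat_template, TextIteratorStreamer) suggest the reply is generated with the standard streaming pattern from transformers. The rest of synthesize() is not visible in this hunk, so the following is only a minimal sketch, under those assumptions, of how the streamed output could be collected and the requested conversation parsed; stream_and_parse and the regex-based JSON extraction are hypothetical helpers, not the Space's actual code.

import json
import re
from threading import Thread

from transformers import TextIteratorStreamer


def stream_and_parse(model, tok, chat, device, max_new_tokens=1024):
    """Apply the chat template, stream the completion, and parse the JSON
    'conversation' the prompt above asks for. Returns a list of
    {"speaker": ..., "text": ...} turns. (Sketch only; the Space's real
    generation code is not shown in this hunk.)"""
    messages = tok.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    model_inputs = tok([messages], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

    # generate() runs in a background thread so this thread can drain the streamer.
    Thread(
        target=model.generate,
        kwargs=dict(**model_inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    ).start()

    raw = "".join(chunk for chunk in streamer)

    # The model may wrap the JSON in extra prose; grab the first {...} block.
    match = re.search(r"\{.*\}", raw, re.DOTALL)
    if match is None:
        raise ValueError("model did not return the requested JSON object")
    return json.loads(match.group(0))["conversation"]

Each parsed turn could then be handed to whatever text-to-speech step synthesize() uses for the two speakers, which this hunk does not show.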