Update app.py
app.py CHANGED

```diff
@@ -25,7 +25,7 @@ models = {name: AutoModelForCausalLM.from_pretrained(model_id, device_map="auto"
 tokenizers = {name: AutoTokenizer.from_pretrained(model_id) for name, model_id in model_details.items()}
 
 @spaces.GPU(enable_queue=True, duration=90)
-async def generate(
+def generate(
     model_choice: str,
     message: str,
     chat_history: list[tuple[str, str]],
@@ -38,7 +38,7 @@ async def generate(
 ) -> str:
     model = models[model_choice]
     tokenizer = tokenizers[model_choice]
-
+
     conversation = [{"role": "system", "content": system_prompt}] if system_prompt else []
     conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}] for user, assistant in chat_history)
     conversation.append({"role": "user", "content": message})
@@ -51,6 +51,7 @@ async def generate(
 
     return output_text
 
+
 chat_interface = gr.ChatInterface(
     theme='ehristoforu/RE_Theme',
     fn=generate,
```
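For context, the hunk headers reference module-level registries built from a `model_details` mapping. A minimal sketch of that setup, with placeholder model IDs and a dtype choice the diff does not show:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical model registry; the real names and IDs are not shown in the diff.
model_details = {
    "Model A": "org/model-a",
    "Model B": "org/model-b",
}

# From the hunk header: one causal LM per entry, placed onto available devices.
models = {
    name: AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", torch_dtype=torch.float16  # dtype is an assumption
    )
    for name, model_id in model_details.items()
}
# Matching tokenizers keyed the same way, as on line 25 of the diff.
tokenizers = {name: AutoTokenizer.from_pretrained(model_id) for name, model_id in model_details.items()}
```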
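The diff elides `generate`'s remaining parameters (lines 32-37) and its body after the conversation is built (lines 45-50). Below is a sketch of how the full function might read, assuming the elided lines apply the tokenizer's chat template and call `model.generate`; the `system_prompt` and `max_new_tokens` parameters and all generation settings are assumptions. Note that the one-line `conversation.extend(...)` in the diff yields two-element lists, so the sketch flattens each history pair into separate message dicts.

```python
import spaces

@spaces.GPU(enable_queue=True, duration=90)
def generate(
    model_choice: str,
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str = "",    # assumed: used in the body but absent from the visible signature
    max_new_tokens: int = 512,  # assumed generation setting
) -> str:
    model = models[model_choice]          # module-level dicts from the diff context
    tokenizer = tokenizers[model_choice]

    conversation = [{"role": "system", "content": system_prompt}] if system_prompt else []
    # Flatten each (user, assistant) pair into separate message dicts; the
    # diff's one-line extend(...) would append two-element lists instead.
    for user, assistant in chat_history:
        conversation.append({"role": "user", "content": user})
        conversation.append({"role": "assistant", "content": assistant})
    conversation.append({"role": "user", "content": message})

    # Assumed shape of the elided lines 45-50: apply the chat template,
    # generate, and decode only the newly produced tokens.
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens, do_sample=True)
    output_text = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return output_text
```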
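The visible change in this commit drops `async` from `def generate`, which matches how `@spaces.GPU`-decorated functions are ordinarily written: plain synchronous callables dispatched to a ZeroGPU worker. The diff passes `generate` to `gr.ChatInterface` directly, but Gradio invokes the chat function as `fn(message, history, *additional_inputs)`; since `model_choice` comes first in `generate`'s signature, a thin adapter along these lines would be needed (the Dropdown and Textbox widgets are assumptions; only `theme=` and `fn=` appear in the diff):

```python
import gradio as gr

# Hypothetical wiring: gr.ChatInterface calls fn(message, history, *extras),
# so this adapter reorders the arguments into generate's signature.
def chat_fn(message, chat_history, model_choice, system_prompt):
    return generate(model_choice, message, chat_history, system_prompt)

chat_interface = gr.ChatInterface(
    fn=chat_fn,
    theme='ehristoforu/RE_Theme',
    additional_inputs=[
        gr.Dropdown(choices=list(model_details), label="Model"),  # assumed widget
        gr.Textbox(label="System prompt"),                        # assumed widget
    ],
)

if __name__ == "__main__":
    chat_interface.queue().launch()
```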