import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the quantized GGUF weights from the Hub and load them for CPU inference.
llm = Llama(
    model_path=hf_hub_download(
        repo_id="FinancialSupport/saiga-7b-gguf",
        filename="saiga-7b.Q4_K_M.gguf",
    ),
    n_ctx=4096,
)
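
# Q4_K_M is a 4-bit k-quant of the 7B weights, small enough for CPU-only
# inference on the free Hugging Face tier (as the description below notes).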

def generate_text(message, history):
    temp = ""
    # System preamble, in Italian: "Conversation between a human and an AI
    # assistant named saiga-7b".
    input_prompt = "Conversazione tra umano ed un assistente AI di nome saiga-7b\n"
    # Replay earlier turns in the [|Umano|]/[|Assistente|] format the model expects.
    for interaction in history:
        input_prompt += "[|Umano|] " + interaction[0] + "\n"
        input_prompt += "[|Assistente|]" + interaction[1] + "\n"
    input_prompt += "[|Umano|] " + message + "\n[|Assistente|]"
    print(input_prompt)
    # Stream the completion with conservative sampling; the turn markers double
    # as stop sequences so the model does not write the next user turn itself.
    output = llm(
        input_prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        stop=[
            "[|Umano|]",
            "[|Assistente|]",
        ],
        stream=True,
    )
    # Each streamed chunk carries one text fragment; accumulate and yield the
    # growing reply so Gradio renders it incrementally.
    for out in output:
        temp += out["choices"][0]["text"]
        yield temp

demo = gr.ChatInterface(
    generate_text,
    title="saiga-7b running on CPU (quantized Q4_K)",
    description="This is a quantized version of saiga-7b running on CPU (very slow). It is less capable than the original model, but it can run even on the free tier of Hugging Face.",
    # Example prompts, in Italian: three recipe ideas with pistachios, a home
    # workout plan, and a poem about the new AI called cerbero-7b.
    examples=[
        "Dammi 3 idee di ricette che posso fare con i pistacchi",
        "Prepara un piano di esercizi da poter fare a casa",
        "Scrivi una poesia sulla nuova AI chiamata cerbero-7b",
    ],
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)

# Gradio 3.x queue API: serve one request at a time, queue at most five.
demo.queue(concurrency_count=1, max_size=5)
demo.launch()
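
# To run locally (assumed setup, not shown in the original):
#   pip install "gradio<4" llama-cpp-python huggingface_hub
#   python app.py
# Gradio 3.x is assumed because ChatInterface's retry_btn/undo_btn/clear_btn
# and queue(concurrency_count=...) changed in later Gradio releases.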