import os
import urllib.request

import gradio as gr
from llama_cpp import Llama


def download_file(file_link, filename):
    # Checks if the file already exists before downloading
    if not os.path.isfile(filename):
        urllib.request.urlretrieve(file_link, filename)
        print("File downloaded successfully.")
    else:
        print("File already exists.")

# Downloading the GGUF model from Hugging Face
ggml_model_path = "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_S.gguf"
filename = "starling-lm-7b-alpha.Q4_K_S.gguf"
download_file(ggml_model_path, filename)

# Load the model with a 512-token context window and a prompt-processing batch size of 126
llm = Llama(model_path=filename, n_ctx=512, n_batch=126)

def create_prompt(text):
    # Starling-LM uses the OpenChat-style chat template: a user turn followed by the assistant turn marker
    prompt = f"""GPT4 Correct User: {text}<|end_of_turn|>GPT4 Correct Assistant:"""
    return prompt

def generate_text(prompt="Who is the CEO of Apple?"):
    input_text = create_prompt(prompt)
    output = llm(
        input_text,
        max_tokens=256,
        temperature=0.1,
        top_p=0.5,
        echo=False,
        stop=["#"],
    )
    output_text = output["choices"][0]["text"].strip()
    # Remove prompt echo from the generated text
    cleaned_output_text = output_text.replace(prompt, "")
    return cleaned_output_text

description = "Starling-7B GGUF"

gradio_interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Starling-7B GGUF",
    description=description,
)
gradio_interface.launch()
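
# A minimal sanity check, kept as a comment so it does not interfere with the Space
# (hypothetical usage, not part of the original app): calling generate_text() directly
# exercises the downloaded model without going through the Gradio UI.
#
#     print(generate_text("Who is the CEO of Apple?"))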