import os
import logging

import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load the model using specified environment variables or default values
llm = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "suko/Meta-Llama-3-8B-CHT"),
        filename=os.environ.get("MODEL_FILE", "Meta-Llama-3-8B-CHT-Q4-unsloth.Q4_K_M.gguf"),
    ),
    n_ctx=2048,
    # n_gpu_layers=50,  # Adjust n_gpu_layers based on your VRAM capacity
)
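# The repo/file above are only defaults; a different GGUF can be selected at
# launch via the environment variables, e.g. (hypothetical repo/file names):
#   REPO_ID=some-user/some-model-GGUF MODEL_FILE=model.Q4_K_M.gguf python app.py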
def generate_text(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    try:
        logger.info("Starting text generation...")
        # Build the prompt in the Llama 3 chat format:
        #   <|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|>
        input_prompt = "<|begin_of_text|>"
        input_prompt += f"<|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>"
        for user_turn, assistant_turn in history:
            input_prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_turn}<|eot_id|>"
            input_prompt += f"<|start_header_id|>assistant<|end_header_id|>\n\n{assistant_turn}<|eot_id|>"
        input_prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|>"
        input_prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
        logger.info("Input prompt constructed.")
        # Call the model to generate the output
        output = llm(
            input_prompt,
            temperature=temperature,
            top_p=top_p,
            top_k=40,
            repeat_penalty=1.1,
            max_tokens=max_tokens,
            stop=[
                "<|eot_id|>",       # Llama 3 end-of-turn token
                "<|end_of_text|>",  # Llama 3 end-of-sequence token
            ],
        )
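        # llama-cpp-python can also stream tokens as they are generated; a
        # minimal sketch, assuming the same sampling parameters as above
        # (gr.ChatInterface accepts a generator, so `yield` replaces `return`):
        #
        #   partial = ""
        #   for chunk in llm(input_prompt, stream=True, max_tokens=max_tokens,
        #                    temperature=temperature, top_p=top_p,
        #                    stop=["<|eot_id|>", "<|end_of_text|>"]):
        #       partial += chunk["choices"][0]["text"]
        #       yield partial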
logger.info(f"Output received: {output}") | |
logger.info(f"Type of output: {type(output)}") | |
response_text = "" | |
if isinstance(output, dict) and 'choices' in output: | |
for choice in output['choices']: | |
response_text += choice.get('text', '') | |
logger.info("Generated text: " + response_text) | |
else: | |
logger.error("Unexpected output format or no output generated") | |
return response_text if response_text else "No response generated." | |
except Exception as e: | |
logger.error(f"An error occurred: {e}") | |
return "Sorry, something went wrong." | |
# Define the Gradio interface
demo = gr.ChatInterface(
    generate_text,
    title="Llama 3 8B CHT",
    description="Running an LLM with https://github.com/abetlen/llama-cpp-python",
    examples=[
        ['How to set up a human base on Mars? Give a short answer.'],
        ['Explain the theory of relativity to me like I’m 8 years old.'],
        ['中秋節的由來?'],  # "What is the origin of the Mid-Autumn Festival?"
        ['告訴我有關Taylor Swift的事情'],  # "Tell me about Taylor Swift."
    ],
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()
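# Dependencies implied by the imports above (a typical requirements.txt for
# this Space; version pins are not specified by the original):
#   gradio
#   llama-cpp-python
#   huggingface_hub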