import gradio as gr

from confidence import run_nli  # local helper module, not shown here; a hypothetical stub follows the script
DESCRIPTION = """\
# Llama-2 13B Chat

This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate it to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).

For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).

Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).

For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
"""
def greet(query, history):
    # `history` is supplied by gr.ChatInterface; only the latest query is used.
    results = run_nli(query, sample_size=3)
    return results
    # return "this is the result"
sample_list = [
    "Tell me something about Albert Einstein, e.g., a short bio with birth date and birth place",
    "Tell me something about Lihu Chen, e.g., a short bio with birth date and birth place",
]
iface = gr.ChatInterface(
    fn=greet,
    stop_btn=None,
    # inputs="text",   # leftover from a plain gr.Interface version;
    # outputs="text",  # gr.ChatInterface does not take inputs/outputs
    examples=sample_list,
    cache_examples=True,
)
with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    iface.render()
    # gr.Markdown(LICENSE)

demo.launch()  # launch the Blocks app that wraps the description and the rendered chat interface
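
A minimal stand-in for the local `confidence` module, which is not included in this listing; its interface is assumed purely from the call above (a query string plus a `sample_size` keyword, returning text for the chat window). Saved as `confidence.py` next to app.py, a stub like this lets the demo run end to end without the real NLI-based confidence code:

def run_nli(query: str, sample_size: int = 3) -> str:
    # Placeholder (hypothetical): echo the query instead of sampling
    # `sample_size` generations and scoring them with an NLI model.
    return f"[stub] query: {query} (sample_size={sample_size})"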