import gradio as gr
import os
from langchain_huggingface import HuggingFaceEndpoint

api_key = os.getenv("HUGGINGFACE_API_KEY")
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    task="text-generation",
    max_new_tokens=512,
    do_sample=True,  # sampling must be enabled for the Temperature slider below to have any effect
    huggingfacehub_api_token=api_key  # HuggingFaceEndpoint expects huggingfacehub_api_token, not api_key
)

def preprocess_messages(message: str, history: list, system_prompt: str) -> str:
    # Flatten the system prompt, prior turns, and the new message into a single prompt.
    # Assumes Gradio's default tuple-style history: a list of (user, assistant) pairs.
    turns = "".join(f"User: {u}\nAssistant: {a}\n" for u, a in history)
    return f"{system_prompt}\n{turns}User: {message}\nAssistant:"
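
# Sketch (not part of the original app): Meta-Llama-3-*-Instruct models are trained on a
# specific chat template, so a prompt built with the <|start_header_id|>/<|eot_id|> markers
# may follow instructions more reliably than the plain "User:" format above. This alternative,
# hypothetical helper also assumes Gradio's tuple-style history of (user, assistant) pairs.
def preprocess_messages_llama3(message: str, history: list, system_prompt: str) -> str:
    prompt = f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>"
    for user_msg, bot_msg in history:
        prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_msg}<|eot_id|>"
        prompt += f"<|start_header_id|>assistant<|end_header_id|>\n\n{bot_msg}<|eot_id|>"
    # Leave the prompt open at the assistant header so the model generates the reply
    prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|>"
    return prompt + "<|start_header_id|>assistant<|end_header_id|>\n\n"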

def answer(message: str, history: list, system_prompt: str, max_new_tokens: int, temperature: float):
    prompt = preprocess_messages(message, history, system_prompt)
    out = llm.invoke(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
    return out

gr.ChatInterface(
    fn=answer,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    title="LLAMA3 Chat",
    description="Chat with LLAMA3",
    theme="soft",
    additional_inputs=[
        gr.Textbox(value="Answer all questions as a very smart AI.", label="System Prompt"),
        gr.Slider(minimum=512, maximum=4096, value=512, label="Max New Tokens"),
        gr.Slider(minimum=0.1, maximum=1, value=0.7, label="Temperature")  # inference endpoints reject temperature=0, so keep the minimum above zero
    ]
).launch(debug=True)
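
# To try this locally (a sketch; assumes the script is saved as app.py and that your
# Hugging Face token has been granted access to the gated meta-llama repository):
#   pip install gradio langchain-huggingface
#   export HUGGINGFACE_API_KEY=hf_...   # read via os.getenv() above
#   python app.py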