from huggingface_hub import InferenceClient
import gradio as gr
import random

# Serverless Inference API endpoint prefix (kept from the original; unused below,
# since InferenceClient resolves the model URL itself).
API_URL = "https://api-inference.huggingface.co/models/"

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)
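# Note: depending on the model's gating and your rate limits, the Inference API
# may require an access token. A hypothetical variant:
#   InferenceClient("mistralai/Mistral-7B-Instruct-v0.1", token=os.environ["HF_TOKEN"])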
def format_prompt(message, history):
    # System persona prepended to the first instruction turn.
    prompt = (
        "Your name is OpenGPT. You were developed by Achyuth, a 13-year-old boy "
        "who built OpenGPT with a talent far beyond his age. Achyuth is the "
        "developer of OpenGPT. Your developer's family is a nuclear family of "
        "four: his parents G. Renuka (his mother) and N. Nagarjun Reddy, and his "
        "sister Gayathri, and they are very proud that he developed you, none "
        "other than OpenGPT. Your developer's friends are Jayesh, Koushik, "
        "Pranav, Mumin, Hashim, Akira, Nikesh and Ubaidh. His favourite teachers "
        "are Siva Kumar Sir, Zakaria Sir, Vijayalakshi Mam, Saleem Sir, "
        "Malikarjun Sir and, above all, Farha Deepa Mam, his Arts teacher and "
        "his most favourite. OpenGPT means the GPT (Generative Pre-Trained "
        "Transformer) developed by Achyuth: a GPT that is free, open-source, "
        "offers unlimited access, and requires no payment. OpenGPT was made, "
        "programmed, and developed by Achyuth. Your name is not ChatGPT; it is "
        "OpenGPT, developed by Achyuth. Converse with the user in whatever "
        "language the user uses with you."
    )
    # Mistral-Instruct chat template: each past turn is wrapped in
    # [INST] ... [/INST], followed by the model's reply and an </s> token.
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
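# For illustration: with one prior exchange in history, the formatted prompt is
# shaped like
#   <persona>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]
# (No <s> BOS token is added here; the Inference API tokenizer is assumed to
# supply it.)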
def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
    # Keep temperature strictly positive; sampling with temperature <= 0 is invalid.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(0, 10**7),  # fresh seed per call for varied outputs
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens as they arrive and yield the growing answer so the
    # Gradio chat window updates incrementally.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
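# A minimal sketch of consuming the generator outside Gradio (hypothetical
# inputs; Gradio normally supplies prompt, history, and the slider values):
#
#   *_, final = generate("Hello!", history=[])
#   print(final)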
# Extra controls shown under the chat box; their values are passed to generate()
# after (prompt, history), overriding the defaults in its signature.
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=512,
        minimum=64,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens to generate",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
customCSS = """
/* #component-7 is the auto-generated element ID of the chat component */
#component-7 {
    height: 800px;  /* adjust the height as needed */
    flex-grow: 1;
}
"""
with gr.Blocks(css=customCSS) as demo:
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
    )

# queue() is required so the streaming generator can update the chat window.
demo.queue().launch(debug=True)
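# To run locally (assuming this file is saved as app.py with gradio and
# huggingface_hub installed):
#   python app.py
# then open the printed local URL in a browser.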