import gradio as gr
from huggingface_hub import InferenceClient

# The [INST] ... [/INST] prompt format (and the app title below) target the
# instruction-tuned checkpoint, so use the Instruct model rather than the base
# "mistralai/Mistral-7B-v0.1".
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

# Fixed temperature value
fixed_temperature = 0.9  # You can adjust this value as needed


def generate(prompt, max_new_tokens=6056, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion for `prompt`, yielding the accumulated text so far."""
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=fixed_temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Wrap the prompt in the Mistral Instruct chat template.
    formatted_prompt = f"[INST] {prompt} [/INST]"

    # With stream=True and details=True, the client yields token-level events.
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )

    output = ""
    for response in stream:
        output += response.token.text
        yield output  # Gradio refreshes the output box with each partial result.


iface = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
    title="Mistralai-Mistral-7B-Instruct Chat",
    live=False,  # Set live to False to add a "Submit" button
)

iface.launch()
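For a quick sanity check without the web UI, the streaming generator can also be consumed directly in the same session, before iface.launch() is called. This is a minimal sketch: the prompt string is just an example, and it assumes a valid Hugging Face API token is configured for the InferenceClient (e.g. via huggingface-cli login or the HF_TOKEN environment variable).

final_text = ""
for partial in generate("Explain gradient descent in one paragraph."):
    final_text = partial  # each yield is the full text produced so far
print(final_text)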