import os

import streamlit as st
from huggingface_hub import InferenceClient

# Client for the hosted Inference API endpoint of the Mistral instruct model
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
# Fall back to a default if SECRET_PROMPT is not set (on Hugging Face Spaces it
# can be configured as a Space secret, which is exposed as an environment variable)
secret_prompt = os.getenv("SECRET_PROMPT", "Default prompt: ")

def format_prompt(message, history):
    prompt = secret_prompt
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
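
# For illustration (example values, not from the app): with
# history = [("Hi", "Hello!")] and message = "How are you?", format_prompt returns
#   "<secret prompt>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"
# i.e. the [INST] ... [/INST] chat format the Mistral instruct models expect.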

def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion from the model, yielding the accumulated text after each token."""
    temperature = float(temperature)
    # The backend requires a strictly positive temperature when sampling
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)

    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
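
# Note: generate() is a generator that yields the accumulated text after each
# token rather than the token alone, so a caller only has to re-render the most
# recent value to display streaming output.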

st.title("Einfach.Mistral 7B v0.3")

# Streamlit reruns the whole script on every interaction, so keep the chat
# history in session state; a plain `history = []` would be reset on each rerun
if "history" not in st.session_state:
    st.session_state.history = []
history = st.session_state.history

with st.sidebar:
    temperature = st.slider(
        "Temperature",
        value=0.9,
        min_value=0.0,
        max_value=1.0,
        step=0.05,
        help="Higher values produce more diverse outputs",
    )
    max_new_tokens = st.slider(
        "Max new tokens",
        value=256,
        min_value=0,
        max_value=1024,
        step=64,
        help="The maximum number of new tokens to generate",
    )
    top_p = st.slider(
        "Top-p (nucleus sampling)",
        value=0.90,
        min_value=0.0,
        max_value=1.0,
        step=0.05,
        help="Higher values sample more low-probability tokens",
    )
    repetition_penalty = st.slider(
        "Repetition penalty",
        value=1.2,
        min_value=1.0,
        max_value=2.0,
        step=0.05,
        help="Penalize repeated tokens",
    )

message = st.text_input("Your message:", "")

if st.button("Generate"):
    if message:
        # Stream partial output into a placeholder and render the final text once;
        # recreating st.text_area on every yielded chunk would raise DuplicateWidgetID
        placeholder = st.empty()
        output = ""
        for output in generate(message, history, temperature, max_new_tokens, top_p, repetition_penalty):
            placeholder.markdown(output)
        placeholder.text_area("Generated Text", value=output, height=400)
        # Record the completed exchange once, not once per streamed chunk
        history.append((message, output))
    else:
        st.warning("Please enter a message.")
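
# Assumed local setup (not part of the original Space): pip install streamlit
# huggingface_hub, export SECRET_PROMPT (and HF_TOKEN if the Inference API asks
# for authentication), then start the app with: streamlit run app.py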