import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = "dumb-dev/TinyLlama-1.1B-Chat-rust-cpp-encodings"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_text(prompt, top_p, top_k, max_tokens, temperature):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Passing the attention mask avoids a warning and keeps padding behavior well-defined
            attention_mask=inputs["attention_mask"],
            do_sample=True,
            # max_new_tokens budgets only the generated tokens, so a long prompt
            # cannot eat into (or exceed) the requested length
            max_new_tokens=int(max_tokens),
            top_p=top_p,
            # Gradio sliders return floats; generate() expects an int for top_k
            top_k=int(top_k),
            temperature=temperature,
        )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text


# Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
        gr.Slider(0, 1, step=0.01, value=0.9, label="Top-p"),
        gr.Slider(0, 100, step=1, value=50, label="Top-k"),
        gr.Slider(1, 512, step=1, value=100, label="Max tokens"),
        gr.Slider(0.1, 2, step=0.1, value=1, label="Temperature"),
    ],
    outputs="text",
    title="TinyLlama-1.1B Chat",
    description="Generate text using my fine-tuned version of the TinyLlama-1.1B-Chat model with adjustable parameters.",
)

if __name__ == "__main__":
    interface.launch()