import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Single source of truth for the checkpoint id (was duplicated inline).
MODEL_NAME = "mistralai/Codestral-22B-v0.1"


@st.cache_resource(show_spinner=False)
def _load_generator():
    """Build and cache the text-generation pipeline.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the 22B-parameter model would be reloaded on each
    rerun. ``st.cache_resource`` keeps one pipeline per process.

    Returns:
        A Hugging Face ``text-generation`` pipeline for ``MODEL_NAME``.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


def main():
    """Render the Streamlit UI and run Codestral inference on demand."""
    st.title("Codestral Inference with Hugging Face")

    # Cached: only the first run actually loads the model.
    st.text("Loading model...")
    generator = _load_generator()
    st.success("Model loaded successfully!")

    user_input = st.text_area(
        "Enter your instruction", "Explain Machine Learning to me in a nutshell."
    )
    max_tokens = st.slider("Max Tokens", min_value=10, max_value=500, value=64)
    temperature = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7)

    if st.button("Generate"):
        with st.spinner("Generating response..."):
            result = generate_response(generator, user_input, max_tokens, temperature)
        st.success("Response generated!")
        st.text_area("Generated Response", result, height=200)


def generate_response(generator, user_input, max_tokens, temperature):
    """Run sampled text generation and return the generated string.

    Args:
        generator: A Hugging Face ``text-generation`` pipeline (or any
            callable with the same signature).
        user_input: Prompt text to continue.
        max_tokens: Cap on newly generated tokens (``max_new_tokens``).
        temperature: Sampling temperature; sampling is always enabled.

    Returns:
        The ``generated_text`` field of the first returned sequence.
    """
    response = generator(
        user_input, max_new_tokens=max_tokens, do_sample=True, temperature=temperature
    )
    return response[0]['generated_text']


if __name__ == "__main__":
    main()