Spaces: Runtime error
import streamlit as st
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Reference the model by its Hub repo id (or a local directory), not a
# pytorch_model.bin file path; from_pretrained resolves Hub ids and
# downloads the weights itself, so no os.path.exists check is needed.
model_path = "Cegil/code_generation"  # Update to your model's actual repo id or directory

# Load the model and tokenizer
code_gen_model = T5ForConditionalGeneration.from_pretrained(model_path)
tokenizer = T5Tokenizer.from_pretrained(model_path)

# Streamlit UI
st.title("Code Generation Interface")

# Input prompt for code generation
prompt = st.text_input("Enter your code generation prompt:", "Example prompt")

# Button to generate code
if st.button("Generate Code"):
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate code using the code generation model (no gradients needed for inference)
    with torch.no_grad():
        output = code_gen_model.generate(inputs["input_ids"])
    # Decode the output to get the generated code
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    # Display the generated code in a formatted way
    st.write("Generated Code:")
    st.code(generated_code)
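If the default decoding settings give truncated or low-quality output, the generation step can be parameterized. The following is a minimal sketch assuming the code_gen_model and tokenizer objects loaded above; the max_new_tokens and num_beams values are illustrative assumptions, not settings taken from the original Space.

    # Sketch of a parameterized generation step (values are illustrative)
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output = code_gen_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=256,   # cap the length of the generated snippet
            num_beams=4,          # beam search instead of greedy decoding
            early_stopping=True,  # stop once all beams emit end-of-sequence
        )
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)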