chandrujobs committed on
Commit
928e881
·
verified ·
1 Parent(s): 2339211

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -21
app.py CHANGED
@@ -1,31 +1,30 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
- # Load the model and tokenizer from Hugging Face
5
- model_name = "Salesforce/codet5-small"
6
- tokenizer = AutoTokenizer.from_pretrained(model_name)
7
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
 
8
 
9
- # Streamlit UI
10
- st.title("Code Generator")
11
- st.write("Generate code snippets from natural language prompts using CodeT5!")
12
 
13
- # Input for natural language prompt
14
- prompt = st.text_area("Enter your coding task:", placeholder="Write a Python function to calculate the factorial of a number.")
 
15
 
16
- # Slider to control output length
17
- max_length = st.slider("Maximum length of generated code:", 20, 200, 50)
18
 
19
- # Button to trigger code generation
20
  if st.button("Generate Code"):
21
  if prompt.strip():
22
- # Tokenize and generate code
23
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
24
- outputs = model.generate(inputs.input_ids, max_length=max_length, num_beams=4, early_stopping=True)
25
- generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
26
-
27
- # Display generated code
28
- st.write("### Generated Code:")
29
- st.code(generated_code, language="python")
30
  else:
31
- st.warning("Please enter a prompt to generate code.")
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
@st.cache_resource
def load_model(model_name: str = "Salesforce/codet5-small"):
    """Load and cache the tokenizer and seq2seq model.

    The model name is parameterized (defaulting to CodeT5-small) so the
    same cached loader can serve other seq2seq checkpoints; st.cache_resource
    caches one (tokenizer, model) pair per distinct model_name across reruns.

    Args:
        model_name: Hugging Face Hub identifier of the checkpoint to load.

    Returns:
        A (tokenizer, model) tuple ready for generation.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return tokenizer, model
10
 
11
# Load the model and tokenizer once per session (cached by st.cache_resource).
with st.spinner("Loading model..."):
    tokenizer, model = load_model()

# --- Streamlit UI ---
st.title("Code Generator with Hugging Face")
st.write("Generate code snippets from natural language prompts!")

# Natural-language task description and output-length control.
prompt = st.text_area("Enter your coding task:", placeholder="Write a Python function to calculate factorial.")
max_length = st.slider("Select maximum length of generated code:", min_value=20, max_value=200, value=50, step=10)

if st.button("Generate Code"):
    if prompt.strip():
        with st.spinner("Generating code..."):
            inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
            # Pass the full tokenizer output (input_ids AND attention_mask) so
            # generation ignores padding tokens; passing only input_ids drops
            # the mask the tokenizer produced with padding=True.
            outputs = model.generate(**inputs, max_length=max_length, num_beams=4, early_stopping=True)
            generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.text_area("Generated Code:", generated_code, height=200)
    else:
        st.warning("Please enter a prompt!")