kasaliyusufoloriegbe committed
Commit 94bc0e3
1 Parent(s): e4cea83

update app.py

Files changed (1)
  1. app.py +34 -1
app.py CHANGED
@@ -1,3 +1,36 @@
  import gradio as gr

- gr.load("models/Salesforce/codet5p-220m").launch()
+ gr.load("models/Salesforce/codet5p-220m").launch()
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import torch
+
+ def main():
+     st.title("Python Code Generation App")
+
+     # Load the model and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained("models/Salesforce/codet5p-220m")
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     model = AutoModelForSeq2SeqLM.from_pretrained("models/Salesforce/codet5p-220m").to(device)
+
+     # Get user input
+     st.subheader("Instructions")
+     st.write("Use the following format to enter prompts: Write python code for SBERT vector embedding of a sentence")
+     st.write("")
+     query = st.text_input("Enter a prompt here: ")
+     if st.button("Generate Code"):
+         if query.strip().lower() == 'exit':
+             st.stop()
+         else:
+             # Generate summary
+             inputs = tokenizer(f"summarize:{query}", return_tensors="pt")
+             inputs = {k: v.to(device) for k, v in inputs.items()}
+             output = model.generate(**inputs, max_length=750)
+             generated_text = tokenizer.decode(output[0]).replace("summarize:", "")
+
+             # Display the generated summary
+             st.subheader("Generated Code:")
+             st.code(generated_text)
+
+ if __name__ == "__main__":
+     main()
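
The rewritten app.py wraps the CodeT5+ 220M checkpoint in a Streamlit UI: it prefixes the user prompt with "summarize:", runs seq2seq generation on GPU when available, and renders the decoded output with st.code. Below is a minimal, self-contained sketch of just that generation step, outside Streamlit; note that the Hub checkpoint id "Salesforce/codet5p-220m" and skip_special_tokens=True are assumptions of the sketch, not part of the commit, which loads from the local path "models/Salesforce/codet5p-220m" and strips the "summarize:" prefix from the raw decoded string instead.

    # Minimal sketch of the generation step in the new app.py (assumptions noted below).
    import torch
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    # Assumption: the public Hub checkpoint; the Space itself loads "models/Salesforce/codet5p-220m".
    checkpoint = "Salesforce/codet5p-220m"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint).to(device)

    prompt = "Write python code for SBERT vector embedding of a sentence"
    inputs = tokenizer(f"summarize:{prompt}", return_tensors="pt").to(device)

    output = model.generate(**inputs, max_length=750)  # same max_length as the commit

    # Assumption: skip_special_tokens=True drops <pad>/</s> markers; the committed code
    # instead calls .replace("summarize:", "") on the raw decoded string.
    print(tokenizer.decode(output[0], skip_special_tokens=True))

Calling .to(device) on the BatchEncoding keeps the sketch shorter than the per-tensor dict comprehension in the committed version; both simply move the inputs to the same device as the model.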