SHAMIL SHAHBAZ AWAN
committed on
Update app.py
app.py
CHANGED
@@ -6,7 +6,7 @@ from accelerate import init_empty_weights, load_checkpoint_and_dispatch
 # Load the model using Accelerate for memory optimization
 @st.cache_resource()
 def load_model():
-    MODEL_NAME = "
+    MODEL_NAME = "huggingface/CodeGen-2B"  # Updated model name
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 
     # Load model with accelerate to optimize for memory usage
@@ -30,8 +30,8 @@ def load_model():
 code_generator = load_model()
 
 # Streamlit UI
-st.title("
-st.subheader("Generate code snippets using
+st.title("CodeGen-2B Code Bot 🚀")
+st.subheader("Generate code snippets using Hugging Face CodeGen-2B")
 
 # User input
 prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'): ")
@@ -41,7 +41,7 @@ if st.button("Generate Code"):
     if prompt.strip():
         st.info("Generating code... Please wait ⏳")
         try:
-            # Generate code using the
+            # Generate code using the CodeGen-2B model
             response = code_generator(
                 prompt,
                 max_length=512,  # Increase for longer code generation
@@ -57,4 +57,4 @@ if st.button("Generate Code"):
     else:
         st.warning("Please enter a prompt.")
 
-st.caption("
+st.caption("Created by Shamil")