SHAMIL SHAHBAZ AWAN committed
Commit b1fca7a · verified · 1 Parent(s): 63f5e34

Create app.py

Files changed (1):
  app.py +48 -0
app.py ADDED
@@ -0,0 +1,48 @@
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import torch
+
+ # Load the fine-tuned MFTCoder model
+ @st.cache_resource()
+ def load_model():
+     MODEL_NAME = "path-to-your-finetuned-model"  # Replace with your MFTCoder fine-tuned model path
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_NAME,
+         torch_dtype=torch.float16,  # Use float16 to cut memory use and speed up inference
+         device_map="auto"  # Automatically place model layers on available GPU/CPU
+     )
+     return pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+ # Initialize the pipeline once at startup
+ code_generator = load_model()
+
+ # Streamlit UI
+ st.title("MFTCoder-powered Code Bot 🚀")
+ st.subheader("Generate high-quality code snippets with fine-tuned CodeLlama!")
+
+ # User input
+ prompt = st.text_area("Enter a code prompt to generate code:")
+
+ # Generate code
+ if st.button("Generate Code"):
+     if prompt.strip():
+         st.info("Generating code... Please wait ⏳")
+         try:
+             # Generate code using the fine-tuned MFTCoder model
+             response = code_generator(
+                 prompt,
+                 max_length=256,  # Adjust as needed
+                 temperature=0.3,  # Lower temperature for more deterministic output
+                 num_return_sequences=1,
+                 do_sample=True
+             )
+             generated_code = response[0]["generated_text"]
+             # Display the code output
+             st.code(generated_code, language="python")  # Default to Python for generated output
+         except Exception as e:
+             st.error(f"Error: {str(e)}")
+     else:
+         st.warning("Please enter a prompt.")
+
+ st.caption("Created by Shamil")
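A minimal sketch of how the same text-generation pipeline could be exercised outside Streamlit, e.g. to smoke-test a checkpoint before wiring it into the app. The model path below is the same placeholder used in app.py and must be replaced with a real fine-tuned checkpoint; `max_new_tokens` is used instead of `max_length` so the cap applies only to newly generated tokens rather than prompt plus output.

    # Standalone smoke test (sketch): assumes the placeholder model path
    # has been replaced with a real fine-tuned MFTCoder checkpoint.
    from transformers import pipeline

    generator = pipeline("text-generation", model="path-to-your-finetuned-model")
    result = generator(
        "def fibonacci(n):",
        max_new_tokens=128,   # caps only the generated tokens, unlike max_length
        temperature=0.3,
        do_sample=True,
        num_return_sequences=1,
    )
    print(result[0]["generated_text"])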