Naveen288 committed on
Commit
5926735
·
verified ·
1 Parent(s): 6c056ef

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Generate code with Salesforce/codegen-350M-multi and push the model to the Hub.

Dependencies (install from a shell, NOT inside this script):
    pip install transformers huggingface_hub gradio torch
"""

import os

from huggingface_hub import HfApi, login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with the Hugging Face Hub.
# Read the token from the environment instead of hard-coding a secret in source.
login(os.environ["HF_TOKEN"])

# Load the tokenizer and model.
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-multi")

# Prompt for code generation.
text = "def bubble_sort(list_elements):"

# Tokenize the prompt into model input IDs.
input_ids = tokenizer(text, return_tensors="pt").input_ids

# Generate a completion for the prompt.
generated_ids = model.generate(
    input_ids,
    max_length=200,  # Adjust as needed
    num_return_sequences=1,  # Number of generated sequences to return
    pad_token_id=tokenizer.eos_token_id,  # Handle padding tokens
)

# Decode the generated tokens back to text.
generated_code = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

print(generated_code)

# Replace with your Hugging Face username and repo name.
repo_name = "your-username/codegen-350M-multi-bubble-sort"

# Initialize the API client used to create the target repository.
api = HfApi()

# Create the repo if it does not already exist.
api.create_repo(repo_name, exist_ok=True)

# Push the model and tokenizer to the Hugging Face Hub.
model.push_to_hub(repo_name)
tokenizer.push_to_hub(repo_name)

print(f"Model and tokenizer pushed to Hugging Face Hub under: {repo_name}")