Spaces:
Paused
Paused
testing lyric generation again
Browse files
LyricGeneratorModel.py
CHANGED
@@ -1,14 +1,19 @@
|
|
1 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
2 |
|
|
|
|
|
3 |
|
4 |
class LyricGeneratorModel:
|
5 |
def __init__(self, repo_id: str):
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
|
|
|
|
|
|
12 |
|
13 |
def generate_lyrics(self, prompt: str, max_length: int):
|
14 |
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids
|
|
|
1 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
2 |
|
3 |
+
from peft import LoraConfig, get_peft_model, PeftModel, PeftConfig
|
4 |
+
|
5 |
|
6 |
class LyricGeneratorModel:
|
7 |
def __init__(self, repo_id: str):
    """Build the generator from a PEFT adapter hosted at *repo_id*.

    Resolves the adapter's base model from its PeftConfig, loads that base
    causal-LM in 8-bit with automatic device placement, then wraps it with
    the LoRA/PEFT adapter weights.

    Args:
        repo_id: Hub repo (or local path) containing the PEFT adapter.
    """
    # The adapter config records which base checkpoint it was trained on.
    peft_cfg = PeftConfig.from_pretrained(repo_id)
    base_name = peft_cfg.base_model_name_or_path

    # 8-bit load (bitsandbytes) + device_map="auto" to fit large bases;
    # NOTE(review): assumes a CUDA-capable environment — confirm at deploy.
    base_model = AutoModelForCausalLM.from_pretrained(
        base_name,
        return_dict=True,
        load_in_8bit=True,
        device_map="auto",
    )

    # Tokenizer comes from the base checkpoint, not the adapter repo.
    self.tokenizer = AutoTokenizer.from_pretrained(base_name)
    # Attach the adapter weights on top of the quantized base model.
    self.model = PeftModel.from_pretrained(base_model, repo_id)
|
17 |
|
18 |
def generate_lyrics(self, prompt: str, max_length: int):
|
19 |
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids
|
__pycache__/ArtistCoherencyModel.cpython-312.pyc
ADDED
Binary file (5.43 kB). View file
|
|
__pycache__/ArtistCoherencyModel.cpython-39.pyc
ADDED
Binary file (3.4 kB). View file
|
|
__pycache__/FFNN.cpython-312.pyc
ADDED
Binary file (5.49 kB). View file
|
|
__pycache__/LyricGeneratorModel.cpython-312.pyc
ADDED
Binary file (1.68 kB). View file
|
|
requirements.txt
CHANGED
@@ -2,4 +2,5 @@ transformers==4.40.0
|
|
2 |
huggingface_hub==0.22.2
|
3 |
torch==2.2.2
|
4 |
numpy==1.26.4
|
5 |
-
pandas==2.2.2
|
|
|
|
2 |
huggingface_hub==0.22.2
|
3 |
torch==2.2.2
|
4 |
numpy==1.26.4
|
5 |
+
pandas==2.2.2
|
6 |
+
peft==0.10.0
|