more LLM load for home windows
- app.py +10 -2
- requirements.txt +6 -1
app.py
CHANGED
@@ -6,8 +6,13 @@ import torch
 from huggingface_hub import login
 from transformers import pipeline
 from transformers import GPT2Tokenizer, GPT2LMHeadModel,set_seed
+from transformers import AutoTokenizer, AutoModelWithLMHead,AutoModelForSeq2SeqLM
 import datetime
-
+import nltk
+nltk.download('stopwords')
+nltk.download('punctuation')
+nltk.download('punkt')
+from rake_nltk import Rake
 
 login(os.environ["HF_TOKEN"])
 
@@ -28,7 +33,10 @@ print("loading models")
 tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\language_models_windows")
 original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
 untethered_model = GPT2LMHeadModel.from_pretrained('zmbfeng/untethered_20240225_epochs_500',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
-
+question_generation_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
+question_generation_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
+paraphrase_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
+paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
 default_temperature=0.01
 default_seed=43
 def create_response(input_str,
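The first hunk pulls in NLTK resources and rake_nltk, presumably for keyword extraction from user input; how the Space actually uses Rake is outside this diff. A minimal sketch of the rake_nltk API, assuming its English-language defaults:

```python
import nltk

# rake_nltk's defaults rely on these two NLTK resources, which is why
# the commit downloads them up front.
nltk.download('stopwords')
nltk.download('punkt')

from rake_nltk import Rake

rake = Rake()  # defaults to NLTK's English stopwords and the punkt tokenizer
rake.extract_keywords_from_text("What is the meaning of life?")
print(rake.get_ranked_phrases())  # candidate keyphrases, highest-scoring first
```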
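The second hunk loads two extra T5 checkpoints alongside the DialoGPT models: a question generator and a paraphraser. Their wiring into create_response is not part of this diff; the sketch below shows one way to invoke both, assuming the prompt formats described on the respective model cards (an "answer: ... context: ..." prompt for the question generator, a "paraphrase: " prefix for the paraphraser). The example prompt strings are illustrative, not from the commit.

```python
from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForSeq2SeqLM

# Same checkpoints the commit loads; the Windows cache_dir is omitted here.
qg_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
qg_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
para_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
para_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")

# Question generation: feed the answer plus its surrounding context.
prompt = "answer: 42  context: The answer to life, the universe and everything is 42."
input_ids = qg_tokenizer.encode(prompt, return_tensors="pt")
output = qg_model.generate(input_ids, max_length=64)
print(qg_tokenizer.decode(output[0], skip_special_tokens=True))

# Paraphrasing: prefix the sentence and sample for varied rewrites.
prompt = "paraphrase: What is the meaning of life? </s>"
input_ids = para_tokenizer.encode(prompt, return_tensors="pt")
output = para_model.generate(input_ids, max_length=64, do_sample=True, top_k=120, top_p=0.95)
print(para_tokenizer.decode(output[0], skip_special_tokens=True))
```

Note that AutoModelWithLMHead is deprecated in recent transformers releases; AutoModelForSeq2SeqLM, already used here for the paraphraser, also works for the T5 question generator.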
requirements.txt
CHANGED
@@ -1,4 +1,9 @@
 gradio
 transformers
 gTTS
-torch
+torch
+rake-nltk
+versions
+gensim
+sentencepiece
+protobuf