Increased temperature and added alternate openings
app.py
CHANGED
@@ -13,7 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+# temperature
 """
 Example command with bag of words:
 python examples/run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95
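For context: run_pplm.py steers GPT-2 toward a topic by backpropagating a bag-of-words attribute loss into the model's cached activations and taking small gradient steps (--stepsize) before each token is sampled. A minimal sketch of that objective, with a hypothetical bow_loss helper rather than this repo's actual code:

import torch
import torch.nn.functional as F

def bow_loss(next_token_logits, bow_ids):
    # Negative log of the total probability mass the model assigns to the
    # bag-of-words tokens; PPLM pushes this loss down by perturbing the
    # cached key/value activations --num_iterations times per step.
    probs = F.softmax(next_token_logits, dim=-1)
    return -torch.log(probs[:, bow_ids].sum(dim=-1)).mean()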
@@ -28,7 +28,6 @@ import json
 from operator import add
 from typing import List, Optional, Tuple, Union
 from random import choice, randint
-from matplotlib import use
 import numpy as np
 import torch
 import torch.nn.functional as F
@@ -37,7 +36,6 @@ from tqdm import trange
 from transformers import GPT2Tokenizer
 from transformers.file_utils import cached_path
 from transformers.modeling_gpt2 import GPT2LMHeadModel
-
 from pplm_classification_head import ClassificationHead
 
 PPLM_BOW = 1
@@ -749,8 +747,8 @@ discrim_weights=None
 discrim_meta=None
 class_label=0
 length=100
-stepsize=
-temperature=1.
+stepsize=0.32
+temperature=1.3
 top_k=2
 sample=True
 num_iterations=0
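The temperature bump to 1.3 is the headline change. Temperature divides the logits before the softmax, so values above 1 flatten the next-token distribution and make sampled replies more varied (at the cost of coherence). A minimal sketch of standard temperature plus top-k sampling, not this file's exact loop:

import torch
import torch.nn.functional as F

def sample_next_token(logits, temperature=1.3, top_k=2):
    # T > 1 flattens the distribution (more diverse), T < 1 sharpens it.
    scaled = logits / temperature
    # Restrict sampling to the k most likely tokens, as this app does with top_k=2.
    topk_vals, topk_ids = torch.topk(scaled, top_k, dim=-1)
    probs = F.softmax(topk_vals, dim=-1)
    pick = torch.multinomial(probs, num_samples=1)
    return topk_ids.gather(-1, pick)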
@@ -765,8 +763,8 @@ seed=0
 no_cuda=False
 colorama=False
 verbosity="quiet"
-fp="./paper_code/discrim_models/persoothe_classifier.pt"
-model_fp=None
+fp="./paper_code/discrim_models/persoothe_classifier.pt"
+model_fp=None
 calc_perplexity=False
 is_deep=False
 is_deeper=True
@@ -812,17 +810,22 @@ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
 for param in model.parameters():
     param.requires_grad = False
 
+starters = ["How are you feeling and why?", "Tell me about your day", "What would you like to talk about?"]
 eot_token = "<|endoftext|>"
 
-def get_reply(response, username = None, histories = {}, in_stepsize =
+def get_reply(response, username = None, histories = {}, in_stepsize = 0.32, in_horizon_length = 1, in_num_iterations = 0, in_top_k = 2):
     if username == None or username == "": return "<div class='chatbot'>Enter a username</div>", histories
     stepsize = in_stepsize
     horizon_length = int(in_horizon_length)
     num_iterations = int(in_num_iterations)
     top_k = int(in_top_k)
-    if response.endswith(("bye", "Bye", "bye.", "Bye.", "bye!", "Bye!")):
-
-
+    if response.endswith(("bye", "Bye", "bye.", "Bye.", "bye!", "Bye!","Hello", "Hi", "hello")):
+        starter = choice(starters)
+        histories[username] = starter+"<|endoftext|>"
+        html = "<div class='chatbot'> Chatbot restarted"
+        html += "<div class='msg user'>"+starter+"</div>"
+        html += "</div>"
+        return html, histories
     history = histories.get(username, None)
     convo_hist = (history if history != None else "How are you?<|endoftext|>") + response + eot_token
     # figure out conditioning text
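This block is the "alternate openings" half of the commit: a greeting or goodbye now wipes the user's history and reseeds it with a random starter instead of the old fixed "How are you?", and the same block reappears below as the except: fallback. Stripped of the PPLM call, the reset amounts to the following (reset_chat is a hypothetical name, not a function in this file):

from random import choice

starters = ["How are you feeling and why?", "Tell me about your day", "What would you like to talk about?"]
eot_token = "<|endoftext|>"

def reset_chat(histories, username):
    # Reseed this user's history with a random opener so each restarted
    # conversation begins differently, and report the restart as HTML.
    starter = choice(starters)
    histories[username] = starter + eot_token
    html = "<div class='chatbot'> Chatbot restarted"
    html += "<div class='msg user'>" + starter + "</div></div>"
    return html, histories

histories = {}
html, histories = reset_chat(histories, "alice")  # histories["alice"] now holds the new opener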
@@ -878,8 +881,12 @@ def get_reply(response, username = None, histories = {}, in_stepsize = 5.12, in_
         convo_hist = eot_token.join(convo_hist_split)
 
     except:
-
-
+        starter = choice(starters)
+        histories[username] = starter+"<|endoftext|>"
+        html = "<div class='chatbot'> Chatbot restarted"
+        html += "<div class='msg user'>"+starter+"</div>"
+        html += "</div>"
+        return html, histories
     histories[username] = convo_hist
     return html, histories
 
@@ -893,8 +900,8 @@ css = """
 
 gr.Interface(fn=get_reply,
              theme="default",
-             inputs=[gr.inputs.Textbox(placeholder="How are you?"),
-                     gr.inputs.Textbox(label="Username"),
+             inputs=[gr.inputs.Textbox(placeholder="How are you?"),
+                     gr.inputs.Textbox(label="Username"),
                      "state"],
              outputs=["html", "state"],
-             css=css).launch(debug=True, enable_queue=True)
+             css=css).launch(debug=True, enable_queue=True, share=True)
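The "state" entries in inputs and outputs are what carry the histories dict across calls: Gradio passes the previously returned state back in as get_reply's histories argument on the next submit (and share=True now also exposes a public link). A minimal sketch of that session-state pattern, assuming the legacy Gradio 2.x API this file uses (gr.inputs was removed in Gradio 3):

import gradio as gr

def echo_with_memory(message, state):
    # Gradio hands back whatever was returned as "state" on the previous
    # call (None the first time), giving each session its own memory.
    state = (state or []) + [message]
    return " | ".join(state), state

gr.Interface(fn=echo_with_memory,
             inputs=[gr.inputs.Textbox(placeholder="Say something"), "state"],
             outputs=["text", "state"]).launch()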