osiria committed
Commit
beae39b
1 Parent(s): 247d4d8

Upload 2 files

Files changed (2):
  1. app.py +4 -3
  2. app_it.py +4 -2
app.py CHANGED
@@ -93,7 +93,8 @@ class Prime:
         output = output[0].upper() + output[1:]
         if output[-1] == tokenizer.decode(stop_tokens[0]):
             output = output[:-1]
-        output = re.sub(" \d\.", "\n•", output)
+        if len(re.findall("\d\.", output)) > 1:
+            output = re.sub("\d\.", "<br>•", output)
         return output
 
 model.eval()
@@ -102,7 +103,7 @@ prime = Prime(tokenizer = tokenizer, model = model)
 
 def process_input(user_input, max_tokens, sample, top_k, message):
     return prime.generate(prompt = user_input, message = message,
-                          max_tokens = 150, sample = sample,
+                          max_tokens = max_tokens, sample = sample,
                           top_k = top_k)
 
 
@@ -139,7 +140,7 @@ with gr.Blocks(title="primo", css="footer {visibility: hidden}", theme=gr.themes
             gr.Markdown("<b>options</b>")
             max_tokens = gr.Slider(1, 250, value=150, label="max tokens", info="choose a limit between 1 and 250")
             sample = gr.Checkbox(label="sampling")
-            top_k = gr.Slider(1, 5, step=1, value=1, label="creativity", info="choose a level between 1 and 5")
+            top_k = gr.Slider(1, 5, value=1, label="creativity", info="choose a level between 1 and 5")
             message = gr.Textbox(label="system message", value = "")
             clear = gr.Button("clear chat")
         with gr.Column(scale=8):
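
For reference, a minimal standalone sketch of the new post-processing added to app.py: numbered items ("1.", "2.", ...) are rewritten as HTML bullets only when more than one of them appears, and "<br>" replaces "\n" so the line breaks actually render in the Gradio chat. The helper name bulletize is illustrative and does not exist in the app; inside Prime the same logic runs directly on the decoded model output.

import re

def bulletize(output):
    # Mirror of the branch added in app.py (hypothetical standalone helper):
    # rewrite enumerated items as bullets only when there is more than one.
    if len(re.findall(r"\d\.", output)) > 1:
        output = re.sub(r"\d\.", "<br>•", output)
    return output

print(bulletize("Two steps: 1. preheat the oven 2. bake the cake"))
# Two steps: <br>• preheat the oven <br>• bake the cake
print(bulletize("Version 2.0 is out."))  # only one match, left untouched

The other fix in this file wires the max_tokens slider value through process_input instead of the previous hard-coded 150, so the user-selected limit now takes effect.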
app_it.py CHANGED
@@ -93,7 +93,9 @@ class Prime:
         output = output[0].upper() + output[1:]
         if output[-1] == tokenizer.decode(stop_tokens[0]):
             output = output[:-1]
-        output = re.sub(" \d\.", "\n•", output)
+        if len(re.findall("\d\.", output)) > 1:
+            output = re.sub("\d\.", "<br>•", output)
+            output = re.sub("^\<br\>", "", output)
         return output
 
 model.eval()
@@ -102,7 +104,7 @@ prime = Prime(tokenizer = tokenizer, model = model)
 
 def process_input(user_input, max_tokens, sample, top_k, message):
     return prime.generate(prompt = user_input, message = message,
-                          max_tokens = 150, sample = sample,
+                          max_tokens = max_tokens, sample = sample,
                           top_k = top_k)
 
 
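
app_it.py receives the same two fixes, plus one extra post-processing step: when the reply itself starts with a numbered item, the substitution above would leave a leading "<br>", which is stripped so the chat bubble does not open with a blank line. A minimal sketch under the same assumptions (illustrative helper name, not part of the app):

import re

def bulletize_it(output):
    # Mirror of the three lines added in app_it.py (hypothetical helper).
    if len(re.findall(r"\d\.", output)) > 1:
        output = re.sub(r"\d\.", "<br>•", output)
        output = re.sub(r"^<br>", "", output)  # drop a leading break, if any
    return output

print(bulletize_it("1. scalda il forno 2. inforna la torta"))
# • scalda il forno <br>• inforna la torta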