Elijahbodden committed
Commit
0edb841
1 Parent(s): d2652c3

Update app.py

Files changed (1):
  app.py  +5 -5
app.py CHANGED
@@ -61,9 +61,10 @@ presets = {
 }
 
 
-def custom_lp_logits_processor(ids, logits):
-    print(ids)
-    print(logits)
+def custom_lp_logits_processor(ids, logits, lp_start, lp_decay):
+    if (len(ids) > lp_start):
+        logits[tokenizer.eos_token_id] *= pow(lp_decay, len(ids)-lp_start)
+    return logits
 
 def respond(
     message,
@@ -103,8 +104,7 @@ def respond(
         max_tokens=128,
         frequency_penalty=frequency_penalty,
         presence_penalty=presence_penalty,
-        logits_processor=custom_lp_logits_processor
-        # lambda ids, logits: ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))(ids, torch.from_numpy(logits))
+        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay)
     ):
         token = message["choices"][0]["text"]
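
For reference, a minimal standalone sketch of what the updated processor does. The EOS token id and the lp_start/lp_decay values below are placeholders, not the app's real tokenizer or preset values: once generation runs past lp_start tokens, the EOS logit is scaled by lp_decay raised to the number of tokens past that threshold.

# Placeholder values -- in app.py these come from tokenizer.eos_token_id and the UI presets.
EOS_TOKEN_ID = 2
lp_start, lp_decay = 20, 1.15

def custom_lp_logits_processor(ids, logits, lp_start, lp_decay):
    # After lp_start generated tokens, scale the EOS logit by
    # lp_decay raised to the number of tokens past that threshold.
    if len(ids) > lp_start:
        logits[EOS_TOKEN_ID] *= pow(lp_decay, len(ids) - lp_start)
    return logits

# Toy check with a fake 4-token vocabulary and 25 generated token ids.
fake_logits = [0.0, 0.0, 1.0, 0.0]
print(custom_lp_logits_processor(list(range(25)), fake_logits, lp_start, lp_decay))

In the app itself, the real eos token id and the preset-controlled lp_start/lp_decay are supplied through the lambda shown in the second hunk of the diff.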