Elijahbodden committed on
Commit
d2652c3
1 Parent(s): 3fcefb4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -61,6 +61,10 @@ presets = {
61
  }
62
 
63
 
 
 
 
 
64
  def respond(
65
  message,
66
  history: list[tuple[str, str]],
@@ -99,7 +103,8 @@ def respond(
99
  max_tokens=128,
100
  frequency_penalty=frequency_penalty,
101
  presence_penalty=presence_penalty,
102
- logits_processor=lambda ids, logits: ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))(ids, torch.from_numpy(logits))
 
103
  ):
104
  token = message["choices"][0]["text"]
105
 
 
61
  }
62
 
63
 
64
def custom_lp_logits_processor(ids, logits):
    """Debug logits processor: log the token ids and logits, then pass logits through.

    Parameters
    ----------
    ids : sequence of int
        Token ids generated so far (supplied by the sampler).
    logits : array-like
        Raw logits for the next token.

    Returns
    -------
    The logits, unchanged.

    NOTE(review): a llama-cpp-python ``logits_processor`` must return the
    (possibly modified) logits array; the original body only printed and
    implicitly returned ``None``, which would break sampling downstream.
    """
    print(ids)
    print(logits)
    return logits
68
  def respond(
69
  message,
70
  history: list[tuple[str, str]],
 
103
  max_tokens=128,
104
  frequency_penalty=frequency_penalty,
105
  presence_penalty=presence_penalty,
106
+ logits_processor=custom_lp_logits_processor
107
+ # lambda ids, logits: ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))(ids, torch.from_numpy(logits))
108
  ):
109
  token = message["choices"][0]["text"]
110