Elijahbodden committed on
Commit
3fcefb4
1 Parent(s): 56aab7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -9,6 +9,7 @@ os.system('pip install llama-cpp-python transformers torch')
9
 
10
  from llama_cpp import Llama
11
  from transformers import AutoTokenizer
 
12
  from transformers.generation.logits_process import ExponentialDecayLengthPenalty
13
  model_id = "Elijahbodden/eliGPTv1.1"
14
 
@@ -98,7 +99,7 @@ def respond(
98
  max_tokens=128,
99
  frequency_penalty=frequency_penalty,
100
  presence_penalty=presence_penalty,
101
- logits_processor=ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))
102
  ):
103
  token = message["choices"][0]["text"]
104
 
 
9
 
10
  from llama_cpp import Llama
11
  from transformers import AutoTokenizer
12
+ import torch
13
  from transformers.generation.logits_process import ExponentialDecayLengthPenalty
14
  model_id = "Elijahbodden/eliGPTv1.1"
15
 
 
99
  max_tokens=128,
100
  frequency_penalty=frequency_penalty,
101
  presence_penalty=presence_penalty,
102
+ logits_processor=lambda ids, logits: ExponentialDecayLengthPenalty((lp_start, lp_decay), tokenizer.eos_token_id, len(convo))(ids, torch.from_numpy(logits))
103
  ):
104
  token = message["choices"][0]["text"]
105