gokaygokay committed on
Commit
c491b9d
1 Parent(s): 57d539d

repetition penalty

Browse files
Files changed (2) hide show
  1. config.json +3 -3
  2. moondream.py +10 -2
config.json CHANGED
@@ -1,11 +1,11 @@
1
  {
2
- "_name_or_path": "vikhyatk/moondream2",
3
  "architectures": [
4
  "Moondream"
5
  ],
6
  "auto_map": {
7
- "AutoConfig": "vikhyatk/moondream2--configuration_moondream.MoondreamConfig",
8
- "AutoModelForCausalLM": "vikhyatk/moondream2--moondream.Moondream"
9
  },
10
  "model_type": "moondream1",
11
  "phi_config": {
 
1
  {
2
+ "_name_or_path": "gokaygokay/moondream-prompt",
3
  "architectures": [
4
  "Moondream"
5
  ],
6
  "auto_map": {
7
+ "AutoConfig": "gokaygokay/moondream-prompt--configuration_moondream.MoondreamConfig",
8
+ "AutoModelForCausalLM": "gokaygokay/moondream-prompt--moondream.Moondream"
9
  },
10
  "model_type": "moondream1",
11
  "phi_config": {
moondream.py CHANGED
@@ -64,6 +64,7 @@ class Moondream(PreTrainedModel):
64
  tokenizer,
65
  eos_text="<END>",
66
  max_new_tokens=128,
 
67
  **kwargs,
68
  ):
69
  eos_tokens = tokenizer(eos_text, add_special_tokens=False)[0].ids
@@ -73,6 +74,7 @@ class Moondream(PreTrainedModel):
73
  "bos_token_id": tokenizer.bos_token_id,
74
  "pad_token_id": tokenizer.eos_token_id,
75
  "max_new_tokens": max_new_tokens,
 
76
  **kwargs,
77
  }
78
 
@@ -90,6 +92,8 @@ class Moondream(PreTrainedModel):
90
  question,
91
  tokenizer,
92
  chat_history="",
 
 
93
  result_queue=None,
94
  **kwargs,
95
  ):
@@ -99,7 +103,8 @@ class Moondream(PreTrainedModel):
99
  prompt,
100
  eos_text="<END>",
101
  tokenizer=tokenizer,
102
- max_new_tokens=512,
 
103
  **kwargs,
104
  )[0]
105
  cleaned_answer = re.sub("<$|<END$", "", answer).strip()
@@ -115,6 +120,8 @@ class Moondream(PreTrainedModel):
115
  images,
116
  prompts,
117
  tokenizer,
 
 
118
  **kwargs,
119
  ):
120
  eos_tokens = tokenizer("<END>", add_special_tokens=False)[0].ids
@@ -162,7 +169,8 @@ class Moondream(PreTrainedModel):
162
  "eos_token_id": eos_tokens,
163
  "bos_token_id": tokenizer.bos_token_id,
164
  "pad_token_id": tokenizer.eos_token_id,
165
- "max_new_tokens": 512,
 
166
  **kwargs,
167
  }
168
 
 
64
  tokenizer,
65
  eos_text="<END>",
66
  max_new_tokens=128,
67
+ repetition_penalty=1.5,
68
  **kwargs,
69
  ):
70
  eos_tokens = tokenizer(eos_text, add_special_tokens=False)[0].ids
 
74
  "bos_token_id": tokenizer.bos_token_id,
75
  "pad_token_id": tokenizer.eos_token_id,
76
  "max_new_tokens": max_new_tokens,
77
+ "repetition_penalty": repetition_penalty,
78
  **kwargs,
79
  }
80
 
 
92
  question,
93
  tokenizer,
94
  chat_history="",
95
+ repetition_penalty=1.5,
96
+ max_new_tokens=128,
97
  result_queue=None,
98
  **kwargs,
99
  ):
 
103
  prompt,
104
  eos_text="<END>",
105
  tokenizer=tokenizer,
106
+ max_new_tokens=max_new_tokens,
107
+ repetition_penalty=repetition_penalty,
108
  **kwargs,
109
  )[0]
110
  cleaned_answer = re.sub("<$|<END$", "", answer).strip()
 
120
  images,
121
  prompts,
122
  tokenizer,
123
+ repetition_penalty=1.5,
124
+ max_new_tokens=128,
125
  **kwargs,
126
  ):
127
  eos_tokens = tokenizer("<END>", add_special_tokens=False)[0].ids
 
169
  "eos_token_id": eos_tokens,
170
  "bos_token_id": tokenizer.bos_token_id,
171
  "pad_token_id": tokenizer.eos_token_id,
172
+ "max_new_tokens": max_new_tokens,
173
+ "repetition_penalty": repetition_penalty,
174
  **kwargs,
175
  }
176