peter szemraj committed
Commit
9c6d59b
1 Parent(s): 106fd91
Files changed (1)
  1. ai_single_response.py +10 -6
ai_single_response.py CHANGED
@@ -118,7 +118,7 @@ def query_gpt_model(
     batch_size=64,
     verbose=False,
     use_gpu=False,
-    beams=2,
+    beams=4,
 ):
     """
     query_gpt_model - the main function that calls the model.
@@ -156,17 +156,21 @@ def query_gpt_model(
     print("\n... generating...")
     this_result = ai.generate(
         n=1,
-        top_k=kparam,
         batch_size=batch_size,
         # the prompt input counts for text length constraints
         max_length=resp_length + pr_len,
         min_length=resp_min + pr_len,
         prompt=this_prompt,
-        temperature=temp,
-        top_p=top_p,
-        do_sample=True,
+        num_beams=beams,
+        early_stopping=True,
+        no_repeat_ngram_size=2,
+        # temperature=temp,
+        # top_k=kparam,
+        # top_p=top_p,
+        # do_sample=True,
+        # use_cache=True,
         return_as_list=True,
-        use_cache=True,
+
     )
     if verbose:
         print("\n... generated:\n")