minhdang committed on
Commit
65461b2
1 Parent(s): 74780e2

Update app.py

Files changed (1)
  1. app.py +12 -12
app.py CHANGED
@@ -208,7 +208,7 @@ def predict(
     temperature,
     repetition_penalty,
     max_length_tokens,
-    max_context_length_tokens,
+    # max_context_length_tokens,
     model_select_dropdown,
 ):
     """
@@ -247,7 +247,7 @@ def predict(
         # history,
         vl_chat_processor,
         tokenizer,
-        max_length=max_context_length_tokens,
+        # max_length=max_context_length_tokens,
     )
     prompts = convert_conversation_to_prompts(conversation)
 
@@ -296,7 +296,7 @@ def retry(
     temperature,
     repetition_penalty,
     max_length_tokens,
-    max_context_length_tokens,
+    # max_context_length_tokens,
     model_select_dropdown,
 ):
     if len(history) == 0:
@@ -318,7 +318,7 @@ def retry(
         temperature,
         repetition_penalty,
         max_length_tokens,
-        max_context_length_tokens,
+        # max_context_length_tokens,
         model_select_dropdown,
     )
 
@@ -403,14 +403,14 @@ def build_demo(MODELS):
                interactive=True,
                label="Max Generation Tokens",
            )
-           max_context_length_tokens = gr.Slider(
-               minimum=0,
-               maximum=2048,
-               value=2048,
-               step=128,
-               interactive=True,
-               label="Max History Tokens",
-           )
+           # max_context_length_tokens = gr.Slider(
+           #     minimum=0,
+           #     maximum=2048,
+           #     value=2048,
+           #     step=128,
+           #     interactive=True,
+           #     label="Max History Tokens",
+           # )
            model_select_dropdown = gr.Dropdown(
                label="Select Models",
                choices=MODELS,
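
For context, the removed slider follows the usual Gradio pattern in which every component listed in a handler's inputs must have a matching positional parameter on that handler, so dropping the "Max History Tokens" control also means dropping the parameter from predict/retry and from every call that forwards it, exactly as the hunks above do. Below is a minimal, hypothetical sketch of that wiring; the control names and labels mirror the diff, but the click hookup, the extra components (Textbox, Button), the slider ranges for temperature and repetition penalty, and the toy generation logic are assumptions, not the app's actual code.

# Hypothetical sketch of the Gradio wiring this diff touches (not the app's real code).
# Each component in `inputs=` maps positionally to a parameter of the handler.
import gradio as gr

def predict(text, temperature, repetition_penalty, max_length_tokens, model_select_dropdown):
    # Toy stand-in for the real model call; just echoes the settings it received.
    return (f"[{model_select_dropdown}] temp={temperature}, "
            f"rep_pen={repetition_penalty}, max_new={max_length_tokens}: {text}")

MODELS = ["model-a", "model-b"]  # placeholder model list

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Prompt")
    temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.05,
                            interactive=True, label="Temperature")
    repetition_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, step=0.05,
                                   interactive=True, label="Repetition Penalty")
    max_length_tokens = gr.Slider(minimum=0, maximum=2048, value=512, step=128,
                                  interactive=True, label="Max Generation Tokens")
    # The removed "Max History Tokens" slider would have been declared here and
    # appended to the `inputs` list below, mirroring the commented-out lines above.
    model_select_dropdown = gr.Dropdown(label="Select Models", choices=MODELS, value=MODELS[0])
    output = gr.Textbox(label="Response")
    run = gr.Button("Generate")
    run.click(
        predict,
        inputs=[text_input, temperature, repetition_penalty,
                max_length_tokens, model_select_dropdown],
        outputs=[output],
    )

if __name__ == "__main__":
    demo.launch()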