chansung committed on
Commit
05a5156
1 Parent(s): 23771bd

Update vid2persona/pipeline/llm.py

Browse files
Files changed (1) hide show
  1. vid2persona/pipeline/llm.py +4 -2
vid2persona/pipeline/llm.py CHANGED
@@ -67,9 +67,11 @@ async def chat(
67
  "repetition_penalty": repetition_penalty
68
  }
69
 
70
- if hf_token is None:
71
  for response in local_openllm.send_message(messages, model_id, max_input_token_length, parameters):
72
  yield response
73
- else:
 
 
74
  async for response in tgi_openllm.send_messages(messages, model_id, hf_token, parameters):
75
  yield response
 
67
  "repetition_penalty": repetition_penalty
68
  }
69
 
70
+ try:
71
  for response in local_openllm.send_message(messages, model_id, max_input_token_length, parameters):
72
  yield response
73
+ except ThreadPoolBuildError as e:
74
+ gr.Warning("Something went wrong. Switching to TGI remotely hosted model")
75
+ finally:
76
  async for response in tgi_openllm.send_messages(messages, model_id, hf_token, parameters):
77
  yield response