NCTCMumbai committed (verified)
Commit 91b5133 · 1 Parent(s): 8146b4b

Update backend/query_llm.py

Files changed (1):
  backend/query_llm.py (+2 -2)
backend/query_llm.py CHANGED
@@ -106,8 +106,8 @@ def generate_hf(prompt: str, history: str, temperature: float = 0.9, max_new_tok
         for response in stream:
             print(response.token.text)
             output += response.token.text
-            return output
-            #yield output
+            #return output
+            yield output
 
     except Exception as e:
         if "Too Many Requests" in str(e):
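
The change swaps return output for yield output (each previously shadowed by its commented-out counterpart), turning generate_hf back into a generator so the caller can stream partial text token by token instead of stopping after the first chunk. Below is a minimal, self-contained sketch of that streaming pattern, assuming the huggingface_hub InferenceClient that query_llm.py appears to wrap; the model id, the trimmed-down signature, and the error message are illustrative placeholders, not the repo's actual code.

from huggingface_hub import InferenceClient

# Placeholder model id; the real client setup lives elsewhere in query_llm.py.
hf_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")

def generate_hf(prompt: str, temperature: float = 0.9, max_new_tokens: int = 256):
    generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens)
    output = ""
    try:
        # stream=True with details=True makes text_generation return an
        # iterator of per-token responses; each new piece of text arrives
        # in response.token.text.
        stream = hf_client.text_generation(prompt, **generate_kwargs,
                                           stream=True, details=True,
                                           return_full_text=False)
        for response in stream:
            output += response.token.text
            # yield hands the accumulated text back after every token, so a
            # frontend looping over generate_hf(...) can re-render the growing
            # answer; a return at this point (as in the pre-commit code) would
            # end the generation after the first token.
            yield output
    except Exception as e:
        # The real file branches on rate-limit errors; handled schematically here.
        if "Too Many Requests" in str(e):
            yield "The model is rate limited right now, please retry shortly."
        else:
            raise

A caller consumes it as an ordinary generator, e.g. for partial in generate_hf("What is RAG?"): print(partial), with each iteration carrying the answer so far.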