imperialwool committed on
Commit
3cd9e10
1 Parent(s): ad33635

Update gradio_app.py

Browse files
Files changed (1) hide show
  1. gradio_app.py +5 -0
gradio_app.py CHANGED
@@ -56,6 +56,7 @@ with open('system.prompt', 'r', encoding='utf-8') as f:
56
  prompt = f.read()
57
 
58
  def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
 
59
  try:
60
  maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
61
  if isinstance(custom_prompt, str):
@@ -69,6 +70,7 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
69
  output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False)
70
  text = output["choices"][0]["text"]
71
  if language in languages:
 
72
  encoded_input = translator_tokenizer(text, return_tensors="pt")
73
  generated_tokens = translator_model.generate(
74
  **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
@@ -76,11 +78,14 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
76
  translated_text = translator_tokenizer.batch_decode(
77
  generated_tokens, skip_special_tokens=True
78
  )[0]
 
79
  return translated_text
 
80
  return text
81
  except Exception as e:
82
  print(e)
83
  return "Oops! Internal server error. Check the logs of space/instance."
 
84
 
85
  print("! LOAD GRADIO INTERFACE !")
86
  demo = gr.Interface(
 
56
  prompt = f.read()
57
 
58
  def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
59
+ print("Request:", request, "\nMax tokens:", max_tokens, "\nLanguage:", language, "\nCustom prompt:", custom_prompt, "\n")
60
  try:
61
  maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
62
  if isinstance(custom_prompt, str):
 
70
  output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False)
71
  text = output["choices"][0]["text"]
72
  if language in languages:
73
+ print("Translating from en to", language)
74
  encoded_input = translator_tokenizer(text, return_tensors="pt")
75
  generated_tokens = translator_model.generate(
76
  **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
 
78
  translated_text = translator_tokenizer.batch_decode(
79
  generated_tokens, skip_special_tokens=True
80
  )[0]
81
+ print("Translated:", translated_text, "\nOriginal:", text)
82
  return translated_text
83
+ print(text)
84
  return text
85
  except Exception as e:
86
  print(e)
87
  return "Oops! Internal server error. Check the logs of space/instance."
88
+ print("\n\n\n")
89
 
90
  print("! LOAD GRADIO INTERFACE !")
91
  demo = gr.Interface(