vishwask committed on
Commit
5c147cc
·
verified ·
1 Parent(s): fd61954

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -46,6 +46,7 @@ def english_to_indian(sentence):
46
  forced_bos_token_id=translation_tokenizer.lang_code_to_id[lang_global] )
47
  x = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
48
  translated_sentence = translated_sentence + x[0]
 
49
  return translated_sentence
50
 
51
  def indian_to_english(sentence):
@@ -57,6 +58,7 @@ def indian_to_english(sentence):
57
  generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"] )
58
  x = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
59
  translated_sentence = translated_sentence + x[0]
 
60
  return translated_sentence
61
 
62
 
@@ -121,9 +123,6 @@ def initialize_llmchain(temperature, max_tokens, top_k, vector_db, progress=gr.P
121
 
122
  # HuggingFaceHub uses HF inference endpoints
123
  progress(0.5, desc="Initializing HF Hub...")
124
- # Use of trust_remote_code as model_kwargs
125
- # Warning: langchain issue
126
- # URL: https://github.com/langchain-ai/langchain/issues/6080
127
 
128
  llm = HuggingFaceHub(repo_id=llm_model, model_kwargs={"temperature": temperature,
129
  "max_new_tokens": max_tokens,
 
46
  forced_bos_token_id=translation_tokenizer.lang_code_to_id[lang_global] )
47
  x = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
48
  translated_sentence = translated_sentence + x[0]
49
+ print(translated_sentence)
50
  return translated_sentence
51
 
52
  def indian_to_english(sentence):
 
58
  generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"] )
59
  x = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
60
  translated_sentence = translated_sentence + x[0]
61
+ print(translated_sentence)
62
  return translated_sentence
63
 
64
 
 
123
 
124
  # HuggingFaceHub uses HF inference endpoints
125
  progress(0.5, desc="Initializing HF Hub...")
 
 
 
126
 
127
  llm = HuggingFaceHub(repo_id=llm_model, model_kwargs={"temperature": temperature,
128
  "max_new_tokens": max_tokens,