Tonic committed on
Commit
9e7ff1d
β€’
1 Parent(s): 683284e

fix for include prompt error

Browse files
Files changed (2) hide show
  1. langchainapp.py +3 -1
  2. requirements.txt +1 -1
langchainapp.py CHANGED
@@ -44,7 +44,9 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
44
  hf_token, yi_token = load_env_variables()
45
 
46
  tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token, trust_remote_code=True)
47
- nvidiamodel = AutoModel.from_pretrained(model_name, token=hf_token, trust_remote_code=True).to(device)
 
 
48
  def clear_cuda_cache():
49
  torch.cuda.empty_cache()
50
 
 
44
  hf_token, yi_token = load_env_variables()
45
 
46
  tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token, trust_remote_code=True)
47
+ nvidiamodel = AutoModel.from_pretrained(model_name, token=hf_token, trust_remote_code=True).to(device)
48
+ nvidiamodel.set_pooling_include_prompt(include_prompt=False)
49
+
50
  def clear_cuda_cache():
51
  torch.cuda.empty_cache()
52
 
requirements.txt CHANGED
@@ -15,4 +15,4 @@ gradio
15
  # tesseract
16
  # libxml2
17
  # libxslt
18
- InstructorEmbedding
 
15
  # tesseract
16
  # libxml2
17
  # libxslt
18
+ git+https://github.com/xlang-ai/instructor-embedding.git@5cca65eb0ed78ab354b086a5386fb2c528809caa