Lahiru Menikdiwela committed on
Commit
32e6acc
1 Parent(s): 96ace13

trying to fix config.json issues in led

Browse files
Files changed (1) hide show
  1. model.py +3 -2
model.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
 
3
  from langchain_openai import OpenAI
4
  # from huggingface_hub import login
5
  from dotenv import load_dotenv
@@ -18,11 +19,11 @@ def get_local_model(model_name_or_path:str)->pipeline:
18
 
19
  #print(f"Model is running on {device}")
20
 
21
- tokenizer = AutoTokenizer.from_pretrained(
22
  model_name_or_path,
23
  token = hf_token
24
  )
25
- model = AutoModelForSeq2SeqLM.from_pretrained(
26
  model_name_or_path,
27
  torch_dtype=torch.float32,
28
  token = hf_token
 
1
  import os
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
3
+ from transformers import LEDForConditionalGeneration, LEDTokenizer
4
  from langchain_openai import OpenAI
5
  # from huggingface_hub import login
6
  from dotenv import load_dotenv
 
19
 
20
  #print(f"Model is running on {device}")
21
 
22
+ tokenizer = LEDTokenizer.from_pretrained( #AutoTokenizer.from_pretrained( new changes to support led
23
  model_name_or_path,
24
  token = hf_token
25
  )
26
+ model = LEDForConditionalGeneration.from_pretrained( #AutoModelForSeq2SeqLM.from_pretrained( new changes to support led
27
  model_name_or_path,
28
  torch_dtype=torch.float32,
29
  token = hf_token