meghanaraok committed
Commit fa3049c
1 Parent(s): a827e92

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -109,7 +109,6 @@ def predict_icd(text_input, model_name, label_count):
         "args": training_args
     }
     model = model_class.from_pretrained(model_path, **kwargs)
-    model.to(torch.device("cuda:0"))
     tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, padding_side="right")
     results = segment_tokenize_dataset(tokenizer, text, labels,
                                        data_args.max_seq_length,
@@ -124,10 +123,10 @@ def predict_icd(text_input, model_name, label_count):
 
 
     with torch.no_grad():
-        input_ids = input_ids.to(torch.device("cuda:0"))
-        attention_mask = attention_mask.to(torch.device("cuda:0"))
-        token_type_ids = token_type_ids.to(torch.device("cuda:0"))
-        targets = targets.to(torch.device("cuda:0"))
+        # input_ids = input_ids.to(torch.device("cuda:0"))
+        # attention_mask = attention_mask.to(torch.device("cuda:0"))
+        # token_type_ids = token_type_ids.to(torch.device("cuda:0"))
+        # targets = targets.to(torch.device("cuda:0"))
         model_inputs = {
             "input_ids": input_ids,
             "attention_mask": attention_mask,