egoriya committed
Commit 2f33dc9
1 Parent(s): 2468cee

Update README.md

Files changed (1): README.md (+3 −3)
README.md CHANGED
@@ -34,10 +34,10 @@ The preferable usage:
 # pip install transformers
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import torch
-tokenizer = AutoTokenizer.from_pretrained("/mnt/chatbot_models2/chit-chat/experiments/crossencoder_hf/rubert-base-sentence/dialogs_whole")
-model = AutoModelForSequenceClassification.from_pretrained("/mnt/chatbot_models2/chit-chat/experiments/crossencoder_hf/rubert-base-sentence/dialogs_whole")
+tokenizer = AutoTokenizer.from_pretrained("tinkoff-ai/response-quality-classifier-tiny")
+model = AutoModelForSequenceClassification.from_pretrained("tinkoff-ai/response-quality-classifier-tiny")
 # model.cuda()
-inputs = tokenizer('привет[SEP]привет![SEP]как дела?[RESPONSE_TOKEN]норм',
+inputs = tokenizer('привет[SEP]привет![SEP]как дела?[RESPONSE_TOKEN]норм, у тя как?',
                    padding=True, max_length=128, truncation=True, add_special_tokens=False, return_tensors='pt')
 with torch.inference_mode():
     logits = model(**inputs).logits
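
The updated snippet stops at the raw logits. Below is a minimal sketch of how the scores might be read out, assuming the checkpoint is a multi-label sequence classifier whose logits map to per-label probabilities via a sigmoid; the label semantics are not shown in this diff and are an assumption here.

# pip install transformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Same checkpoint and input format as in the updated README snippet:
# dialogue context turns separated by [SEP], candidate response after [RESPONSE_TOKEN].
tokenizer = AutoTokenizer.from_pretrained("tinkoff-ai/response-quality-classifier-tiny")
model = AutoModelForSequenceClassification.from_pretrained("tinkoff-ai/response-quality-classifier-tiny")
# model.cuda()

inputs = tokenizer('привет[SEP]привет![SEP]как дела?[RESPONSE_TOKEN]норм, у тя как?',
                   padding=True, max_length=128, truncation=True,
                   add_special_tokens=False, return_tensors='pt')

with torch.inference_mode():
    logits = model(**inputs).logits
    # Assumption: labels are scored independently, hence sigmoid rather than softmax.
    probas = torch.sigmoid(logits)[0]

print(probas.tolist())  # one probability per output label of the classifier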