# mt5-multilingual-sentiment

This model predicts the sentiment ('Negative'/'Positive') of an input sentence. It is a fine-tuned version of mt5-small.

The model supports 6 languages:

1) English
2) Hindi
3) German
4) Korean
5) Japanese
6) Portuguese

Here is how to use this model:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Load the fine-tuned model and tokenizer from the Hugging Face Hub
model = AutoModelForSeq2SeqLM.from_pretrained("Chirayu/mt5-multilingual-sentiment")
tokenizer = AutoTokenizer.from_pretrained("Chirayu/mt5-multilingual-sentiment")

# Use a GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)


def get_sentiment(
    text,
    num_beams=2,
    max_length=512,
    repetition_penalty=2.5,
    length_penalty=1.0,
    early_stopping=True,
    top_p=0.95,
    top_k=50,
    num_return_sequences=1,
):
    # Tokenize the input sentence and move it to the same device as the model
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    input_ids = input_ids.to(device)

    # Generate the sentiment label with beam search
    generated_ids = model.generate(
        input_ids=input_ids,
        num_beams=num_beams,
        max_length=max_length,
        repetition_penalty=repetition_penalty,
        length_penalty=length_penalty,
        early_stopping=early_stopping,
        top_p=top_p,
        top_k=top_k,
        num_return_sequences=num_return_sequences,
    )

    # Decode the generated token ids back into text ('Negative' or 'Positive')
    sentiment = [
        tokenizer.decode(
            generated_id,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=True,
        )
        for generated_id in generated_ids
    ]
    return sentiment
```
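
A minimal usage sketch follows. The example sentences and expected labels are illustrative assumptions, not outputs taken from the model card; the actual label is whatever the model generates:

```python
# Hypothetical example inputs; expected labels shown as comments for illustration only.
print(get_sentiment("The movie was absolutely wonderful!"))    # e.g. ['Positive']
print(get_sentiment("Der Service war leider sehr schlecht."))  # e.g. ['Negative'] (German)
print(get_sentiment("この映画は本当に素晴らしかった。"))          # e.g. ['Positive'] (Japanese)
```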