AleVento committed
Commit 752ecd5
1 Parent(s): e6d12b1

Update app.py

Files changed (1)
  1. app.py +65 -2
app.py CHANGED
@@ -1,11 +1,74 @@
 import streamlit as st
 import torch
 from transformers import BertTokenizer
-import EmotionTagger from EmotionTaggerClass
 
 BERT_MODEL_NAME = 'bert-base-cased'
-tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_NAME)
+LABEL_COLUMNS = ['anger','joy','fear','surprise','sadness', 'neutral']
+
+class EmotionTagger(pl.LightningModule):
+    def __init__(self, n_classes: int, n_training_steps=None, n_warmup_steps=None):
+        super().__init__()
+        self.bert = BertModel.from_pretrained(BERT_MODEL_NAME, return_dict=True)
+        self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)
+        self.n_training_steps = n_training_steps
+        self.n_warmup_steps = n_warmup_steps
+        self.criterion = nn.BCELoss()
+
+    def forward(self, input_ids, attention_mask, labels=None):
+        output = self.bert(input_ids, attention_mask=attention_mask)
+        output = self.classifier(output.pooler_output)
+        output = torch.sigmoid(output)
+        loss = 0
+        if labels is not None:
+            loss = self.criterion(output, labels)
+        return loss, output
+
+    def training_step(self, batch, batch_idx):
+        input_ids = batch["input_ids"]
+        attention_mask = batch["attention_mask"]
+        labels = batch["labels"]
+        loss, outputs = self(input_ids, attention_mask, labels)
+        self.log("train_loss", loss, prog_bar=True, logger=True)
+        return {"loss": loss, "predictions": outputs, "labels": labels}
+
+    def validation_step(self, batch, batch_idx):
+        input_ids = batch["input_ids"]
+        attention_mask = batch["attention_mask"]
+        labels = batch["labels"]
+        loss, outputs = self(input_ids, attention_mask, labels)
+        self.log("val_loss", loss, prog_bar=True, logger=True)
+        return loss
+
+    def test_step(self, batch, batch_idx):
+        input_ids = batch["input_ids"]
+        attention_mask = batch["attention_mask"]
+        labels = batch["labels"]
+        loss, outputs = self(input_ids, attention_mask, labels)
+        self.log("test_loss", loss, prog_bar=True, logger=True)
+        return loss
 
+        for i, name in enumerate(LABEL_COLUMNS):
+            class_roc_auc = pytorch_lightning.metrics.functional.auroc(predictions[:, i], labels[:, i])
+            self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch)
+
+    def configure_optimizers(self):
+        optimizer = AdamW(self.parameters(), lr=2e-5)
+
+        scheduler = get_linear_schedule_with_warmup(
+            optimizer,
+            num_warmup_steps=self.n_warmup_steps,
+            num_training_steps=self.n_training_steps
+        )
+
+        return dict(
+            optimizer=optimizer,
+            lr_scheduler=dict(
+                scheduler=scheduler,
+                interval='step'
+            )
+        )
+
+tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_NAME)
 model = torch.load("./model.pt")
 st.title("Analisis de Sentimientos")
 txt = st.text_area(label="Please write what you want to analyze...")
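
Note on the added code: the EmotionTagger class references names that are not imported anywhere in the hunk shown (pl, BertModel, nn, AdamW, get_linear_schedule_with_warmup, pytorch_lightning), and the per-label AUROC loop uses predictions and labels that are not defined in it. Defining the class inside app.py is presumably needed so that torch.load("./model.pt") can unpickle the saved model. For context only, below is a minimal sketch, not part of the commit, of the imports the class appears to rely on and of how the loaded model and tokenizer could be applied to the text-area input; the 512-token limit, the 0.5 cutoff, and the THRESHOLD name are assumptions for illustration.

# Sketch only, not part of the commit. Imports the class definition appears to
# rely on, plus an illustrative inference step for the Streamlit text area.
import streamlit as st
import torch
import torch.nn as nn
import pytorch_lightning as pl
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup

# ... EmotionTagger, LABEL_COLUMNS, tokenizer, model, and txt as in the diff above ...

THRESHOLD = 0.5  # assumed cutoff for reporting an emotion as present

if txt:
    model.eval()
    encoding = tokenizer(
        txt,
        truncation=True,
        max_length=512,        # assumed; should match the training setup
        padding="max_length",
        return_tensors="pt",
    )
    with torch.no_grad():
        # forward() returns (loss, sigmoid probabilities); loss is 0 when no labels are passed
        _, probs = model(encoding["input_ids"], encoding["attention_mask"])
    probs = probs.flatten().tolist()
    for label, prob in zip(LABEL_COLUMNS, probs):
        st.write(f"{label}: {prob:.3f}")
    detected = [label for label, prob in zip(LABEL_COLUMNS, probs) if prob > THRESHOLD]
    st.write("Detected:", ", ".join(detected) if detected else "none")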