Lurunchik committed on
Commit
924d432
1 Parent(s): 86c445d

add test model

Files changed (2)
  1. app.py +25 -4
  2. nfqa_model.py +97 -0
app.py CHANGED
@@ -1,7 +1,28 @@
  import gradio as gr
+ from transformers import AutoTokenizer
 
- def greet(name):
-     return "Hello " + name + "!!"
+ from nfqa_model import RobertaNFQAClassification
 
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+
+ index_to_label = {0: 'NOT-A-QUESTION',
+                   1: 'FACTOID',
+                   2: 'DEBATE',
+                   3: 'EVIDENCE-BASED',
+                   4: 'INSTRUCTION',
+                   5: 'REASON',
+                   6: 'EXPERIENCE',
+                   7: 'COMPARISON'}
+
+
+ model = RobertaNFQAClassification.from_pretrained("Lurunchik/nf-cats")
+ nfqa_tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
+
+
+ def get_nfqa_prediction(text):
+     output = model(**nfqa_tokenizer(text, return_tensors="pt"))
+     index = output.logits.argmax()
+     return index_to_label[int(index)]
+
+
+ iface = gr.Interface(fn=get_nfqa_prediction, inputs="text", outputs="text")
+ iface.launch()
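
A minimal sketch of exercising the same prediction path outside of Gradio, assuming the "Lurunchik/nf-cats" checkpoint and the "deepset/roberta-base-squad2" tokenizer referenced above are available; the index printed for this particular question is illustrative, not guaranteed:

from transformers import AutoTokenizer

from nfqa_model import RobertaNFQAClassification

# Same checkpoint and tokenizer as in app.py above.
model = RobertaNFQAClassification.from_pretrained("Lurunchik/nf-cats")
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

# Tokenize one question and take the argmax over the eight category logits.
output = model(**tokenizer("How do I reset my router?", return_tensors="pt"))
print(int(output.logits.argmax()))  # index into index_to_label, e.g. 4 -> 'INSTRUCTION'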
nfqa_model.py ADDED
@@ -0,0 +1,97 @@
+ from typing import Optional, Sequence, Tuple, Union
+
+ import torch
+ from torch import nn
+ from torch.nn import CrossEntropyLoss, functional
+ from transformers import RobertaConfig
+ from transformers.modeling_outputs import SequenceClassifierOutput
+ from transformers.models.roberta.modeling_roberta import RobertaModel, RobertaPooler, RobertaPreTrainedModel
+
+
+ class MishActivation(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return x * torch.tanh(torch.nn.functional.softplus(x))
+
+
+ class NFQAClassificationHead(nn.Module):
+     def __init__(
+         self, input_dim: int, num_labels: int, hidden_dims: Sequence[int] = (768, 512), dropout: float = 0.0,
+     ) -> None:
+         super().__init__()
+
+         self.linear_layers = nn.Sequential(*(nn.Linear(input_dim, dim) for dim in hidden_dims))
+         self.classification_layer = torch.nn.Linear(hidden_dims[-1], num_labels)
+         self.activations = [MishActivation()] * len(hidden_dims)
+         self.dropouts = [torch.nn.Dropout(p=dropout)] * len(hidden_dims)
+
+     def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+         output = inputs
+         for layer, activation, dropout in zip(self.linear_layers, self.activations, self.dropouts):
+             output = dropout(activation(layer(output)))
+         return self.classification_layer(output)
+
+
+ class RobertaNFQAClassification(RobertaPreTrainedModel):
+     _keys_to_ignore_on_load_missing = [r'position_ids']
+     _DROPOUT = 0.0
+
+     def __init__(self, config: RobertaConfig):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.config = config
+
+         self.embedder = RobertaModel(config, add_pooling_layer=True)
+         self.pooler = RobertaPooler(config)
+         self.feedforward = NFQAClassificationHead(config.hidden_size, config.num_labels)
+         self.dropout = torch.nn.Dropout(self._DROPOUT)
+
+         self.init_weights()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.embedder(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         sequence_output = outputs[0]
+
+         logits = self.feedforward(self.dropout(self.pooler(sequence_output)))
+
+         loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutput(
+             loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
+         )
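
As the forward docstring notes, passing labels yields a cross-entropy loss alongside the logits, and return_dict switches between a SequenceClassifierOutput and a plain tuple. A minimal sketch of both paths, using a randomly initialized model built from a default RobertaConfig with num_labels=8 (an assumption made only to avoid downloading the checkpoint; the token ids and label below are arbitrary):

import torch
from transformers import RobertaConfig

from nfqa_model import RobertaNFQAClassification

config = RobertaConfig(num_labels=8)          # default RoBERTa sizes, 8 output classes
model = RobertaNFQAClassification(config)     # random weights, enough to exercise the API

input_ids = torch.tensor([[0, 31414, 232, 2]])  # arbitrary short token sequence
labels = torch.tensor([3])                      # hypothetical gold label index

out = model(input_ids=input_ids, labels=labels, return_dict=True)
print(out.logits.shape, out.loss)               # torch.Size([1, 8]) and a scalar CE loss

logits, = model(input_ids=input_ids, return_dict=False)  # tuple path returns (logits,)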