Fixed the Python implementation in the README
README.md
CHANGED
@@ -27,18 +27,47 @@ This model is designed for integration into IT call center software systems, whe
2. Load the model using the following code snippet:

```python
from __future__ import annotations

import torch
from transformers import RobertaConfig, RobertaModel, AutoTokenizer


# Add a custom regression head on top of RoBERTa
class SITCC(torch.nn.Module):
    def __init__(self, model, config):
        super(SITCC, self).__init__()
        self.roberta = model
        self.regressor = torch.nn.Linear(config.hidden_size, 1)  # outputs a single value

    def forward(self, input_ids, attention_mask):
        outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs[1]  # the pooler output: the transformed <s> (CLS) token
        logits = self.regressor(pooled_output)
        return logits


def init_model() -> tuple[SITCC, AutoTokenizer]:
    # Load the tokenizer and config from Hugging Face
    model_name = "KameronB/sitcc-roberta"
    tokenizer = AutoTokenizer.from_pretrained(model_name, from_tf=False)
    config = RobertaConfig.from_pretrained(model_name)

    # Create the model based on the RoBERTa base model
    model = SITCC(RobertaModel(config), config)

    # Fetch the state dict to apply the fine-tuned weights
    state_dict = torch.hub.load_state_dict_from_url(
        f"https://huggingface.co/{model_name}/resolve/main/pytorch_model.bin",
        map_location="cpu",
    )
    model.load_state_dict(state_dict)
    return model, tokenizer


model, tokenizer = init_model()


def predict(sentences):
    model.eval()
    inputs = tokenizer(sentences, padding=True, truncation=True, max_length=512, return_tensors="pt")
    input_ids = inputs["input_ids"]
    attention_mask = inputs["attention_mask"]

    with torch.no_grad():
        outputs = model(input_ids, attention_mask)
    return outputs
```
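As a quick smoke test, the snippet can be exercised end to end; this is a minimal sketch in which the two ticket-note strings are invented examples, and `predict` returns a `(batch, 1)` tensor of raw regression scores:

```python
# Hypothetical usage of the snippet above; the ticket notes are made-up examples.
notes = [
    "Reset the user's VPN token and confirmed they can connect; ticket resolved.",
    "fixed",
]
scores = predict(notes)             # tensor of shape (2, 1)
print(scores.squeeze(-1).tolist())  # one score per ticket note
```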