---
license: mit
language:
- en
---
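Two fine-tuned binary classifiers that label IT service desk tickets as incidents or requests: one based on TinyBERT and one based on RoBERTa. The sections below show how to fetch each model's fine-tuned weights, run predictions on ticket descriptions, and further fine-tune the TinyBERT variant on your own data.
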
<details>
  <summary>
    TinyBERT-based model
  </summary>

### Fetching the model
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the TinyBERT tokenizer and base model
tokenizer = AutoTokenizer.from_pretrained('huawei-noah/TinyBERT_General_4L_312D')
model = AutoModelForSequenceClassification.from_pretrained('huawei-noah/TinyBERT_General_4L_312D', num_labels=2)

# Fetch the state dict that holds the fine-tuned weights
state_dict = torch.hub.load_state_dict_from_url("https://huggingface.co/KameronB/SITCC-Incident-Request-Classifier/resolve/main/tiny_bert_model.bin")
# If running on CPU:
# state_dict = torch.hub.load_state_dict_from_url("https://huggingface.co/KameronB/SITCC-Incident-Request-Classifier/resolve/main/tiny_bert_model.bin", map_location=torch.device('cpu'))

# Apply the fine-tuned weights and move the model to GPU if one is available
model.load_state_dict(state_dict)
model = model.to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
```
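
The class ids map to labels by position later on this card (index 0 → `INCIDENT`, index 1 → `TASK`). Optionally, you can bake that mapping into the model config at load time so `model.config.id2label` carries it; this tweak is illustrative and not part of the original card:

```python
# Optional: attach the label mapping to the config when loading
model = AutoModelForSequenceClassification.from_pretrained(
    'huawei-noah/TinyBERT_General_4L_312D',
    num_labels=2,
    id2label={0: 'INCIDENT', 1: 'TASK'},
    label2id={'INCIDENT': 0, 'TASK': 1},
)
```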


### Using the model

```python
def predict_description(model, tokenizer, text, max_length=512):
    model.eval()  # Set the model to evaluation mode
    
    # Ensure model is on the correct device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Encode the input text
    inputs = tokenizer.encode_plus(
        text,
        None,
        add_special_tokens=True,
        max_length=max_length,
        padding='max_length',
        return_token_type_ids=False,
        return_tensors='pt',
        truncation=True
    )

    # Move tensors to the correct device
    inputs = {key: value.to(device) for key, value in inputs.items()}

    # Make prediction
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=-1)
        predicted_class_id = torch.argmax(probabilities, dim=-1).item()

    return predicted_class_id, probabilities.cpu().tolist()



# Example usage

tickets = [
  """Inquiry about the possibility of customizing Docker to better meet department-specific needs. 
Gathered requirements for desired customizations.""",
  """We've encountered a recurring problem with DEVEnv shutting down anytime we try to save documents.
I looked over the error logs for any clues about what's going wrong. I'm passing this on to the team responsible for software upkeep."""
]

for ticket in tickets:
    prediction, probabilities = predict_description(model, tokenizer, ticket)
    prediction = (['INCIDENT', 'TASK'])[prediction]
    print(f"{prediction} ({probabilities}) <== {ticket}")
```
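
The helper above classifies one ticket at a time and pads every input to `max_length`. If you have many tickets, a batched variant along these lines can be noticeably faster; this is a minimal sketch, and the `predict_batch` helper is illustrative rather than part of the repository:

```python
def predict_batch(model, tokenizer, texts, max_length=512):
    """Classify a list of texts in one forward pass (sketch)."""
    model.eval()
    device = next(model.parameters()).device
    # Tokenize all texts together; pad only to the longest item in the batch
    inputs = tokenizer(texts, padding=True, truncation=True,
                       max_length=max_length, return_tensors='pt').to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    return torch.argmax(logits, dim=-1).tolist()

# e.g. predict_batch(model, tokenizer, tickets)
```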

### Additional fine-tuning

```python
import pandas as pd
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# (assumes `tokenizer` and `model` are already loaded as in "Fetching the model")

# The dataset class used to feed ticket texts and labels to the model
class TextDataset(Dataset):
    def __init__(self, descriptions, labels, tokenizer, max_len):
        self.descriptions = descriptions
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.descriptions)

    def __getitem__(self, idx):
        text = self.descriptions[idx]
        inputs = self.tokenizer.encode_plus(
            text,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',
            return_token_type_ids=False,
            truncation=True
        )
        return {
            'input_ids': torch.tensor(inputs['input_ids'], dtype=torch.long),
            'attention_mask': torch.tensor(inputs['attention_mask'], dtype=torch.long),
            'labels': torch.tensor(self.labels[idx], dtype=torch.long)
        }

# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# load the data 
df = pd.read_csv('..\\data\\final_data.csv')
df['label'] = df['type'].astype('category').cat.codes  # Convert labels to category codes if they aren't already

# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# create the training and validation sets and data loaders
print( "cuda is available" if torch.cuda.is_available() else "cuda is unavailable: running on cpu")

# Split the data into training and validation sets
train_df, val_df = train_test_split(df, test_size=0.15)

# Create PyTorch datasets
train_dataset = TextDataset(train_df['content'].tolist(), train_df['label'].tolist(), tokenizer, max_len=512)
val_dataset = TextDataset(val_df['content'].tolist(), val_df['label'].tolist(), tokenizer, max_len=512)

# Create data loaders
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32)

# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Train the model

# Only these layers will be trained; customize this list to freeze the layers you don't want to retrain
training_layers = [
    "bert.encoder.layer.3.output.dense.weight",
    "bert.encoder.layer.3.output.dense.bias",
    "bert.encoder.layer.3.output.LayerNorm.weight",
    "bert.encoder.layer.3.output.LayerNorm.bias",
    "bert.pooler.dense.weight",
    "bert.pooler.dense.bias",
    "classifier.weight",
    "classifier.bias",
]

for name, param in model.named_parameters():
    if name not in training_layers:  # Freeze every parameter not listed above
        param.requires_grad = False
        
# Training setup
optimizer = AdamW(model.parameters(), lr=5e-5)
epochs = 2

for epoch in range(epochs):
    model.train()
    progress = tqdm(train_loader, desc="Training")
    for batch in progress:
        batch = {k: v.to(model.device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Update the progress bar with the current loss
        progress.set_description(f"Training Loss: {loss.item():.4f}")

    model.eval()
    total_eval_accuracy = 0
    for batch in tqdm(val_loader, desc="Validating"):
        batch = {k: v.to(model.device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        logits = outputs.logits
        predictions = torch.argmax(logits, dim=-1)
        accuracy = (predictions == batch['labels']).cpu().numpy().mean()
        total_eval_accuracy += accuracy

    print(f"Validation Accuracy: {total_eval_accuracy / len(val_loader)}")
```
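
After fine-tuning, you can persist the updated weights so they can be reloaded the same way this card loads its checkpoint. A minimal sketch; the file name mirrors `tiny_bert_model.bin` above, so adjust the path to your needs:

```python
# Save the fine-tuned weights to disk
torch.save(model.state_dict(), 'tiny_bert_model.bin')

# Reload later the same way as in "Fetching the model":
# model.load_state_dict(torch.load('tiny_bert_model.bin'))
```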
</details>

<details>
<summary>RoBERTa-based model</summary>

### Base model
```python
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification

# Load the RoBERTa tokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

# Load the RoBERTa pre-trained base model
model = RobertaForSequenceClassification.from_pretrained('roberta-base', num_labels=2)

# Fetch the state dict that holds the fine-tuned weights
state_dict = torch.hub.load_state_dict_from_url("https://huggingface.co/KameronB/SITCC-Incident-Request-Classifier/resolve/main/pytorch_model.bin")
# If running on CPU:
# state_dict = torch.hub.load_state_dict_from_url("https://huggingface.co/KameronB/SITCC-Incident-Request-Classifier/resolve/main/pytorch_model.bin", map_location=torch.device('cpu'))

# Apply the fine-tuned weights and move the model to GPU if one is available
model.load_state_dict(state_dict)
model = model.to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))


```

### Using the model to make predictions
```python

def predict_description(model, tokenizer, text, max_length=512):
    model.eval()  # Set the model to evaluation mode
    
    # Ensure model is on the correct device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Encode the input text
    inputs = tokenizer.encode_plus(
        text,
        None,
        add_special_tokens=True,
        max_length=max_length,
        padding='max_length',
        return_token_type_ids=False,
        return_tensors='pt',
        truncation=True
    )

    # Move tensors to the correct device
    inputs = {key: value.to(device) for key, value in inputs.items()}

    # Make prediction
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=-1)
        predicted_class_id = torch.argmax(probabilities, dim=-1).item()

    return predicted_class_id


# Map the predicted class id onto its label
print((['INCIDENT', 'REQUEST'])[predict_description(model, tokenizer, """My ID card is not being detected.""")])

```
</details>