ProCreations committed
Commit 5ee4795 · 1 Parent(s): 2602368
ai.py ADDED
@@ -0,0 +1,72 @@
# ai.py

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
from datasets import load_dataset
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from tqdm import tqdm

# Load IMDb dataset
dataset = load_dataset("imdb")
texts, labels = dataset["train"]["text"], dataset["train"]["label"]

# Split the dataset into training and validation sets
train_texts, val_texts, train_labels, val_labels = train_test_split(texts, labels, test_size=0.1, random_state=42)

# Tokenize and preprocess the data
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
train_encodings = tokenizer(train_texts, truncation=True, padding=True, return_tensors="pt", max_length=256)
val_encodings = tokenizer(val_texts, truncation=True, padding=True, return_tensors="pt", max_length=256)

# Define Sentiment Analysis Model
class SentimentAnalysisModel(nn.Module):
    def __init__(self):
        super(SentimentAnalysisModel, self).__init__()
        self.distilbert = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

    def forward(self, input_ids, attention_mask):
        return self.distilbert(input_ids, attention_mask=attention_mask).logits
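# Hedged aside (not in the original commit): the wrapper above discards the
# loss that the Hugging Face classification head can compute itself. Passing
# labels into DistilBertForSequenceClassification returns an output with a
# .loss field, which would make the separate CrossEntropyLoss below optional.
# Sketch, using a hypothetical hf_model instance:
#
#   out = hf_model(input_ids, attention_mask=attention_mask, labels=labels)
#   out.loss.backward()  # built-in cross-entropy, averaged over the batch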
# Initialize model, criterion, and optimizer
model = SentimentAnalysisModel()
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=5e-5)

# Convert labels to tensors
train_labels = torch.tensor(train_labels)
val_labels = torch.tensor(val_labels)

# Prepare DataLoaders
train_dataset = torch.utils.data.TensorDataset(train_encodings["input_ids"], train_encodings["attention_mask"], train_labels)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

val_dataset = torch.utils.data.TensorDataset(val_encodings["input_ids"], val_encodings["attention_mask"], val_labels)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)

# Train the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

num_epochs = 5  # number of training epochs
for epoch in range(num_epochs):
    model.train()
    total_loss = 0.0

    for input_ids, attention_mask, labels in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{num_epochs}"):
        input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    print(f"Epoch {epoch + 1}/{num_epochs}, Average Loss: {total_loss / len(train_loader)}")
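# Hedged addition (a sketch, not part of the commit): val_loader is built
# above but never consumed. A simple validation pass to report accuracy:
model.eval()
correct, total = 0, 0
with torch.no_grad():  # no gradients needed during evaluation
    for input_ids, attention_mask, labels in val_loader:
        input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
        logits = model(input_ids, attention_mask=attention_mask)
        preds = logits.argmax(dim=-1)  # predicted class index per example
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f"Validation accuracy: {correct / total:.4f}")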
# Save the trained model
torch.save(model.state_dict(), "sentiment_analysis_model.pth")
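# Hedged usage sketch (not part of the original commit): reload the saved
# weights and classify one review. IMDb's label convention is 0 = negative,
# 1 = positive.
loaded = SentimentAnalysisModel()
loaded.load_state_dict(torch.load("sentiment_analysis_model.pth", map_location=device))
loaded.to(device)
loaded.eval()

sample = tokenizer("A surprisingly heartfelt film with great performances.",
                   return_tensors="pt", truncation=True, max_length=256)
with torch.no_grad():
    logits = loaded(sample["input_ids"].to(device), attention_mask=sample["attention_mask"].to(device))
print("Predicted sentiment:", "positive" if logits.argmax(dim=-1).item() == 1 else "negative")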