first model
- README.md +2 -0
- model.py +10 -0
- pipeline_model.py +16 -0
- simple_model.pt +3 -0
- train.py +21 -0
README.md
ADDED
@@ -0,0 +1,2 @@
+# Simple Classifier
+A toy model that classifies 10-feature input into positive/negative.
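A minimal quickstart sketch (illustrative, not part of the commit): after running train.py to produce simple_model.pt, the pipeline in pipeline_model.py takes one 10-number feature vector per example. The input values below are arbitrary.

```python
from pipeline_model import HFModel

pipe = HFModel()  # loads simple_model.pt from the working directory
print(pipe([[0.1, -0.3, 0.7, 0.0, 1.2, -0.5, 0.4, 0.9, -1.1, 0.2]]))
# e.g. [{'label': 'positive', 'score': 0.37}]
```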
model.py
ADDED
@@ -0,0 +1,10 @@
+import torch
+from torch import nn
+
+class SimpleClassifier(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.linear = nn.Linear(10, 2)  # input 10 features → 2 classes
+
+    def forward(self, x):
+        return self.linear(x)
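As a quick shape check (an illustrative sketch, not part of the commit), a forward pass maps a batch of 10-feature rows to one raw score per class:

```python
import torch
from model import SimpleClassifier

model = SimpleClassifier()
x = torch.randn(4, 10)   # batch of 4 examples, 10 features each
logits = model(x)        # raw class scores, shape (4, 2)
print(logits.shape)      # torch.Size([4, 2])
```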
pipeline_model.py
ADDED
@@ -0,0 +1,16 @@
+import torch
+from model import SimpleClassifier
+
+class HFModel:
+    def __init__(self):
+        self.model = SimpleClassifier()
+        self.model.load_state_dict(torch.load("simple_model.pt"))
+        self.model.eval()
+
+    def __call__(self, inputs):
+        # Expect a list of lists with 10 numbers
+        X = torch.tensor(inputs, dtype=torch.float32)
+        with torch.no_grad():
+            logits = self.model(X)
+        preds = torch.argmax(logits, dim=1).tolist()
+        return [{"label": "positive" if p == 1 else "negative", "score": float(logits[i][p])} for i, p in enumerate(preds)]
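A sketch of batched use (illustrative; the input values are arbitrary):

```python
from pipeline_model import HFModel

pipe = HFModel()
batch = [[0.5] * 10, [-0.5] * 10]  # two examples, 10 features each
for result in pipe(batch):
    print(result["label"], result["score"])
```

Note that score here is the raw logit of the predicted class, not a probability; applying torch.softmax to the logits would yield scores in [0, 1].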
simple_model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d51b0dd95f88f2849ac0cb9fb4e0321298fa9439409cbc5a106d825e2632977a
+size 2061
train.py
ADDED
@@ -0,0 +1,21 @@
+import torch
+from torch import nn, optim
+from model import SimpleClassifier
+
+# Dummy data (10 features → 2 classes)
+X = torch.randn(100, 10)
+y = torch.randint(0, 2, (100,))
+
+model = SimpleClassifier()
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.Adam(model.parameters())
+
+for epoch in range(10):  # tiny training loop
+    optimizer.zero_grad()
+    out = model(X)
+    loss = criterion(out, y)
+    loss.backward()
+    optimizer.step()
+    print(f"Epoch {epoch}: loss={loss.item():.4f}")
+
+torch.save(model.state_dict(), "simple_model.pt")
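For an end-to-end sanity check (an illustrative sketch, not part of the commit), the saved weights can be reloaded and scored on fresh dummy data; since the labels are random, accuracy should hover around 0.5:

```python
import torch
from model import SimpleClassifier

model = SimpleClassifier()
model.load_state_dict(torch.load("simple_model.pt"))
model.eval()

X = torch.randn(100, 10)         # fresh random inputs
y = torch.randint(0, 2, (100,))  # random labels, so ~50% accuracy expected
with torch.no_grad():
    preds = model(X).argmax(dim=1)
print(f"accuracy: {(preds == y).float().mean().item():.2f}")
```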