MarkProMaster229 committed on
Commit
4564d0a
·
verified ·
1 Parent(s): e236cbb
Files changed (2) hide show
  1. cnn_letters.safetensors +2 -2
  2. dowload.py +39 -21
cnn_letters.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5fb7a8a2aa2b31911683753a2c1ec8ec9e2654b5d022d91bd6f096a2bead31ed
3
- size 428216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fff965569b21ebb62dc528fd298d06ce10ad823870d14166a462126ea4337d96
3
+ size 159969280
dowload.py CHANGED
@@ -2,46 +2,64 @@ import torch
2
  import torch.nn as nn
3
  import torch.nn.functional as F
4
  from safetensors.torch import load_file
 
 
 
 
5
 
6
  class CNN(nn.Module):
7
  def __init__(self):
8
  super(CNN, self).__init__()
9
- self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
10
- self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
11
- self.fc1 = nn.Linear(32*7*7, 64)
12
- self.fc2 = nn.Linear(64, 26)
 
 
 
 
 
13
 
14
  def forward(self, x):
15
  x = F.relu(self.conv1(x))
16
- x = F.max_pool2d(x, 2)
17
  x = F.relu(self.conv2(x))
18
  x = F.max_pool2d(x, 2)
 
 
 
 
19
  x = x.view(x.size(0), -1)
20
  x = F.relu(self.fc1(x))
21
- x = self.fc2(x)
 
 
22
  return x
23
-
24
- model = CNN()
25
  weights_dict = load_file("cnn_letters.safetensors")
26
  model.load_state_dict(weights_dict)
27
  model.eval()
28
 
29
  #using
30
 
31
- #from PIL import Image
32
- #from torchvision import transforms
33
  #get you image
34
- #img = Image.open("my_letter.png").convert("L")
35
 
36
- #transform = transforms.Compose([
37
- # transforms.Resize((28,28)),
38
- # transforms.ToTensor(),
39
- # transforms.Normalize((0.5,), (0.5,))
40
- #])
41
 
42
- #x = transform(img).unsqueeze(0)
43
 
44
- #with torch.no_grad():
45
- # output = model(x)
46
- # pred = output.argmax(dim=1)
47
- #print(f"Predicted class: {pred.item() + 1}")
 
 
 
 
2
  import torch.nn as nn
3
  import torch.nn.functional as F
4
  from safetensors.torch import load_file
5
+ from PIL import Image
6
+ from torchvision import transforms
7
+ import string
8
+ torch.set_num_threads(20)
9
 
10
  class CNN(nn.Module):
11
  def __init__(self):
12
  super(CNN, self).__init__()
13
+ self.conv1 = nn.Conv2d(1,64,kernel_size=3,padding=1)
14
+ self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
15
+ self.conv3 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
16
+ self.conv4 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
17
+ self.conv5 = nn.Conv2d(512, 1024, kernel_size=3, padding=1)
18
+ self.fc1 = nn.Linear(1024*8*8, 512)
19
+ self.fc2 = nn.Linear(512, 256)
20
+ self.fc3 = nn.Linear(256, 128)
21
+ self.fc4 = nn.Linear(128, 26)
22
 
23
  def forward(self, x):
24
  x = F.relu(self.conv1(x))
25
+ x = F.max_pool2d(x,2)
26
  x = F.relu(self.conv2(x))
27
  x = F.max_pool2d(x, 2)
28
+ x = F.relu(self.conv3(x))
29
+ x = F.max_pool2d(x, 2)
30
+ x = F.relu(self.conv4(x))
31
+ x = F.relu(self.conv5(x))
32
  x = x.view(x.size(0), -1)
33
  x = F.relu(self.fc1(x))
34
+ x = F.relu(self.fc2(x))
35
+ x = F.relu(self.fc3(x))
36
+ x = self.fc4(x)
37
  return x
38
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
39
+ model = CNN().to(device)
40
  weights_dict = load_file("cnn_letters.safetensors")
41
  model.load_state_dict(weights_dict)
42
  model.eval()
43
 
44
  #using
45
 
46
+ from PIL import Image
47
+ from torchvision import transforms
48
  #get you image
49
+ img = Image.open("my_letter.png").convert("L")
50
 
51
+ transform = transforms.Compose([
52
+ transforms.Resize((64,64)),
53
+ transforms.ToTensor(),
54
+ transforms.Normalize((0.5,), (0.5,))
55
+ ])
56
 
57
+ x = transform(img).unsqueeze(0).to(device)
58
 
59
+ with torch.no_grad():
60
+ output = model(x)
61
+ pred_idx = output.argmax(dim=1).item()
62
+
63
+ letters = list(string.ascii_uppercase)
64
+ pred_letter = letters[pred_idx]
65
+ print(f"Predicted class: {pred_idx + 1}, Letter: {pred_letter}")