naterawdata@gmail.com committed on
Commit
3682607
1 Parent(s): e083087

commit files to HF hub

Browse files
Files changed (3) hide show
  1. hf_src/src/model.py +6 -11
  2. pytorch_model.bin +2 -2
  3. requirements.txt +3 -0
hf_src/src/model.py CHANGED
@@ -1,13 +1,13 @@
1
  from argparse import Namespace
2
- from typing import Union, List, Tuple
3
 
4
  import torch.nn.functional as F
5
- from auto_anything import ModelHubMixin
6
  from torch import nn
7
 
 
8
 
9
- class Dense(nn.Module):
10
 
 
11
  def __init__(self, input_dim, output_dim, bias=True, activation=nn.LeakyReLU, **kwargs):
12
  super().__init__()
13
  self.fc = nn.Linear(input_dim, output_dim, bias=bias)
@@ -26,8 +26,9 @@ class Encoder(nn.Module):
26
  super().__init__()
27
  dims = (input_dim,) + dims
28
  self.layers = nn.Sequential(
29
- *[Dense(dims[i], dims[i+1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
30
  )
 
31
  def forward(self, x):
32
  return self.layers(x)
33
 
@@ -39,12 +40,12 @@ class Decoder(nn.Module):
39
  *[Dense(dims[i], dims[i + 1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
40
  + [Dense(dims[-1], output_dim, activation=nn.Sigmoid)]
41
  )
 
42
  def forward(self, x):
43
  return self.layers(x)
44
 
45
 
46
  class Autoencoder(nn.Module, ModelHubMixin):
47
-
48
  def __init__(self, input_dim: int = 784, hidden_dims: Tuple[int] = (256, 64, 16, 4, 2)):
49
  super().__init__()
50
  self.config = Namespace(input_dim=input_dim, hidden_dims=hidden_dims)
@@ -57,9 +58,3 @@ class Autoencoder(nn.Module, ModelHubMixin):
57
  recon = self.decoder(latent)
58
  loss = F.mse_loss(recon, x)
59
  return recon, latent, loss
60
-
61
- # def save_pretrained(self, save_directory, **kwargs):
62
- # # assert 'config' not in kwargs, \
63
- # # "save_pretrained handles passing model config for you, please dont pass it"
64
- # super().save_pretrained(save_directory, config=self.config.__dict__, **kwargs)
65
- # # super().save_pretrained(save_directory, **kwargs)
1
  from argparse import Namespace
2
+ from typing import List, Tuple, Union
3
 
4
  import torch.nn.functional as F
 
5
  from torch import nn
6
 
7
+ from auto_anything import ModelHubMixin
8
 
 
9
 
10
+ class Dense(nn.Module):
11
  def __init__(self, input_dim, output_dim, bias=True, activation=nn.LeakyReLU, **kwargs):
12
  super().__init__()
13
  self.fc = nn.Linear(input_dim, output_dim, bias=bias)
26
  super().__init__()
27
  dims = (input_dim,) + dims
28
  self.layers = nn.Sequential(
29
+ *[Dense(dims[i], dims[i + 1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
30
  )
31
+
32
  def forward(self, x):
33
  return self.layers(x)
34
 
40
  *[Dense(dims[i], dims[i + 1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
41
  + [Dense(dims[-1], output_dim, activation=nn.Sigmoid)]
42
  )
43
+
44
  def forward(self, x):
45
  return self.layers(x)
46
 
47
 
48
  class Autoencoder(nn.Module, ModelHubMixin):
 
49
  def __init__(self, input_dim: int = 784, hidden_dims: Tuple[int] = (256, 64, 16, 4, 2)):
50
  super().__init__()
51
  self.config = Namespace(input_dim=input_dim, hidden_dims=hidden_dims)
58
  recon = self.decoder(latent)
59
  loss = F.mse_loss(recon, x)
60
  return recon, latent, loss
 
 
 
 
 
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d1b0411d22b3f34529c90197908d55b5f94a349ff49a811ccfdc6f161ae47e25
3
- size 6453930
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4672a5e4aecae6d3e13d33241f8b31eae63e44a95b94254adca32702923f2ad3
3
+ size 6453931
requirements.txt CHANGED
@@ -0,0 +1,3 @@
 
 
 
1
+ huggingface-hub
2
+ torch
3
+ torchvision