vlbthambawita committed on
Commit 99d8658
1 Parent(s): 17fb380

Upload DeepFakeECGFromPulse2Pulse

config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "architectures": [
+     "DeepFakeECGFromPulse2Pulse"
+   ],
+   "auto_map": {
+     "AutoConfig": "configurations_deepfake.DeepFakeConfig",
+     "AutoModel": "modeling_deepfake.DeepFakeECGFromPulse2Pulse"
+   },
+   "model_type": "pulse2pulse",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.1"
+ }
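
The auto_map block is what lets transformers resolve this custom architecture at load time: with trust_remote_code=True, AutoConfig and AutoModel import the named classes from the two Python files in this repo. A minimal loading sketch (the repo id below is a placeholder for wherever this commit lives on the Hub):

from transformers import AutoConfig, AutoModel

# Placeholder repo id -- substitute the actual Hub repository for this commit.
repo_id = "vlbthambawita/deepfake-ecg-from-pulse2pulse"
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # -> DeepFakeConfig
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)    # -> DeepFakeECGFromPulse2Pulse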
configurations_deepfake.py ADDED
@@ -0,0 +1,14 @@
+ from transformers import PretrainedConfig
+ from typing import List
+
+ class DeepFakeConfig(PretrainedConfig):
+     model_type = "pulse2pulse"
+
+     def __init__(self, **kwargs):
+         # if block_type not in ["basic", "bottleneck"]:
+         #     raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
+         # if stem_type not in ["", "deep", "deep-tiered"]:
+         #     raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")
+
+
+         super().__init__(**kwargs)
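
DeepFakeConfig pins model_type and forwards everything else to PretrainedConfig, so it serializes and round-trips like any stock transformers config. A quick sketch of that round trip:

from configurations_deepfake import DeepFakeConfig

cfg = DeepFakeConfig(torch_dtype="float32")
cfg.save_pretrained("./deepfake-ecg")                  # writes a config.json like the one above
cfg = DeepFakeConfig.from_pretrained("./deepfake-ecg")
assert cfg.model_type == "pulse2pulse"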
modeling_deepfake.py ADDED
@@ -0,0 +1,292 @@
+ from transformers import PreTrainedModel
+
+
+ # Modified version: Vajira Thambawita
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.utils.data
+ from .configurations_deepfake import DeepFakeConfig
+
+ class Transpose1dLayer(nn.Module):
+
+     def __init__(self, in_channels, out_channels, kernel_size, stride, padding=11, upsample=None, output_padding=1):
+         super(Transpose1dLayer, self).__init__()
+         self.upsample = upsample
+
+         self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
+         reflection_pad = kernel_size // 2
+         self.reflection_pad = nn.ConstantPad1d(reflection_pad, value=0)  # zero padding, despite the name
+         self.conv1d = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride)
+         self.Conv1dTrans = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, padding, output_padding)
+
+     def forward(self, x):
+         if self.upsample:
+             # x = torch.cat((x, in_feature), 1)
+             return self.conv1d(self.reflection_pad(self.upsample_layer(x)))
+         else:
+             return self.Conv1dTrans(x)
+
+ class Transpose1dLayer_multi_input(nn.Module):
+     def __init__(self, in_channels, out_channels, kernel_size, stride, padding=11, upsample=None, output_padding=1):
+         super(Transpose1dLayer_multi_input, self).__init__()
+         self.upsample = upsample
+
+         self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
+         reflection_pad = kernel_size // 2
+         self.reflection_pad = nn.ConstantPad1d(reflection_pad, value=0)
+         self.conv1d = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride)
+         self.Conv1dTrans = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, padding, output_padding)
+
+     def forward(self, x, in_feature):
+         if self.upsample:
+             x = torch.cat((x, in_feature), 1)  # concatenate the skip connection along channels
+             return self.conv1d(self.reflection_pad(self.upsample_layer(x)))
+         else:
+             return self.Conv1dTrans(x)
+
+
+ class Pulse2pulseGenerator(nn.Module):
+
+     def __init__(self, model_size=50, ngpus=1, num_channels=8,
+                  latent_dim=100, post_proc_filt_len=512,
+                  verbose=False, upsample=True):
+         super(Pulse2pulseGenerator, self).__init__()
+         self.ngpus = ngpus
+         self.model_size = model_size  # d
+         self.num_channels = num_channels  # c
+         self.latent_dim = latent_dim
+         self.post_proc_filt_len = post_proc_filt_len
+         self.verbose = verbose
+         # "Dense" here means a fully connected layer.
+         self.fc1 = nn.Linear(latent_dim, 10 * model_size)
+
+         stride = 4
+         if upsample:
+             stride = 1
+             upsample = 5
+         self.deconv_1 = Transpose1dLayer(5 * model_size, 5 * model_size, 25, stride, upsample=upsample)
+         self.deconv_2 = Transpose1dLayer_multi_input(5 * model_size * 2, 3 * model_size, 25, stride, upsample=upsample)
+         self.deconv_3 = Transpose1dLayer_multi_input(3 * model_size * 2, model_size, 25, stride, upsample=upsample)
+         # self.deconv_4 = Transpose1dLayer(model_size, model_size, 25, stride, upsample=upsample)
+         self.deconv_5 = Transpose1dLayer_multi_input(model_size * 2, int(model_size / 2), 25, stride, upsample=2)
+         self.deconv_6 = Transpose1dLayer_multi_input(int(model_size / 2) * 2, int(model_size / 5), 25, stride, upsample=upsample)
+         self.deconv_7 = Transpose1dLayer(int(model_size / 5), num_channels, 25, stride, upsample=2)
+
+         # new convolutional layers (downsampling / encoder path)
+         self.conv_1 = nn.Conv1d(num_channels, int(model_size / 5), 25, stride=2, padding=25 // 2)
+         self.conv_2 = nn.Conv1d(model_size // 5, model_size // 2, 25, stride=5, padding=25 // 2)
+         self.conv_3 = nn.Conv1d(model_size // 2, model_size, 25, stride=2, padding=25 // 2)
+         self.conv_4 = nn.Conv1d(model_size, model_size * 3, 25, stride=5, padding=25 // 2)
+         self.conv_5 = nn.Conv1d(model_size * 3, model_size * 5, 25, stride=5, padding=25 // 2)
+         self.conv_6 = nn.Conv1d(model_size * 5, model_size * 5, 25, stride=5, padding=25 // 2)
+
+         if post_proc_filt_len:
+             self.ppfilter1 = nn.Conv1d(num_channels, num_channels, post_proc_filt_len)
+
+         for m in self.modules():
+             if isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear):
+                 nn.init.kaiming_normal_(m.weight.data)
+
+     def forward(self, x):
+
+         # print("x shape:", x.shape)
+         conv_1_out = F.leaky_relu(self.conv_1(x))  # x = (bs, 8, 5000)
+         # print("conv_1_out shape:", conv_1_out.shape)
+         conv_2_out = F.leaky_relu(self.conv_2(conv_1_out))
+         # print("conv_2_out shape:", conv_2_out.shape)
+         conv_3_out = F.leaky_relu(self.conv_3(conv_2_out))
+         # print("conv_3_out shape:", conv_3_out.shape)
+         conv_4_out = F.leaky_relu(self.conv_4(conv_3_out))
+         # print("conv_4_out shape:", conv_4_out.shape)
+         conv_5_out = F.leaky_relu(self.conv_5(conv_4_out))
+         # print("conv_5_out shape:", conv_5_out.shape)
+         x = F.leaky_relu(self.conv_6(conv_5_out))
+         # print("last x shape:", x.shape)
+
+
+
+         # x = self.fc1(x).view(-1, 5 * self.model_size, 2)  # x = self.fc1(x).view(-1, 16 * self.model_size, 16)
+         # x = F.relu(x)
+         # if self.verbose:
+         #     print(x.shape)
+
+         x = F.relu(self.deconv_1(x))
+         if self.verbose:
+             print(x.shape)
+
+         x = F.relu(self.deconv_2(x, conv_5_out))
+         if self.verbose:
+             print(x.shape)
+
+         x = F.relu(self.deconv_3(x, conv_4_out))
+         if self.verbose:
+             print(x.shape)
+
+         x = F.relu(self.deconv_5(x, conv_3_out))
+         if self.verbose:
+             print(x.shape)
+
+         x = F.relu(self.deconv_6(x, conv_2_out))
+         if self.verbose:
+             print(x.shape)
+
+         output = torch.tanh(self.deconv_7(x))
+
+         if self.verbose:
+             print(output.shape)
+         return output
+
+
+ class PhaseShuffle(nn.Module):
+     """
+     Performs phase shuffling, i.e. shifting the feature axis of a 3D tensor
+     by a random integer in {-n, ..., n} and performing reflection padding
+     where necessary.
+     """
+     # Copied from https://github.com/jtcramer/wavegan/blob/master/wavegan.py#L8
+     def __init__(self, shift_factor):
+         super(PhaseShuffle, self).__init__()
+         self.shift_factor = shift_factor
+
+     def forward(self, x):
+         if self.shift_factor == 0:
+             return x
+         # uniform over {-shift_factor, ..., shift_factor}
+         k_list = torch.Tensor(x.shape[0]).random_(0, 2 * self.shift_factor + 1) - self.shift_factor
+         k_list = k_list.numpy().astype(int)
+
+         # Combine sample indices into lists so that fewer shuffle operations
+         # need to be performed
+         k_map = {}
+         for idx, k in enumerate(k_list):
+             k = int(k)
+             if k not in k_map:
+                 k_map[k] = []
+             k_map[k].append(idx)
+
+         # Make a copy of x for our output
+         x_shuffle = x.clone()
+
+         # Apply shuffle to each sample
+         for k, idxs in k_map.items():
+             if k > 0:
+                 x_shuffle[idxs] = F.pad(x[idxs][..., :-k], (k, 0), mode='reflect')
+             else:
+                 x_shuffle[idxs] = F.pad(x[idxs][..., -k:], (0, -k), mode='reflect')
+
+         assert x_shuffle.shape == x.shape, "{}, {}".format(x_shuffle.shape,
+                                                            x.shape)
+         return x_shuffle
+
+
+ class PhaseRemove(nn.Module):
+     def __init__(self):
+         super(PhaseRemove, self).__init__()
+
+     def forward(self, x):
+         pass
+
+
+ class Pulse2pulseDiscriminator(nn.Module):
+     def __init__(self, model_size=64, ngpus=1, num_channels=8, shift_factor=2,
+                  alpha=0.2, verbose=False):
+         super(Pulse2pulseDiscriminator, self).__init__()
+         self.model_size = model_size  # d
+         self.ngpus = ngpus
+         self.num_channels = num_channels  # c
+         self.shift_factor = shift_factor  # n
+         self.alpha = alpha
+         self.verbose = verbose
+
+         self.conv1 = nn.Conv1d(num_channels, model_size, 25, stride=2, padding=11)
+         self.conv2 = nn.Conv1d(model_size, 2 * model_size, 25, stride=2, padding=11)
+         self.conv3 = nn.Conv1d(2 * model_size, 5 * model_size, 25, stride=2, padding=11)
+         self.conv4 = nn.Conv1d(5 * model_size, 10 * model_size, 25, stride=2, padding=11)
+         self.conv5 = nn.Conv1d(10 * model_size, 20 * model_size, 25, stride=4, padding=11)
+         self.conv6 = nn.Conv1d(20 * model_size, 25 * model_size, 25, stride=4, padding=11)
+         self.conv7 = nn.Conv1d(25 * model_size, 100 * model_size, 25, stride=4, padding=11)
+
+         self.ps1 = PhaseShuffle(shift_factor)
+         self.ps2 = PhaseShuffle(shift_factor)
+         self.ps3 = PhaseShuffle(shift_factor)
+         self.ps4 = PhaseShuffle(shift_factor)
+         self.ps5 = PhaseShuffle(shift_factor)
+         self.ps6 = PhaseShuffle(shift_factor)
+
+         self.fc1 = nn.Linear(25000, 1)
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
+                 nn.init.kaiming_normal_(m.weight.data)
+
+     def forward(self, x):
+         x = F.leaky_relu(self.conv1(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps1(x)
+
+         x = F.leaky_relu(self.conv2(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps2(x)
+
+         x = F.leaky_relu(self.conv3(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps3(x)
+
+         x = F.leaky_relu(self.conv4(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps4(x)
+
+         x = F.leaky_relu(self.conv5(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps5(x)
+
+         x = F.leaky_relu(self.conv6(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         x = self.ps6(x)
+
+         x = F.leaky_relu(self.conv7(x), negative_slope=self.alpha)
+         if self.verbose:
+             print(x.shape)
+         # print("x shape:", x.shape)
+         x = x.view(-1, x.shape[1] * x.shape[2])
+         if self.verbose:
+             print(x.shape)
+
+         return self.fc1(x)
+
+
+ """
+ Quick shape check (commented out):
+ x = torch.randn(10, 8, 5000)
+ G = Pulse2pulseGenerator(verbose=True)
+ out = G(x)
+ print(out.shape)
+ D = Pulse2pulseDiscriminator(model_size=50, verbose=True)
+ out2 = D(out)
+ print(out2.shape)
+ """
+
+ class DeepFakeECGFromPulse2Pulse(PreTrainedModel):
+
+     config_class = DeepFakeConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         # block_layer = BLOCK_MAPPING[config.block_type]
+         self.model = Pulse2pulseGenerator(model_size=50, ngpus=1, num_channels=8,
+                                           latent_dim=100, post_proc_filt_len=512,
+                                           verbose=False, upsample=True)
+
+     def forward(self, tensor, labels=None):
+         x = self.model(tensor)
+
+         return {"x": x}
+
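
Pulse2Pulse is a U-Net-style GAN generator: the conv_* stack downsamples an input noise tensor shaped like the target signal, and the deconv_* stack upsamples it back with skip connections, so the model consumes (batch, 8, 5000) noise rather than a flat latent vector. A generation sketch, continuing from the loading example above:

import torch

noise = torch.randn(1, 8, 5000)  # (batch, leads, samples): 8-lead, 5000-sample ECG-shaped noise
with torch.no_grad():
    out = model(noise)           # forward returns a dict: {"x": generated_signal}
fake_ecg = out["x"]              # shape (1, 8, 5000), values in [-1, 1] from the final tanh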
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e646eae78b7c9e48db0d38f094059ab89b53479bbc174a900ae3086517761827
+ size 42375017
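
The pytorch_model.bin entry is a Git LFS pointer, not the weights themselves; the oid and size fields identify the ~42 MB blob that LFS (or from_pretrained) downloads in its place. A small sketch for checking a downloaded copy against the pointer:

import hashlib

with open("pytorch_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
# Should match the oid recorded in the LFS pointer above.
assert digest == "e646eae78b7c9e48db0d38f094059ab89b53479bbc174a900ae3086517761827"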