drjieliu committed on
Commit
2e9cf56
1 Parent(s): 890b6a3

Upload 31 files

cop/__init__.py ADDED
File without changes
cop/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (153 Bytes).
 
cop/__pycache__/hic_model.cpython-39.pyc ADDED
Binary file (7.57 kB).
 
cop/__pycache__/micro_model.cpython-39.pyc ADDED
Binary file (4.16 kB).
 
cop/hic_model.py ADDED
@@ -0,0 +1,207 @@
+ import os,sys
+ from pretrain.track.layers import AttentionPool,CNN
+ from pretrain.track.transformers import Transformer
+ from einops.layers.torch import Rearrange
+ from einops import rearrange
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+
+ import torchvision.transforms as T
+
+ class Convblock(nn.Module):
+     def __init__(self,in_channel,kernel_size,dilate_size,dropout=0.1):
+         super().__init__()
+         self.conv=nn.Sequential(
+             nn.Conv2d(
+                 in_channel, in_channel,
+                 kernel_size, padding=self.pad(kernel_size, dilate_size),
+                 dilation=dilate_size),
+             nn.GroupNorm(16, in_channel),
+             nn.Dropout(dropout)
+         )
+     def pad(self,kernelsize, dialte_size):
+         return (kernelsize - 1) * dialte_size // 2
+     def symmetric(self,x):
+         return (x + x.permute(0,1,3,2)) / 2
+     def forward(self,x):
+         identity=x
+         out=self.conv(x)
+         x=out+identity
+         x=self.symmetric(x)
+         return F.relu(x)
+
+ class dilated_tower(nn.Module):
+     def __init__(self,embed_dim,in_channel=48,kernel_size=9,dilate_rate=4):
+         super().__init__()
+         dilate_convs=[]
+         for i in range(dilate_rate+1):
+             dilate_convs.append(
+                 Convblock(in_channel,kernel_size=kernel_size,dilate_size=2**i))
+
+         self.cnn=nn.Sequential(
+             Rearrange('b l n d -> b d l n'),
+             nn.Conv2d(embed_dim, in_channel, kernel_size=1),
+             *dilate_convs,
+             nn.Conv2d(in_channel, in_channel, kernel_size=1),
+             Rearrange('b d l n -> b l n d'),
+         )
+     def forward(self,x,crop):
+         x=self.cnn(x)
+         x=x[:,crop:-crop,crop:-crop,:]
+         return x
+
+ class discriminator(nn.Module):
+     def __init__(self,):
+         super(discriminator, self).__init__()
+         self.conv=nn.Sequential(
+             nn.Conv2d(1,32,kernel_size=7,padding=3),
+             nn.ReLU(),
+             nn.MaxPool1d(4),
+             nn.Conv2d(32,64,kernel_size=7,padding=3),
+             nn.ReLU(),
+             nn.MaxPool1d(4),
+             nn.Conv2d(64, 64, kernel_size=1),
+         )
+         self.linear=nn.Linear(64,1)
+
+     def complete_mat(self,x,smooth=True):
+         tmp=x.copy()
+         tmp.fill_diagonal_(0)
+         if smooth:
+             t = T.GaussianBlur(kernel_size=5, sigma=0.5)
+             tmp = t(tmp.T + x)
+         else:
+             tmp=tmp.T + x
+         return tmp
+     def forward(self,x1,x2):
+         x1,x2=self.complete_mat(x1),self.complete_mat(x2)
+         diff_mat=(x1-x2)**2
+         diff_mat=self.conv(diff_mat).mean(dim=(-2, -1))
+         return self.linear(diff_mat)
+
+
+
+ class Tranmodel(nn.Module):
+     def __init__(self, backbone, transfomer):
+         super().__init__()
+         self.backbone = backbone
+         self.transformer = transfomer
+         hidden_dim = transfomer.d_model
+         self.input_proj = nn.Conv1d(backbone.num_channels, hidden_dim, kernel_size=1)
+     def forward(self, input):
+         input=rearrange(input,'b n c l -> (b n) c l')
+         src = self.backbone(input)
+         src=self.input_proj(src)
+         src = self.transformer(src)
+         return src
+
+ class finetunemodel(nn.Module):
+     def __init__(
+         self,
+         pretrain_model,
+         hidden_dim,
+         embed_dim,
+         device,
+         bins=200,
+         in_dim=64,
+         max_bin=10,
+         crop=4,
+         output_dim=1
+     ):
+         super().__init__()
+         self.pretrain_model = pretrain_model
+         self.bins = bins
+         self.max_bin = max_bin
+         self.attention_pool = AttentionPool(hidden_dim)
+         self.crop = crop
+         self.project = nn.Sequential(
+             Rearrange('(b n) c -> b c n', n=bins * 5),
+             nn.Conv1d(hidden_dim, hidden_dim, kernel_size=15, padding=7, groups=hidden_dim),
+             nn.InstanceNorm1d(hidden_dim, affine=True),
+             nn.Conv1d(hidden_dim, embed_dim, kernel_size=1),
+             nn.ReLU(inplace=True),
+             nn.Dropout(0.2)
+         )
+
+         self.cnn = nn.Sequential(
+             nn.Conv1d(embed_dim, embed_dim, kernel_size=15, padding=7),
+             nn.GroupNorm(32, embed_dim),
+             nn.MaxPool1d(kernel_size=5, stride=5),
+             nn.ReLU(inplace=True),
+             nn.Conv1d(embed_dim, embed_dim, kernel_size=1),
+             nn.Dropout(0.2),
+             Rearrange('b c n -> b n c')
+         )
+         encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=4, dim_feedforward=2 * embed_dim,
+                                                    batch_first=True, norm_first=True)
+         self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=4)
+
+         self.distance_embed = nn.Embedding(max_bin + 1, embed_dim)
+
+         self.dilate_tower = dilated_tower(embed_dim=embed_dim, in_channel=in_dim)
+         self.prediction_head1 = nn.Linear(in_dim, output_dim)
+         self.dist_dropout = nn.Dropout(0.1)
+         self.device = device
+
+     def position_matrix(self, bins, b, maxbin):
+         pos1 = np.tile(np.arange(bins), (bins, 1))
+         pos2 = pos1.T
+         pos = np.abs(pos1 - pos2)
+         pos = np.where(pos > maxbin, maxbin, pos)
+         pos = np.tile(pos, (b, 1, 1))
+         return torch.tensor(pos).long().to(self.device)
+
+     def upper_tri(self, x, bins):
+         triu_tup = np.triu_indices(bins)
+         d = np.array(list(triu_tup[1] + bins * triu_tup[0]))
+         return x[:, d, :]
+
+     def output_head(self, x, dist_embed, bins):
+         x1 = torch.tile(x.unsqueeze(1), (1, bins, 1, 1))
+         x2 = x1.permute(0, 2, 1, 3)
+         mean_out = (x1 + x2) / 2
+         dot_out = x1 * x2
+         return mean_out + dot_out + dist_embed
+
+     def forward(self, x):
+         b = x.shape[0]
+         x = self.pretrain_model(x)
+         x = self.attention_pool(x)
+         x = self.project(x)
+         x = self.cnn(x)
+         x = self.transformer(x)
+         dist_embed = self.dist_dropout(self.distance_embed(self.position_matrix(self.bins, b=b, maxbin=self.max_bin)))
+         x = self.output_head(x, dist_embed, self.bins)
+         x = self.dilate_tower(x, self.crop)
+         x = rearrange(x, 'b l n d -> b (l n) d')
+         x = self.upper_tri(x, self.bins - 2 * self.crop)
+         x = self.prediction_head1(x)
+         return x
+
+ def build_backbone():
+     model = CNN()
+     return model
+ def build_transformer(args):
+     return Transformer(
+         d_model=args.hidden_dim,
+         dropout=args.dropout,
+         nhead=args.nheads,
+         dim_feedforward=args.dim_feedforward,
+         num_encoder_layers=args.enc_layers,
+         num_decoder_layers=args.dec_layers
+     )
+
+
+ def build_hic_model(args):
+     backbone = build_backbone()
+     transformer = build_transformer(args)
+     pretrain_model = Tranmodel(
+         backbone=backbone,
+         transfomer=transformer,
+     )
+
+     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+     model=finetunemodel(pretrain_model,hidden_dim=args.hidden_dim,embed_dim=args.embed_dim,device=device,bins=args.bins,crop=args.crop,output_dim=3)
+     return model
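
For orientation, a minimal sketch of how build_hic_model might be driven. The attribute names follow the args accesses above (hidden_dim, dropout, nheads, dim_feedforward, enc_layers, dec_layers, embed_dim, bins, crop); the concrete values are placeholders, not taken from this commit.

# Hypothetical usage sketch; values are illustrative only.
from argparse import Namespace
from cop.hic_model import build_hic_model

args = Namespace(
    hidden_dim=512, dropout=0.1, nheads=4, dim_feedforward=1024,
    enc_layers=2, dec_layers=2,        # read by build_transformer
    embed_dim=256, bins=200, crop=4,   # read by finetunemodel
)
model = build_hic_model(args)
print(sum(p.numel() for p in model.parameters()), "parameters")

Note that the forward pass rearranges 'b n c l -> (b n) c l' and then projects with Rearrange('(b n) c -> b c n', n=bins * 5), so the model expects one-hot sequence input shaped (batch, bins * 5, 5, sequence_length).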
cop/micro_model.py ADDED
@@ -0,0 +1,111 @@
+ import os,sys
+ import math
+ from pretrain.track.model import build_track_model
+ import torch.nn as nn
+ import torch
+ import torch.nn.functional as F
+ from einops.layers.torch import Rearrange
+ from einops import rearrange
+ import numpy as np
+
+ class Convblock(nn.Module):
+     def __init__(self,in_channel,kernel_size,dilate_size,dropout=0.1):
+         super().__init__()
+         self.conv=nn.Sequential(
+             nn.Conv2d(
+                 in_channel, in_channel,
+                 kernel_size, padding=self.pad(kernel_size,1)),
+             nn.GroupNorm(16, in_channel),
+             nn.ReLU(),
+             nn.Dropout(dropout),
+             nn.Conv2d(
+                 in_channel, in_channel,
+                 kernel_size, padding=self.pad(kernel_size, dilate_size),
+                 dilation=dilate_size),
+         )
+     def pad(self,kernelsize, dialte_size):
+         return (kernelsize - 1) * dialte_size // 2
+     def symmetric(self,x):
+         return (x + x.permute(0,1,3,2)) / 2
+     def forward(self,x):
+         identity=x
+         out=self.conv(x)
+         x=out+identity
+         x=self.symmetric(x)
+         return F.relu(x)
+
+ class dilated_tower(nn.Module):
+     def __init__(self,embed_dim,in_channel=64,kernel_size=7,dilate_rate=5):
+         super().__init__()
+         dilate_convs=[]
+         for i in range(dilate_rate+1):
+             dilate_convs.append(
+                 Convblock(in_channel,kernel_size=kernel_size,dilate_size=2**i))
+
+         self.cnn=nn.Sequential(
+             Rearrange('b l n d -> b d l n'),
+             nn.Conv2d(embed_dim, in_channel, kernel_size=1),
+             *dilate_convs,
+             nn.Conv2d(in_channel, in_channel, kernel_size=1),
+             Rearrange('b d l n -> b l n d'),
+         )
+     def forward(self,x,crop):
+         x=self.cnn(x)
+         x=x[:,crop:-crop,crop:-crop,:]
+         return x
+
+ class Downstream_microc_model(nn.Module):
+     def __init__(
+         self,
+         pretrain_model,
+         embed_dim,
+         hidden_dim=256,
+         in_dim=64,
+         crop=10,
+     ):
+         super().__init__()
+         self.project = nn.Sequential(
+             nn.Linear(embed_dim, 512),
+             nn.ReLU(),
+             nn.Linear(512, hidden_dim),
+         )
+
+         self.pretrain_model=pretrain_model
+         self.dilate_tower = dilated_tower(embed_dim=hidden_dim, in_channel=in_dim,dilate_rate=5)
+         self.prediction_head = nn.Linear(in_dim, 1)
+         self.crop=crop
+
+     def output_head(self, x):
+         bins=x.shape[1]
+         x1 = torch.tile(x.unsqueeze(1), (1, bins, 1, 1))
+         x2 = x1.permute(0, 2, 1, 3)
+         mean_out = (x1 + x2) / 2
+         dot_out = (x1 * x2)/math.sqrt(x.shape[-1])
+         return mean_out + dot_out
+     def upper_tri(self, x,bins):
+         triu_tup = np.triu_indices(bins)
+         d = np.array(list(triu_tup[1] + bins * triu_tup[0]))
+         return x[:, d, :]
+
+     def forward(self,x):
+         x=self.pretrain_model(x)
+         x=self.project(x)
+         x = self.output_head(x)
+         x = self.dilate_tower(x, self.crop)
+         bins = x.shape[1]
+         x = rearrange(x, 'b l n d -> b (l n) d')
+         x = self.upper_tri(x,bins)
+         x = self.prediction_head(x)
+         return x
+
+
+
+ def build_microc_model(args):
+     pretrain_model=build_track_model(args)
+     model=Downstream_microc_model(
+         pretrain_model=pretrain_model,
+         embed_dim=args.embed_dim,
+         crop=args.crop
+     )
+     return model
+
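
The output_head/upper_tri pair above builds a symmetric bin-by-bin map from per-bin embeddings and then keeps only its upper triangle. A self-contained sketch of that indexing, independent of the pretrained track model built in pretrain/track/model.py:

# Standalone illustration of the pairwise map + upper-triangle flattening used above.
import math
import numpy as np
import torch

bins, dim = 6, 4
x = torch.randn(2, bins, dim)                      # (batch, bins, embed)
x1 = torch.tile(x.unsqueeze(1), (1, bins, 1, 1))   # (batch, bins, bins, embed)
x2 = x1.permute(0, 2, 1, 3)
pairwise = (x1 + x2) / 2 + (x1 * x2) / math.sqrt(dim)

triu = np.triu_indices(bins)
idx = torch.as_tensor(triu[1] + bins * triu[0])    # row-major indices of the upper triangle
upper = pairwise.reshape(2, bins * bins, dim)[:, idx, :]
print(upper.shape)                                 # torch.Size([2, 21, 4])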
data/.DS_Store ADDED
Binary file (6.15 kB).
 
data/black_list.bed ADDED
@@ -0,0 +1,910 @@
1
+ chr1 628903 635104
2
+ chr1 5850087 5850571
3
+ chr1 8909610 8910014
4
+ chr1 9574580 9574997
5
+ chr1 32043823 32044203
6
+ chr1 33818964 33819344
7
+ chr1 38674335 38674715
8
+ chr1 50017081 50017546
9
+ chr1 52996949 52997329
10
+ chr1 55372488 55372869
11
+ chr1 67971776 67972156
12
+ chr1 73258720 73259100
13
+ chr1 76971068 76971595
14
+ chr1 93936365 93936747
15
+ chr1 93937447 93937827
16
+ chr1 102160407 102160787
17
+ chr1 103620975 103621378
18
+ chr1 106803432 106803816
19
+ chr1 106804021 106804224
20
+ chr1 106804753 106805343
21
+ chr1 121609948 125063427
22
+ chr1 125166231 125184683
23
+ chr1 143184599 143276861
24
+ chr1 146992422 146992802
25
+ chr1 158449073 158449453
26
+ chr1 158872114 158872494
27
+ chr1 159295111 159295493
28
+ chr1 169473895 169474338
29
+ chr1 170006204 170006584
30
+ chr1 172710350 172710732
31
+ chr1 181422611 181423158
32
+ chr1 191961694 191962163
33
+ chr1 195288048 195288429
34
+ chr1 199487949 199488149
35
+ chr1 214709795 214710175
36
+ chr1 215499615 215500014
37
+ chr1 226652017 226652398
38
+ chr1 227699752 227700133
39
+ chr1 229019365 229019745
40
+ chr1 233139985 233140365
41
+ chr1 235520204 235520404
42
+ chr1 235537405 235537785
43
+ chr1 235538899 235540112
44
+ chr1 235540243 235540623
45
+ chr1 235540886 235541649
46
+ chr1 235870625 235871005
47
+ chr1 237940595 237940979
48
+ chr1 237941045 237941514
49
+ chr1 237941893 237942746
50
+ chr1 237943028 237943416
51
+ chr1 237943490 237945232
52
+ chr1 237945285 237946507
53
+ chr1 237948983 237949365
54
+ chr1 237951294 237951802
55
+ chr10 2235555 2235756
56
+ chr10 19746628 19747247
57
+ chr10 19747314 19748342
58
+ chr10 25638376 25638756
59
+ chr10 26873147 26873538
60
+ chr10 30565118 30565501
61
+ chr10 36432964 36433344
62
+ chr10 36434047 36435188
63
+ chr10 37600616 37601002
64
+ chr10 37601246 37601787
65
+ chr10 37601884 37602850
66
+ chr10 38481300 38596500
67
+ chr10 38782600 38967900
68
+ chr10 39000365 41916630
69
+ chr10 42066792 42104971
70
+ chr10 45577925 45578305
71
+ chr10 46706229 46706611
72
+ chr10 47633790 47634172
73
+ chr10 55597861 55600059
74
+ chr10 55626794 55627174
75
+ chr10 57668682 57669062
76
+ chr10 59261793 59262173
77
+ chr10 69590538 69590738
78
+ chr10 69591475 69591858
79
+ chr10 69592355 69592740
80
+ chr10 69592776 69593482
81
+ chr10 69594378 69594760
82
+ chr10 69595141 69595573
83
+ chr10 69595681 69596061
84
+ chr10 77166388 77166768
85
+ chr10 79411056 79411468
86
+ chr10 89786504 89786889
87
+ chr10 100057235 100058064
88
+ chr10 112894488 112894870
89
+ chr10 115056512 115056712
90
+ chr10 123032371 123032751
91
+ chr10 125819621 125820001
92
+ chr10 133689373 133689523
93
+ chr11 8023287 8023667
94
+ chr11 10507706 10510499
95
+ chr11 10768339 10768719
96
+ chr11 10815184 10815384
97
+ chr11 24839563 24839944
98
+ chr11 27850562 27850942
99
+ chr11 47323881 47324333
100
+ chr11 50424039 50813393
101
+ chr11 51081363 54424064
102
+ chr11 64187168 64187556
103
+ chr11 65069483 65069863
104
+ chr11 73510500 73510992
105
+ chr11 81551734 81551934
106
+ chr11 81553835 81554282
107
+ chr11 81556152 81556537
108
+ chr11 81556717 81557101
109
+ chr11 87813427 87814320
110
+ chr11 87815683 87816063
111
+ chr11 103270627 103271007
112
+ chr11 103403270 103403650
113
+ chr11 103404014 103404527
114
+ chr11 103404779 103405289
115
+ chr11 103405809 103406376
116
+ chr11 103406653 103407036
117
+ chr11 103407110 103407310
118
+ chr11 103408089 103409893
119
+ chr11 103410074 103411211
120
+ chr11 110876919 110877308
121
+ chr11 114021166 114021546
122
+ chr11 123003425 123003857
123
+ chr11 123139919 123140301
124
+ chr12 9923 10481
125
+ chr12 2539174 2539982
126
+ chr12 3887955 3888335
127
+ chr12 19795477 19795864
128
+ chr12 20769413 20769432
129
+ chr12 21052950 21053330
130
+ chr12 22005655 22006093
131
+ chr12 27925108 27925488
132
+ chr12 31247541 31247923
133
+ chr12 31247963 31248343
134
+ chr12 34665177 37429869
135
+ chr12 40286245 40286625
136
+ chr12 41363462 41363903
137
+ chr12 41698591 41698971
138
+ chr12 41699048 41699573
139
+ chr12 49817252 49817634
140
+ chr12 62773865 62774257
141
+ chr12 80623983 80624183
142
+ chr12 101486970 101487350
143
+ chr12 123053921 123054301
144
+ chr12 126583199 126583772
145
+ chr12 126584137 126584530
146
+ chr12 130315425 130315904
147
+ chr13 16226300 18171400
148
+ chr13 25984718 25984918
149
+ chr13 31866923 31867303
150
+ chr13 33516898 33517278
151
+ chr13 36065385 36065836
152
+ chr13 40768206 40768595
153
+ chr13 53891451 53891831
154
+ chr13 55971453 55971922
155
+ chr13 56688341 56688749
156
+ chr13 72344211 72344591
157
+ chr13 75592084 75592468
158
+ chr13 83688313 83688693
159
+ chr13 84521524 84522274
160
+ chr13 84522848 84523233
161
+ chr13 88308157 88308357
162
+ chr13 95692549 95692935
163
+ chr13 95693013 95693215
164
+ chr13 95694449 95695698
165
+ chr13 95696145 95696512
166
+ chr13 105488067 105488448
167
+ chr13 107058662 107059042
168
+ chr13 109423944 109424560
169
+ chr14 16000600 18173660
170
+ chr14 23426306 23426691
171
+ chr14 32483953 32485298
172
+ chr14 37490106 37490486
173
+ chr14 40643840 40644220
174
+ chr14 43116742 43117122
175
+ chr14 45238635 45239016
176
+ chr14 45430378 45430758
177
+ chr14 46048457 46048837
178
+ chr14 46847040 46847420
179
+ chr14 51587295 51587847
180
+ chr14 83587331 83587894
181
+ chr14 83588229 83589060
182
+ chr14 84171262 84171729
183
+ chr14 84171838 84172846
184
+ chr14 84173508 84173969
185
+ chr14 84174279 84174691
186
+ chr14 86498937 86499317
187
+ chr15 17058500 19838644
188
+ chr15 30477565 30477945
189
+ chr15 32529779 32530159
190
+ chr15 34715310 34715692
191
+ chr15 35396110 35396495
192
+ chr15 40133887 40134759
193
+ chr15 41157028 41157408
194
+ chr15 52100391 52100771
195
+ chr15 54583731 54584111
196
+ chr15 58152409 58153114
197
+ chr15 58153292 58153690
198
+ chr15 58155859 58156155
199
+ chr15 67040730 67041122
200
+ chr15 91960163 91960543
201
+ chr16 3367430 3368546
202
+ chr16 3369658 3370039
203
+ chr16 3370150 3370542
204
+ chr16 3370932 3371445
205
+ chr16 3371688 3372222
206
+ chr16 10719290 10720105
207
+ chr16 10720417 10720781
208
+ chr16 10721235 10721874
209
+ chr16 10721900 10722280
210
+ chr16 10723423 10723623
211
+ chr16 10723815 10724200
212
+ chr16 10724415 10724654
213
+ chr16 20720929 20721312
214
+ chr16 20721365 20721746
215
+ chr16 20722103 20722552
216
+ chr16 34071571 34071629
217
+ chr16 34131996 34289269
218
+ chr16 34571482 34597852
219
+ chr16 34661168 34661267
220
+ chr16 34919141 34919184
221
+ chr16 35966577 38269112
222
+ chr16 38275767 38280684
223
+ chr16 46380676 46381095
224
+ chr16 46386376 46386491
225
+ chr16 46388622 46389053
226
+ chr16 46390180 46390788
227
+ chr16 46394471 46395088
228
+ chr16 46398828 46401647
229
+ chr16 60470624 60471006
230
+ chr16 65701465 65701846
231
+ chr16 67590312 67590692
232
+ chr16 69358523 69358990
233
+ chr16 73161120 73161500
234
+ chr16 82119745 82120125
235
+ chr17 141682 142062
236
+ chr17 14171308 14171688
237
+ chr17 15568187 15568567
238
+ chr17 19597515 19597985
239
+ chr17 19598613 19599532
240
+ chr17 19599799 19600210
241
+ chr17 19600300 19602064
242
+ chr17 19602160 19602545
243
+ chr17 19602886 19603595
244
+ chr17 19603847 19604047
245
+ chr17 19604922 19605588
246
+ chr17 20851029 20851409
247
+ chr17 21851150 21992060
248
+ chr17 22519042 22520149
249
+ chr17 22520322 22521025
250
+ chr17 22521116 22526407
251
+ chr17 22526636 22530152
252
+ chr17 22530381 22532156
253
+ chr17 22532315 22532940
254
+ chr17 22551066 22551446
255
+ chr17 22813591 26716670
256
+ chr17 26885752 26885795
257
+ chr17 35654769 35655182
258
+ chr17 43251640 43251763
259
+ chr17 43309853 43310048
260
+ chr17 43315021 43316491
261
+ chr17 43997535 43997957
262
+ chr17 53105552 53106565
263
+ chr17 54902920 54903301
264
+ chr17 59279406 59279787
265
+ chr17 63076394 63076777
266
+ chr17 63393238 63393438
267
+ chr17 65555244 65555624
268
+ chr17 72316258 72316638
269
+ chr17 80617407 80617802
270
+ chr18 2842087 2842534
271
+ chr18 8103913 8104113
272
+ chr18 8846332 8846713
273
+ chr18 15457976 20865732
274
+ chr18 34571460 34571840
275
+ chr18 47853089 47853617
276
+ chr18 52883627 52884007
277
+ chr18 59288306 59288686
278
+ chr18 61874562 61874960
279
+ chr18 77455900 77456280
280
+ chr19 246899 247452
281
+ chr19 12105016 12105399
282
+ chr19 13362989 13363369
283
+ chr19 24182199 27257542
284
+ chr19 27741787 27741868
285
+ chr19 36271917 36272148
286
+ chr19 37572465 37572846
287
+ chr19 37576134 37576516
288
+ chr19 46122944 46123324
289
+ chr19 47941356 47941426
290
+ chr19 54794749 54795129
291
+ chr19 56691535 56691736
292
+ chr19 56922158 56922601
293
+ chr2 638427 638808
294
+ chr2 1087103 1087484
295
+ chr2 16271753 16272134
296
+ chr2 22316878 22317258
297
+ chr2 24644617 24644997
298
+ chr2 32916201 32916632
299
+ chr2 33767290 33767703
300
+ chr2 33964664 33965045
301
+ chr2 36276769 36277149
302
+ chr2 40784787 40785278
303
+ chr2 49229452 49230058
304
+ chr2 50588765 50589566
305
+ chr2 54451654 54452034
306
+ chr2 57648677 57649057
307
+ chr2 67953669 67954049
308
+ chr2 75063567 75063994
309
+ chr2 81666317 81666849
310
+ chr2 82814941 82815321
311
+ chr2 82815451 82816236
312
+ chr2 82816261 82816647
313
+ chr2 82818378 82818748
314
+ chr2 82820800 82821005
315
+ chr2 85068666 85069046
316
+ chr2 87824709 87825530
317
+ chr2 89272789 89273133
318
+ chr2 89827607 89827706
319
+ chr2 89828636 89828710
320
+ chr2 89828842 89828942
321
+ chr2 89833685 89833793
322
+ chr2 89839592 89839709
323
+ chr2 89909317 89909789
324
+ chr2 90379778 90402456
325
+ chr2 92081223 92081398
326
+ chr2 92188125 94293463
327
+ chr2 94499181 94570956
328
+ chr2 94898976 94899645
329
+ chr2 94900639 94900840
330
+ chr2 94901421 94901808
331
+ chr2 97189431 97189813
332
+ chr2 102482582 102482962
333
+ chr2 102505606 102505987
334
+ chr2 110072034 110072434
335
+ chr2 110299106 110299346
336
+ chr2 116751234 116751614
337
+ chr2 116752004 116752448
338
+ chr2 116752517 116752897
339
+ chr2 117020171 117020552
340
+ chr2 117021107 117022152
341
+ chr2 117022438 117024038
342
+ chr2 117024277 117025093
343
+ chr2 117025205 117025670
344
+ chr2 117026130 117026512
345
+ chr2 120211535 120212064
346
+ chr2 120212685 120213069
347
+ chr2 120213761 120214143
348
+ chr2 120214590 120215370
349
+ chr2 121220135 121220515
350
+ chr2 124680743 124681182
351
+ chr2 125812046 125812548
352
+ chr2 129090774 129091154
353
+ chr2 130272174 130272615
354
+ chr2 130273451 130273981
355
+ chr2 130274326 130274992
356
+ chr2 130275174 130275744
357
+ chr2 130276119 130276500
358
+ chr2 130277774 130278727
359
+ chr2 130279995 130280729
360
+ chr2 130280827 130281440
361
+ chr2 130557359 130557607
362
+ chr2 130563142 130563396
363
+ chr2 131369643 131369925
364
+ chr2 131370949 131371562
365
+ chr2 131371916 131372361
366
+ chr2 131372758 131373137
367
+ chr2 131379317 131380344
368
+ chr2 131381592 131381973
369
+ chr2 131382344 131382728
370
+ chr2 131382772 131382974
371
+ chr2 131383079 131384016
372
+ chr2 131384051 131384621
373
+ chr2 131384898 131385281
374
+ chr2 131385356 131385794
375
+ chr2 140217229 140218044
376
+ chr2 140220209 140220840
377
+ chr2 140220940 140221140
378
+ chr2 140221198 140222369
379
+ chr2 140222545 140223623
380
+ chr2 140223647 140224297
381
+ chr2 143088644 143089042
382
+ chr2 143089938 143090358
383
+ chr2 143090898 143091662
384
+ chr2 143092255 143092646
385
+ chr2 143093556 143093941
386
+ chr2 143094515 143094999
387
+ chr2 143095614 143095994
388
+ chr2 143096048 143096428
389
+ chr2 143096470 143097336
390
+ chr2 143097466 143097981
391
+ chr2 143100621 143101005
392
+ chr2 147048574 147048955
393
+ chr2 147244849 147245229
394
+ chr2 147265034 147265432
395
+ chr2 148822913 148823295
396
+ chr2 148881545 148882032
397
+ chr2 155196092 155196473
398
+ chr2 155263345 155264313
399
+ chr2 155264362 155264562
400
+ chr2 155264599 155264982
401
+ chr2 155311420 155311995
402
+ chr2 155313539 155313922
403
+ chr2 156828628 156829008
404
+ chr2 162517271 162517651
405
+ chr2 164117001 164117382
406
+ chr2 166414323 166414779
407
+ chr2 167378863 167379244
408
+ chr2 168652433 168652813
409
+ chr2 179739184 179739689
410
+ chr2 190593881 190594262
411
+ chr2 196204680 196205060
412
+ chr2 201212170 201212612
413
+ chr2 201212648 201212854
414
+ chr2 201212903 201213386
415
+ chr2 201214659 201215040
416
+ chr2 201549404 201549784
417
+ chr2 201550130 201550513
418
+ chr2 201557568 201557948
419
+ chr2 202614117 202614527
420
+ chr2 202615371 202615757
421
+ chr2 202617016 202617398
422
+ chr2 202618435 202618819
423
+ chr2 202619754 202620134
424
+ chr2 211773627 211774158
425
+ chr2 211774322 211775192
426
+ chr2 211775641 211776712
427
+ chr2 211777034 211777417
428
+ chr2 211777802 211778269
429
+ chr2 211778916 211779562
430
+ chr2 215573163 215573544
431
+ chr2 226722088 226722596
432
+ chr2 237521663 237522775
433
+ chr2 237522862 237523652
434
+ chr20 5999469 5999849
435
+ chr20 9168743 9169145
436
+ chr20 10441916 10442296
437
+ chr20 13167142 13167534
438
+ chr20 18449173 18449556
439
+ chr20 22078162 22078542
440
+ chr20 24024376 24024757
441
+ chr20 26438448 28554562
442
+ chr20 28644084 29015573
443
+ chr20 29125977 29294639
444
+ chr20 30744370 30744939
445
+ chr20 30746748 30747241
446
+ chr20 31051540 31106909
447
+ chr20 31157044 31159116
448
+ chr20 31161652 31223331
449
+ chr20 34688743 34689039
450
+ chr20 47894699 47896109
451
+ chr20 57063873 57064279
452
+ chr20 57357555 57358134
453
+ chr20 57358221 57359428
454
+ chr20 57359451 57360972
455
+ chr20 63644937 63645318
456
+ chr21 6369257 6372342
457
+ chr21 7201205 7327885
458
+ chr21 7919585 7919691
459
+ chr21 8211710 8211892
460
+ chr21 8212412 8212570
461
+ chr21 8213694 8213987
462
+ chr21 8219372 8220330
463
+ chr21 8234456 8234568
464
+ chr21 8394767 8394902
465
+ chr21 8395471 8395591
466
+ chr21 8396751 8397011
467
+ chr21 8445918 8446080
468
+ chr21 8446629 8446729
469
+ chr21 8446925 8447070
470
+ chr21 8595669 8595768
471
+ chr21 8844362 8844855
472
+ chr21 8846669 8847382
473
+ chr21 10014674 10015194
474
+ chr21 10650900 12965800
475
+ chr21 16645305 16645685
476
+ chr21 32095835 32096215
477
+ chr21 35890413 35890796
478
+ chr21 44474913 44475301
479
+ chr21 45376056 45376517
480
+ chr22 10863370 10863448
481
+ chr22 11210951 11215489
482
+ chr22 11854150 11854643
483
+ chr22 11856460 11857173
484
+ chr22 11974159 11974336
485
+ chr22 12135181 12135894
486
+ chr22 12137711 12138204
487
+ chr22 12691742 12694097
488
+ chr22 12954427 15057495
489
+ chr22 15153934 15211502
490
+ chr22 15940533 16085728
491
+ chr22 32894952 32895345
492
+ chr22 33819338 33819538
493
+ chr22 35885491 35885898
494
+ chr22 36172705 36173085
495
+ chr22 36177875 36178257
496
+ chr22 46470112 46470493
497
+ chr22 50086003 50086529
498
+ chr22 50806858 50808224
499
+ chr3 3571912 3572292
500
+ chr3 24705149 24705529
501
+ chr3 25467328 25467722
502
+ chr3 29797534 29797914
503
+ chr3 33548103 33548483
504
+ chr3 40252107 40253916
505
+ chr3 41532177 41532556
506
+ chr3 43229296 43229733
507
+ chr3 68658875 68659467
508
+ chr3 68670345 68670734
509
+ chr3 73054640 73055020
510
+ chr3 82655447 82655827
511
+ chr3 89588895 89589538
512
+ chr3 90269605 90722189
513
+ chr3 90774880 91249595
514
+ chr3 91519649 93657524
515
+ chr3 93705477 93800019
516
+ chr3 96475262 96475643
517
+ chr3 96617014 96618680
518
+ chr3 106894019 106894441
519
+ chr3 106895181 106895568
520
+ chr3 106896124 106896504
521
+ chr3 106898661 106899022
522
+ chr3 106899753 106900122
523
+ chr3 106901799 106902741
524
+ chr3 106903188 106903605
525
+ chr3 119947198 119947578
526
+ chr3 120721858 120722610
527
+ chr3 122688557 122688938
528
+ chr3 125982519 125982900
529
+ chr3 127005357 127005745
530
+ chr3 128988979 128989359
531
+ chr3 137095968 137096348
532
+ chr3 142662232 142662612
533
+ chr3 152919604 152919995
534
+ chr3 153658704 153659087
535
+ chr3 160947473 160948127
536
+ chr3 166159726 166160108
537
+ chr3 166160260 166160644
538
+ chr3 166161631 166162087
539
+ chr3 166226563 166226945
540
+ chr3 166232406 166232886
541
+ chr3 166232970 166233355
542
+ chr3 166474023 166474223
543
+ chr3 171534313 171534700
544
+ chr3 177010776 177011156
545
+ chr3 192880587 192880967
546
+ chr4 5404508 5404897
547
+ chr4 12640142 12640815
548
+ chr4 14506099 14506467
549
+ chr4 17061824 17062213
550
+ chr4 18949310 18949691
551
+ chr4 22502173 22502553
552
+ chr4 25717756 25718136
553
+ chr4 25718275 25718655
554
+ chr4 25719398 25719626
555
+ chr4 27730251 27730747
556
+ chr4 30884524 30884906
557
+ chr4 32280109 32280489
558
+ chr4 41023064 41023448
559
+ chr4 47772100 47772544
560
+ chr4 49136056 49136102
561
+ chr4 49141052 49141147
562
+ chr4 49246355 49246848
563
+ chr4 49548607 49549100
564
+ chr4 49631231 49658125
565
+ chr4 49708086 51743949
566
+ chr4 51793952 51817249
567
+ chr4 55327979 55328462
568
+ chr4 64606369 64606752
569
+ chr4 64606841 64607360
570
+ chr4 64607395 64607789
571
+ chr4 64607976 64608801
572
+ chr4 64608937 64609326
573
+ chr4 64609811 64610876
574
+ chr4 64611176 64611617
575
+ chr4 66065193 66065631
576
+ chr4 68050141 68050521
577
+ chr4 68572333 68572774
578
+ chr4 78008402 78008882
579
+ chr4 83383282 83383662
580
+ chr4 89731703 89732163
581
+ chr4 92701787 92702300
582
+ chr4 107501924 107502304
583
+ chr4 112372589 112372969
584
+ chr4 116296652 116297040
585
+ chr4 116297165 116297545
586
+ chr4 116297659 116298726
587
+ chr4 116299003 116300416
588
+ chr4 128081280 128081956
589
+ chr4 140929567 140929947
590
+ chr4 143017907 143018107
591
+ chr4 143347973 143348354
592
+ chr4 144379497 144379877
593
+ chr4 155076906 155077288
594
+ chr4 155452733 155452935
595
+ chr4 155453928 155454313
596
+ chr4 155454407 155455447
597
+ chr4 155455566 155455766
598
+ chr4 155457624 155458008
599
+ chr4 155459547 155459747
600
+ chr4 155460171 155460553
601
+ chr4 155461093 155461689
602
+ chr4 155462078 155463456
603
+ chr4 155463701 155464839
604
+ chr4 155464895 155465305
605
+ chr4 155465580 155466624
606
+ chr4 157628391 157628774
607
+ chr4 160044429 160044815
608
+ chr4 161449477 161449857
609
+ chr4 161788291 161788671
610
+ chr4 162421207 162421721
611
+ chr4 172036714 172037094
612
+ chr4 179069259 179069639
613
+ chr4 183489243 183489623
614
+ chr4 189844495 189844576
615
+ chr5 12284 12523
616
+ chr5 12952 13361
617
+ chr5 5395563 5395943
618
+ chr5 5396182 5396616
619
+ chr5 5396675 5397057
620
+ chr5 8619083 8619464
621
+ chr5 8619927 8620307
622
+ chr5 8620707 8621192
623
+ chr5 8621953 8622333
624
+ chr5 8622354 8622753
625
+ chr5 32927394 32927776
626
+ chr5 37164286 37164673
627
+ chr5 45913363 50265419
628
+ chr5 60761358 60762176
629
+ chr5 66253509 66253889
630
+ chr5 73775720 73776112
631
+ chr5 79089860 79090240
632
+ chr5 80649841 80652548
633
+ chr5 94567275 94571098
634
+ chr5 97678633 97679016
635
+ chr5 98409947 98410327
636
+ chr5 98410700 98411257
637
+ chr5 99813005 99813388
638
+ chr5 100045805 100055225
639
+ chr5 106553187 106553689
640
+ chr5 111488864 111489244
641
+ chr5 119127218 119127602
642
+ chr5 121030820 121031445
643
+ chr5 122338658 122339042
644
+ chr5 123760111 123760622
645
+ chr5 123760719 123761918
646
+ chr5 134923133 134928692
647
+ chr5 136533606 136533986
648
+ chr5 137305006 137305387
649
+ chr5 152198765 152199145
650
+ chr5 160600365 160600745
651
+ chr5 163146853 163147234
652
+ chr5 163959711 163960091
653
+ chr5 164673914 164674288
654
+ chr5 166530241 166530641
655
+ chr5 170635389 170635774
656
+ chr6 1705930 1706304
657
+ chr6 3943769 3944149
658
+ chr6 29454054 29454435
659
+ chr6 32706020 32706850
660
+ chr6 43490986 43491370
661
+ chr6 54899048 54899248
662
+ chr6 58554346 59830578
663
+ chr6 61278527 61521106
664
+ chr6 61573960 61574809
665
+ chr6 72747981 72748361
666
+ chr6 72799169 72799549
667
+ chr6 76708390 76708770
668
+ chr6 88555202 88555591
669
+ chr6 91726616 91727363
670
+ chr6 94446937 94447370
671
+ chr6 96941571 96941951
672
+ chr6 104699855 104700055
673
+ chr6 114377334 114377534
674
+ chr6 122764824 122765204
675
+ chr6 126478329 126478709
676
+ chr6 127735330 127735710
677
+ chr6 132799554 132799939
678
+ chr6 133150492 133150881
679
+ chr6 133930809 133931190
680
+ chr6 138133082 138133462
681
+ chr6 143077647 143078031
682
+ chr6 153666229 153666618
683
+ chr6 153667363 153667744
684
+ chr6 153668187 153668753
685
+ chr6 153669025 153669419
686
+ chr6 156547729 156548118
687
+ chr6 163638068 163638448
688
+ chr7 18021726 18022106
689
+ chr7 22748471 22748854
690
+ chr7 33749120 33749500
691
+ chr7 36228567 36229008
692
+ chr7 37387570 37387950
693
+ chr7 45251808 45252289
694
+ chr7 55369049 55369429
695
+ chr7 57167688 57168071
696
+ chr7 57168472 57168852
697
+ chr7 57169046 57169430
698
+ chr7 57169550 57169932
699
+ chr7 57170307 57170523
700
+ chr7 57170675 57171410
701
+ chr7 57171502 57172122
702
+ chr7 57173798 57174181
703
+ chr7 57174854 57175239
704
+ chr7 57185615 57185995
705
+ chr7 57186105 57186589
706
+ chr7 57187287 57188033
707
+ chr7 57188305 57188872
708
+ chr7 57189116 57189730
709
+ chr7 57190949 57191332
710
+ chr7 57191618 57191818
711
+ chr7 57192132 57192860
712
+ chr7 57193489 57193872
713
+ chr7 57193974 57194701
714
+ chr7 57194829 57195210
715
+ chr7 57196302 57197490
716
+ chr7 57198263 57198644
717
+ chr7 57879605 58032504
718
+ chr7 58166363 62995324
719
+ chr7 63094673 63095057
720
+ chr7 64104133 64104513
721
+ chr7 64105294 64106415
722
+ chr7 64106627 64107010
723
+ chr7 64108329 64108798
724
+ chr7 64110007 64110707
725
+ chr7 64111376 64111804
726
+ chr7 64111957 64112849
727
+ chr7 67627830 67628213
728
+ chr7 68097607 68097990
729
+ chr7 68736347 68736811
730
+ chr7 69331805 69332005
731
+ chr7 69332037 69332438
732
+ chr7 69333013 69333393
733
+ chr7 69333597 69334167
734
+ chr7 72088575 72088955
735
+ chr7 83100026 83100406
736
+ chr7 83469984 83470184
737
+ chr7 83855080 83855464
738
+ chr7 95851249 95851629
739
+ chr7 104989516 104989896
740
+ chr7 112372484 112372865
741
+ chr7 112374724 112374950
742
+ chr7 117263552 117264184
743
+ chr7 117264231 117264614
744
+ chr7 130116678 130117058
745
+ chr7 141173000 141173384
746
+ chr7 141801916 141802451
747
+ chr7 141802901 141803366
748
+ chr7 141804074 141804274
749
+ chr7 141804814 141805507
750
+ chr7 142665099 142667846
751
+ chr7 143187483 143187863
752
+ chr7 145997159 145997608
753
+ chr7 150131843 150132229
754
+ chr7 153968598 153968979
755
+ chr7 159294463 159294846
756
+ chr8 13353292 13353679
757
+ chr8 16056863 16057063
758
+ chr8 18849121 18849571
759
+ chr8 20551162 20551554
760
+ chr8 32805708 32806092
761
+ chr8 33010514 33010894
762
+ chr8 33011359 33014071
763
+ chr8 33014510 33014895
764
+ chr8 33015020 33015853
765
+ chr8 36277446 36278060
766
+ chr8 36278272 36278791
767
+ chr8 36278835 36279634
768
+ chr8 40070431 40070867
769
+ chr8 43237631 43242390
770
+ chr8 43937900 45969600
771
+ chr8 46827305 46827914
772
+ chr8 46828298 46829961
773
+ chr8 46830195 46831222
774
+ chr8 46837581 46837961
775
+ chr8 46838101 46838484
776
+ chr8 50758259 50758639
777
+ chr8 56736733 56736933
778
+ chr8 61303079 61303460
779
+ chr8 67580689 67581493
780
+ chr8 67581588 67581972
781
+ chr8 67582178 67582568
782
+ chr8 67585216 67585693
783
+ chr8 67585787 67586175
784
+ chr8 67587282 67587922
785
+ chr8 69102851 69103234
786
+ chr8 72985528 72985923
787
+ chr8 74828644 74829025
788
+ chr8 76201592 76202319
789
+ chr8 76645407 76645800
790
+ chr8 97907908 97908279
791
+ chr8 99495689 99496133
792
+ chr8 102774315 102774695
793
+ chr8 103082925 103083379
794
+ chr8 103083704 103084399
795
+ chr8 103084730 103085110
796
+ chr8 103085323 103085806
797
+ chr8 103086859 103087242
798
+ chr8 108533901 108534281
799
+ chr8 110933150 110933533
800
+ chr8 110934510 110935010
801
+ chr8 111248936 111249316
802
+ chr8 120224204 120224584
803
+ chr8 127053876 127054257
804
+ chr8 127968653 127969034
805
+ chr8 133615761 133616142
806
+ chr8 133755390 133755856
807
+ chr9 5091131 5091511
808
+ chr9 5091962 5093013
809
+ chr9 5093063 5094123
810
+ chr9 5094192 5094697
811
+ chr9 5094931 5095816
812
+ chr9 5096206 5096816
813
+ chr9 5097188 5097890
814
+ chr9 5098134 5098516
815
+ chr9 5099352 5099552
816
+ chr9 5100044 5100427
817
+ chr9 5108063 5108592
818
+ chr9 5109193 5109986
819
+ chr9 5110030 5110411
820
+ chr9 9896970 9897350
821
+ chr9 15866612 15866992
822
+ chr9 18336471 18336854
823
+ chr9 31498260 31498640
824
+ chr9 33656533 33658316
825
+ chr9 33658346 33659299
826
+ chr9 34998988 34999474
827
+ chr9 36466192 36466572
828
+ chr9 43153721 45525161
829
+ chr9 64045550 64046043
830
+ chr9 64047855 64048422
831
+ chr9 65048153 65079624
832
+ chr9 68251002 68251071
833
+ chr9 72788174 72788555
834
+ chr9 78741395 78741775
835
+ chr9 78742155 78742969
836
+ chr9 78743199 78743630
837
+ chr9 78744108 78744492
838
+ chr9 78810721 78811113
839
+ chr9 79804550 79804933
840
+ chr9 80564643 80565085
841
+ chr9 80565478 80565941
842
+ chr9 81747641 81748021
843
+ chr9 82427689 82428071
844
+ chr9 92108965 92109347
845
+ chr9 92539106 92539763
846
+ chr9 95876956 95877338
847
+ chr9 117109914 117110296
848
+ chr9 122505687 122506067
849
+ chr9 129878699 129879081
850
+ chr9 134164478 134165354
851
+ chr9 134170819 134171060
852
+ chrX 4059512 4059712
853
+ chrX 5168678 5169232
854
+ chrX 5169733 5170646
855
+ chrX 15727702 15728089
856
+ chrX 17116414 17116794
857
+ chrX 24056083 24056470
858
+ chrX 24375345 24375545
859
+ chrX 33762401 33762781
860
+ chrX 55178596 55179289
861
+ chrX 55179434 55180459
862
+ chrX 55181196 55182790
863
+ chrX 55183051 55184112
864
+ chrX 58061543 62821716
865
+ chrX 62841379 62841765
866
+ chrX 62842257 62842639
867
+ chrX 70119464 70119845
868
+ chrX 70127233 70127620
869
+ chrX 77501934 77502314
870
+ chrX 78561721 78561921
871
+ chrX 84403779 84404168
872
+ chrX 100027094 100027475
873
+ chrX 102010329 102010712
874
+ chrX 102011531 102011915
875
+ chrX 102772405 102772791
876
+ chrX 102785904 102786287
877
+ chrX 102798001 102798386
878
+ chrX 102802747 102803161
879
+ chrX 102809395 102809788
880
+ chrX 104409869 104410249
881
+ chrX 106239694 106239894
882
+ chrX 111416893 111417294
883
+ chrX 126471558 126473451
884
+ chrX 126728884 126729272
885
+ chrX 126729326 126729709
886
+ chrX 126729837 126730217
887
+ chrX 126730716 126731106
888
+ chrX 126731624 126732029
889
+ chrX 129983338 129983538
890
+ chrX 133041871 133042251
891
+ chrX 135292293 135292493
892
+ chrX 143430213 143430837
893
+ chrX 143431144 143431537
894
+ chrX 143431716 143432219
895
+ chrX 143432410 143433212
896
+ chrX 143433510 143434156
897
+ chrX 143543636 143544023
898
+ chrX 146995842 146996224
899
+ chrY 4344757 4344879
900
+ chrY 9141870 9141995
901
+ chrY 10203380 10266932
902
+ chrY 10316749 10544446
903
+ chrY 10594583 10626838
904
+ chrY 10663669 10663716
905
+ chrY 10744417 10921497
906
+ chrY 11290797 11334278
907
+ chrY 11493053 11592850
908
+ chrY 11671014 11671046
909
+ chrY 11721528 11749472
910
+ chrY 56694632 56889743
data/chrom_size_hg38.txt ADDED
@@ -0,0 +1,23 @@
+ chr1 248956422
+ chr2 242193529
+ chr3 198295559
+ chr4 190214555
+ chr5 181538259
+ chr6 170805979
+ chr7 159345973
+ chrX 156040895
+ chr8 145138636
+ chr9 138394717
+ chr11 135086622
+ chr10 133797422
+ chr12 133275309
+ chr13 114364328
+ chr14 107043718
+ chr15 101991189
+ chr16 90338345
+ chr17 83257441
+ chr18 80373285
+ chr20 64444167
+ chr19 58617616
+ chr22 50818468
+ chr21 46709983
data/epigenomes.txt ADDED
@@ -0,0 +1,245 @@
1
+ CTCF
2
+ RAD21
3
+ ATF7
4
+ CEBPB
5
+ MAX
6
+ CREB1
7
+ DPF2
8
+ POLR2A
9
+ JUND
10
+ ZBTB33
11
+ ELF1
12
+ MAZ
13
+ MAFK
14
+ REST
15
+ EGR1
16
+ EP300
17
+ SIN3A
18
+ ZBTB40
19
+ GABPA
20
+ RCOR1
21
+ ESRRA
22
+ NBN
23
+ NRF1
24
+ HDAC2
25
+ MYC
26
+ ARID3A
27
+ FOXM1
28
+ RAD51
29
+ MTA3
30
+ TCF12
31
+ HCFC1
32
+ FOXK2
33
+ SRF
34
+ SP1
35
+ ELK1
36
+ RFX5
37
+ SUZ12
38
+ ZNF687
39
+ PKNOX1
40
+ IKZF1
41
+ NFIC
42
+ CEBPG
43
+ ZFX
44
+ NR2F1
45
+ ATF2
46
+ BHLHE40
47
+ SMC3
48
+ POLR2AphosphoS5
49
+ ZNF217
50
+ NR2C2
51
+ ZNF143
52
+ NR2F2
53
+ CREM
54
+ MLLT1
55
+ TRIM22
56
+ TEAD4
57
+ YY1
58
+ MTA2
59
+ KDM1A
60
+ MNT
61
+ TCF3
62
+ UBTF
63
+ MXI1
64
+ ZNF384
65
+ RFX1
66
+ ZNF592
67
+ TCF7L2
68
+ SMARCA5
69
+ PML
70
+ E2F1
71
+ SMARCE1
72
+ MTA1
73
+ TAF1
74
+ FOXA1
75
+ CBFB
76
+ GATAD2B
77
+ ZNF24
78
+ TOE1
79
+ CBX5
80
+ JUN
81
+ TARDBP
82
+ ZFP36
83
+ NFATC3
84
+ ETV6
85
+ MEF2A
86
+ ARNT
87
+ TCF7
88
+ CHD2
89
+ HDGF
90
+ ETS1
91
+ TBL1XR1
92
+ SMAD1
93
+ CUX1
94
+ POLR2AphosphoS2
95
+ USF2
96
+ ZSCAN29
97
+ E2F4
98
+ LARP7
99
+ BMI1
100
+ ZKSCAN1
101
+ CHD1
102
+ BCLAF1
103
+ CHD4
104
+ NFRKB
105
+ E2F8
106
+ GTF2F1
107
+ ZNF207
108
+ SREBF1
109
+ BRCA1
110
+ NONO
111
+ ZZZ3
112
+ NFXL1
113
+ E4F1
114
+ ZMYM3
115
+ HDAC1
116
+ SMAD4
117
+ MAFF
118
+ CTBP1
119
+ ATF3
120
+ SPI1
121
+ GABPB1
122
+ FOSL2
123
+ MEIS2
124
+ FOSL1
125
+ STAG1
126
+ GATAD2A
127
+ ZNF281
128
+ FOXA3
129
+ PHF8
130
+ EED
131
+ KDM5B
132
+ KLF16
133
+ MAFG
134
+ ETV5
135
+ TEAD1
136
+ PRDM10
137
+ TFE3
138
+ TFDP1
139
+ PHF21A
140
+ SKIL
141
+ PATZ1
142
+ ATF4
143
+ BCL6
144
+ ZNF766
145
+ ERF
146
+ NR2F6
147
+ ZNF574
148
+ IRF2
149
+ BCL3
150
+ GATA2
151
+ JUNB
152
+ BACH1
153
+ RB1
154
+ ZNF644
155
+ ZBTB7A
156
+ MEF2D
157
+ KAT8
158
+ ZBTB7B
159
+ SOX6
160
+ NKRF
161
+ RXRA
162
+ NFE2
163
+ RBPJ
164
+ ADNP
165
+ GMEB1
166
+ NCOR1
167
+ RREB1
168
+ ZKSCAN8
169
+ PHF20
170
+ SMAD5
171
+ ZNF512
172
+ ELF4
173
+ NFYB
174
+ HOMEZ
175
+ EHMT2
176
+ MYBL2
177
+ THRA
178
+ RNF2
179
+ SMAD3
180
+ ZNF444
181
+ E2F5
182
+ KLF9
183
+ HMG20A
184
+ ZNF12
185
+ NR2C1
186
+ MBD2
187
+ KLF6
188
+ NFIB
189
+ NCOA1
190
+ CBX1
191
+ BRD4
192
+ RBBP5
193
+ FOXJ3
194
+ ZNF7
195
+ HIVEP1
196
+ ZNF274
197
+ ZEB1
198
+ STAT5A
199
+ HINFP
200
+ TBP
201
+ ZBED1
202
+ ZHX2
203
+ NEUROD1
204
+ ZNF740
205
+ ZNF83
206
+ NFYA
207
+ ZNF280B
208
+ IRF1
209
+ CREB3
210
+ MLX
211
+ ATF1
212
+ ZNF318
213
+ SMARCC2
214
+ ZFP1
215
+ SIX4
216
+ ZNF512B
217
+ PRDM15
218
+ CLOCK
219
+ HMG20B
220
+ SIN3B
221
+ KLF13
222
+ ZNF507
223
+ IRF5
224
+ ATF6
225
+ RELA
226
+ ZNF639
227
+ ZC3H8
228
+ HES1
229
+ KDM4B
230
+ DNMT1
231
+ DDX20
232
+ ZC3H4
233
+ NFE2L1
234
+ ZNF282
235
+ PBX2
236
+ ZNF3
237
+ H3K27me3
238
+ H3K9ac
239
+ H3K4me1
240
+ H3K27ac
241
+ H3K36me3
242
+ H2AFZ
243
+ H3K4me3
244
+ H3K9me3
245
+ H3K4me2
models/cage.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd3a532d752b3067f5424a0a0357abc9eba010f878c5a324177aedbc80909d12
+ size 128747867
models/epi_bind.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d71047f59dab64333b93d2cb10640155ca9749ebf8856bb2dacb79b6c4b9ae5
+ size 59823081
models/epi_track.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb75d025e1e689165b56a46209ef9c9e9c5f575d7751886a83c1b470d9a69b5b
+ size 128348451
models/hic.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbfe0c2e5b9eb0a4e2f2eba7076e0263f42d4e763b04e0368aabf25d46d6b8a9
+ size 54561869
models/microc.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8244fc07de18d110e28820b860bf8715a87a58b1835615b8253ffc43727aacc1
+ size 140192327
pretrain/__init__.py ADDED
File without changes
pretrain/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (158 Bytes).
 
pretrain/__pycache__/layers.cpython-39.pyc ADDED
Binary file (2.57 kB).
 
pretrain/__pycache__/model.cpython-39.pyc ADDED
Binary file (2.96 kB).
 
pretrain/__pycache__/transformer.cpython-39.pyc ADDED
Binary file (6.54 kB).
 
pretrain/layers.py ADDED
@@ -0,0 +1,87 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import torch.nn.functional as F
+
+
+ class CNN(nn.Module):
+     def __init__(self):
+         super(CNN, self).__init__()
+         conv_kernel_size1 = 10
+         conv_kernel_size2 = 8
+         pool_kernel_size1 = 5
+         pool_kernel_size2 = 4
+         self.conv_net = nn.Sequential(
+             nn.Conv1d(5, 256, kernel_size=conv_kernel_size1),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(256, 256, kernel_size=conv_kernel_size1),
+             nn.BatchNorm1d(256),
+             nn.ReLU(inplace=True),
+             nn.MaxPool1d(kernel_size=pool_kernel_size1, stride=pool_kernel_size1),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(256, 360, kernel_size=conv_kernel_size2),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(360, 360, kernel_size=conv_kernel_size2),
+             nn.BatchNorm1d(360),
+             nn.ReLU(inplace=True),
+             nn.MaxPool1d(kernel_size=pool_kernel_size2, stride=pool_kernel_size2),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(360, 512, kernel_size=conv_kernel_size2),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.2),
+             nn.Conv1d(512, 512, kernel_size=conv_kernel_size2),
+             nn.BatchNorm1d(512),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.2))
+         self.num_channels = 512
+     def forward(self, x):
+         out = self.conv_net(x)
+         return out
+
+
+ # borrow from https://github.com/Alibaba-MIIL/ASL/blob/main/src/loss_functions/losses.py
+ class Balanced_AsymmetricLoss(nn.Module):
+     def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, alpha=None, eps=1e-8, disable_torch_grad_focal_loss=True):
+         super(Balanced_AsymmetricLoss, self).__init__()
+
+         self.gamma_neg = gamma_neg
+         self.gamma_pos = gamma_pos
+         self.clip = clip
+         self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
+         self.eps = eps
+         self.alpha = alpha
+
+     def forward(self, x, y, mask):
+         # Calculating Probabilities
+         assert y.shape == mask.shape
+         x_sigmoid = torch.sigmoid(x)
+         xs_pos = x_sigmoid
+         xs_neg = 1 - x_sigmoid
+         # Asymmetric Clipping
+         if self.clip is not None and self.clip > 0:
+             xs_neg = (xs_neg + self.clip).clamp(max=1)
+
+         # Basic CE calculation
+         los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
+         los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
+         if self.alpha is not None:
+             los_pos = self.alpha * los_pos
+         loss = los_pos + los_neg
+         # Asymmetric Focusing
+         if self.gamma_neg > 0 or self.gamma_pos > 0:
+             if self.disable_torch_grad_focal_loss:
+                 torch.set_grad_enabled(False)
+             pt0 = xs_pos * y
+             pt1 = xs_neg * (1 - y)  # pt = p if t > 0 else 1-p
+             pt = pt0 + pt1
+             one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
+             one_sided_w = torch.pow(1 - pt, one_sided_gamma)
+             if self.disable_torch_grad_focal_loss:
+                 torch.set_grad_enabled(True)
+             loss *= one_sided_w
+         loss *= mask
+         return -loss.sum() / (torch.sum(mask) + self.eps)
+
+
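
A minimal sketch of calling the masked asymmetric loss above; the batch size and the 245-label width (chosen to match data/epigenomes.txt) are illustrative assumptions:

# Hypothetical call; shapes are illustrative only.
import torch
from pretrain.layers import Balanced_AsymmetricLoss

criterion = Balanced_AsymmetricLoss(gamma_neg=4, gamma_pos=1, clip=0.05)
logits = torch.randn(8, 245, requires_grad=True)   # raw multi-label scores
targets = torch.randint(0, 2, (8, 245)).float()
mask = torch.ones_like(targets)                    # 1 = label observed, 0 = excluded from the loss
loss = criterion(logits, targets, mask)
loss.backward()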
pretrain/model.py ADDED
@@ -0,0 +1,72 @@
+ #most of the codes below are copied from Query2label
+ import torch,math
+ import numpy as np
+ from torch import nn, Tensor
+ from pretrain.layers import CNN
+ from pretrain.transformer import Transformer
+
+ class GroupWiseLinear(nn.Module):
+     def __init__(self, num_class, hidden_dim, bias=True):
+         super().__init__()
+         self.num_class = num_class
+         self.hidden_dim = hidden_dim
+         self.bias = bias
+         self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))
+         if bias:
+             self.b = nn.Parameter(torch.Tensor(1, num_class))
+         self.reset_parameters()
+     def reset_parameters(self):
+         stdv = 1. / math.sqrt(self.W.size(2))
+         for i in range(self.num_class):
+             self.W[0][i].data.uniform_(-stdv, stdv)
+         if self.bias:
+             for i in range(self.num_class):
+                 self.b[0][i].data.uniform_(-stdv, stdv)
+     def forward(self, x):
+         x = (self.W * x).sum(-1)
+         if self.bias:
+             x = x + self.b
+         return x
+
+ class Tranmodel(nn.Module):
+     def __init__(self, backbone, transfomer, num_class):
+         super().__init__()
+         self.backbone = backbone
+         self.transformer = transfomer
+         self.num_class = num_class
+         hidden_dim = transfomer.d_model
+         self.label_input = torch.Tensor(np.arange(num_class)).view(1, -1).long()
+         self.input_proj = nn.Conv1d(backbone.num_channels, hidden_dim, kernel_size=1)
+         self.query_embed = nn.Embedding(num_class, hidden_dim)
+         self.fc = GroupWiseLinear(num_class, hidden_dim, bias=True)
+
+     def forward(self, input):
+         src = self.backbone(input)
+         label_inputs=self.label_input.repeat(src.size(0),1).to(input.device)
+         label_embed=self.query_embed(label_inputs)
+         src=self.input_proj(src)
+         hs = self.transformer(src, label_embed)
+         out = self.fc(hs)
+         return out
+
+ def build_backbone():
+     model = CNN()
+     return model
+ def build_transformer(args):
+     return Transformer(
+         d_model=args.hidden_dim,
+         dropout=args.dropout,
+         nhead=args.nheads,
+         dim_feedforward=args.dim_feedforward,
+         num_encoder_layers=args.enc_layers,
+         num_decoder_layers=args.dec_layers
+     )
+ def build_epd_model(args):
+     backbone = build_backbone()
+     transformer = build_transformer(args)
+     model = Tranmodel(
+         backbone=backbone,
+         transfomer=transformer,
+         num_class=args.num_class,
+     )
+     return model
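
For reference, a hedged sketch of wiring up build_epd_model; the attribute names mirror the args accesses above, while the values and the num_class choice are placeholders rather than settings from this commit:

# Hypothetical configuration; values are placeholders only.
from argparse import Namespace
from pretrain.model import build_epd_model

args = Namespace(
    hidden_dim=512, dropout=0.1, nheads=4, dim_feedforward=1024,
    enc_layers=3, dec_layers=3,
    num_class=245,   # e.g. one query per entry in data/epigenomes.txt
)
model = build_epd_model(args)
print(sum(p.numel() for p in model.parameters()), "parameters")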
pretrain/track/__init__.py ADDED
File without changes
pretrain/track/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (164 Bytes).
 
pretrain/track/__pycache__/layers.cpython-39.pyc ADDED
Binary file (9.37 kB).
 
pretrain/track/__pycache__/model.cpython-39.pyc ADDED
Binary file (4.07 kB).
 
pretrain/track/__pycache__/transformers.cpython-39.pyc ADDED
Binary file (4.17 kB).
 
pretrain/track/layers.py ADDED
@@ -0,0 +1,299 @@
+ # some of following codes are borrowed from https://github.com/lucidrains/enformer-pytorch
+
+ import math
+ import torch
+ from torch import nn, einsum
+ from einops import rearrange, reduce
+ from einops.layers.torch import Rearrange
+ import torch.nn.functional as F
+
+ def exists(val):
+     return val is not None
+
+ def default(val, d):
+     return val if exists(val) else d
+
+ def map_values(fn, d):
+     return {key: fn(values) for key, values in d.items()}
+
+ def exponential_linspace_int(start, end, num, divisible_by = 1):
+     def _round(x):
+         return int(round(x / divisible_by) * divisible_by)
+
+     base = math.exp(math.log(end / start) / (num - 1))
+     return [_round(start * base**i) for i in range(num)]
+
+ def log(t, eps = 1e-20):
+     return torch.log(t.clamp(min = eps))
+
+ # losses and metrics
+
+
+ def pearson_corr_coef(x, y, eps = 1e-8):
+     x2 = x * x
+     y2 = y * y
+     xy = x * y
+     ex = x.mean(dim = 1)
+     ey = y.mean(dim = 1)
+     exy = xy.mean(dim = 1)
+     ex2 = x2.mean(dim = 1)
+     ey2 = y2.mean(dim = 1)
+     r = (exy - ex * ey) / (torch.sqrt(ex2 - (ex * ex)) * torch.sqrt(ey2 - (ey * ey)) + eps)
+     return r.mean(dim = -1)
+
+ # relative positional encoding functions
+
+ def get_positional_features_exponential(positions, features, seq_len, min_half_life = 3.):
+     max_range = math.log(seq_len) / math.log(2.)
+     half_life = 2 ** torch.linspace(min_half_life, max_range, features, device = positions.device)
+     half_life = half_life[None, ...]
+     positions = positions.abs()[..., None]
+     return torch.exp(-math.log(2.) / half_life * positions)
+
+ def get_positional_features_central_mask(positions, features, seq_len):
+     center_widths = 2 ** torch.arange(1, features + 1, device = positions.device).float()
+     center_widths = center_widths - 1
+     return (center_widths[None, ...] > positions.abs()[..., None]).float()
+
+ def gamma_pdf(x, concentration, rate):
+     log_unnormalized_prob = torch.xlogy(concentration - 1., x) - rate * x
+     log_normalization = (torch.lgamma(concentration) - concentration * torch.log(rate))
+     return torch.exp(log_unnormalized_prob - log_normalization)
+
+ def get_positional_features_gamma(positions, features, seq_len, stddev = None, start_mean = None, eps = 1e-8):
+     if not exists(stddev):
+         stddev = seq_len / (2 * features)
+
+     if not exists(start_mean):
+         start_mean = seq_len / features
+
+     mean = torch.linspace(start_mean, seq_len, features, device = positions.device)
+     mean = mean[None, ...]
+     concentration = (mean / stddev) ** 2
+     rate = mean / stddev ** 2
+     probabilities = gamma_pdf(positions.float().abs()[..., None], concentration, rate)
+     probabilities = probabilities + eps
+     outputs = probabilities / torch.amax(probabilities)
+     return outputs
+
+ def get_positional_embed(seq_len, feature_size, device):
+     distances = torch.arange(-seq_len + 1, seq_len, device = device)
+
+     feature_functions = [
+         get_positional_features_exponential,
+         get_positional_features_central_mask,
+         get_positional_features_gamma
+     ]
+
+     num_components = len(feature_functions) * 2
+
+     if (feature_size % num_components) != 0:
+         raise ValueError(f'feature size is not divisible by number of components ({num_components})')
+
+     num_basis_per_class = feature_size // num_components
+
+     embeddings = []
+     for fn in feature_functions:
+         embeddings.append(fn(distances, num_basis_per_class, seq_len))
+
+     embeddings = torch.cat(embeddings, dim = -1)
+     embeddings = torch.cat((embeddings, torch.sign(distances)[..., None] * embeddings), dim = -1)
+     return embeddings
+
+ def relative_shift(x):
+     to_pad = torch.zeros_like(x[..., :1])
+     x = torch.cat((to_pad, x), dim = -1)
+     _, h, t1, t2 = x.shape
+     x = x.reshape(-1, h, t2, t1)
+     x = x[:, :, 1:, :]
+     x = x.reshape(-1, h, t1, t2 - 1)
+     return x[..., :((t2 + 1) // 2)]
+
+ # classes
+
+ class Residual(nn.Module):
+     def __init__(self, fn):
+         super().__init__()
+         self.fn = fn
+
+     def forward(self, x, **kwargs):
+         return self.fn(x, **kwargs) + x
+
+ class GELU(nn.Module):
+     def forward(self, x):
+         return torch.sigmoid(1.702 * x) * x
+
+
+ class Attention(nn.Module):
+     def __init__(
+         self,dim,num_rel_pos_features,
+         heads = 8,
+         dim_key = 64,
+         dim_value = 64,
+         dropout = 0.,
+         pos_dropout = 0.
+     ):
+         super().__init__()
+         self.scale = dim_key ** -0.5
+         self.heads = heads
+
+         self.to_q = nn.Linear(dim, dim_key * heads, bias = False)
+         self.to_k = nn.Linear(dim, dim_key * heads, bias = False)
+         self.to_v = nn.Linear(dim, dim_value * heads, bias = False)
+
+         self.to_out = nn.Linear(dim_value * heads, dim)
+         nn.init.zeros_(self.to_out.weight)
+         nn.init.zeros_(self.to_out.bias)
+
+         # relative positional encoding
+
+         self.num_rel_pos_features = num_rel_pos_features
+
+         self.to_rel_k = nn.Linear(num_rel_pos_features, dim_key * heads, bias = False)
+         self.rel_content_bias = nn.Parameter(torch.randn(1, heads, 1, dim_key))
+         self.rel_pos_bias = nn.Parameter(torch.randn(1, heads, 1, dim_key))
+
+         self.pos_dropout = nn.Dropout(pos_dropout)
+         self.attn_dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         n, h, device = x.shape[-2], self.heads, x.device
+
+         q = self.to_q(x)
+         k = self.to_k(x)
+         v = self.to_v(x)
+
+         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
+
+         q = q * self.scale
+
+         content_logits = einsum('b h i d, b h j d -> b h i j', q + self.rel_content_bias, k)
+
+         positions = get_positional_embed(n, self.num_rel_pos_features, device)
+         positions = self.pos_dropout(positions)
+         rel_k = self.to_rel_k(positions)
+
+         rel_k = rearrange(rel_k, 'n (h d) -> h n d', h = h)
+         rel_logits = einsum('b h i d, h j d -> b h i j', q + self.rel_pos_bias, rel_k)
+         rel_logits = relative_shift(rel_logits)
+
+         logits = content_logits + rel_logits
+         attn = logits.softmax(dim = -1)
+         attn = self.attn_dropout(attn)
+
+         out = einsum('b h i j, b h j d -> b h i d', attn, v)
+         out = rearrange(out, 'b h n d -> b n (h d)')
+         return self.to_out(out)
+
+ class Enformer(nn.Module):
+     def __init__(
+         self,
+         dim = 512,
+         depth = 4,
+         heads = 6,
+         attn_dim_key = 64,
+         dropout_rate = 0.2,
+         attn_dropout = 0.05,
+         pos_dropout = 0.01,
+     ):
+         super().__init__()
+         self.dim = dim
+         transformer = []
+         for _ in range(depth):
+             transformer.append(nn.Sequential(
+                 Residual(nn.Sequential(
+                     nn.LayerNorm(dim),
+                     Attention(
+                         dim,
+                         heads = heads,
+                         dim_key = attn_dim_key,
+                         dim_value = dim // heads,
+                         dropout = attn_dropout,
+                         pos_dropout = pos_dropout,
+                         num_rel_pos_features = dim // heads
+                     ),
+                     nn.Dropout(dropout_rate)
+                 )),
+                 Residual(nn.Sequential(
+                     nn.LayerNorm(dim),
+                     nn.Linear(dim, dim * 2),
+                     nn.Dropout(dropout_rate),
+                     nn.ReLU(),
+                     nn.Linear(dim * 2, dim),
+                     nn.Dropout(dropout_rate)
+                 ))
+             ))
+
+         self.transformer = nn.Sequential(
+             # Rearrange('b d n -> b n d'),
+             *transformer
+         )
+
+     def forward(self,x):
+
+         x = self.transformer(x)
+
+         return x
+
+ class CNN(nn.Module):
+     def __init__(self):
+         super(CNN, self).__init__()
+         conv_kernel_size1 = 10
+         conv_kernel_size2 = 8
+         pool_kernel_size1 = 5
+         pool_kernel_size2 = 4
+         self.conv_net = nn.Sequential(
+             nn.Conv1d(5, 256, kernel_size=conv_kernel_size1),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(256, 256, kernel_size=conv_kernel_size1),
+             # nn.GroupNorm(16, 256),
+             nn.BatchNorm1d(256,track_running_stats=False),
+             nn.ReLU(inplace=True),
+             nn.MaxPool1d(kernel_size=pool_kernel_size1, stride=pool_kernel_size1),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(256, 360, kernel_size=conv_kernel_size2),
+             nn.ReLU(inplace=True),
+             nn.Dropout(p=0.1),
+             nn.Conv1d(360, 360, kernel_size=conv_kernel_size2),
259
+ nn.BatchNorm1d(360,track_running_stats=False),
260
+ # nn.GroupNorm(36, 360),
261
+ nn.ReLU(inplace=True),
262
+ nn.MaxPool1d(kernel_size=pool_kernel_size2, stride=pool_kernel_size2),
263
+ nn.Dropout(p=0.1),
264
+ nn.Conv1d(360, 512, kernel_size=conv_kernel_size2),
265
+ nn.ReLU(inplace=True),
266
+ nn.Dropout(p=0.2),
267
+ nn.Conv1d(512, 512, kernel_size=conv_kernel_size2),
268
+ nn.BatchNorm1d(512,track_running_stats=False),
269
+ # nn.GroupNorm(32, 512),
270
+ nn.ReLU(inplace=True),
271
+ nn.Dropout(p=0.2))
272
+ self.num_channels = 512
273
+ def forward(self, x):
274
+ out = self.conv_net(x)
275
+ return out
276
+
277
+
278
+ class AttentionPool(nn.Module):
279
+ def __init__(self, dim):
280
+ super().__init__()
281
+ self.pool_fn = Rearrange('b (n p) d-> b n p d', n=1)
282
+ self.to_attn_logits = nn.Parameter(torch.eye(dim))
283
+
284
+ def forward(self, x):
285
+ attn_logits = einsum('b n d, d e -> b n e', x, self.to_attn_logits)
286
+ x = self.pool_fn(x)
287
+ logits = self.pool_fn(attn_logits)
288
+
289
+ attn = logits.softmax(dim = -2)
290
+ return (x * attn).sum(dim = -2).squeeze()
291
+
292
+
293
+
294
+ # class MSE_loss(nn.Module):
295
+ # def __init__(self):
296
+ # super().__init__()
297
+ # def forward(self,pred,target):
298
+
299
+
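For orientation, a minimal shape-check sketch of the blocks defined above (a sketch only: it assumes the earlier, unshown part of layers.py imports math, einsum and defines exists, and that the repository root is on the Python path). Note that Attention builds its relative positional embedding from six feature components, so dim // heads must be divisible by 6; dim=384 with heads=8 satisfies this.

import torch
from pretrain.track.layers import Enformer, AttentionPool

tokens = torch.randn(2, 196, 384)              # (batch, bins, embedding dim) -- illustrative sizes
encoder = Enformer(dim=384, depth=2, heads=8)  # 384 // 8 = 48 relative-position features, divisible by 6
out = encoder(tokens)                          # residual attention + feed-forward blocks keep the shape
print(out.shape)                               # torch.Size([2, 196, 384])

pool = AttentionPool(dim=384)
pooled = pool(torch.randn(6, 10, 384))         # attention-weighted pooling over the length axis
print(pooled.shape)                            # torch.Size([6, 384])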
pretrain/track/model.py ADDED
@@ -0,0 +1,105 @@
+ import torch,math
+ from torch import nn, Tensor
+ from pretrain.track.transformers import Transformer
+ from einops import rearrange,repeat
+ from pretrain.track.layers import CNN,Enformer,AttentionPool
+ from einops.layers.torch import Rearrange
+
+ import numpy as np
+ import torch.nn.functional as F
+ class Tranmodel(nn.Module):
+     def __init__(self, backbone, transfomer):
+         super().__init__()
+         self.backbone = backbone
+         self.transformer = transfomer
+         hidden_dim = transfomer.d_model
+         self.input_proj = nn.Conv1d(backbone.num_channels, hidden_dim, kernel_size=1)
+     def forward(self, input):
+         input=rearrange(input,'b n c l -> (b n) c l')
+         src = self.backbone(input)
+         src=self.input_proj(src)
+         src = self.transformer(src)
+         return src
+
+ class PositionalEncoding(nn.Module):
+
+     def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
+         super().__init__()
+         self.dropout = nn.Dropout(p=dropout)
+
+         position = torch.arange(max_len).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
+         pe = torch.zeros(max_len, 1, d_model)
+         pe[:, 0, 0::2] = torch.sin(position * div_term)
+         pe[:, 0, 1::2] = torch.cos(position * div_term)
+         self.register_buffer('pe', pe)
+
+     def forward(self, x: Tensor) -> Tensor:
+         x = x + self.pe[:x.size(0)]
+         return self.dropout(x)
+
+ class finetunemodel(nn.Module):
+     def __init__(self,pretrain_model,hidden_dim,embed_dim,bins,crop=50,num_class=245,return_embed=True):
+         super().__init__()
+         self.pretrain_model = pretrain_model
+         self.bins=bins
+         self.crop=crop
+         self.return_embed = return_embed
+         self.attention_pool = AttentionPool(hidden_dim)
+         self.project=nn.Sequential(
+             Rearrange('(b n) c -> b c n', n=bins),
+             nn.Conv1d(hidden_dim, hidden_dim, kernel_size=15, padding=7,groups=hidden_dim),
+             nn.InstanceNorm1d(hidden_dim, affine=True),
+             nn.Conv1d(hidden_dim, embed_dim, kernel_size=1),
+             nn.ReLU(inplace=True),
+             nn.Dropout(0.1),
+             nn.Conv1d(embed_dim, embed_dim, kernel_size=9, padding=4),
+             nn.InstanceNorm1d(embed_dim, affine=True),
+             nn.ReLU(inplace=True),
+             nn.Dropout(0.1),
+         )
+
+
+         self.transformer = Enformer(dim=embed_dim, depth=4, heads=8)
+         self.prediction_head=nn.Linear(embed_dim,num_class)
+
+
+     def forward(self, x):
+         x=self.pretrain_model(x)
+         x = self.attention_pool(x)
+         x = self.project(x)
+         x= rearrange(x,'b c n -> b n c')
+
+         x = self.transformer(x)
+         out = self.prediction_head(x[:, self.crop:-self.crop, :])
+         if self.return_embed:
+             return x
+         else:
+             return out
+
+
+
+
+ def build_backbone():
+     model = CNN()
+     return model
+ def build_transformer(args):
+     return Transformer(
+         d_model=args.hidden_dim,
+         dropout=args.dropout,
+         nhead=args.nheads,
+         dim_feedforward=args.dim_feedforward,
+         num_encoder_layers=args.enc_layers,
+         num_decoder_layers=args.dec_layers
+     )
+ def build_track_model(args):
+     backbone = build_backbone()
+     transformer = build_transformer(args)
+     pretrain_model = Tranmodel(
+         backbone=backbone,
+         transfomer=transformer,
+     )
+     model=finetunemodel(pretrain_model,hidden_dim=args.hidden_dim,embed_dim=args.embed_dim,
+                         bins=args.bins,crop=args.crop,return_embed=args.return_embed)
+
+     return model
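A hedged sketch of wiring build_track_model together (the argument values below are illustrative assumptions for a smoke test, not the settings shipped with any released checkpoint; embed_dim must keep embed_dim // 8 divisible by 6 for the Enformer block above, and dec_layers must be positive so the encoder inside pretrain.track.transformers.Transformer is actually created):

from argparse import Namespace
import torch
from pretrain.track.model import build_track_model

args = Namespace(hidden_dim=512, dropout=0.1, nheads=8, dim_feedforward=1024,
                 enc_layers=3, dec_layers=3, embed_dim=384, bins=200, crop=50,
                 return_embed=True)
model = build_track_model(args).eval()

x = torch.randn(1, args.bins, 5, 1600)   # (batch, bins, 5 input channels, bases per bin); sizes are illustrative
with torch.no_grad():
    emb = model(x)                       # (1, bins, embed_dim) embeddings since return_embed=True;
                                         # with return_embed=False the cropped prediction-head output is returned
print(emb.shape)                         # torch.Size([1, 200, 384])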
pretrain/track/transformers.py ADDED
@@ -0,0 +1,94 @@
+ # most of the codes are borrowed from Query2label and DETR
+ import torch
+ import torch.nn.functional as F
+ from torch import nn, Tensor
+ from torch.nn import MultiheadAttention
+ from typing import Optional
+ import copy
+ def _get_activation_fn(activation):
+     if activation == "relu":
+         return F.relu
+     if activation == "gelu":
+         return F.gelu
+     if activation == "glu":
+         return F.glu
+     raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
+ class TransformerEncoderLayer(nn.Module):
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                  activation="relu"):
+         super().__init__()
+         self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+         self.activation = _get_activation_fn(activation)
+     def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+         return tensor if pos is None else tensor + pos
+     def forward(self, src,
+                 src_mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         src2 = self.norm1(src)
+         q = k = self.with_pos_embed(src2, pos)
+         src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
+                               key_padding_mask=src_key_padding_mask)[0]
+         src = src + self.dropout1(src2)
+         src2 = self.norm2(src)
+         src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+         src = src + self.dropout2(src2)
+         return src
+
+
+ def _get_clones(module, N):
+     return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+ class TransformerEncoder(nn.Module):
+     def __init__(self, encoder_layer, num_layers, norm=None):
+         super().__init__()
+         self.layers = _get_clones(encoder_layer, num_layers)
+         self.num_layers = num_layers
+         self.norm = norm
+     def forward(self, src,
+                 mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         output = src
+         for layer in self.layers:
+             output = layer(output, src_mask=mask,
+                            src_key_padding_mask=src_key_padding_mask, pos=pos)
+         if self.norm is not None:
+             output = self.norm(output)
+         return output
+
+
+ class Transformer(nn.Module):
+     def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
+                  num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
+                  activation="relu"
+                  ):
+         super().__init__()
+         self.num_encoder_layers = num_encoder_layers
+         if num_decoder_layers > 0:
+             encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
+                                                     dropout, activation)
+             encoder_norm = nn.LayerNorm(d_model)
+             self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+
+         self._reset_parameters()
+         self.d_model = d_model
+         self.nhead = nhead
+
+     def _reset_parameters(self):
+         for p in self.parameters():
+             if p.dim() > 1:
+                 nn.init.xavier_uniform_(p)
+
+     def forward(self, src, pos_embed=None, mask=None):
+         src = src.permute(2, 0, 1)
+         if mask is not None:
+             mask = mask.flatten(1)
+         memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
+         return memory.transpose(0,1)
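A small usage sketch of this encoder-only wrapper (tensor sizes below are arbitrary illustrations). One quirk worth noting in the code above: the encoder is only instantiated when num_decoder_layers > 0, so that argument must stay positive even though no decoder is built in this file.

import torch
from pretrain.track.transformers import Transformer

model = Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6)
src = torch.randn(4, 512, 61)    # (batch, d_model, length), e.g. the CNN output after the 1x1 projection
memory = model(src)              # permuted internally to (length, batch, d_model) for nn.MultiheadAttention
print(memory.shape)              # torch.Size([4, 61, 512])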
pretrain/transformer.py ADDED
@@ -0,0 +1,172 @@
+ #most of the codes below are copied from Query2label and DETR
+ import torch
+ import torch.nn.functional as F
+ from torch import nn, Tensor
+ from torch.nn import MultiheadAttention
+ from typing import Optional, List
+ import copy
+ def _get_activation_fn(activation):
+     if activation == "relu":
+         return F.relu
+     if activation == "gelu":
+         return F.gelu
+     if activation == "glu":
+         return F.glu
+     raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
+ class TransformerEncoderLayer(nn.Module):
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                  activation="relu"):
+         super().__init__()
+         self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+         self.activation = _get_activation_fn(activation)
+     def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+         return tensor if pos is None else tensor + pos
+     def forward(self, src,
+                 src_mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         src2 = self.norm1(src)
+         q = k = self.with_pos_embed(src2, pos)
+         src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
+                               key_padding_mask=src_key_padding_mask)[0]
+         src = src + self.dropout1(src2)
+         src2 = self.norm2(src)
+         src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+         src = src + self.dropout2(src2)
+         return src
+
+ class TransformerDecoderLayer(nn.Module):
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                  activation="relu"):
+         super().__init__()
+         self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
+         self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
+         # Implementation of Feedforward model
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.norm3 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+         self.dropout3 = nn.Dropout(dropout)
+         self.activation = _get_activation_fn(activation)
+     def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+         return tensor if pos is None else tensor + pos
+     def forward(self, tgt, memory,
+                 tgt_mask: Optional[Tensor] = None,
+                 memory_mask: Optional[Tensor] = None,
+                 tgt_key_padding_mask: Optional[Tensor] = None,
+                 memory_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None,
+                 query_pos: Optional[Tensor] = None):
+         tgt2 = self.norm1(tgt)
+         q = k = self.with_pos_embed(tgt2, query_pos)
+         tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
+                               key_padding_mask=tgt_key_padding_mask)[0]
+         tgt = tgt + self.dropout1(tgt2)
+         tgt2 = self.norm2(tgt)
+         tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
+                                    key=self.with_pos_embed(memory, pos),
+                                    value=memory, attn_mask=memory_mask,
+                                    key_padding_mask=memory_key_padding_mask)[0]
+         tgt = tgt + self.dropout2(tgt2)
+         tgt2 = self.norm3(tgt)
+         tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+         tgt = tgt + self.dropout3(tgt2)
+         return tgt
+
+ def _get_clones(module, N):
+     return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+ class TransformerEncoder(nn.Module):
+     def __init__(self, encoder_layer, num_layers, norm=None):
+         super().__init__()
+         self.layers = _get_clones(encoder_layer, num_layers)
+         self.num_layers = num_layers
+         self.norm = norm
+     def forward(self, src,
+                 mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         output = src
+         for layer in self.layers:
+             output = layer(output, src_mask=mask,
+                            src_key_padding_mask=src_key_padding_mask, pos=pos)
+         if self.norm is not None:
+             output = self.norm(output)
+         return output
+
+ class TransformerDecoder(nn.Module):
+     def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
+         super().__init__()
+         self.layers = _get_clones(decoder_layer, num_layers)
+         self.num_layers = num_layers
+         self.norm = norm
+         self.return_intermediate = return_intermediate
+     def forward(self, tgt, memory,
+                 tgt_mask: Optional[Tensor] = None,
+                 memory_mask: Optional[Tensor] = None,
+                 tgt_key_padding_mask: Optional[Tensor] = None,
+                 memory_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None,
+                 query_pos: Optional[Tensor] = None):
+         output = tgt
+         for layer in self.layers:
+             output = layer(output, memory, tgt_mask=tgt_mask,
+                            memory_mask=memory_mask,
+                            tgt_key_padding_mask=tgt_key_padding_mask,
+                            memory_key_padding_mask=memory_key_padding_mask,
+                            pos=pos, query_pos=query_pos)
+         if self.norm is not None:
+             output = self.norm(output)
+         return output
+
+
+ class Transformer(nn.Module):
+     def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
+                  num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
+                  activation="relu"
+                  ):
+         super().__init__()
+         self.num_encoder_layers = num_encoder_layers
+         if num_decoder_layers > 0:
+             encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
+                                                     dropout, activation)
+             encoder_norm = nn.LayerNorm(d_model)
+             self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+
+         decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
+                                                 dropout, activation)
+         decoder_norm = nn.LayerNorm(d_model)
+         self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
+
+         self._reset_parameters()
+         self.d_model = d_model
+         self.nhead = nhead
+
+     def _reset_parameters(self):
+         for p in self.parameters():
+             if p.dim() > 1:
+                 nn.init.xavier_uniform_(p)
+
+     def forward(self, src, query_embed, pos_embed=None, mask=None):
+         bs, c, w = src.shape
+         src = src.permute(2, 0, 1)
+         query_embed = query_embed.transpose(0,1)
+         # .unsqueeze(1).repeat(1, bs, 1)
+         if mask is not None:
+             mask = mask.flatten(1)
+
+         memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
+         hs = self.decoder(query_embed, memory, memory_key_padding_mask=mask,
+                           pos=pos_embed, query_pos=None)
+         return hs.transpose(0,1)
+         # return hs.transpose(1, 2), memory[:h * w].permute(1, 2, 0).view(bs, c, h, w)
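And a sketch of this full encoder-decoder variant, which additionally expects a set of query embeddings in the Query2Label style (the query count of 16 below is an arbitrary illustration, not a value taken from this repository):

import torch
from pretrain.transformer import Transformer

model = Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6)
src = torch.randn(2, 512, 100)       # (batch, d_model, length)
queries = torch.randn(2, 16, 512)    # (batch, num_queries, d_model) learned label queries
hs = model(src, queries)             # decoder output, transposed back to (batch, num_queries, d_model)
print(hs.shape)                      # torch.Size([2, 16, 512])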