Wuhuwill committed on
Commit 696bcce · verified · 1 Parent(s): 4c0f344

Upload ProDiff/diffProModel/protoTrans.py with huggingface_hub

Files changed (1):
  1. ProDiff/diffProModel/protoTrans.py +99 -0
ProDiff/diffProModel/protoTrans.py ADDED
@@ -0,0 +1,99 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class TrajectoryTransformer(nn.Module):
    """Transformer model to learn trajectory embeddings and a set of learnable prototypes."""

    def __init__(self, input_dim, embed_dim, num_layers, num_heads, forward_dim, seq_len, n_cluster, dropout=0.1):
        """Initializes the TrajectoryTransformer.

        Args:
            input_dim (int): Dimension of the input trajectory points (e.g., 3 for time, lat, lon).
            embed_dim (int): Dimension of the embeddings within the transformer.
            num_layers (int): Number of transformer encoder layers.
            num_heads (int): Number of attention heads in the transformer.
            forward_dim (int): Dimension of the feed-forward network in transformer layers.
            seq_len (int): Length of the input trajectory sequences.
            n_cluster (int): Number of prototypes to learn.
            dropout (float, optional): Dropout rate. Defaults to 0.1.
        """
        super(TrajectoryTransformer, self).__init__()
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.n_cluster = n_cluster

        self.linear_projection = nn.Linear(input_dim, embed_dim)

        # Positional embedding for the sequence
        self.pos_embedding = nn.Parameter(torch.randn(1, seq_len, embed_dim))

        # Standard Transformer encoder
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads, dim_feedforward=forward_dim,
            dropout=dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Layer normalization layers
        self.layer_norm1 = nn.LayerNorm(embed_dim)
        self.layer_norm2 = nn.LayerNorm(embed_dim)
        self.layer_norm_features = nn.LayerNorm(embed_dim)

        # Learnable prototypes
        self.prototypes = nn.Parameter(torch.randn(n_cluster, embed_dim))

        # Initialize weights
        self._init_weights()

    def _init_weights(self):
        """Initializes weights of the linear layers, transformer components, and prototypes."""
        nn.init.xavier_uniform_(self.linear_projection.weight)
        if self.linear_projection.bias is not None:
            nn.init.zeros_(self.linear_projection.bias)

        # Initialize transformer layers (PyTorch's defaults are usually fine, but can be overridden)
        for layer in self.transformer_encoder.layers:
            nn.init.xavier_uniform_(layer.linear1.weight)
            if layer.linear1.bias is not None:
                nn.init.zeros_(layer.linear1.bias)
            nn.init.xavier_uniform_(layer.linear2.weight)
            if layer.linear2.bias is not None:
                nn.init.zeros_(layer.linear2.bias)
            # Self-attention weights are more complex (in_proj_weight, out_proj.weight);
            # the default PyTorch init is usually fine for these.
            if hasattr(layer.self_attn, 'in_proj_weight') and layer.self_attn.in_proj_weight is not None:
                nn.init.xavier_uniform_(layer.self_attn.in_proj_weight)
            if hasattr(layer.self_attn, 'in_proj_bias') and layer.self_attn.in_proj_bias is not None:
                nn.init.zeros_(layer.self_attn.in_proj_bias)
            if hasattr(layer.self_attn.out_proj, 'weight') and layer.self_attn.out_proj.weight is not None:
                nn.init.xavier_uniform_(layer.self_attn.out_proj.weight)
            if hasattr(layer.self_attn.out_proj, 'bias') and layer.self_attn.out_proj.bias is not None:
                nn.init.zeros_(layer.self_attn.out_proj.bias)

        # Initialize prototypes (Xavier uniform)
        nn.init.xavier_uniform_(self.prototypes.data)

    def forward(self, x):
        """Forward pass of the TrajectoryTransformer.

        Args:
            x (torch.Tensor): Input trajectory batch, shape (batch_size, seq_len, input_dim).

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                - prototypes (torch.Tensor): Learned prototypes, shape (n_cluster, embed_dim).
                - features (torch.Tensor): Trajectory features, shape (batch_size, embed_dim).
        """
        batch_size, seq_len, _ = x.size()

        x = self.linear_projection(x)                # Project to (batch_size, seq_len, embed_dim)
        x = self.layer_norm1(x)                      # Normalize projected inputs
        x = x + self.pos_embedding[:, :seq_len, :]   # Add positional embedding

        x = self.transformer_encoder(x)              # (batch_size, seq_len, embed_dim)

        x = self.layer_norm2(x)                      # Normalize encoder output

        # Aggregate features over the sequence dimension (sum pooling)
        features = x.sum(dim=1)                      # (batch_size, embed_dim)
        features = self.layer_norm_features(features)

        # In train.py, the first returned value (`prototypes_from_transformer`) previously
        # came from the old `output_layer`; the learnable self.prototypes are returned instead.
        return self.prototypes, features
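For reference, a minimal usage sketch of the uploaded module. The hyperparameter values and the import path below are illustrative assumptions, not values taken from this commit:

    # Hypothetical usage sketch; all hyperparameters here are illustrative assumptions.
    import torch
    from ProDiff.diffProModel.protoTrans import TrajectoryTransformer  # assumed import path

    model = TrajectoryTransformer(
        input_dim=3,      # e.g., (time, lat, lon) per trajectory point
        embed_dim=64,
        num_layers=2,
        num_heads=4,      # must evenly divide embed_dim
        forward_dim=128,
        seq_len=50,
        n_cluster=10,
    )
    trajs = torch.randn(8, 50, 3)        # (batch_size, seq_len, input_dim)
    prototypes, features = model(trajs)
    print(prototypes.shape)              # torch.Size([10, 64])
    print(features.shape)                # torch.Size([8, 64])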
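How train.py consumes the returned (prototypes, features) pair is not part of this commit. Purely as a hypothetical sketch, one common pattern for prototype-based models is a cosine-similarity soft assignment of features to prototypes:

    # Hypothetical downstream step (train.py is not included in this commit);
    # this is one common way to relate features to learnable prototypes.
    import torch
    import torch.nn.functional as F

    def soft_assign(features, prototypes, temperature=0.1):
        # Normalize both sides so the dot product is a cosine similarity.
        f = F.normalize(features, dim=-1)     # (batch_size, embed_dim)
        p = F.normalize(prototypes, dim=-1)   # (n_cluster, embed_dim)
        logits = f @ p.t() / temperature      # (batch_size, n_cluster)
        return logits.softmax(dim=-1)         # soft cluster assignment per trajectory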