import math
from datetime import timedelta

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel, PretrainedConfig

class PositionalEmbedding(nn.Module):
    """Standard sinusoidal positional encoding, stored as a non-trainable buffer."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        pos_emb = torch.zeros(max_len, d_model).float()
        pos_emb.requires_grad = False
        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
        pos_emb[:, 0::2] = torch.sin(position * div_term)
        pos_emb[:, 1::2] = torch.cos(position * div_term)
        pos_emb = pos_emb.unsqueeze(0)
        self.register_buffer('pos_emb', pos_emb)

    def forward(self, x):
        return self.pos_emb[:, :x.size(1)]

class TokenEmbedding(nn.Module):
    def __init__(self, d_model):
        super(TokenEmbedding, self).__init__()
        D_INP = 1
        self.conv = nn.Conv1d(in_channels=D_INP, out_channels=d_model, kernel_size=3,
                              padding=1, padding_mode='circular')

    def forward(self, x):
        x = self.conv(x.transpose(-1, 1)).transpose(-1, 1)
        return x

class TemporalEmbedding(nn.Module):
    def __init__(self, d_model, num_features):
        super(TemporalEmbedding, self).__init__()
        self.embed = nn.Linear(num_features, d_model)

    def forward(self, x):
        x = x.float()
        return self.embed(x)

class SubjectEmbedding(nn.Module):
    def __init__(self, d_model):
        super(SubjectEmbedding, self).__init__()
        self.id_embedding = nn.Linear(1, d_model)

    def forward(self, x):
        x = x.float().unsqueeze(1)
        embed_x = self.id_embedding(x)
        return embed_x

class DataEmbedding(nn.Module):
    def __init__(self, d_model, r_drop, num_features):
        super(DataEmbedding, self).__init__()
        self.value_embedding = TokenEmbedding(d_model)
        self.time_embedding = TemporalEmbedding(d_model, num_features)
        self.positional_embedding = PositionalEmbedding(d_model)
        self.subject_embedding = SubjectEmbedding(d_model)
        self.dropout = nn.Dropout(r_drop)

    def forward(self, x_id, x, x_mark):
        x = self.value_embedding(x) + self.positional_embedding(x) + self.time_embedding(x_mark)
        x = torch.cat((self.subject_embedding(x_id).unsqueeze(1), x), dim=1)
        return self.dropout(x)

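# Shape sketch for DataEmbedding.forward (batch size b, input length n,
# num_features time covariates):
#   x       (b, n, 1)            -> TokenEmbedding    -> (b, n, d_model)
#   x_mark  (b, n, num_features) -> TemporalEmbedding -> (b, n, d_model)
#   x_id    (b,)                 -> SubjectEmbedding  -> (b, d_model), unsqueezed to (b, 1, d_model)
# The subject token is prepended along the time axis, so the output sequence has length n + 1.
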
class CausalConv1d(torch.nn.Conv1d):
    # Left-padded (causal) 1D convolution; not referenced elsewhere in this file.
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        self.__padding = (kernel_size - 1) * dilation
        super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size,
                                           stride=stride, padding=self.__padding, dilation=dilation,
                                           groups=groups, bias=bias)

    def forward(self, input):
        result = super(CausalConv1d, self).forward(input)
        if self.__padding != 0:
            return result[:, :, :-self.__padding]
        return result

class TriangularCausalMask:
    def __init__(self, b, n, device="cpu"):
        mask_shape = [b, 1, n, n]
        with torch.no_grad():
            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)

    @property
    def mask(self):
        return self._mask

class MultiheadAttention(nn.Module):
    def __init__(self, d_model, n_heads, d_keys, mask_flag, r_att_drop=0.1):
        super(MultiheadAttention, self).__init__()
        self.h, self.d, self.mask_flag = n_heads, d_keys, mask_flag
        self.proj_q = nn.Linear(d_model, self.h * self.d)
        self.proj_k = nn.Linear(d_model, self.h * self.d)
        self.proj_v = nn.Linear(d_model, self.h * self.d)
        self.proj_out = nn.Linear(self.h * self.d, d_model)
        self.dropout = nn.Dropout(r_att_drop)

    def forward(self, q, k, v):
        b, n_q, n_k, h, d = q.size(0), q.size(1), k.size(1), self.h, self.d
        q, k, v = self.proj_q(q), self.proj_k(k), self.proj_v(v)
        q, k, v = map(lambda x: x.reshape(b, -1, h, d), [q, k, v])
        scores = torch.einsum('bnhd,bmhd->bhnm', (q, k))
        if self.mask_flag:
            att_mask = TriangularCausalMask(b, n_q, device=q.device)
            scores.masked_fill_(att_mask.mask, -np.inf)
        att = F.softmax(scores / (self.d ** .5), dim=-1)
        att = self.dropout(att)
        att_out = torch.einsum('bhnm,bmhd->bnhd', (att, v))
        att_out = att_out.reshape(b, -1, h * d)
        out = self.proj_out(att_out)
        return out

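# Minimal usage sketch for MultiheadAttention (shapes only; the sizes below are
# illustrative, not values from the original setup):
#   att = MultiheadAttention(d_model=64, n_heads=4, d_keys=16, mask_flag=False)
#   q = k = v = torch.randn(2, 10, 64)
#   att(q, k, v).shape  # torch.Size([2, 10, 64])
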
class ConvLayer(nn.Module):
    def __init__(self, d_model):
        super(ConvLayer, self).__init__()
        self.downConv = nn.Conv1d(in_channels=d_model, out_channels=d_model, kernel_size=3,
                                  padding=1, padding_mode='circular')
        self.norm = nn.BatchNorm1d(d_model)
        self.activ = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.downConv(x.transpose(-1, 1))
        x = self.norm(x)
        x = self.activ(x)
        x = self.maxPool(x)
        x = x.transpose(-1, 1)
        return x

class EncoderLayer(nn.Module):
    def __init__(self, att, d_model, d_fcn, r_drop, activ="relu"):
        super(EncoderLayer, self).__init__()
        self.att = att
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_fcn, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_fcn, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(r_drop)
        self.activ = F.relu if activ == "relu" else F.gelu

    def forward(self, x):
        new_x = self.att(x, x, x)
        x = x + self.dropout(new_x)
        res = x = self.norm1(x)
        res = self.dropout(self.activ(self.conv1(res.transpose(-1, 1))))
        res = self.dropout(self.conv2(res).transpose(-1, 1))
        return self.norm2(x + res)

class Encoder(nn.Module):
    def __init__(self, enc_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.enc_layers = nn.ModuleList(enc_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x):
        if self.conv_layers is not None:
            for enc_layer, conv_layer in zip(self.enc_layers, self.conv_layers):
                x = enc_layer(x)
                x = conv_layer(x)
            x = self.enc_layers[-1](x)
        else:
            for enc_layer in self.enc_layers:
                x = enc_layer(x)
        if self.norm is not None:
            x = self.norm(x)
        return x

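# Note: when the Encoder is built with conv_layers (distil=True in Gluformer below),
# each ConvLayer's stride-2 max pooling roughly halves the sequence length between
# encoder layers, Informer-style; e.g. a 49-token embedded sequence becomes 25 tokens
# after one ConvLayer.
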
class DecoderLayer(nn.Module):
    def __init__(self, self_att, cross_att, d_model, d_fcn, r_drop, activ="relu"):
        super(DecoderLayer, self).__init__()
        self.self_att = self_att
        self.cross_att = cross_att
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_fcn, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_fcn, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(r_drop)
        self.activ = F.relu if activ == "relu" else F.gelu

    def forward(self, x_dec, x_enc):
        x_dec = x_dec + self.self_att(x_dec, x_dec, x_dec)
        x_dec = self.norm1(x_dec)
        x_dec = x_dec + self.cross_att(x_dec, x_enc, x_enc)
        res = x_dec = self.norm2(x_dec)
        res = self.dropout(self.activ(self.conv1(res.transpose(-1, 1))))
        res = self.dropout(self.conv2(res).transpose(-1, 1))
        return self.norm3(x_dec + res)

class Decoder(nn.Module):
    def __init__(self, layers, norm_layer=None):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer

    def forward(self, x_dec, x_enc):
        for layer in self.layers:
            x_dec = layer(x_dec, x_enc)
        if self.norm is not None:
            x_dec = self.norm(x_dec)
        return x_dec

class Variance(nn.Module):
    def __init__(self, d_model, r_drop, len_seq):
        super(Variance, self).__init__()
        self.proj1 = nn.Linear(d_model, 1)
        self.dropout = nn.Dropout(r_drop)
        self.activ1 = nn.ReLU()
        self.proj2 = nn.Linear(len_seq + 1, 1)
        self.activ2 = nn.Tanh()

    def forward(self, x):
        x = self.proj1(x)
        x = self.activ1(x)
        x = self.dropout(x)
        x = x.transpose(-1, 1)
        x = self.proj2(x)
        x = 10 * self.activ2(x)
        return x

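# Note on the Variance head: the wrapper at the bottom of this file names its output
# `log_var`, so the 10 * tanh(.) above bounds the predicted log-variance to (-10, 10).
# proj2 takes len_seq + 1 inputs because DataEmbedding prepends a subject token to the
# len_seq encoder positions.
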
class Gluformer(nn.Module):
    def __init__(self, d_model, n_heads, d_fcn, r_drop, activ, num_enc_layers, num_dec_layers,
                 distil, len_seq, len_pred, num_features=5):
        super(Gluformer, self).__init__()
        self.len_pred = len_pred
        self.enc_embedding = DataEmbedding(d_model, r_drop, num_features)
        self.dec_embedding = DataEmbedding(d_model, r_drop, num_features)
        self.encoder = Encoder(
            [
                EncoderLayer(
                    att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads,
                                           mask_flag=False, r_att_drop=r_drop),
                    d_model=d_model,
                    d_fcn=d_fcn,
                    r_drop=r_drop,
                    activ=activ) for _ in range(num_enc_layers)
            ],
            [
                ConvLayer(d_model) for _ in range(num_enc_layers - 1)
            ] if distil else None,
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        self.decoder = Decoder(
            [
                DecoderLayer(
                    self_att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads,
                                                mask_flag=True, r_att_drop=r_drop),
                    cross_att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads,
                                                 mask_flag=False, r_att_drop=r_drop),
                    d_model=d_model,
                    d_fcn=d_fcn,
                    r_drop=r_drop,
                    activ=activ) for _ in range(num_dec_layers)
            ],
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        D_OUT = 1
        self.projection = nn.Linear(d_model, D_OUT, bias=True)
        self.var = Variance(d_model, r_drop, len_seq)

    def forward(self, x_id, x_enc, x_mark_enc, x_dec, x_mark_dec):
        enc_out = self.enc_embedding(x_id, x_enc, x_mark_enc)
        var_out = self.var(enc_out)
        enc_out = self.encoder(enc_out)
        dec_out = self.dec_embedding(x_id, x_dec, x_mark_dec)
        dec_out = self.decoder(dec_out, enc_out)
        dec_out = self.projection(dec_out)
        return dec_out[:, -self.len_pred:, :], var_out

class GluformerConfig(PretrainedConfig):
    model_type = "gluformer"

    def __init__(self, d_model=64, n_heads=4, d_fcn=128, r_drop=0.1, activ="relu",
                 num_enc_layers=2, num_dec_layers=2, distil=False, len_seq=48,
                 len_pred=12, num_features=5, len_label=12, **kwargs):
        super().__init__(**kwargs)
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_fcn = d_fcn
        self.r_drop = r_drop
        self.activ = activ
        self.num_enc_layers = num_enc_layers
        self.num_dec_layers = num_dec_layers
        self.distil = distil
        self.len_seq = len_seq
        self.len_pred = len_pred
        self.num_features = num_features
        # Decoder warm-start length consumed by the Preprocessor (via config.len_label in
        # GluformerForTimeSeries). The default of 12 is an assumed value; set it explicitly
        # to match the trained checkpoint.
        self.len_label = len_label

class Preprocessor:
    UPPER = 402
    LOWER = 38
    SCALE_1 = 5
    SCALE_2 = 2

    def __init__(self, len_seq, len_pred, len_label):
        self.len_seq = len_seq
        self.len_pred = len_pred
        self.len_label = len_label

    def normalize_glucose(self, glucose):
        return (glucose - self.LOWER) / (self.UPPER - self.LOWER) * (self.SCALE_1 * self.SCALE_2) - self.SCALE_1

    def unnormalize_glucose(self, glucose):
        return (glucose + self.SCALE_1) / (self.SCALE_1 * self.SCALE_2) * (self.UPPER - self.LOWER) + self.LOWER

    def normalize_datetime(self, date):
        DAYS_YEAR = 182.5
        DAYS_MONTH = 15.5
        DAYS_WEEK = 3.5
        HOURS_DAY = 12.0
        MINUTES_HOUR = 30.0
        OFFSET = 1
        return np.array([date.timetuple().tm_yday / DAYS_YEAR - OFFSET,
                         date.day / DAYS_MONTH - OFFSET,
                         date.weekday() / DAYS_WEEK - OFFSET,
                         date.hour / HOURS_DAY - OFFSET,
                         date.minute / MINUTES_HOUR - OFFSET], dtype=float)

    def __call__(self, subject_id, timestamps, glucose_values):
        subject_id = torch.tensor([subject_id]).float()
        glucose_values = torch.tensor(glucose_values).reshape(1, self.len_seq, 1).float()
        glucose_values = self.normalize_glucose(glucose_values)
        # Future timestamps start one 5-minute step after the last observation, so the
        # last observed timestamp is not duplicated in the decoder time marks.
        y_timestamps = timestamps[-self.len_label:] + [timestamps[-1] + timedelta(minutes=5 * i)
                                                       for i in range(1, self.len_pred + 1)]
        decoder_input = torch.cat([glucose_values[:, -self.len_label:, :],
                                   torch.zeros(1, self.len_pred, 1).float()], dim=1)
        x_ts = torch.tensor(np.vstack([self.normalize_datetime(date) for date in timestamps])).float().unsqueeze(0)
        y_ts = torch.tensor(np.vstack([self.normalize_datetime(date) for date in y_timestamps])).float().unsqueeze(0)
        return subject_id, glucose_values, decoder_input, x_ts, y_ts

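# Example call (illustrative; assumes 5-minute CGM sampling, the config-default lengths,
# and the assumed len_label of 12):
#   from datetime import datetime
#   pre = Preprocessor(len_seq=48, len_pred=12, len_label=12)
#   ts = [datetime(2024, 1, 1) + timedelta(minutes=5 * i) for i in range(48)]
#   x_id, x_enc, x_dec, x_mark_enc, y_mark_dec = pre(0, ts, [120.0] * 48)
#   # x_enc: (1, 48, 1), x_dec: (1, 24, 1), x_mark_enc: (1, 48, 5), y_mark_dec: (1, 24, 5)
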
class GluformerForTimeSeries(PreTrainedModel):
    config_class = GluformerConfig
    base_model_prefix = "gluformer"

    def __init__(self, config: GluformerConfig):
        super().__init__(config)
        self.model = Gluformer(
            d_model=config.d_model,
            n_heads=config.n_heads,
            d_fcn=config.d_fcn,
            r_drop=config.r_drop,
            activ=config.activ,
            num_enc_layers=config.num_enc_layers,
            num_dec_layers=config.num_dec_layers,
            distil=config.distil,
            len_seq=config.len_seq,
            len_pred=config.len_pred,
            num_features=config.num_features
        )
        self.preprocessor = Preprocessor(config.len_seq, config.len_pred, config.len_label)

    def forward(self, subject_id, timestamps, glucose_values):
        x_id, x_enc, x_dec, x_mark_enc, y_mark_dec = self.preprocessor(subject_id, timestamps, glucose_values)
        output, log_var = self.model(x_id, x_enc, x_mark_enc, x_dec, y_mark_dec)
        return self.preprocessor.unnormalize_glucose(output), log_var
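

if __name__ == "__main__":
    # Minimal smoke test with synthetic data. It relies on the config defaults above
    # (including the assumed len_label); the subject id and glucose trace are made up
    # purely for illustration, not taken from the original training setup.
    from datetime import datetime

    config = GluformerConfig()
    model = GluformerForTimeSeries(config)
    model.eval()

    timestamps = [datetime(2024, 1, 1) + timedelta(minutes=5 * i) for i in range(config.len_seq)]
    glucose = np.linspace(100.0, 140.0, config.len_seq).tolist()

    with torch.no_grad():
        prediction, log_var = model(subject_id=0, timestamps=timestamps, glucose_values=glucose)
    print(prediction.shape, log_var.shape)  # expected: (1, len_pred, 1) and (1, 1, 1)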