import torch
from model.ATSTSED_contrastive import ATSTSED
from model.modules.cnn import CNN
from model.modules.classifier import LinearAttentionCls, EmbeddingCls
from model.modules.rnn import BidirectionalGRU

def prev_key_align(key, mode):
    """Translate a state-dict key from the previous model's naming scheme to ours.

    Strips the ``sed_{mode}.`` prefix and renames the layers that changed
    between model versions (``cat_tf`` -> ``merge_layer``,
    ``dense`` -> ``classifier.linear``).
    """
    # Ordered (old, new) substring renames; applied one after another.
    renames = (
        (f"sed_{mode}.", ""),
        ("cat_tf", "merge_layer"),
        ("dense", "classifier.linear"),
    )
    aligned = key
    for old, new in renames:
        aligned = aligned.replace(old, new)
    return aligned

def load_prev_model_ckpt(model, ckpt_path, mode="student"):
    """Load weights from a previous-version Lightning checkpoint into ``model``.

    Only keys prefixed with ``sed_{mode}.`` are taken from the checkpoint;
    each key is remapped to the current naming scheme via ``prev_key_align``.

    Args:
        model: The target ``nn.Module`` to receive the weights.
        ckpt_path: Path to the ``.ckpt`` file (expects a ``state_dict`` entry).
        mode: Which sub-model prefix to extract, e.g. ``"student"`` or
            ``"teacher"``. Defaults to ``"student"`` so existing call sites
            that omit it keep working — confirm this matches the checkpoint.

    Returns:
        The same ``model`` instance with the matching weights loaded.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    state_dict = checkpoint['state_dict']
    prefix = f"sed_{mode}."
    model_dict = {
        prev_key_align(k, mode): v
        for k, v in state_dict.items()
        if k.startswith(prefix)
    }
    # strict=False: the current model may have extra heads (e.g. projector)
    # absent from the previous checkpoint.
    model.load_state_dict(model_dict, strict=False)
    print(f"Loaded model weights from previous model: {ckpt_path}")
    return model


if __name__ == "__main__":
    # Build the SED backbone: 7-layer CNN front-end -> BiGRU -> attention
    # classifier, plus a projection head for the contrastive objective.
    cnn = CNN(
        n_in_channel=1,
        activation='cg',
        channels=[16, 32, 64, 128, 128, 128, 128],
        kernel_size=[3, 3, 3, 3, 3, 3, 3],
        stride=[1, 1, 1, 1, 1, 1, 1],
        padding=[1, 1, 1, 1, 1, 1, 1],
        pooling=[[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]],
        dropout=0.5,
        normalization='batch'
    )
    rnn = BidirectionalGRU(
        n_in=128,
        n_hidden=128,
        num_layers=2,
        dropout=0.0
    )
    classifier = LinearAttentionCls(
        n_in=256,
        n_classes=10,
        dropout=0.5
    )
    projector = EmbeddingCls(
        n_in=256,
        n_classes=None,
        dropout=0.5
    )
    model = ATSTSED(
        cnn=cnn,
        rnn=rnn,
        classifier=classifier,
        projector=projector,
    )

    ckpt_path = "/home/shaonian/sed/pretrained_ckpts/epoch=199-step=5799.ckpt"

    # BUG FIX: the original call omitted the required `mode` argument and
    # raised TypeError. "student" matches the `sed_student.` key prefix
    # convention — TODO confirm against the checkpoint's actual keys.
    model = load_prev_model_ckpt(model, ckpt_path, mode="student")