import os
import torch
import wandb
import argparse

from ecgcmr.signal.sig_datasets.ECGDataset import ECGDataset
from mmcl.models import ECGEncoder
from ecgcmr.utils.misc import attention_forward_wrapper, collate_fn


def parse_args(argv=None):
    """Parse command-line options for the embedding-extraction script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse falls back to ``sys.argv[1:]`` — identical
            to the previous behavior, but an explicit list makes the
            function testable and reusable from other code.

    Returns:
        argparse.Namespace with data paths, model geometry
        (electrodes/time steps/patch size) and loader settings.
    """
    parser = argparse.ArgumentParser()
    # Data locations.
    parser.add_argument('-ecg_data_path', type=str, default="/vol/aimspace/projects/ukbb/data/cardiac/cardiac_segmentations/projects/ecg/ecgs_train_ecg_imaging_noBase_gn.pt")
    parser.add_argument('-checkpoint_path', type=str, default="TIME-ECG-CMR/MMCL-ECG-CMR/model_weights/signal_encoder_mdm.pth")
    parser.add_argument('-output_dir', type=str, default='TIME-ECG-CMR/ecgcmr/output')
    # Signal / patch geometry consumed by the ViT encoder.
    parser.add_argument('-ecg_time_steps', type=int, default=2500)
    parser.add_argument('-input_electrodes', type=int, default=12)
    parser.add_argument('-patch_width', type=int, default=1)
    parser.add_argument('-patch_height', type=int, default=100)
    # DataLoader settings.
    parser.add_argument('-batch_size', type=int, default=8)
    parser.add_argument('-num_workers', type=int, default=2)

    return parser.parse_args(argv)


def get_embeddings(args):
    """Extract per-sample ECG embeddings with a pretrained ViT encoder and save them.

    Builds an ``ECGDataset`` from ``args.ecg_data_path``, runs every sample
    through ``vit_tiny_patchX`` (weights from ``args.checkpoint_path``) and
    writes a stacked tensor of localized features to
    ``<args.output_dir>/ecg_embeddings_2.pt``.

    Args:
        args: Namespace from :func:`parse_args`. Mutated in place with the
            derived fields ``input_size``, ``patch_size``, ``crop_resize``,
            ``find_es_ed`` and ``sampling_rate`` that the dataset/model expect.

    Side effects:
        Creates ``args.output_dir`` if missing and saves the embeddings file.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    # Derive model/dataset geometry from the CLI options.
    args.input_size = (1, args.input_electrodes, args.ecg_time_steps)
    args.patch_size = (args.patch_width, args.patch_height)
    args.crop_resize = True
    args.find_es_ed = False
    args.sampling_rate = 500

    dataset = ECGDataset(data_path=args.ecg_data_path,
                         crop_resize=args.crop_resize,
                         find_es_ed=args.find_es_ed,
                         args=args)

    # Sequential order so row i of the saved tensor corresponds to dataset sample i.
    sampler = torch.utils.data.SequentialSampler(dataset)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        drop_last=False,  # keep the final partial batch so every sample is embedded
    )

    print("Training set size: ", len(dataset))

    model = ECGEncoder.__dict__['vit_tiny_patchX'](
        img_size=args.input_size,
        patch_size=args.patch_size,
        global_pool=False)

    # Patch the last attention block so its forward also exposes attention maps
    # (see attention_forward_wrapper).
    model.blocks[-1].attn.forward = attention_forward_wrapper(model.blocks[-1].attn)

    checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
    # strict=False: the checkpoint may contain keys (e.g. decoder/head weights)
    # that are absent from this encoder-only model.
    model.load_state_dict(checkpoint['model'], strict=False)

    model.eval()
    model.to(device)

    print(model)

    embeddings_process = []

    # Inference only: no_grad avoids building the autograd graph, and autocast
    # is enabled only on CUDA (it is a no-op / warning source on CPU).
    with torch.no_grad(), torch.cuda.amp.autocast(enabled=device.type == 'cuda'):
        for data in data_loader:
            ecgs = data['ecgs'].to(device, non_blocking=True)
            batch_features = model.forward_features(ecgs, localized=True)
            embeddings_process.extend(batch_features.detach().cpu())

    # Stack list of per-sample tensors into a single (num_samples, ...) tensor.
    embeddings = torch.stack(embeddings_process, dim=0)

    # Ensure the output directory exists so torch.save does not fail.
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(embeddings, os.path.join(args.output_dir, "ecg_embeddings_2.pt"))
if __name__ == "__main__":
    # Script entry point: parse CLI options, then extract and save embeddings.
    get_embeddings(parse_args())