import os
import torch
import wandb
import psutil
import torch.distributed as dist

from ecgcmr.signal.sig_datasets.ECGDataset_xml import ECGDataset
from ecgcmr.utils.distributed import init_dist_gpu
from mmcl.models import ECGEncoder
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from ecgcmr.utils.misc import attention_forward_wrapper, ecg_collate_fn


def get_embeddings(gpu, args):
    """Compute localized ViT features for every ECG record across all
    distributed ranks and save the gathered tensor on rank 0.

    Args:
        gpu: local GPU index for this process.
        args: namespace with distributed / data / model settings.  Mutated
            here: ``input_size`` and ``patch_size`` are derived from it.

    Side effects:
        Initializes the process group, logs to wandb on rank 0, and writes
        ``ecg_embeddings_2.pt`` into ``args.output_dir`` on rank 0.
    """
    init_dist_gpu(gpu, args)

    # Derived model-input geometry: (channels, electrodes, time steps).
    args.input_size = (1, args.input_electrodes, args.ecg_time_steps)
    args.patch_size = (args.patch_width, args.patch_height)

    if args.rank == 0:
        # Give the wandb service extra time to come up on busy cluster nodes.
        os.environ["WANDB__SERVICE_WAIT"] = "300"
        wandb.init(project='ecg_embeds', entity='alsalivan')

    dataset = ECGDataset(data_path=args.ecg_data_path,
                         transform=True,
                         args=args)

    # shuffle=False gives a deterministic *strided* split: rank r receives
    # dataset indices r, r + world_size, r + 2*world_size, ...  The sampler
    # pads the tail so every rank sees the same number of samples.
    sampler = DistributedSampler(dataset, shuffle=False,
                                 num_replicas=args.world_size,
                                 rank=args.rank)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        collate_fn=ecg_collate_fn,
        pin_memory=True,
        drop_last=False,
    )

    print("Training set size: ", len(dataset))

    model = ECGEncoder.__dict__['vit_tiny_patchX'](
        img_size=args.input_size,
        patch_size=args.patch_size,
        global_pool=False)

    # Patch the final attention block so forward_features(localized=True)
    # can expose per-token (localized) features.
    model.blocks[-1].attn.forward = attention_forward_wrapper(model.blocks[-1].attn)

    model.eval()

    checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
    model.load_state_dict(checkpoint['model'], strict=False)

    print(model)

    model.to(args.gpu)
    model = DDP(model, device_ids=[args.gpu])

    embeddings_process = []

    # Inference only: no_grad() skips autograd bookkeeping (saves memory),
    # autocast runs the forward pass in mixed precision.
    with torch.no_grad(), torch.cuda.amp.autocast():
        for data in data_loader:
            data = data.to(args.gpu, non_blocking=True)
            batch_features = model.module.forward_features(data, localized=True)
            embeddings_process.extend(batch_features.detach().cpu())

    embeddings = torch.stack(embeddings_process, dim=0)

    # NOTE(review): dist.gather on CPU tensors requires the gloo backend;
    # NCCL does not implement gather — confirm the backend chosen in
    # init_dist_gpu (otherwise use all_gather on GPU tensors).
    if args.rank == 0:
        gathered_embeddings = [torch.empty_like(embeddings)
                               for _ in range(dist.get_world_size())]
        dist.gather(embeddings, gather_list=gathered_embeddings, dst=0)

        # Undo the sampler's strided split: sample j of rank r corresponds
        # to dataset index j * world_size + r, so interleave the per-rank
        # tensors (stack on a new rank axis, then flatten) instead of a
        # plain concat, and drop the sampler's end-of-epoch padding.
        stacked = torch.stack(gathered_embeddings, dim=1)
        final_embeddings = stacked.reshape(-1, *embeddings.shape[1:])[:len(dataset)]
        print(final_embeddings.shape)

        os.makedirs(args.output_dir, exist_ok=True)
        torch.save(final_embeddings, os.path.join(args.output_dir, "ecg_embeddings_2.pt"))

    else:
        dist.gather(embeddings, dst=0)
