import torch
import torch.nn as nn
from models.TransBTS.Transformer import TransformerModel
from models.TransBTS.PositionalEncoding import FixedPositionalEncoding,LearnedPositionalEncoding
from models.TransBTS.Unet_skipconnection import Unet
from models.TransBTS.botunet import _make_d_layer,_make_n_layer,_make_o_layer
import numpy as np


size = 16


def shuffle_within_patches(x, q_h, q_w):
    """Randomly permute pixel positions inside each cell of a q_h x q_w grid.

    The (H, W) plane of ``x`` is split into a grid of ``q_h * q_w`` cells,
    each of size ``(H // q_h, W // q_w)``. Within every cell, the pixel
    positions are shuffled by an independent random permutation; the SAME
    permutation is applied across all batch items and channels of that cell
    (they move together through the trailing N*C axis).

    Args:
        x: tensor of shape (N, C, H, W). Assumes H % q_h == 0 and
           W % q_w == 0 — TODO confirm callers guarantee this.
        q_h: number of grid cells along the height axis.
        q_w: number of grid cells along the width axis.

    Returns:
        Tensor of shape (N, C, H, W) with intra-cell pixels shuffled.
        Uses torch's global RNG (``torch.randperm``), so results are
        reproducible under ``torch.manual_seed``.
    """
    n, c, h, w = x.shape
    p_h, p_w = h // q_h, w // q_w  # patch (cell) height / width

    # (N, C, H, W) -> (N, C, Q_h, P_h, Q_w, P_w): factor each spatial axis
    # into (grid index, within-cell index).
    cells = x.reshape(n, c, q_h, p_h, q_w, p_w)

    # -> (Q_h*Q_w, P_h*P_w, N*C): one row per grid cell, one column per
    # within-cell pixel position. The permute makes this reshape a copy,
    # so the in-place shuffle below does not touch the caller's tensor.
    cells = cells.permute(2, 4, 3, 5, 0, 1)
    cells = cells.reshape(q_h * q_w, p_h * p_w, n * c)

    # Shuffle pixel positions independently per cell; each permutation is
    # shared by every (batch, channel) slice of that cell.
    for i in range(cells.shape[0]):
        idx = torch.randperm(cells.shape[1])
        cells[i] = cells[i][idx]

    # Rearrange so that the grid axis (Q_h*Q_w) is last — the layout the
    # original script staged for an attention step.
    cells = cells.reshape(q_h * q_w, p_h * p_w, n, c)
    cells = cells.permute(2, 1, 3, 0)
    cells = cells.reshape(n * p_h * p_w, c, q_h, q_w)
    # NOTE(review): the original marked this point "Here is MHSA" — a
    # multi-head self-attention module was presumably meant to run on the
    # (N*P_h*P_w, C, Q_h, Q_w) layout; it is a no-op placeholder here.

    # Invert the factorisation back to (N, C, H, W).
    cells = cells.reshape(n, p_h, p_w, c, q_h, q_w)
    cells = cells.permute(0, 3, 4, 1, 5, 2)
    return cells.reshape(n, c, h, w)


if __name__ == '__main__':
    with torch.no_grad():
        import os
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        # Fall back to CPU so the script also runs on machines without a
        # GPU (the original hard-coded 'cuda:0' and crashed on CPU-only).
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        torch.manual_seed(2021)
        x = torch.rand((4, 2, size, size), device=device)

        # NOTE(review): assumes `size` is a perfect square so the grid is
        # sqrt(size) x sqrt(size) — holds for size=16, verify otherwise.
        q = int(size ** 0.5)
        out2 = shuffle_within_patches(x, q, q)