import torch.nn as nn
import torch
from new_vit import TBAM2 as TBAM
import torch.nn.functional as F
'''
NOTE: stale module comment (originally: "use nn.Sequential to package a series
of layers into a single unit") — nothing below actually uses nn.Sequential.
'''

class Amend_raf(nn.Module):
    """Residual channel-sum amendment.

    Collapses the channel dimension of the input by summation, then adds a
    learnable fraction (``alpha``) of the batch-mean of that map back onto
    each sample's map.

    Args:
        inplace: Unused; retained for backward compatibility with callers.
    """

    def __init__(self, inplace=2):
        super(Amend_raf, self).__init__()
        # Learnable scale for the batch-mean residual term.
        self.alpha = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x):
        """Return ``sum_c(x) + alpha * mean_batch(sum_c(x))``.

        Args:
            x: Tensor of shape (B, C, H, W) — assumes a 4-D input; the math
               itself only requires dims 0 and 1 to be batch/channel.

        Returns:
            Tensor of shape (B, 1, H, W).
        """
        # Collapse channels into a single spatial map per sample: (B, 1, H, W).
        local = torch.sum(x, dim=1, keepdim=True)
        # Mean over the batch dimension -> (1, H, W); broadcasts back over B.
        global_mean = local.mean(dim=[0])
        # Residual combination with learnable weight alpha.
        return local + self.alpha * global_mean



class TS_model(nn.Module):
    """Four-stage TBAM backbone with global average pooling and a linear
    classification head.

    Args:
        num_classes: Number of output classes for the final linear layer
            (default 7).
    """

    def __init__(self, num_classes=7):
        super(TS_model, self).__init__()
        # Progressive TBAM stages: the spatial embedding dim shrinks
        # (224^2 -> 56^2 -> 28^2 -> 14^2) while the channel count grows
        # (3 -> 64 -> 128 -> 256 -> 512).
        self.TBAM1 = TBAM(ch_embed_dim=224 * 224, num_heads=5, ch_num_patchs=3,
                          ffn_dim=128, patch_size=4, out_dim=64)
        self.TBAM2 = TBAM(ch_embed_dim=56 * 56, num_heads=5, ch_num_patchs=64,
                          ffn_dim=128, patch_size=2, out_dim=128)
        self.TBAM3 = TBAM(ch_embed_dim=28 * 28, num_heads=5, ch_num_patchs=128,
                          ffn_dim=128, patch_size=2, out_dim=256)
        self.TBAM4 = TBAM(ch_embed_dim=14 * 14, num_heads=5, ch_num_patchs=256,
                          ffn_dim=128, patch_size=2, out_dim=512)
        # Bug fix: was hard-coded to nn.Linear(512, 7), silently ignoring
        # the num_classes argument. The default (7) keeps old behavior.
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        """Run the TBAM stages, pool spatially, and classify.

        Args:
            x: Input image batch; presumably (B, 3, 224, 224) given the
               stage-1 embedding dims — confirm against TBAM's contract.

        Returns:
            Logits of shape (B, num_classes).
        """
        x = self.TBAM1(x)
        x = self.TBAM2(x)
        x = self.TBAM3(x)
        x = self.TBAM4(x)
        # Global average pool to (B, 512, 1, 1), then flatten to (B, 512).
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), -1)
        return self.fc(x)



# model = TS_model()
# model.cuda()
# img = torch.randn(2, 3, 224, 224).cuda()
# pred = model(img)
# # print(pred.size())