import torch
from torch import nn
import torch.nn.functional as F
import GLOB
from model.utils import batch_image_segmentation
class Position_Encode_Integration(nn.Module):
    """Two-layer MLP embedding: projects each input vector through a
    ReLU-activated hidden layer down to the embedding dimension.

    Args:
        input_dim: size of each incoming feature vector.
        hidden_dim: width of the intermediate layer.
        output_dim: size of the produced embedding.
    """
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(Position_Encode_Integration, self).__init__()
        self.linear1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)
        self.linear2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)
        self.activate = nn.ReLU()

    def forward(self, input):
        # linear -> ReLU -> linear; no activation on the output layer.
        hidden = self.activate(self.linear1(input))
        return self.linear2(hidden)


import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentionLayer(nn.Module):
    """Single-head self-attention block with pre-layer-norm and a residual
    connection.

    Expects input of shape (batch_size, sentence_length, element_dim) and
    returns a tensor of the same shape.

    Args:
        element_dim: feature dimension of each sequence element.
        sentence_length: expected sequence length (stored; not used in forward).
        device: device on which the weight matrices are created.
    """
    def __init__(self, element_dim, sentence_length, device='cuda' if torch.cuda.is_available() else 'cpu'):
        super(AttentionLayer, self).__init__()
        self.element_dim = element_dim
        self.sentence_length = sentence_length
        self.device = device

        # Projection matrices for queries, keys and values.
        # NOTE(review): plain randn init (std=1) is unusually large for
        # attention projections; consider a scaled init — confirm intent.
        self.W_q = nn.Parameter(torch.randn(element_dim, element_dim, device=device))
        self.W_k = nn.Parameter(torch.randn(element_dim, element_dim, device=device))
        self.W_v = nn.Parameter(torch.randn(element_dim, element_dim, device=device))

        # Scaling factor for the dot-product logits, sqrt(d_k). Kept as a
        # plain Python float so no device handling is needed: the original
        # stored it as a tensor pinned to the construction device, which
        # would break if the module were later moved with .to()/.cuda()
        # (it was neither a parameter nor a registered buffer).
        self.scale = float(element_dim) ** 0.5

        # Pre-norm: normalization is applied before the attention projections.
        self.layer_norm = nn.LayerNorm(element_dim, device=device)

    def forward(self, input):
        """Apply scaled dot-product self-attention with a residual connection.

        Args:
            input: tensor of shape (batch_size, sentence_length, element_dim).

        Returns:
            Tensor of the same shape as ``input``.
        """
        # Normalize first (pre-norm); the residual below uses the raw input.
        input_norm = self.layer_norm(input)

        # Q, K, V: (batch_size, sentence_length, element_dim)
        Q = torch.matmul(input_norm, self.W_q)
        K = torch.matmul(input_norm, self.W_k)
        V = torch.matmul(input_norm, self.W_v)

        # Scaled dot-product logits:
        # (batch_size, sentence_length, sentence_length)
        attention_logits = torch.matmul(Q, K.transpose(-2, -1)) / self.scale

        # Normalize logits into attention weights over the key axis.
        attention_weights = F.softmax(attention_logits, dim=-1)

        # Weighted sum of values: (batch_size, sentence_length, element_dim)
        attention_output = torch.matmul(attention_weights, V)

        # Residual connection around the whole attention block.
        return input + attention_output
        

class ClassifyLayer(nn.Module):
    """Classification head: flattens the input, runs it through a two-layer
    ReLU MLP and returns softmax class probabilities.

    NOTE(review): applying softmax here means the output is probabilities,
    not logits — the training loss must expect probabilities (not
    nn.CrossEntropyLoss, which wants raw logits); confirm against the
    training loop.
    """
    def __init__(self, num_classes, input_dim, hidden_dim):
        super(ClassifyLayer, self).__init__()
        self.linear1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)
        self.linear2 = nn.Linear(in_features=hidden_dim, out_features=num_classes)
        self.activate = nn.ReLU()

    def forward(self, input):
        # Flatten everything but the batch dimension.
        flat = input.view(input.size(0), -1)
        logits = self.linear2(self.activate(self.linear1(flat)))
        # Per-sample probability distribution over the classes.
        return F.softmax(logits, dim=1)
    
    
class ViT(nn.Module):
    """Minimal Vision-Transformer-style classifier.

    Pipeline: split each image into patches (batch_image_segmentation),
    embed each patch vector with an MLP, run a stack of self-attention
    layers, then classify from the first sequence position.
    """
    def __init__(self):
        super(ViT, self).__init__()
        # Patch embedding dimensions. Each flattened patch apparently carries
        # SEGMENT_SIZE**2 pixels plus one extra element — presumably appended
        # by batch_image_segmentation (e.g. a position marker); TODO confirm.
        self.input_dim = GLOB.SEGMENT_SIZE ** 2 + 1
        self.hidden_dim = 128
        self.output_dim = 32

        # Encoder stack configuration.
        self.encoder_num = 4
        # NOTE(review): declared but never used — AttentionLayer is single-head.
        self.num_heads = 2

        # Classification head configuration.
        self.num_classes = 10
        self.hidden = self.hidden_dim

        # Patches per image plus one extra sequence slot.
        # Fixed: use integer floor division — the original used true
        # division, which produced a float sentence_length.
        seq_len = (GLOB.PICTURE_SIZE // GLOB.SEGMENT_SIZE) ** 2 + 1

        self.embed = nn.Sequential(
            Position_Encode_Integration(self.input_dim, self.hidden_dim, self.output_dim),
            *[AttentionLayer(element_dim=self.output_dim, sentence_length=seq_len)
              for _ in range(self.encoder_num)]
        )
        self.classify = ClassifyLayer(self.num_classes, self.output_dim, self.hidden)

    def forward(self, input):
        """Classify a batch of images.

        Args:
            input: image batch accepted by batch_image_segmentation.

        Returns:
            (batch_size, num_classes) tensor of class probabilities.
        """
        # Split images into a sequence of flattened patch vectors.
        x = batch_image_segmentation(input)
        # NOTE(review): hard-coded device move; assumes the module's
        # parameters also live on cuda when it is available — confirm.
        if torch.cuda.is_available():
            x = x.to('cuda')
        x = self.embed(x)
        # Take the first sequence position as the classification token.
        x = x[:, 0, :]
        return self.classify(x)