import torch
from torch import nn    
import time

from .SPA import SuperpixelAggregation
from .SPCA import CrossSuperPixelAggregation
from .SPCAwithXformers import CrossSuperPixelAggregation as xformsrs
from .ISPA import SPIntraAttModule
from .LA import LocalAttention
from .ffn import FFN


class SPI_Block(nn.Module):
    """Superpixel-Interaction block.

    Pipeline inside ``forward``: superpixel aggregation (SPA) produces
    superpixel features and an association map, intra-superpixel attention
    (ISPA) and cross-superpixel attention (SPCA) refine the features (each
    followed by an FFN), then local windowed attention is applied.  The block
    output is added to the input as a residual connection, so the spatial
    shape and channel count of ``x`` are preserved.
    """

    def __init__(self,
                # image shape parameters
                height,
                width,

                # grid size used by the SPA module
                grid_size: tuple,
                # number of SPA iterations
                num_iters,

                # hidden dimension shared by all transformer modules
                hidden_dim,
                # number of heads shared by all transformer modules
                num_heads,

                # local-attention patch size (assumed square)
                patch_size: int,
                # local-attention overlap
                overlap
                ):
        super().__init__()

        # Superpixel aggregation: yields superpixel features and the
        # pixel/superpixel association map.
        self.spa = SuperpixelAggregation(
            height=height,
            width=width,
            grid_size=grid_size,
            num_iters=num_iters
        )

        # Cross-superpixel attention + its feed-forward network.
        self.spca = CrossSuperPixelAggregation(
            hidden_dim=hidden_dim,
            num_heads=num_heads
        )
        self.spca_ffn = FFN(hidden_channels=hidden_dim * 4, out_channels=hidden_dim)

        # Intra-superpixel attention + its feed-forward network.
        # topk equals the number of pixels per grid cell.
        self.ispa = SPIntraAttModule(
            dim=hidden_dim,
            num_heads=num_heads,
            qk_dim=hidden_dim,
            topk=grid_size[0] * grid_size[1]
        )
        self.ispa_ffn = FFN(hidden_channels=hidden_dim * 4, out_channels=hidden_dim)

        # Local windowed attention followed by an FFN.
        self.la = nn.Sequential(
            LocalAttention(
                patch_size=patch_size,
                overlap=overlap,
                num_heads=num_heads,
                input_dim=hidden_dim
            ),
            FFN(hidden_channels=hidden_dim * 4, out_channels=hidden_dim)
        )

        # Number of superpixels = number of grid cells covering the image.
        self.num_super_pixels = (height // grid_size[0]) * (width // grid_size[1])

    def forward(self, x):
        # s: superpixel features; a: association map from SPA.
        s, a = self.spa(x)

        # Swap the last two dimensions of the association map before ISPA.
        # NOTE(review): exact layout depends on SPA's output convention —
        # presumably (B, superpixels, pixels) -> (B, pixels, superpixels).
        a = torch.permute(a, [0, 2, 1]).contiguous()

        # Intra-superpixel attention + FFN.
        out1 = self.ispa(x, a, self.num_super_pixels)
        out1 = self.ispa_ffn(out1)

        # Cross-superpixel attention + FFN.
        # (Debug timing/print removed from the hot path.)
        out2 = self.spca(out1, s)
        out2 = self.spca_ffn(out2)

        # Local attention, then residual skip back to the block input.
        result = self.la(out2)
        return result + x
    
class SPIN(nn.Module):
    """Superpixel-Interaction Network for image super-resolution.

    Architecture: a 3x3 conv lifts the RGB input to ``hidden_dim`` channels,
    ``num_blocks`` stacked :class:`SPI_Block` modules refine the features,
    then a 1x1 conv + :class:`~torch.nn.PixelShuffle` upsample by
    ``upscale_factor`` and a final 3x3 conv maps back to 3 channels.  A
    bilinearly interpolated copy of the input is added as a global residual,
    so the network only has to learn the high-frequency detail.
    """

    def __init__(
                self,

                # image shape parameters
                height,
                width,

                # grid size used by the SPA module
                grid_size: tuple,
                # number of SPA iterations
                num_iters,

                # hidden dimension shared by all transformer modules
                hidden_dim,
                # number of heads shared by all transformer modules
                num_heads,

                # local-attention patch size (assumed square)
                patch_size: int,
                # local-attention overlap
                overlap,

                # number of SPI blocks
                num_blocks,

                # upsampling factor
                upscale_factor=2
                ):
        super().__init__()

        # Lazy convs infer in_channels on first forward call.
        self.conv1 = nn.LazyConv2d(out_channels=hidden_dim, kernel_size=3, padding=1)
        # Produce (upscale_factor^2 * 3) channels so PixelShuffle yields 3 channels.
        self.conv2 = nn.LazyConv2d(out_channels=(upscale_factor ** 2) * 3, kernel_size=1)
        self.conv3 = nn.LazyConv2d(out_channels=3, kernel_size=3, padding=1)

        self.pixel_shuffle = nn.PixelShuffle(upscale_factor=upscale_factor)

        # Stack of identical SPI blocks (built directly instead of mutating
        # an empty Sequential; loop variable was unused).
        self.blocks = nn.Sequential(*(
            SPI_Block(
                height=height,
                width=width,
                grid_size=grid_size,
                num_iters=num_iters,
                hidden_dim=hidden_dim,
                num_heads=num_heads,
                patch_size=patch_size,
                overlap=overlap
            )
            for _ in range(num_blocks)
        ))

        self.upscale_factor = upscale_factor

    def forward(self, x):
        # Global residual: bilinear upsample of the raw input.
        y = nn.functional.interpolate(x, scale_factor=self.upscale_factor, mode='bilinear')

        x = self.conv1(x)
        x = self.blocks(x)
        x = self.conv2(x)
        x = self.pixel_shuffle(x)
        x = self.conv3(x)

        return x + y
    
    
if __name__ == "__main__":
    # No standalone entry point; this module is imported as a library.
    pass

        
        
        