####################################################################################


import torch
import einops
import math
from torch.nn import Module
from torch.nn import MSELoss
import torch.nn.functional as F
import parameters as p
from torch import nn
from functools import partial
import cv2

####################################################################################


class PreNorm(nn.Module):
    """Normalize the input with GroupNorm(1, dim) before running the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.GroupNorm(1, dim)
        self.fn = fn

    def forward(self, x):
        return self.fn(self.norm(x))


# Return `val` when it is provided (not None); otherwise fall back to `d`,
# calling it first when it is a factory/callable.
def default(val, d):
    # BUG FIX: use `is not None` rather than truthiness — falsy-but-valid
    # values such as 0, "" or an empty tuple must not be replaced by the
    # fallback (the original comment states the None contract explicitly).
    if val is not None:
        return val
    return d() if callable(d) else d

class ResidualConnection(Module):
    """Wrap a sublayer with a skip connection: output = sublayer(x, ...) + x."""

    def __init__(self, sublayer):
        super().__init__()
        self.sublayer = sublayer

    def forward(self, x, *args, **kwargs):
        out = self.sublayer(x, *args, **kwargs)
        return out + x

# Upsampling layer: a stride-2 transposed convolution that doubles the
# spatial resolution while keeping the channel count unchanged.
def Upsample(channels):
    return nn.ConvTranspose2d(channels, channels, kernel_size=4, stride=2, padding=1)

# Downsampling layer: a 4x4 stride-2 convolution that halves the spatial
# resolution while keeping the channel count unchanged.
def Downsample(channels):
    return nn.Conv2d(channels, channels, kernel_size=4, stride=2, padding=1)

def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically-stabilised softmax.

    Works at a reduced scale (t / alpha), subtracts the (detached) per-slice
    maximum, then restores the scale before applying softmax along `dim`.
    """
    scaled = t / alpha
    peak = torch.amax(scaled, dim = dim, keepdim = True).detach()
    return ((scaled - peak) * alpha).softmax(dim = dim)

class SinusoidalPositionEmbeddings(Module):
    """Sinusoidal timestep embedding, as used by Transformers and DDPM.

    So that the network knows which denoising step it is processing, the step
    index is encoded with sinusoidal positional embeddings: a tensor of shape
    (batch,) is mapped to (batch, dim), with the first half of the channels
    holding sines and the second half cosines at geometrically spaced
    frequencies. The result is added inside each residual block.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, t):
        half = self.dim // 2
        # log-spaced frequencies decaying from 1 down to 1/10000
        step = math.log(10000) / (half - 1)
        freqs = torch.exp(torch.arange(half, device=t.device) * -step)
        angles = t[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)


class Block(Module):
    """Conv -> GroupNorm -> (optional scale/shift) -> SiLU.

    Basic unit of the ResNet-style blocks: a 3x3 same-padding convolution,
    group normalisation, an optional FiLM-style affine modulation, and a
    SiLU activation.
    """

    def __init__(self, in_channels, out_channels, groups=8, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUG FIX: the original call omitted kernel_size, a required positional
        # argument of nn.Conv2d (TypeError at construction). padding=1 implies
        # the intended 3x3 same-padding kernel.
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.norm = nn.GroupNorm(groups, out_channels)
        self.act = nn.SiLU()

    def forward(self, x, affine=None):
        o = self.norm(self.proj(x))
        # optional conditioning: affine is a (scale, shift) pair applied
        # after normalisation, before the activation
        if affine is not None:
            scale, shift = affine
            o = scale * o + shift
        return self.act(o)

class ResidualBlock(Module):
    """Two `Block` units plus a residual connection (1x1-projected when the
    channel count changes), optionally conditioned on a timestep embedding.

    `time_emb_dim` is accepted as a keyword-only alias for `t_embedding_dim`
    so this class is interchangeable with ConvNextBlock at its call sites
    (Unet instantiates both flavours with `time_emb_dim=...`).
    """

    def __init__(self, in_channels, out_channels, groups=8, t_embedding_dim=None,
                 *args, time_emb_dim=None, **kwargs):
        super().__init__(*args, **kwargs)

        # CONSISTENCY FIX: Unet passes `time_emb_dim=`, which the original
        # signature rejected; honour it as an alias without breaking callers
        # that use `t_embedding_dim`.
        if t_embedding_dim is None:
            t_embedding_dim = time_emb_dim

        # BUG FIX: the condition was inverted (`if not t_embedding_dim`), which
        # built Linear(None, ...) when no embedding dim was configured and
        # silently disabled conditioning when one was.
        self.mlp = (
            nn.Sequential(nn.SiLU(), nn.Linear(t_embedding_dim, out_channels))
            if t_embedding_dim is not None else None
        )
        self.block1 = Block(in_channels, out_channels, groups=groups)
        self.block2 = Block(out_channels, out_channels, groups=groups)
        self.res_conv = (
            nn.Conv2d(in_channels, out_channels, kernel_size=1)
            if in_channels != out_channels else nn.Identity()
        )

    def forward(self, x, t_embedding=None):
        h = self.block1(x)
        # BUG FIX: `if self.mlp and t_embedding` applied truthiness to a
        # tensor, which raises for tensors with more than one element.
        if self.mlp is not None and t_embedding is not None:
            condition = self.mlp(t_embedding)
            # broadcast the per-channel condition over the spatial dimensions
            h = h + einops.rearrange(condition, 'b c -> b c 1 1')
        h = self.block2(h)
        return h + self.res_conv(x)


class ConvNextBlock(Module):
	def __init__(self,in_channels,out_channels,out_channel_factor:int,time_emb_dim:int|None=None,norm=True,*args,**kwargs):
		super().__init__(*args,**kwargs)

		self.is_time_embedding = True if time_emb_dim else False
		self.mlp = nn.Sequential(nn.GELU(),nn.Linear(time_emb_dim,in_channels)) if time_emb_dim else None

		self.ds_conv = nn.Conv2d(in_channels,in_channels,7,padding=3,groups=in_channels)

		self.net = nn.Sequential(nn.GroupNorm(1,in_channels) if norm else nn.Identity(),
		                         nn.Conv2d(in_channels,out_channels*out_channel_factor,3,padding=1),
		                         nn.GELU(),
		                         nn.GroupNorm(1,out_channels*out_channel_factor),
		                         nn.Conv2d(out_channels*out_channel_factor,out_channels,3,padding=1))
		
		self.res_conv = nn.Conv2d(in_channels,out_channels,1) if in_channels != out_channels else nn.Identity()

	def forward(self,x,t_embedding=None):
		h = self.ds_conv(x)

		if self.is_time_embedding and t_embedding is not None:
			condition = self.mlp(t_embedding)
			h = h + einops.rearrange(condition,"b c -> b c 1 1")
		else:
			pass
		
		h = self.net(h)
		return h + self.res_conv(x)


class Attention(Module):
    """Multi-head softmax self-attention over spatial positions (used in the
    U-Net bottleneck).

    BUG FIX: the original computed `q @ k^T` on tensors shaped
    (b, heads, d, n), producing a d x d score matrix — i.e. attention over
    the *channel* axis, so no spatial position ever attended to another, and
    the d**-0.5 scale did not match the softmax axis. This version follows
    the standard formulation: similarity between the n = h*w positions,
    softmax over positions, values aggregated per position.
    """

    def __init__(self, channels, heads=4, head_channels=32, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scale = head_channels ** -0.5
        self.heads = heads
        hidden_dim = head_channels * heads

        self.to_qkv = nn.Conv2d(channels, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, channels, 1)

    def forward(self, x):
        b, c, h, w = x.shape

        qkv = self.to_qkv(x).chunk(3, dim=1)
        # (b, heads*d, h, w) -> (b, heads, d, h*w); reshape (not view) because
        # chunk returns non-contiguous slices
        q, k, v = (t.reshape(b, self.heads, -1, h * w) for t in qkv)

        q = q * self.scale

        # similarity between spatial positions: (b, heads, n, n)
        sim = torch.matmul(q.transpose(-2, -1), k)
        # torch's softmax is max-subtracted internally, so it is already
        # numerically stable here
        attn = sim.softmax(dim=-1)

        # aggregate values over positions: (b, heads, d, n)
        out = torch.matmul(v, attn.transpose(-2, -1))

        out = out.reshape(b, -1, h, w)
        return self.to_out(out)

class LinearAttention(Module):
    """Linear-complexity attention variant applied at every U-Net resolution.

    q is normalised with a softmax over its channel axis and k with a softmax
    over its spatial axis; the resulting d x d score matrix `q @ k^T` is then
    applied to v, keeping the cost linear in the number of pixels.

    NOTE(review): this differs from the usual linear-attention formulation
    (context = k @ v^T applied to q — cf. the einsums left commented out in
    the original); confirm the computation is the intended one.
    """

    def __init__(self, channels, heads=4, head_channels=32, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.scale = head_channels ** -0.5
        self.heads = heads
        hidden_dim = head_channels * heads
        self.to_qkv = nn.Conv2d(channels, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Sequential(nn.Conv2d(hidden_dim, channels, 1), nn.GroupNorm(1, channels))

    def forward(self, x):
        b, c, h, w = x.shape
        # (b, 3*hidden, h, w) -> three tensors of shape (b, heads, d, h*w)
        q, k, v = (t.reshape(b, self.heads, -1, h * w) for t in self.to_qkv(x).chunk(3, dim=1))
        q = q.softmax(dim=-2) * self.scale
        k = k.softmax(dim=-1)

        scores = torch.matmul(q, k.transpose(-2, -1))  # (b, heads, d, d)
        out = torch.matmul(scores, v)                  # (b, heads, d, h*w)

        out = out.reshape(b, -1, h, w)
        return self.to_out(out)

class Unet(Module):

	'''
	DDPM-style U-Net denoiser (annotated-diffusion layout).

	Structure:
	* an initial 7x7 convolution on the input image, plus an MLP that turns
	  the diffusion timestep into an embedding consumed by every residual
	  block;
	* a series of down-sampling stages, each: two ResNet/ConvNeXt blocks
	  (time-conditioned) + group-norm + linear attention + residual
	  connection + a stride-2 down-sample (identity at the deepest stage);
	* a bottleneck of ResNet/ConvNeXt block + full attention + block;
	* a series of up-sampling stages mirroring the down path, with the
	  matching down-stage feature map concatenated along the channel axis;
	* a final ResNet/ConvNeXt block and a 1x1 convolution back to the image
	  channel count.
	'''

	def __init__(self, 
		  use_convnext=True,
		  isGrayImage = False,
		  dim_mults = (1,2,4,8),
		  *args, 
		  **kwargs) -> None:
		super().__init__(*args, **kwargs)

		# 1 channel for grayscale input, 3 for RGB
		channels = 1 if isGrayImage else 3

		self.channels = channels
		init_channels = p.HEAD_CONV_OUT_CHANNELS
		channel_baseline = p.CHANNEL_BASELINE

		
		self.head_conv = nn.Conv2d(channels,p.HEAD_CONV_OUT_CHANNELS,7,padding=3)


		dims = [init_channels,*map(lambda m:channel_baseline*m,dim_mults)] # dims = [ini_c,dim*1,dim*2,dim*4,dim*8]
		in_out = list(zip(dims[:-1], dims[1:])) 
		# per-stage (in, out) channel pairs:
		# [ini_c,dim*1,dim*2,dim*4]  [dim*1,dim*2,dim*4,dim*8]

		# both flavours are constructed with `time_emb_dim=...` below
		if use_convnext:
			block_klass = partial(ConvNextBlock,out_channel_factor=p.CONVNEXT_CHANNEL_OUT_FACTOR)
		else:
			block_klass = partial(ResidualBlock,groups=p.RESNET_BLOCK_GROUPS)


		time_dim = channel_baseline * 4
		
		# timestep -> sinusoidal embedding -> 2-layer MLP, shared by all blocks
		self.time_mlp = nn.Sequential(
				SinusoidalPositionEmbeddings(channel_baseline),
				nn.Linear(channel_baseline,time_dim),
				nn.GELU(),
				nn.Linear(time_dim,time_dim)
			)

		#layers
		self.downs = nn.ModuleList([])
		self.ups = nn.ModuleList([])
		num_resolutions = len(in_out)

		for ind,(channel_in,channel_out) in enumerate(in_out):
			# the deepest stage keeps its resolution (no down-sample)
			is_last = ind >= (num_resolutions - 1)

			self.downs.append(
				nn.ModuleList([
				block_klass(channel_in,channel_out,time_emb_dim=time_dim),
				block_klass(channel_out,channel_out,time_emb_dim=time_dim),
				ResidualConnection(PreNorm(channel_out,LinearAttention(channel_out))),
				Downsample(channel_out) if not is_last else nn.Identity()
				])
			)

		mid_dim = dims[-1]

		# bottleneck: block -> full (quadratic) attention -> block
		self.mid_block1 = block_klass(mid_dim,mid_dim,time_emb_dim=time_dim)
		self.mid_attn = ResidualConnection(PreNorm(mid_dim,Attention(mid_dim)))
		self.mid_block2 = block_klass(mid_dim,mid_dim,time_emb_dim=time_dim)

		# NOTE(review): the up path walks in_out[1:] in reverse — one fewer
		# stage than the down path, so the first down stage's skip feature is
		# pushed but never consumed; this mirrors the annotated-diffusion
		# reference, but confirm it is intended.
		for ind,(channel_in,channel_out) in enumerate(reversed(in_out[1:])):
			# NOTE(review): with only len(in_out)-1 up stages this is never
			# true, so an up-sample is applied at every stage — verify.
			is_last = ind >= (num_resolutions - 1)
			
			self.ups.append(nn.ModuleList([
				# channel_out * 2: the skip connection is concatenated on dim 1
				block_klass(channel_out * 2 ,channel_in,time_emb_dim=time_dim),
				block_klass(channel_in,channel_in,time_emb_dim=time_dim),
				ResidualConnection(PreNorm(channel_in,LinearAttention(channel_in))),
				Upsample(channel_in) if not is_last else nn.Identity()
			]))

		self.final_conv = nn.Sequential(
			block_klass(channel_baseline,channel_baseline),nn.Conv2d(channel_baseline,channels,1)
		)


	def forward(self,x,time):
		'''Run the denoiser: `x` is an image batch and `time` the per-sample
		diffusion timesteps (1-D, shape (batch,), as required by the
		`t[:, None]` indexing in SinusoidalPositionEmbeddings); returns a
		tensor with the input's channel count restored by final_conv.'''
		
		x = self.head_conv(x)

		t = self.time_mlp(time) 


		# stack of skip-connection feature maps (one per down stage)
		h = []

		#downsample
		for block1,block2,attn,downsample, in self.downs:
			x = block1(x,t)
			x = block2(x,t)
			x = attn(x)
			h.append(x)
			x = downsample(x)

		# bottleneck
		x = self.mid_block1(x,t)
		x = self.mid_attn(x)
		x = self.mid_block2(x,t)

		# upsample
		for block1,block2,attn,upsample in self.ups:
			# concatenate the matching down-stage feature map along channels
			x = torch.cat((x,h.pop()),dim=1)
			x = block1(x,t)
			x = block2(x,t)
			x = attn(x)
			x = upsample(x)

		return self.final_conv(x)


if __name__ == "__main__":
    # Smoke test: build the U-Net (requires a CUDA device and the project's
    # `parameters` module) and dump the freshly initialised optimizer state.
    unet = Unet().to('cuda:0')
    optim = torch.optim.Adam(unet.parameters(), 0.023)
    print(optim.state_dict())