import os

import numpy as np
import pywt
import torch.nn as nn
import torch
import random
from transformers import ViTImageProcessor, ViTForImageClassification
from timm.models.layers import trunc_normal_, DropPath
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
                      constant_init, normal_init)
from omegaconf import OmegaConf
from depth.ldm.util import instantiate_from_config
import torch.nn.functional as F

from depth.models.FusionMoudule import FusionModule
# from depth.models.FFTNet import FFTBlock

from depth.models.catanet import CATANet
# from depth.models.WPL import WPL

from ecodepth.models import UNetWrapper, EmbeddingAdapter
# import matplotlib.pyplot as plt
# from depth.models.FusionMoudule import FusionModule
from transformers import logging
from pytorch_wavelets import DWTForward
logging.set_verbosity_error()


# from kan import KAN

class Identity(nn.Module):
    """No-op module that returns its input unchanged (placeholder layer)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x


class EcoDepthEncoder(nn.Module):
    """Feature encoder built on a frozen Stable Diffusion model.

    The input image is mapped to SD latent space by the frozen VQ encoder,
    run through one UNet step with an empty (zero) conditioning embedding,
    and the resulting multi-scale UNet features are downsampled and fused
    into a single ``out_dim``-channel map at 1/32 of the input resolution.
    """

    def __init__(self, out_dim=1024, ldm_prior=[320, 640, 1280 + 1280], sd_path=None, emb_dim=768,
                 dataset='nyu', args=None):
        super().__init__()

        self.args = args

        # Bring the finest UNet feature down by 4x (two stride-2 convs) so
        # all three scales align before concatenation.
        self.layer1 = nn.Sequential(
            nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
            nn.GroupNorm(16, ldm_prior[0]),
            nn.ReLU(),
            nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
        )

        # Bring the middle-scale feature down by 2x (one stride-2 conv).
        self.layer2 = nn.Sequential(
            nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
        )

        # Project the concatenated pyramid to out_dim channels.
        self.out_layer = nn.Sequential(
            nn.Conv2d(sum(ldm_prior), out_dim, 1),
            nn.GroupNorm(16, out_dim),
            nn.ReLU(),
        )
        self.apply(self._init_weights)

        # self.cide_module = CIDE(args, emb_dim)
        self.emb_dim = emb_dim

        self.config = OmegaConf.load('./v1-inference.yaml')
        if sd_path is None:
            self.config.model.params.ckpt_path = 'E:\\projects\\df-depth\\checkpoints\\v1-5-pruned-emaonly.ckpt'
        else:
            self.config.model.params.ckpt_path = f'../{sd_path}'

        sd_model = instantiate_from_config(self.config.model)
        self.encoder_vq = sd_model.first_stage_model

        self.unet = UNetWrapper(sd_model.model, use_attn=False)

        # Free memory held by SD components this encoder never uses: the
        # text encoder, the VQ decoder and the UNet output head.
        del sd_model.cond_stage_model
        del self.encoder_vq.decoder
        del self.unet.unet.diffusion_model.out

        # The VQ encoder is kept frozen (forward() also wraps it in no_grad).
        for param in self.encoder_vq.parameters():
            param.requires_grad = False

    def _init_weights(self, m):
        """Truncated-normal init for conv/linear weights, zeros for biases.

        Fix: the original unconditionally wrote ``m.bias`` and crashed on
        layers constructed with ``bias=False``; the bias write is now
        guarded. ``nn.init.trunc_normal_`` replaces timm's equivalent
        helper (same semantics: N(0, std) truncated to [-2, 2]).
        """
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward_features(self, feats):
        # NOTE(review): dead code — self.ldm_to_net / self.layers /
        # self.upsample_layers / self.out_conv are never created in
        # __init__, so calling this would raise AttributeError.
        x = self.ldm_to_net[0](feats[0])
        for i in range(3):
            if i > 0:
                x = x + self.ldm_to_net[i](feats[i])
            x = self.layers[i](x)
            x = self.upsample_layers[i](x)
        return self.out_conv(x)

    def forward(self, x):
        """Encode an image batch (B, 3, H, W), expected in [-1, 1].

        Returns a fused feature map of shape (B, out_dim, H/32, W/32)
        (per the shape annotations in the original code for 512x640 input).
        """
        # The VQ encoder is frozen, so skip gradient tracking entirely.
        with torch.no_grad():
            # Convert the input image to latent space and scale it.
            latents = self.encoder_vq.encode(x).mode().detach() * self.config.model.params.scale_factor

        # Unconditional pass: a zero conditioning embedding stands in for the
        # scene embedding the (disabled) CIDE module would normally provide.
        empty_conditioning_embedding = torch.zeros((x.shape[0], 1, self.emb_dim), device=x.device)

        # Fixed timestep t=1 for the single denoising step.
        t = torch.ones((x.shape[0],), device=x.device).long()

        # UNet feature pyramid, four scales from fine to coarse.
        outs = self.unet(latents, t, c_crossattn=[empty_conditioning_embedding])

        # Merge the two coarsest scales, then align all scales at 1/32 and fuse.
        feats = [outs[0], outs[1],
                 torch.cat([outs[2], F.interpolate(outs[3], scale_factor=2)], dim=1)]
        x = torch.cat([self.layer1(feats[0]), self.layer2(feats[1]), feats[2]], dim=1)
        return self.out_layer(x)


class CIDE(nn.Module):
    """Conditional scene-embedding module.

    Classifies the input image with a frozen pretrained ViT, maps the 1000
    logits to a soft distribution over ``args.no_of_classes`` learned scene
    classes, and returns the probability-weighted scene embedding passed
    through an EmbeddingAdapter.
    """

    def __init__(self, args, emb_dim):
        super().__init__()
        self.args = args

        # Prefer a locally cached ViT checkpoint when one is configured.
        if hasattr(args, 'local_vit_model_path') and args.local_vit_model_path:
            vit_model_path = args.local_vit_model_path
        else:
            vit_model_path = args.vit_model

        self.vit_processor = ViTImageProcessor.from_pretrained(vit_model_path, resume_download=True)
        self.vit_model = ViTForImageClassification.from_pretrained(vit_model_path, resume_download=True)
        # The ViT is used purely as a frozen feature extractor.
        for p in self.vit_model.parameters():
            p.requires_grad = False

        # Map the 1000 ImageNet logits to scene-class scores.
        self.fc = nn.Sequential(
            nn.Linear(1000, 400),
            nn.GELU(),
            nn.Linear(400, args.no_of_classes),
        )

        self.dim = emb_dim
        self.m = nn.Softmax(dim=1)

        # Learnable embedding table, one row per scene class.
        self.embeddings = nn.Parameter(torch.randn(self.args.no_of_classes, self.dim))
        self.embedding_adapter = EmbeddingAdapter(emb_dim=self.dim)
        # Small learnable scale handed to the adapter.
        self.gamma = nn.Parameter(torch.ones(self.dim) * 1e-4)

    def pad_to_make_square(self, x):
        """Zero-pad a [-1, 1] NCHW batch into square NHWC integer images."""
        img = 255 * ((x + 1) / 2)               # back to the [0, 255] range
        img = torch.permute(img, (0, 2, 3, 1))  # NCHW -> NHWC
        bs, _, h, w = x.shape
        if w > h:
            pad = torch.zeros(bs, w - h, w, 3).to(x.device)
            img = torch.cat([img, pad], axis=1)  # pad rows at the bottom
        else:
            pad = torch.zeros(bs, h, h - w, 3).to(x.device)
            img = torch.cat([img, pad], axis=2)  # pad columns at the right
        return img.to(torch.int)

    def forward(self, x):
        # Square the (e.g. 480x640) image so the pretrained ViT's knowledge
        # can be reused on its expected input layout.
        square = self.pad_to_make_square(x)
        # No gradient flow through the frozen ViT.
        with torch.no_grad():
            vit_inputs = self.vit_processor(images=square, return_tensors="pt").to(x.device)
            logits = self.vit_model(**vit_inputs).logits  # (b, 1000)

        scores = self.fc(logits)
        probs = self.m(scores)  # (b, no_of_classes)

        scene_emb = probs @ self.embeddings
        return self.embedding_adapter(scene_emb, self.gamma)  # (b, 1, emb_dim)

class Down_wt(nn.Module):
    """Haar-wavelet downsampling that keeps only the detail sub-bands.

    A single-level DWT halves the spatial resolution; the HL and LH
    (horizontal/vertical detail) coefficients are concatenated and fused by
    a 1x1 conv + BN + ReLU. The low-pass (LL) and diagonal (HH) bands are
    deliberately discarded — this module serves as a high-frequency branch.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.wt = DWTForward(J=1, mode='zero', wave='haar')
        self.conv_bn_relu = nn.Sequential(
            nn.Conv2d(in_ch * 2, out_ch, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # high[0] has shape (B, C, 3, H/2, W/2): sub-bands HL, LH, HH.
        _, high = self.wt(x)
        hl = high[0][:, :, 0, ::]
        lh = high[0][:, :, 1, ::]
        return self.conv_bn_relu(torch.cat([hl, lh], dim=1))


class EcoDepth(nn.Module):
    """End-to-end depth model: SD-based encoder, deconv decoder fused with a
    wavelet high-frequency branch, and a sigmoid depth-regression head."""

    def __init__(self, args=None, min_depth=0.1):
        super().__init__()
        # NOTE(review): the min_depth parameter is ignored — both bounds are
        # read from args; confirm this is intended.
        self.max_depth = args.max_depth
        self.min_depth = args.min_depth

        self.args = args
        embed_dim = 192
        channels_in = embed_dim * 8
        channels_out = embed_dim

        channels_out_SR = 32
        # High-frequency branch: wavelet detail features of the raw image.
        self.highFre = Down_wt(in_ch=3, out_ch=32)

        self.encoder = EcoDepthEncoder(out_dim=channels_in, dataset='nyu', args=args)
        self.decoder = Decoder(channels_in, channels_out, args)
        self.decoder.init_weights()

        # 32 -> 1 channel depth regression head.
        self.last_layer_depth = nn.Sequential(
            nn.Conv2d(channels_out_SR, channels_out_SR, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=False),
            nn.Conv2d(channels_out_SR, 1, kernel_size=3, stride=1, padding=1))

        for m in self.last_layer_depth.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001, bias=0)

    def forward(self, x):
        b, c, h, w = x.shape
        x = x * 2.0 - 1.0  # rescale [0, 1] -> [-1, 1] for the SD encoder

        conv_feats = self.encoder(x)
        # 480/352-high inputs yield one extra feature row/column; trim the
        # last row and column so the decoder sees the expected size.
        if h == 480 or h == 352:
            conv_feats = conv_feats[:, :, :-1, :-1]

        high_freq = self.highFre(x)
        out = self.decoder([conv_feats], high_freq)
        out_depth = self.last_layer_depth(out)
        # Squash predictions into (0, max_depth).
        out_depth = torch.sigmoid(out_depth) * self.max_depth

        return {'pred_d': out_depth}


class Decoder(nn.Module):
    """Upsampling decoder: deconv stack, CATANet 2x super-resolution, fusion
    with the high-frequency branch, and a final bilinear 2x upsample."""

    def __init__(self, in_channels, out_channels, args):
        super().__init__()
        self.deconv = args.num_deconv
        self.in_channels = in_channels
        self.args = args
        # Stack of ConvTranspose2d + BN + ReLU stages configured from args.
        self.deconv_layers = self._make_deconv_layer(
            args.num_deconv,
            args.num_filters,
            args.deconv_kernels,
        )
        # 2x transformer-based super-resolution on the 32-channel features.
        self.upsample = CATANet(in_chans=32, upscale=2)
        # Fuses decoder features with the wavelet high-frequency branch.
        self.fusion = FusionModule(in_channels=32)

        # NOTE(review): conv_layers is built and initialised by
        # init_weights() but never used in forward(); kept as-is for
        # checkpoint compatibility.
        head = [
            build_conv_layer(
                dict(type='Conv2d'),
                in_channels=args.num_filters[-1],
                out_channels=out_channels,
                kernel_size=3,
                stride=1,
                padding=1),
            build_norm_layer(dict(type='BN'), out_channels)[1],
            nn.ReLU(inplace=True),
        ]
        self.conv_layers = nn.Sequential(*head)

        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)

    def forward(self, conv_feats, highFre):
        """Decode conv_feats[0], fuse in the high-frequency features and
        upsample to full resolution."""
        feat = self.deconv_layers(conv_feats[0])  # deconv stack
        feat = self.upsample(feat)                # CATANet 2x SR
        feat = self.fusion(feat, highFre)         # inject wavelet details
        return self.up(feat)                      # bilinear 2x

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build num_layers deconv stages, each upsampling by 2x."""
        stages = []
        prev = self.in_channels
        for idx in range(num_layers):
            kernel, padding, output_padding = self._get_deconv_cfg(num_kernels[idx])
            width = num_filters[idx]
            stages += [
                build_upsample_layer(
                    dict(type='deconv'),
                    in_channels=prev,
                    out_channels=width,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=False),
                nn.BatchNorm2d(width),
                nn.ReLU(inplace=True),
            ]
            prev = width

        return nn.Sequential(*stages)

    def _get_deconv_cfg(self, deconv_kernel):
        """Return (kernel, padding, output_padding) for a stride-2 deconv."""
        cfg_by_kernel = {4: (1, 0), 3: (1, 1), 2: (0, 0)}
        if deconv_kernel not in cfg_by_kernel:
            raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')
        padding, output_padding = cfg_by_kernel[deconv_kernel]
        return deconv_kernel, padding, output_padding

    def init_weights(self):
        """Initialize conv, BN and deconv weights of the decoder."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001, bias=0)
            elif isinstance(m, nn.BatchNorm2d):
                constant_init(m, 1)
            elif isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.001)


if __name__ == '__main__':
    # Smoke test for the Decoder.
    # Fixes: the original called Decoder() without its three required
    # arguments (in_channels, out_channels, args) and passed a bare tensor
    # where forward() expects a list of feature maps — both crashed
    # immediately. The high-frequency tensor must also have 32 channels to
    # match FusionModule(in_channels=32).
    from types import SimpleNamespace

    demo_args = SimpleNamespace(num_deconv=3,
                                num_filters=[32, 32, 32],
                                deconv_kernels=[2, 2, 2])
    model = Decoder(in_channels=1536, out_channels=192, args=demo_args)
    x1 = torch.randn(2, 1536, 16, 20)    # encoder feature map
    x2 = torch.randn(2, 32, 128, 160)    # high-frequency feature map
    y = model([x1], x2)
    print(y.shape)


