# import spconv.pytorch as spconv
from omegaconf import OmegaConf
from DiffOGMP.utils.common_utils import instantiate_from_config
import torch
from torch import nn

def count_params(module):
    """Return the element count of every parameter tensor in *module*, one entry per tensor."""
    sizes = []
    for tensor in module.parameters():
        sizes.append(tensor.numel())
    return sizes

def forward_shape_hook(module, input, out):
    """Forward hook that logs the shape of a module's output (for register_forward_hook)."""
    print("shape {}".format(out.shape))

def find_modules_with_name(module, name_prefix):
    """Recursively collect submodules whose attribute name starts with *name_prefix*.

    Traversal is depth-first over ``named_children``; matches at each level
    come before matches found deeper in that child's subtree.
    """
    matches = []
    for child_name, child in module.named_children():
        if child_name.startswith(name_prefix):
            matches.append(child)
        matches += find_modules_with_name(child, name_prefix)
    return matches

def get_all_modules(module, prefix=''):
    """Return (dotted_name, submodule) pairs for every descendant of *module*.

    Names are qualified with *prefix* using dots, mirroring the layout of
    ``nn.Module.named_modules`` but excluding the root module itself.
    """
    collected = []
    for child_name, child in module.named_children():
        qualified = child_name if not prefix else f"{prefix}.{child_name}"
        collected.append((qualified, child))
        collected += get_all_modules(child, qualified)
    return collected

def inspect_vae2d():
    """Instantiate the 2-D KL autoencoder from its YAML config, print its
    encoder/decoder/loss submodules, run a dummy forward pass and the
    discriminator, and report the total parameter count in millions."""
    cfg = OmegaConf.load('configs/autoencoders/autoencoderkl_4x4x8.yaml')
    model = instantiate_from_config(cfg['model'])
    divider = '*' * 100
    param_counts = []

    print("Encoder: ")
    print(model.encoder)
    param_counts.extend(count_params(model.encoder))
    print(divider)
    print("Decoder: ")
    print(model.decoder)
    param_counts.extend(count_params(model.decoder))
    print(divider)
    print("Loss: ")
    print(model.loss)

    # Dummy single-channel 64x64 input just to exercise the forward pass.
    dummy = torch.zeros(1, 1, 64, 64)
    recon, _ = model(dummy)
    print(recon.shape)

    disc_out = model.loss.discriminator(dummy)
    print(f'disc shape: {disc_out.shape}')
    print(divider)

    print(f'total parameters {sum(param_counts) / 1e6}M')

def inspect_vqgan2d():
    """Instantiate the 2-D VQGAN from its YAML config, print its
    encoder/decoder/loss/quantizer submodules, exercise the forward,
    discriminator and encode paths, and report total parameters."""
    cfg = OmegaConf.load('configs/autoencoders/vqgan2d.yaml')
    model = instantiate_from_config(cfg['model'])
    divider = '*' * 100
    param_counts = []

    print("Encoder: ")
    print(model.encoder)
    param_counts.extend(count_params(model.encoder))
    print(divider)
    print("Decoder: ")
    print(model.decoder)
    param_counts.extend(count_params(model.decoder))
    print(divider)
    print("Loss: ")
    print(model.loss)
    print(divider)
    print("Quantizer: ")
    print(model.quantizer)
    param_counts.extend(count_params(model.quantizer))

    # Dummy single-channel 64x64 input just to exercise the model.
    dummy = torch.zeros(1, 1, 64, 64)
    recon, _ = model(dummy)
    print(recon.shape)

    disc_out = model.loss.discriminator(dummy)
    print(f'disc shape: {disc_out.shape}')

    quantize, emb_loss, info = model.encode(dummy)
    print(f'quantized: {quantize.shape}')

    print(divider)
    print(f'total parameters {sum(param_counts) / 1e6}M')


def inspect_vae3d():
    """Instantiate the 3-D VAE from its YAML config, attach shape-logging
    hooks to every 'down*' block, print its submodules, run a dummy forward
    pass, and report total parameters."""
    cfg = OmegaConf.load('configs/autoencoders/vae3d.yaml')
    model = instantiate_from_config(cfg['model'])
    # Log intermediate shapes as the input flows through the downsampling path.
    for block in find_modules_with_name(model, 'down'):
        block.register_forward_hook(forward_shape_hook)
    divider = '*' * 100
    param_counts = []
    print("Encoder: ")
    print(model.encoder)
    param_counts.extend(count_params(model.encoder))
    print(divider)
    print('Decoder: ')
    print(model.decoder)
    param_counts.extend(count_params(model.decoder))
    print(divider)
    print('Loss: ')
    print(model.loss)
    print(divider)
    # Dummy volume: batch 1, 1 channel, 20 frames of 64x64.
    dummy = torch.zeros(1, 1, 20, 64, 64)
    recon, _ = model(dummy)
    print(recon.shape)
    print(divider)
    print(f'total parameters {sum(param_counts) / 1e6}M')
    print(divider)
    latent = model.encoder(dummy)
    print(f'encoder out : {latent.shape}')

# class Gen(spconv.SparseModule):
#     def __init__(self):
#         super().__init__()
#         self.down1 = spconv.SparseConv3d(1, 32, 3, 1, 1, indice_key='down1')
#         self.down2 = spconv.SparseConv3d(32, 64, 3, 1, 1, indice_key='down2')
#         self.mid = spconv.SparseSequential(
#             spconv.SubMConv3d(64, 64, 3, 1, 1, indice_key='mid1'),
#             nn.BatchNorm1d(64),
#             nn.ReLU(),
#             spconv.SubMConv3d(64, 64, 3, 1, 1, indice_key='mid2'),
#             nn.BatchNorm1d(64),
#             nn.ReLU()
#         )
#         self.up1 = spconv.SparseInverseConv3d(32, 1, 3,  indice_key='down1')
#         self.up2 = spconv.SparseInverseConv3d(64, 32, 3,  indice_key='down2')


    # def forward(self, x):
    #     x_sp = spconv.SparseConvTensor.from_dense(x.permute(0, 1, 3, 4, 2))
    #     down1 = self.down1(x_sp)
    #     down2 = self.down2(down1)
    #     mid = self.mid(down2)
    #     rec = self.up2(mid)
    #     rec = self.up1(rec)
    #     rec = rec.dense()
    #     return rec

def inspect_spstuffs():
    """Instantiate the sparse 3-D VAE on CUDA, print its submodules, run a
    random binary occupancy volume through it, and report total parameters.

    NOTE(review): requires a CUDA device; the model and input are both moved
    to 'cuda' before the forward pass.
    """
    cfg = OmegaConf.load('configs/autoencoders/sparsevae3d.yaml')
    model = instantiate_from_config(cfg['model']).to('cuda')
    divider = '*' * 100
    param_counts = []
    print("Encoder: ")
    print(model.encoder)
    param_counts.extend(count_params(model.encoder))
    print(divider)
    print("Decoder: ")
    print(model.decoder)
    param_counts.extend(count_params(model.decoder))
    print(divider)
    print("Loss: ")
    print(model.loss)
    print(divider)
    # Random 0/1 occupancy volume cast to float, mimicking sparse voxel input.
    dummy = torch.randint(0, 2, size=(1, 20, 1, 64, 64)).float().to('cuda')
    recon, _ = model(dummy)
    print(recon.shape)
    print(divider)
    print(f'total parameters {sum(param_counts) / 1e6}M')
    print(divider)
    latent = model.encoder(dummy)
    print(f'encoder out : {latent.shape}')

def inspect_basicvae():
    """Instantiate the basic VAE from its YAML config and print the full module tree."""
    cfg = OmegaConf.load('configs/autoencoders/basic_vae.yaml')
    print(instantiate_from_config(cfg['model']))

if __name__ == "__main__":
    # Toggle exactly one inspector at a time; each loads its own config file
    # (and inspect_spstuffs additionally requires a CUDA device).
    # inspect_vae2d()
    # inspect_vqgan2d()
    # inspect_vae3d()
    # inspect_spstuffs()
    inspect_basicvae()