import torch
# from diffusers.models.unet_2d_condition import UNet2DConditionOutput


class ControlUnet(torch.nn.Module):
    """Wrap a frozen UNet together with a (trainable) ControlNet.

    The ControlNet consumes ``control_image`` and produces residual feature
    maps that are injected into the UNet's down and mid blocks.

    Args:
        unet: Denoising UNet; its parameters are frozen in ``__init__``.
        controlnet: ControlNet module producing the additional residuals.
        control_scale: Multiplier applied to the ControlNet residuals before
            they are handed to the UNet. Defaults to 1.0 (no scaling).
    """

    def __init__(self, unet, controlnet, control_scale=1.0):
        super().__init__()
        self.unet = unet
        # Only the ControlNet is meant to be trained; freeze the base UNet.
        self.unet.requires_grad_(False)
        self.controlnet = controlnet
        self.control_scale = control_scale

    def forward(self, sample, timestep, encoder_hidden_states, control_image=None, cross_attention_kwargs=None):
        """Run the ControlNet, then the UNet with the ControlNet residuals.

        Returns:
            Whatever the wrapped UNet returns (e.g. a
            ``UNet2DConditionOutput`` for diffusers UNets, left unchanged).
        """
        down_block_res_samples, mid_block_res_sample = self.controlnet(
            sample,
            timestep,
            encoder_hidden_states=encoder_hidden_states,
            controlnet_cond=control_image,
            return_dict=False,
        )

        # Bug fix: ``control_scale`` was stored but never applied — the
        # scaling code had been commented out, making the parameter a silent
        # no-op. Scale only when it actually changes the residuals so the
        # default (1.0) training path is left exactly as before.
        if self.control_scale != 1.0:
            down_block_res_samples = [
                res * self.control_scale for res in down_block_res_samples
            ]
            mid_block_res_sample = mid_block_res_sample * self.control_scale

        return self.unet(
            sample,
            timestep,
            encoder_hidden_states=encoder_hidden_states,
            cross_attention_kwargs=cross_attention_kwargs,
            down_block_additional_residuals=down_block_res_samples,
            mid_block_additional_residual=mid_block_res_sample,
        )

    @property
    def device(self):
        # Delegate to the wrapped UNet so callers can query placement.
        return self.unet.device
