
import argparse
import os
from datetime import datetime
from typing import Any, Optional

from PIL import Image, ImageOps

import torch
import comfy.utils
import numpy as np
import folder_paths
from diffusers import FluxTransformer2DModel
from .pipeline_flux_fill_with_cfg import FluxFillCFGPipeline

def is_low_memory(threshold_gb: float = 30.0) -> bool:
    """Return True when the pipeline should run in low-memory mode.

    Low-memory mode (CPU offload + int8 quantization) is chosen when the
    CUDA device reports less total memory than *threshold_gb* GiB, or when
    no CUDA device is available at all.

    Args:
        threshold_gb: Memory cutoff in GiB (defaults to the original
            hard-coded 30 GiB).

    Returns:
        True if low-memory handling should be enabled.
    """
    # Guard: the original queried device 0 unconditionally and raised on
    # CUDA-less machines; treat "no GPU" as low memory instead.
    if not torch.cuda.is_available():
        return True
    # NOTE(review): still checks device 0, as the original did — TODO confirm
    # multi-GPU setups always load onto device 0.
    total_gb = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
    return total_gb < threshold_gb

class RH_OneReward_Loader:
    """ComfyUI node that loads FLUX.1-Fill-dev with a OneReward transformer.

    Expects model files under:
      <models_dir>/OneReward/<model>                      (transformer weights)
      <models_dir>/black-forest-labs/FLUX.1-Fill-dev      (base fill pipeline)
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                # Fix: ComfyUI expects the second tuple element to be an
                # options dict; the original passed a bare default string.
                "model": (
                    ['flux.1-fill-dev-OneReward-transformer',
                     'flux.1-fill-dev-OneRewardDynamic-transformer'],
                    {"default": 'flux.1-fill-dev-OneReward-transformer'},
                ),
            },
        }

    RETURN_TYPES = ("RHOneRewardPipeline",)
    RETURN_NAMES = ("OneReward Pipeline",)
    FUNCTION = "load"

    CATEGORY = "Runninghub/OneReward"

    def load(self, **kwargs):
        """Build the FluxFillCFGPipeline for the selected OneReward transformer.

        Returns:
            A 1-tuple holding the ready-to-use pipeline (on CUDA, or CPU-offloaded
            and int8-quantized when VRAM is scarce).
        """
        model = kwargs.get('model')
        model_path = os.path.join(folder_paths.models_dir, 'OneReward', model)
        transformer_onereward = FluxTransformer2DModel.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16
        )
        print('--- OneReward transformer loaded ---')

        pipe = FluxFillCFGPipeline.from_pretrained(
            os.path.join(folder_paths.models_dir, 'black-forest-labs', 'FLUX.1-Fill-dev'),
            transformer=transformer_onereward,
            torch_dtype=torch.bfloat16)

        print('--- pipeline loaded ---')

        if is_low_memory():
            # Low-VRAM path: offload modules to CPU and quantize the
            # transformer to int8 in place.
            pipe.enable_model_cpu_offload()
            # Fix: import only the names actually used (the original also
            # imported quantization_map / QuantizedDiffusersModel / requantize).
            from optimum.quanto import freeze, qint8, quantize
            quantize(pipe.transformer, qint8)
            freeze(pipe.transformer)
            print('--- transformer quantized ---')
        else:
            pipe.to('cuda')
            print('--- run in 48g mode ---')

        return (pipe, )

class RH_OneReward_Sampler:
    """ComfyUI node that inpaints ``image`` inside ``mask`` using the
    OneReward fill pipeline, guided by ``prompt`` plus a true-CFG
    negative pass."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipeline": ("RHOneRewardPipeline", ),
                "image": ("IMAGE", ),
                "mask": ("MASK", ),
                "prompt": ("STRING", {"multiline": True,
                                      'default': ''}),
                "num_inference_steps": ("INT", {"default": 50}),
                "guidance_scale": ("FLOAT", {"default": 1.0}),
                "true_cfg": ("FLOAT", {"default": 4.0}),
                "seed": ("INT", {"default": 20, "min": 0, "max": 0xffffffffffffffff,
                                 "tooltip": "The random seed used for creating the noise."}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "sample"

    CATEGORY = "Runninghub/OneReward"

    def tensor_2_pil(self, img_tensor):
        """Convert a ComfyUI float tensor (values in [0, 1]) to a PIL image.

        Returns None when the input is None (e.g. an unconnected socket).
        """
        if img_tensor is None:
            return None
        arr = img_tensor.squeeze().cpu().numpy() * 255.
        return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))

    def sample(self, **kwargs):
        """Run the fill pipeline and return the result as an IMAGE tensor."""
        pipeline = kwargs.get('pipeline')
        pil_image = self.tensor_2_pil(kwargs.get('image'))
        pil_mask = self.tensor_2_pil(kwargs.get('mask'))
        width, height = pil_image.size

        steps = kwargs.get('num_inference_steps')
        # Fold the node's 64-bit seed into 32 bits for the CPU generator,
        # matching the original implementation.
        rng = torch.Generator("cpu").manual_seed(kwargs.get('seed') % (2 ** 32))

        # Hard-coded negative prompt (kept from the original implementation).
        negative_prompt = 'nsfw'
        self.pbar = comfy.utils.ProgressBar(steps)

        with torch.no_grad():
            result = pipeline(
                prompt=kwargs.get('prompt'),
                negative_prompt=negative_prompt,
                image=pil_image,
                mask_image=pil_mask,
                height=height,
                width=width,
                guidance_scale=kwargs.get('guidance_scale'),
                true_cfg=kwargs.get('true_cfg'),
                num_inference_steps=steps,
                generator=rng,
                update_func=self.update,
            ).images[0]

        # PIL -> float32 [0, 1] tensor with a leading batch dimension.
        out = torch.from_numpy(np.array(result).astype(np.float32) / 255.0)
        return (out[None,], )

    def update(self):
        """Advance the ComfyUI progress bar by one step (per denoising step)."""
        self.pbar.update(1)

class RH_OneReward_Eraser(RH_OneReward_Sampler):
    """ComfyUI node that removes the masked region from the image.

    Identical to RH_OneReward_Sampler except that the prompt is fixed to
    the hard-coded removal instruction 'remove', so the node exposes no
    free-text prompt input.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Same inputs as the sampler, minus the "prompt" field.
        return {
            "required": {
                "pipeline": ("RHOneRewardPipeline", ),
                "image": ("IMAGE", ),
                "mask": ("MASK", ),
                "num_inference_steps": ("INT", {"default": 50}),
                "guidance_scale": ("FLOAT", {"default": 1.0}),
                "true_cfg": ("FLOAT", {"default": 4.0}),
                "seed": ("INT", {"default": 20, "min": 0, "max": 0xffffffffffffffff,
                                 "tooltip": "The random seed used for creating the noise."}),
            },
        }

    def sample(self, **kwargs):
        """Run object removal by delegating to the parent sampler.

        Fix: the original duplicated the entire parent ``sample`` body
        verbatim (a drift-prone copy); forcing the hard-coded 'remove'
        prompt and delegating is behaviorally identical — the parent also
        uses the same 'nsfw' negative prompt and seed handling.
        """
        kwargs['prompt'] = 'remove'
        return super().sample(**kwargs)

# Registry consumed by ComfyUI: maps node identifiers to their implementing
# classes so the nodes appear in the editor.
NODE_CLASS_MAPPINGS = {
    "RunningHub OneReward Loader": RH_OneReward_Loader,
    "RunningHub OneReward Sampler": RH_OneReward_Sampler,
    "RunningHub OneReward Eraser": RH_OneReward_Eraser,
}

# Human-readable display names shown in the ComfyUI node picker; here they
# intentionally mirror the registry keys.
NODE_DISPLAY_NAME_MAPPINGS = {
    "RunningHub OneReward Loader": "RunningHub OneReward Loader",
    "RunningHub OneReward Sampler": "RunningHub OneReward Sampler",
    "RunningHub OneReward Eraser": "RunningHub OneReward Eraser",
} 