from mask import (bbox2mask, brush_stroke_mask, get_irregular_mask, random_bbox, bbox2mask_uncropping, random_cropping_bbox)
import copy
import numpy as np
import os
import os.path as osp
import cv2
import torchvision.transforms as T
import torch
from PIL import Image
from PIL import ImageFile

def get_outpaint_mask(image_size):
    """Build a random outpainting (uncropping) mask for ``image_size``.

    Unknown (to-be-generated) pixels are 1, known pixels are 0.
    Returns an array of shape (h, w, 1).
    """
    # 50/50 pick between a one-direction multiscale crop and a
    # four-direction crop.
    mode = 'onedirection_multiscale' if np.random.randint(0, 2) < 1 else 'fourdirection'
    bbox = random_cropping_bbox(img_shape=image_size, mask_mode=mode)
    return bbox2mask_uncropping(image_size, bbox)  # h,w,1

def get_inpaint_mask(image_size):
    """Build a random inpainting mask (unknown pixels = 1, known = 0).

    The mask is the union of a random rectangular hole (capped at
    200x200) and a random brush-stroke pattern.
    """
    box = random_bbox(img_shape=image_size, max_bbox_shape=(200, 200))
    rect_mask = bbox2mask(image_size, box)
    stroke_mask = brush_stroke_mask(image_size, num_vertices=(4, 8), brush_width=(12, 30))
    return rect_mask | stroke_mask

if __name__ == '__main__':
    # Quick visual smoke test: resize a sample image, generate a random
    # inpainting mask, and write both to disk for inspection.
    imgsize = [1024, 1024]  # [h, w]

    # Resize the sample image to the mask size and save a copy next to it.
    img = Image.open('/llmcapagroup1/test-bucket/liuxin/tozky/HunyuanDiT-main/k_second_buliubai_2-1280-768_00019.jpg')
    img = img.resize((1024, 1024))
    img.save('/llmcapagroup1/test-bucket/liuxin/tozky/HunyuanDiT-main/k_second_buliubai_2-1280-768_00019_1.jpg')

    # NOTE(review): this directory is created but the mask below is still
    # written to the current directory, as in the original script.
    savepath = 'mask/outpaint'
    os.makedirs(savepath, exist_ok=True)

    mask = get_inpaint_mask(imgsize)  # (h, w, 1), unknown=1 / known=0
    # Binarize once — the original thresholded twice in a row (redundant).
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    print(mask.shape, mask.max(), mask.min())

    # Scale {0,1} to {0,255} and dump as an 8-bit image.
    mask = (mask * 255).astype(np.uint8)
    cv2.imwrite('mask.jpg', mask)