import os
import argparse

# Parse the GPU selection flag BEFORE torch is imported below, so that
# CUDA_VISIBLE_DEVICES is already set when torch initializes CUDA.
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cuda', type=str, default='0', help='CUDA device(s) to use')
args, _ = parser.parse_known_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
osj = os.path.join  # shorthand for os.path.join, used throughout the script

import util_for_huggingface

from PIL import Image, ImageOps
import torch
import torch.nn.functional as F
from transformers import AutoModelForImageSegmentation
from torchvision.transforms.functional import normalize
import numpy as np
from tqdm import tqdm

def preprocess_image(im: np.ndarray, model_input_size: list) -> torch.Tensor:
    """Convert an HxWxC (or HxW grayscale) uint8 image into a normalized
    1xCxH'xW' float tensor sized for the segmentation model.

    Args:
        im: image as a numpy array, HxWxC or HxW (grayscale).
        model_input_size: target (height, width) for the model input.

    Returns:
        Float tensor of shape (1, 3, height, width): values scaled to
        [0, 1] then shifted by -0.5 (mean 0.5, std 1.0 normalization).
    """
    if im.ndim < 3:
        # promote grayscale to an explicit channel axis
        im = im[:, :, np.newaxis]
    if im.shape[2] == 1:
        # BUG FIX: replicate single-channel input to 3 channels; the original
        # crashed in normalize() with a 1-channel tensor vs a 3-element mean
        im = np.repeat(im, 3, axis=2)
    im_tensor = torch.tensor(im, dtype=torch.float32).permute(2, 0, 1)
    im_tensor = F.interpolate(im_tensor.unsqueeze(0), size=model_input_size, mode='bilinear')
    # scale to [0, 1] then subtract 0.5 — exactly equivalent to
    # torchvision normalize(mean=[0.5]*3, std=[1.0]*3), without the dependency
    return im_tensor / 255.0 - 0.5

def postprocess_image(result: torch.Tensor, im_size: list) -> np.ndarray:
    """Resize a model output map to im_size and min-max scale it to uint8.

    Args:
        result: tensor of shape (1, C, h, w) — a raw model output map.
        im_size: target (height, width), normally the original image size.

    Returns:
        uint8 numpy array of shape (height, width) (singleton channel axis
        squeezed away), min-max normalized to the full 0..255 range.
    """
    result = torch.squeeze(F.interpolate(result, size=im_size, mode='bilinear'), 0)
    mi = torch.min(result)
    ma = torch.max(result)
    span = ma - mi
    if span > 0:
        result = (result - mi) / span
    else:
        # BUG FIX: a constant map made the original divide by zero (NaN output)
        result = torch.zeros_like(result)
    im_array = (result * 255).permute(1, 2, 0).cpu().numpy().astype(np.uint8)
    return np.squeeze(im_array)

def load_rmbg():
    """Download/load the briaai RMBG-1.4 segmentation model and move it
    to the first CUDA device when one is available, else keep it on CPU.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    rmbg = AutoModelForImageSegmentation.from_pretrained(
        "briaai/RMBG-1.4", trust_remote_code=True
    )
    rmbg.to(device)
    return rmbg




def get_mask_by_rmbg(model, img_pil):
    """Run the RMBG segmentation model on a PIL image.

    Args:
        model: loaded RMBG model (see load_rmbg); must expose `.device`.
        img_pil: input PIL image.

    Returns:
        (mask_pil, no_bg_image): the grayscale mask as a PIL image, and a
        copy of the input with the mask applied as its alpha channel.
    """
    # BUG FIX: PIL .size is (width, height) but F.interpolate's `size`
    # expects (height, width); the original transposed non-square images.
    h_w = list(img_pil.size[::-1])

    image = preprocess_image(np.array(img_pil), h_w).to(model.device)

    # inference — no gradients needed, saves memory
    with torch.no_grad():
        result = model(image)

    # first output map, resized back to the original image dimensions
    mask_np = postprocess_image(result[0][0], h_w)
    mask_pil = Image.fromarray(mask_np)

    no_bg_image = img_pil.copy()
    no_bg_image.putalpha(mask_pil)

    return mask_pil, no_bg_image

def test_rmbg():
    """Smoke test: segment one sample image, save its mask to tmp.jpg, and
    build a white-background composite.

    Returns:
        (mask_pil, masked_img): the grayscale mask and the RGB composite.
    """
    image_path = "/mnt/nas/shengjie/datasets/cloth_collar_balanced/collar_0000994.jpg"

    from util_flux import process_img_1024
    # BUG FIX: the original loaded/processed the image twice (duplicate call)
    img = process_img_1024(image_path)

    model = load_rmbg()
    mask_pil, _ = get_mask_by_rmbg(model, img)

    mask_pil.save('tmp.jpg')

    if mask_pil is not None:
        # pad the mask to the processed frame, then composite: the white
        # regions of the (inverted) mask are filled with the background color
        mask_pil = process_img_1024(mask_pil, pad_color=(0, 0, 0)).convert('L')
        black_mask = Image.new('RGB', img.size, 'white')
        masked_img = Image.composite(
            black_mask, img, ImageOps.invert(mask_pil)
        )
    else:
        masked_img = img

    # BUG FIX: convert() returns a new image; the original discarded the result
    masked_img = masked_img.convert('RGB')

    return mask_pil, masked_img


def get_masked_img(image_path, rmbg_model, color='white'):
    """Load an image, segment the foreground with RMBG, and fill the
    background with a solid color.

    Args:
        image_path: path (or input accepted by process_img_1024) of the image.
        rmbg_model: loaded RMBG model.
        color: fill used for the background region.

    Returns:
        (mask_pil, masked_img): the grayscale mask (or None) and the RGB
        composite with the background replaced by `color`.
    """
    from util_flux import process_img_1024

    source = process_img_1024(image_path)
    mask_pil, _ = get_mask_by_rmbg(rmbg_model, source)

    if mask_pil is None:
        composite = source
    else:
        mask_pil = process_img_1024(mask_pil, pad_color=(0, 0, 0)).convert('L')
        fill = Image.new('RGB', source.size, color)
        # white areas of the mask keep the source; the rest takes the fill
        composite = Image.composite(fill, source, ImageOps.invert(mask_pil))

    return mask_pil, composite.convert('RGB')

if __name__ == '__main__':
    model = load_rmbg()

    # source directory of images and the matching output directory
    # (renamed from `dir`, which shadowed the builtin)
    image_dir = '/mnt/nas/shengjie/datasets/KontextRefControl_extractclo_upper/train/image'
    save_dir = f'{image_dir}_rmbg'
    os.makedirs(save_dir, exist_ok=True)

    # names.txt lists one file name per line; keep only non-empty .jpg entries
    names_path = osj(image_dir, 'names.txt')
    with open(names_path, "r", encoding="utf-8") as f:
        all_names = [line.strip() for line in f if line.strip()]
    jpg_names = [name for name in all_names if name.lower().endswith('.jpg')]

    for jpg_name in tqdm(jpg_names):
        path = osj(image_dir, jpg_name)
        # save only the white-background composite; the mask is discarded
        _, masked_img = get_masked_img(path, model, 'white')
        masked_img.save(osj(save_dir, jpg_name))