import cv2
from region_selector import select_region
from cmd_utils import Stdout_progressbar
import torch
import os
from PIL import Image
import numpy as np
import torch.nn.functional as F
from glb import get_seggpt_model, DEVICE

# Standard ImageNet per-channel RGB normalization statistics (mean / std),
# applied to both the image and the mask prompt before they are fed to SegGPT.
imagenet_mean = np.array([0.485, 0.456, 0.406])
imagenet_std = np.array([0.229, 0.224, 0.225])

def main(cap, frame0, video_path, status_dict, task_id):
    """Segment an object through a whole video with SegGPT.

    The user selects a region on ``frame0``; that frame/mask pair is then used
    as the in-context prompt for every frame of the video.  Two MP4s are
    written under ``seg_out/``: an overlay video (segmented region at full
    brightness, rest dimmed) and a binary-mask video.

    Args:
        cap: opened ``cv2.VideoCapture`` for the video (falsy aborts early).
        frame0: first frame of the video as a BGR uint8 array.
        video_path: path of the source video; only its basename is used to
            name the output files.
        status_dict: shared task-state mapping; ``status_dict[task_id]`` has
            ``'percentage'`` (written here) and ``'status'`` (read here — the
            value ``'Stopped'`` aborts processing).
        task_id: key into ``status_dict``.

    Returns:
        Path of the written mask video, or ``None`` when ``cap`` is falsy or
        the user selected no region.
    """
    if not cap:
        return None
    mask0 = select_region(frame0)
    if mask0 is None:
        return None
    model = get_seggpt_model()

    num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    stdoutpb = Stdout_progressbar(num)
    stdoutpb.reset()
    status_dict[task_id]['percentage'] = 0

    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Name outputs after the source video; splitext also handles names
    # without an extension (the old '.'-split produced an empty stem there).
    stem = os.path.splitext(os.path.basename(video_path))[0]
    out_path = os.path.join("seg_out", "output_" + stem + '.mp4')
    mask_path = os.path.join("seg_out", "mask_" + stem + '.mp4')
    # VideoWriter fails silently when the target directory does not exist.
    os.makedirs("seg_out", exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height), True)
    mask_writer = cv2.VideoWriter(mask_path, fourcc, fps, (width, height), True)

    # SegGPT works on a fixed 448x448 canvas; prepare the prompt pair once.
    res, hres = 448, 448
    img = Image.fromarray(frame0[:, :, ::-1]).convert('RGB')  # BGR -> RGB
    size = img.size  # (width, height) of the original frames
    img = np.array(img.resize((res, hres))) / 255.
    tgt = Image.fromarray(mask0).convert('RGB')
    tgt = np.array(tgt.resize((res, hres))) / 255.

    # Main loop over every frame.
    cnt = 0
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # rewind; was the magic `cap.set(1, 0)`
    while True:
        if status_dict[task_id]['status'] == 'Stopped':
            print(f'task {task_id} aborted')
            break
        ret, frame = cap.read()
        if not ret:
            break
        image = Image.fromarray(frame[:, :, ::-1]).convert('RGB')
        input_image = np.array(image)
        image = np.array(image.resize((res, hres))) / 255.

        # Stack prompt over query vertically — SegGPT's in-context format.
        img2 = np.concatenate((img, image), axis=0)
        tgt2 = np.concatenate((tgt, tgt), axis=0)
        assert img2.shape == (2 * res, res, 3), f'{img2.shape}'
        assert tgt2.shape == (2 * res, res, 3), f'{tgt2.shape}'
        img2 = (img2 - imagenet_mean) / imagenet_std
        tgt2 = (tgt2 - imagenet_mean) / imagenet_std
        img_batch = np.expand_dims(img2, axis=0)
        tgt_batch = np.expand_dims(tgt2, axis=0)

        torch.manual_seed(2)  # keep any stochastic ops deterministic per frame
        output = run_one_image(img_batch, tgt_batch, model, DEVICE)
        cnt += 1
        percentage = stdoutpb.update(cnt)
        status_dict[task_id]['percentage'] = percentage

        # Threshold the 448x448 prediction, then upsample the binary mask to
        # the original frame size.  (The full-size interpolation of the raw
        # prediction that used to happen here was dead work: its result was
        # immediately overwritten by the overlay below.)
        binary_mask = output.mean(-1).gt(128).float().unsqueeze(-1).expand(-1, -1, 3).numpy()
        mask_full_size = F.interpolate(
            torch.tensor(binary_mask)[None, ...].permute(0, 3, 1, 2),
            size=[size[1], size[0]],
            mode='nearest',
        ).permute(0, 2, 3, 1)[0].numpy()

        # Segmented region at full brightness, the rest dimmed to 40%.
        overlay = input_image * (0.6 * mask_full_size + 0.4)
        video_writer.write(np.ascontiguousarray(overlay.astype(np.uint8)[:, :, ::-1]))
        mask_frame = (mask_full_size * 255).astype(np.uint8)
        mask_writer.write(np.ascontiguousarray(mask_frame[:, :, ::-1]))

    video_writer.release()
    mask_writer.release()
    cap.release()
    return mask_path
    
@torch.no_grad()
def run_one_image(img, tgt, model, device):
    """Run one SegGPT forward pass on a pre-normalized prompt+query pair.

    ``img`` and ``tgt`` are NHWC float arrays of shape (N, 2*448, 448, 3)
    (prompt stacked above query, ImageNet-normalized).  Returns the predicted
    bottom half as an HWC tensor, de-normalized and clipped to [0, 255].
    """
    # NHWC numpy arrays -> NCHW float tensors, as the model expects.
    images = torch.tensor(img).permute(0, 3, 1, 2)
    targets = torch.tensor(tgt).permute(0, 3, 1, 2)

    # Mask the second half of the patch sequence: the prompt half stays
    # visible, the query half must be predicted by the model.
    num_patches = model.patch_embed.num_patches
    bool_masked_pos = torch.zeros(num_patches)
    bool_masked_pos[num_patches // 2:] = 1
    bool_masked_pos = bool_masked_pos.unsqueeze(dim=0)

    valid = torch.ones_like(targets)
    # 1 selects instance segmentation, 0 semantic segmentation.
    flag = 1.0 if model.seg_type == 'instance' else 0.0
    seg_type = torch.full([valid.shape[0], 1], flag)

    # Feature ensembling only makes sense with more than one prompt.
    feat_ensemble = -1 if len(images) == 1 else 0
    _, pred, _ = model(
        images.float().to(device),
        targets.float().to(device),
        bool_masked_pos.to(device),
        valid.float().to(device),
        seg_type.to(device),
        feat_ensemble,
    )
    pred = model.unpatchify(pred)
    pred = pred.permute(0, 2, 3, 1).detach().cpu()

    # Keep only the predicted (bottom) half and undo ImageNet normalization.
    result = pred[0, pred.shape[1] // 2:, :, :]
    return torch.clip((result * imagenet_std + imagenet_mean) * 255, 0, 255)

if __name__ == "__main__":
    # Bug fix: the old guard called main() with no arguments, which always
    # raised TypeError — main() needs (cap, frame0, video_path, status_dict,
    # task_id) supplied by a driver.  Fail with a clear message instead.
    raise SystemExit(
        "This module is not a standalone script: call "
        "main(cap, frame0, video_path, status_dict, task_id) from a driver."
    )

