'''
Inference code for MUTR, on MeViS
Modified from DETR (https://github.com/facebookresearch/detr)
'''
import argparse
import json
import math
import multiprocessing as mp
import os
import random
import threading
import time
import warnings
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import DataLoader
from tqdm import tqdm

import opts
import util.misc as utils
import datasets.transforms_video as T
from datasets.mevis import TestMeViSDataset
from models import build_model
from tools.colormap import colormap

warnings.filterwarnings('ignore')
# build transform: resize (shorter side to 432), convert to tensor, then
# normalize — the mean/std values are the standard ImageNet statistics.
# NOTE(review): T is the project's video-transform module; presumably
# RandomResize([432]) is deterministic when given a single size — confirm.
transform = T.Compose([
	T.RandomResize([432]),
	T.ToTensor(),
	T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


import zipfile
def zip_folder(source_folder, zip_dir):
	"""Zip the contents of *source_folder* into the archive at *zip_dir*.

	Entry names are made relative to the parent directory of
	*source_folder*, so the folder's own name is kept as the top-level
	directory inside the archive.

	Args:
		source_folder: path of the directory tree to archive.
		zip_dir: destination path of the ``.zip`` file to create.
	"""
	prefix_len = len(os.path.dirname(source_folder))
	# Context manager guarantees the archive is closed (and its central
	# directory flushed) even if a write fails partway through — the
	# original leaked the handle on any exception.
	with zipfile.ZipFile(zip_dir, 'w', zipfile.ZIP_DEFLATED) as zf:
		for dirpath, _dirnames, filenames in os.walk(source_folder):
			for filename in filenames:
				pathfile = os.path.join(dirpath, filename)
				arcname = pathfile[prefix_len:].strip(os.path.sep)
				zf.write(pathfile, arcname)

def setup(rank, world_size):
	"""Initialise the default NCCL process group and pin this process to GPU *rank*.

	Args:
		rank: rank of this process (also used as its CUDA device index).
		world_size: total number of participating processes.

	NOTE(review): ``dist`` was never imported at module level in the
	original file, so calling this raised ``NameError``; fixed by adding
	``import torch.distributed as dist`` to the imports. With no
	``init_method`` given, ``init_process_group`` reads
	MASTER_ADDR/MASTER_PORT from the environment — ensure callers set them.
	"""
	dist.init_process_group("nccl", rank=rank, world_size=world_size)
	torch.cuda.set_device(rank)

def cleanup():
	"""Tear down the default process group created by ``setup``.

	NOTE(review): ``dist`` was never imported at module level in the
	original file, so calling this raised ``NameError``; fixed by adding
	``import torch.distributed as dist`` to the imports.
	"""
	dist.destroy_process_group()

def main(args):
	"""Run MUTR inference on the MeViS validation split.

	Loads the checkpoint named by ``args.resume``, runs the model over every
	(video, expression) pair yielded by ``TestMeViSDataset``, and writes one
	binary PNG mask per frame under
	``<args.output_dir>/Annotations/<video_name>/<exp_id>/<frame>.png``.

	Args:
		args: parsed command-line namespace from ``opts.get_args_parser()``.

	Raises:
		ValueError: if ``args.resume`` is not given — inference requires a
			trained checkpoint.
	"""
	args.masks = True
	# Bug fix: the original used ``==`` (a no-op comparison) instead of
	# assignment, so batch_size was never actually forced to 1.
	args.batch_size = 1

	os.makedirs(args.output_dir, exist_ok=True)

	# fix the seed for reproducibility
	seed = args.seed + utils.get_rank()
	torch.manual_seed(seed)
	np.random.seed(seed)
	random.seed(seed)

	# save path for the predicted masks
	save_path_prefix = os.path.join(args.output_dir, 'Annotations')
	os.makedirs(save_path_prefix, exist_ok=True)

	torch.cuda.set_device(0)
	device = 'cuda'
	model, _, _ = build_model(args)
	model.to(device)
	model_without_ddp = model
	if args.resume:
		checkpoint = torch.load(args.resume, map_location='cpu')
		missing_keys, unexpected_keys = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
		# Profiling counters (total_params / total_ops) in the checkpoint are
		# expected leftovers and not worth reporting.
		unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))]
		if missing_keys:
			print('Missing Keys: {}'.format(missing_keys))
		if unexpected_keys:
			print('Unexpected Keys: {}'.format(unexpected_keys))
	else:
		raise ValueError('Please specify the checkpoint for inference.')

	model.eval()

	# load data
	root = Path(args.mevis_path)
	# Number of frames pushed through the model per forward pass, to bound
	# GPU memory on long clips.
	test_interpolate_chunk_size = 6
	datasets = TestMeViSDataset(root, 'valid', transform)
	data_loader = DataLoader(datasets, batch_size=1, drop_last=False, num_workers=8, shuffle=False)

	progress = tqdm(
		total=len(data_loader),
		position=0,
		desc='testing',
		ncols=0
	)
	with torch.no_grad():
		for samples, targets in data_loader:
			targets['size'] = targets['size'].squeeze(0).to(device)
			# F.interpolate expects plain ints for ``size``, not 0-d tensors.
			origin_h, origin_w = (int(v) for v in targets['orig_size'].squeeze(0))
			samples = samples.squeeze(0).to(device)
			frames = targets['frames']
			exp_id = targets['exp_id']
			video_name = targets['video_name']
			for i in range(math.ceil(len(frames) / test_interpolate_chunk_size)):
				inputs = samples[i * test_interpolate_chunk_size: (i + 1) * test_interpolate_chunk_size, :, :, :]
				sub_frame_names = frames[i * test_interpolate_chunk_size: (i + 1) * test_interpolate_chunk_size]
				exp = targets['caption']
				outputs = model([inputs], exp, [targets])
				if args.me != 'tlggnv1':
					pred_logits = outputs["pred_logits"][0]  # [t, q, k]
					pred_masks = outputs["pred_masks"][0]    # [t, q, h, w]
					# Pick the single query with the highest mean score over time.
					pred_scores = pred_logits.sigmoid()  # [t, q, k]
					pred_scores = pred_scores.mean(0)    # [q, k]
					max_scores, _ = pred_scores.max(-1)  # [q]
					_, max_ind = max_scores.max(-1)      # scalar query index
					pred_masks = pred_masks[range(pred_masks.size()[0]), max_ind]  # [t, h, w]
					pred_masks = pred_masks.unsqueeze(0)
					# Bug fix: keyword was misspelled ``align__corners`` in the
					# original, which made this call raise TypeError at runtime.
					pred_masks = F.interpolate(pred_masks, size=(origin_h, origin_w), mode='bilinear', align_corners=False)
					pred_masks = (pred_masks.sigmoid() > args.threshold).squeeze(0).detach().cpu().numpy()
					all_pred_masks = pred_masks
				else:
					all_pred_masks = []
					pred_logits = outputs["pred_logits"]  # [t, n, c]
					pred_masks = outputs["pred_masks"]    # [t, n, h, w]
					# Keep every query whose (non-background) score exceeds 0.7
					# and merge their masks into one binary mask per frame.
					pred_scores = F.softmax(pred_logits, dim=-1)[:, :, :-1]  # [t, n, 1]
					idxs = pred_scores.squeeze(-1) > 0.7  # [t, n]
					for pred_mask, idx in zip(pred_masks, idxs):
						pred_mask = pred_mask[idx].unsqueeze(1)  # [k, 1, h, w]
						pred_mask = F.interpolate(pred_mask, size=(origin_h, origin_w), mode='bilinear', align_corners=False) > 0
						pred_mask = pred_mask.sum(dim=0, keepdim=False).clamp(max=1).squeeze(0)  # [h, w]
						all_pred_masks.append(pred_mask)
					# Bug fix: the saving loop below calls ``.astype`` (a NumPy
					# method), so the stacked torch tensor must be converted —
					# the original left it as a tensor and raised AttributeError.
					all_pred_masks = torch.stack(all_pred_masks, dim=0).cpu().numpy()

				# save binary masks as images
				save_path = os.path.join(save_path_prefix, video_name[0], exp_id[0])
				os.makedirs(save_path, exist_ok=True)
				for j, frame_name in enumerate(sub_frame_names):
					mask = all_pred_masks[j].astype(np.float32)
					mask = Image.fromarray(mask * 255).convert('L')
					save_file = os.path.join(save_path, frame_name[0] + ".png")
					mask.save(save_file)

			progress.update(1)
	progress.close()
  
if __name__ == '__main__':
	# NOTE(review): presumably 'spawn' is required because CUDA is used in
	# the parent process while DataLoader forks workers — confirm.
	torch.multiprocessing.set_start_method('spawn')
	arg_parser = argparse.ArgumentParser('MUTR inference script', parents=[opts.get_args_parser()])
	cli_args = arg_parser.parse_args()
	main(cli_args)

