import argparse
import datetime
import os.path as osp
import time

import numpy as np
import torch
import torch.utils.data
from torchvision.transforms import functional as F

from datasets import build_test_loader, build_train_loader
from defaults import get_default_cfg
from engine import evaluate_performance, train_one_epoch
from models.seqnet import SeqNet
from utils.utils import mkdir, resume_from_ckpt, save_on_master,\
    set_random_seed,resume_from_ckpt_model_only,resume_from_ckpt_model_only_psmlc
import pdb
import cv2
from PIL import Image
from utils.transforms import build_transforms


def draw_det_bbox(img_path, dets):
    """Visualize detections on an image.

    Draws every box whose score exceeds 0.9, writes each person crop and
    the annotated image to the current directory, and displays them with
    OpenCV windows (blocking on key presses).

    Args:
        img_path: Path to the source image, read with ``cv2.imread``.
        dets: Iterable of per-image detection arrays; each row is
            ``[x1, y1, x2, y2, score]``.
    """
    im = cv2.imread(img_path)
    print(dets)

    base = img_path.split('/')[-1]  # filename only, reused for all outputs
    for boxes in dets:
        for j, box in enumerate(boxes):
            # Confidence filter first: skip before doing any drawing work.
            # (Score is the last column of each row.)
            if box[-1] < 0.9:
                continue
            x1, y1, x2, y2 = (int(v) for v in box[:4])
            im = cv2.rectangle(im, (x1, y1), (x2, y2), color=(255, 255, 0), thickness=2)
            person = im[y1:y2, x1:x2, :]
            cv2.imwrite(base + str(j) + 'detssm.jpg', person)
            cv2.imshow('e', person)
            cv2.waitKey()

    cv2.imwrite(base + 'detssm.jpg', im)
    # Show the fully annotated image at half resolution.
    im = cv2.resize(im, (int(im.shape[1] / 2), int(im.shape[0] / 2)))
    cv2.imshow('d', im)
    cv2.waitKey()

def detector_all(img_path=None, model=None, device=None):
    """Run the detector on one image and return every raw detection.

    The image is loaded, transformed with the eval-time pipeline, moved
    to ``device``, and passed through ``model.inference``. No score or
    size filtering is applied here.

    Returns:
        (dets, feats): ``dets`` is a list with one ``(N, 5)`` numpy array
        of ``[x1, y1, x2, y2, score]`` rows per inference output;
        ``feats`` is always an empty list in this variant.
    """
    pil_img = Image.open(img_path).convert("RGB")
    eval_tfms = build_transforms(is_train=False)
    tensor_img, _ = eval_tfms(pil_img, target=None)
    tensor_img = tensor_img.to(device)

    dets = []
    feats = []
    for out in model.inference([tensor_img]):
        # Append the detection score as a fifth column next to the box.
        stacked = torch.cat([out["boxes"], out["scores"].unsqueeze(1)], dim=1)
        dets.append(stacked.cpu().detach().numpy())

    draw_det_bbox(img_path, dets)
    return dets, feats

def detector(img_path=None, model=None, device=None):
    """Run the detector on one image, keeping only confident, large boxes.

    Detections must have score > 0.9 and box area > 3000 px to survive;
    the matching re-id embeddings are filtered identically so the two
    outputs stay aligned.

    Returns:
        (dets, feats): per-output lists of filtered ``(N, 5)`` arrays of
        ``[x1, y1, x2, y2, score]`` rows and matching embedding arrays.
    """
    query_img = Image.open(img_path).convert("RGB")
    transforms = build_transforms(is_train=False)
    img, _ = transforms(query_img, target=None)
    img = img.to(device)

    dets = []
    feats = []
    for output in model.inference([img]):
        # Confidence filter (the old name "ind_bigger85" was misleading:
        # the threshold is 0.9, not 0.85).
        scores = output["scores"].cpu().detach().numpy()
        conf_mask = scores > 0.9
        if not conf_mask.any():
            continue

        box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
        box_w_scores = box_w_scores.cpu().detach().numpy()[conf_mask]

        # Area filter: drop boxes smaller than 3000 square pixels.
        h = box_w_scores[:, 3] - box_w_scores[:, 1]
        w = box_w_scores[:, 2] - box_w_scores[:, 0]
        area_mask = h * w > 3000
        if not area_mask.any():
            continue
        box_w_scores = box_w_scores[area_mask]

        # Apply the same two masks to the embeddings to keep alignment.
        f = output["embeddings"].cpu().detach().numpy()[conf_mask][area_mask]
        assert len(f) == len(box_w_scores)

        dets.append(box_w_scores)
        feats.append(f)

    draw_det_bbox(img_path, dets)
    return dets, feats

def detector_box_only(img_path=None,model=None,device=None,targets=None):
    """Return boxes and embeddings for one image.

    If ``targets`` is given, it is treated as ground-truth boxes: the
    model only extracts embeddings for those boxes. Otherwise the
    detector runs and detections with score > 0.9 are kept.

    Returns:
        (boxes, f): boxes without the score column, and the matching
        embeddings as a CPU tensor. Both stay as empty lists when no
        detection passes the threshold.
    """
    query_img = Image.open(img_path).convert("RGB")
    transforms = build_transforms(is_train=False)
    img, target = transforms(query_img, target=None)
    img=img.to(device)
    #query_roi = np.array([0, 0, 466, 943])  # [x1, y1, x2, y2]
    if targets is not None: #use gt boxes
        boxes = targets
        tt={'boxes':torch.tensor(boxes,device=device)}
        # Passing targets makes inference return embeddings for the
        # supplied boxes (one tensor per image), which we concatenate.
        embeddings = model.inference([img], [tt])
        embeddings=torch.cat(embeddings)
        f = embeddings.cpu().detach()

    else:
        outputs = model.inference([img])
        boxes=[]
        f=[]
        # NOTE(review): boxes/f are overwritten (not accumulated) each
        # iteration — presumably a single image yields a single output;
        # verify if this is ever called with batched inputs.
        for output in outputs:
            score = output["scores"].unsqueeze(0).cpu().detach().numpy()
            # print(score,len(score[0]))
            # Confidence filter at 0.9 (variable name says 85, threshold is 0.9).
            ind_bigger85= score>0.9
            if sum(ind_bigger85[0])==0:
                continue
            box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
            box_w_scores =box_w_scores.cpu().detach().numpy()
            box_w_scores = box_w_scores[ind_bigger85[0]]

            # Drop the score column; keep [x1, y1, x2, y2] only.
            boxes=box_w_scores[:,:-1]

            # Filter embeddings with the same mask to keep alignment.
            f = output["embeddings"].cpu().detach()
            f = f[ind_bigger85[0]]
            assert len(f)==len(boxes)

    # draw_det_bbox(img_path,[boxes])
    return boxes,f

def detector_box_only_ssm(img_path=None, model=None, device=None, targets=None):
    """Return filtered and unfiltered boxes/embeddings for one image.

    If ``targets`` is given, it is treated as ground-truth boxes and the
    model only extracts their embeddings. Otherwise the detector runs:
    ``boxes_all``/``f_all`` hold every detection, while ``boxes``/``f``
    hold only those with score > 0.9.

    Returns:
        (boxes, f, boxes_all, f_all): filtered boxes/embeddings and the
        unfiltered versions. Empty lists when nothing is detected.
    """
    query_img = Image.open(img_path).convert("RGB")
    transforms = build_transforms(is_train=False)
    img, target = transforms(query_img, target=None)
    img = img.to(device)

    if targets is not None:  # use gt boxes
        boxes = targets
        tt = {'boxes': torch.tensor(boxes, device=device)}
        embeddings = model.inference([img], [tt])
        embeddings = torch.cat(embeddings)
        f = embeddings.cpu().detach()
        # BUG FIX: boxes_all / f_all were previously never assigned on
        # this branch, so the final return raised NameError. With GT
        # boxes, the "all" results are the GT results themselves.
        boxes_all, f_all = boxes, f

    else:
        outputs = model.inference([img])
        boxes, boxes_all = [], []
        f, f_all = [], []

        # Early exit when the detector found nothing at all.
        if len(outputs[0]['scores']) == 0:
            return boxes, f, boxes_all, f_all

        for output in outputs:
            score = output["scores"].unsqueeze(0).cpu().detach().numpy()
            box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
            # Unfiltered boxes (score column dropped) and embeddings.
            boxes_all = box_w_scores.cpu().detach().numpy()[:, :-1]
            f_all = output["embeddings"].cpu().detach()

            # Confidence filter at 0.9, applied to boxes and embeddings
            # with the same mask so they stay aligned.
            ind_bigger90 = score > 0.9
            if sum(ind_bigger90[0]) == 0:
                continue
            boxes = boxes_all[ind_bigger90[0]]
            f = f_all[ind_bigger90[0]]
            assert len(f) == len(boxes)

    return boxes, f, boxes_all, f_all

if __name__ == "__main__":
    # Hard-coded sample image; override here when testing other images.
    img_path = '/home/cv7609/zjh/ps_raw/ssm/Image/s17209.jpg'

    parser = argparse.ArgumentParser(description="Train a person search network.")
    parser.add_argument("--cfg", default='configs/prw_sup_oim.yaml', help="Path to configuration file.")
    parser.add_argument("--ckpt",
                        default='../author_model/epoch_19.pth',
                        help="Path to checkpoint to resume or evaluate.")
    parser.add_argument(
        "opts", nargs=argparse.REMAINDER, help="Modify config options using the command-line"
    )
    args = parser.parse_args()

    # Build the config: file overrides defaults, CLI opts override the file.
    cfg = get_default_cfg()
    if args.cfg:
        cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    device = torch.device(cfg.DEVICE)
    if cfg.SEED >= 0:
        set_random_seed(cfg.SEED)

    print("Creating model")
    model = SeqNet(cfg)
    model.to(device)
    # Two checkpoint formats are supported; toggle selects the loader.
    use_mcl_model = True
    if use_mcl_model:
        resume_from_ckpt_model_only_psmlc(args.ckpt, model)
    else:
        resume_from_ckpt_model_only(args.ckpt, model)
    model.eval()

    # BUG FIX: the literal string 'cuda' was passed before, ignoring
    # cfg.DEVICE and risking a device mismatch with the model; use the
    # configured device instead.
    detector_all(img_path, model, device)
