import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import  time
import numpy as np
from .transform import CenterNet_Transform
from lib.utils.viz import cv_plot_img_bbox_landm, plot_img_bbox_landm

class Face():
    """One annotated face: a bounding box plus optional landmarks and a label."""

    def __init__(self, bbox, landmark=None, label=0):
        # bbox: face bounding box; landmark: optional facial keypoints
        # (None when the annotation has no landmarks); label: class id, 0 = face.
        self.bbox, self.landmark, self.label = bbox, landmark, label

    def haslandmark(self):
        """Return True when landmark annotations are attached to this face."""
        return self.landmark is not None

class WiderFaceDetection(data.Dataset):
    """WIDER FACE detection dataset.

    Each item is an (image, targets) pair where image is a normalized
    CHW float tensor and targets is a float array with one row per face:
      train:    [x1, y1, x2, y2, 10 landmark coords, label]  -> 15 columns
      val/test: [x1, y1, x2, y2, remaining annotation fields] -> 7 columns
    """

    def __init__(self, cfg, split='train'):
        '''
        cfg: config dict providing dataset.root, dataset.anno_root and
             dataset.image_size
        split: mode, 'train', 'val', 'test'
        '''
        assert split in ('train', 'val', 'test'), 'dataset mode error'
        self.split = split
        self.image_size = cfg['dataset']['image_size']
        self.root_path = '%s/WIDER_%s/images' % (cfg['dataset']['root'], split)
        self.anno_path = '%s/%s/label.txt' % (cfg['dataset']['anno_root'], split)

        # Parse the label file: a '# <relative image path>' header line is
        # followed by one whitespace-separated annotation row per face.
        self.im_annos = {}
        name = None  # current image name; None until the first header line
        for line in open(self.anno_path, 'r'):
            line = line.strip()
            if line.startswith('#'):
                name = line[1:].strip()
                self.im_annos[name] = []
                continue
            if name is None:
                # A malformed file would previously die with a NameError;
                # fail with an explicit, explanatory error instead.
                raise ValueError(
                    'annotation row before any "#" header in %s' % self.anno_path)
            self.im_annos[name].append([float(x) for x in line.split()])
        # Freeze into an indexable list of (image_path, annotation_rows).
        self.im_annos = list(self.im_annos.items())

    def __getitem__(self, index):
        img_path, annos = self.im_annos[index]
        img = cv2.imread(osp.join(self.root_path, img_path))
        im_h, im_w = img.shape[:2]
        if self.split == 'train':
            targets = np.zeros((0, 15))
            for anno in np.array(annos):
                x1, y1, w, h = anno[:4]
                # Clip the box to the image. The bottom-right corner must be
                # computed from the ORIGINAL top-left before clamping it;
                # clamping x1/y1 first would shift the far edge outward by
                # |x1| instead of clipping the box.
                x2, y2 = min(x1 + w, im_w), min(y1 + h, im_h)
                x1, y1 = max(0, x1), max(0, y1)

                # Annotation row is [x1, y1, w, h, 5x(lx, ly, vis), blur];
                # keep only the (x, y) of each of the 5 landmarks.
                landm = anno[4:-1].reshape(5, 3)
                landm = landm[:, :2].reshape(-1)
                label = 0  # single class: face
                target = np.hstack(([x1, y1, x2, y2], landm, label))
                targets = np.vstack((targets, target))
        else:
            targets = np.zeros((0, 7))
            for anno in np.array(annos):
                x1, y1, w, h = anno[:4]
                # Same clip-before-clamp ordering as the train branch.
                x2, y2 = min(x1 + w, im_w), min(y1 + h, im_h)
                x1, y1 = max(0, x1), max(0, y1)
                target = np.hstack(([x1, y1, x2, y2], anno[4:]))
                targets = np.vstack((targets, target))

        # Geometric/photometric augmentation and resize to self.image_size;
        # boxes and landmarks in targets are transformed along with the image.
        img, targets = CenterNet_Transform(img, targets, self.image_size, self.split)

        # BGR -> RGB, normalize to roughly [-1, 1], HWC -> CHW.
        img = img[:, :, ::-1]
        img = img.astype(np.float32)
        img = (img - 127.5) / 128.0
        img = img.transpose(2, 0, 1)
        return torch.FloatTensor(img), targets

    def __len__(self):
        return len(self.im_annos)

    def detection_collate(self, batch):
        """Collate fn: stack images into one tensor, keep per-image targets
        as a list of float tensors (faces-per-image varies)."""
        targets = []
        imgs = []
        for sample in batch:
            imgs.append(sample[0])
            targets.append(torch.FloatTensor(sample[1]))
        return torch.stack(imgs, 0), targets