import json
import mxnet as mx
import os
import numpy as np
import logging
import cv2
from data.bbox.bbox_dataset import DetectionDataset
class retailDetDataset(DetectionDataset):
    """Detection dataset over (image_path, annotation_list) pairs for the retail data."""

    def __init__(self, objs):
        super(retailDetDataset, self).__init__()
        # Each entry is (image_path, [box_dict, ...]) as produced by get_retail_dataset.
        self.objs = objs
        self.classes = ["person",]

    def at_with_image_path(self, idx):
        """Return (image_path, boxes) for sample `idx`.

        `boxes` is an (N, 11) float array with rows
        [xmin, ymin, xmax, ymax, class_id, customer, sit, gender, stand,
         play_with_phone, staff].
        """
        path, raw_boxes = self.objs[idx]
        rows = [
            [
                box["minx"],
                box["miny"],
                box["maxx"],
                box["maxy"],
                self.classes.index(box["name"]),
                box["customer"],
                box["sit"],
                box["gender"],
                box["stand"],
                box["play_with_phone"],
                box["staff"],
            ]
            for box in raw_boxes
        ]
        return path, np.array(rows).astype(float)

    def __len__(self):
        return len(self.objs)

def get_retail_dataset():
    """Build train/val detection datasets from the retail image/label tree.

    Walks `img_root`, pairs each .jpg with its .json label file under
    train_label/, drops images that have no annotated objects, and splits
    the remainder 90/10 (fixed seed for reproducibility).

    Returns:
        (train_da, val_da): two `retailDetDataset` instances.
    """
    from sklearn.model_selection import train_test_split
    root = "/data1/zyx/yks/dataset/retail/"
    img_root = "/data1/zyx/yks/dataset/retail/train_image/"
    all_imgs = []
    for r, _, names in os.walk(img_root):
        for name in names:
            path = os.path.join(r, name)
            anno_path = path.replace("train_image", "train_label").replace(".jpg", ".json")
            # Close the label file promptly instead of leaking the handle.
            with open(anno_path, "rb") as f:
                anno = json.load(f)["annotation"][0]["object"]
            if len(anno) > 0:
                all_imgs.append((path, anno))
            else:
                print("ignore", path)
    train_objs, val_objs = train_test_split(all_imgs, random_state=43, test_size=.1)
    # BUG FIX: the training set was previously built from `all_imgs`, so every
    # validation image also appeared in training (data leakage); use the split.
    train_da = retailDetDataset(train_objs)
    val_da = retailDetDataset(val_objs)
    return train_da, val_da
class RetailClassficationDataset(DetectionDataset):
    """Classification dataset built from detector output crops.

    Each item is an image crop of a detected box plus a multi-task label
    vector [matched, customer, sit, gender, stand, play_with_phone, staff]:
    when the detection overlaps a ground-truth box with IoU > 0.7 the
    attribute labels are copied from that box, otherwise they are zero and
    masked out via the returned mask.
    """

    def __init__(self, transforms=None):
        super(RetailClassficationDataset, self).__init__()
        # Detector result file: one "name\tx0\ty0\tx1\ty1\tscore\tcls" per line.
        path = "/data1/zyx/yks/sources/Detectron/output/retail/results_1538403944.92.txt"
        objs = {}
        all_boxes = []
        # BUG FIX: use `with` so the result file is closed, and catch only the
        # ValueError raised by a malformed line (the old bare `except:` could
        # silently swallow unrelated errors, including KeyboardInterrupt).
        with open(path, "rt") as a:
            for line in a:
                try:
                    name, x0, y0, x1, y1, score, cls = line.strip().split("\t")
                except ValueError:
                    continue
                if float(score) < 0.01:
                    continue
                x0 = float(x0)
                y0 = float(y0)
                x1 = float(x1)
                y1 = float(y1)
                cls = int(cls) - 1  # detector classes are 1-based; shift to 0-based
                all_boxes.append([name, x0, y0, x1, y1])
                objs.setdefault(name, []).append([x0, y0, x1, y1, cls, 0])
        self.objs = [(k, objs[k]) for k in objs.keys()]
        self.classes = ["bike", "bus"]
        self.img_root = "/data1/zyx/yks/dataset/retail/test_images/test"
        self.train_da, self.val_da = get_retail_dataset()
        # Map image basename -> ground-truth annotation array. Built in one
        # pass; the old dict comprehension called at_with_image_path twice
        # per index (double work).
        self.path2anno = {}
        for i in range(len(self.train_da)):
            img_path, anno = self.train_da.at_with_image_path(i)
            self.path2anno[os.path.basename(img_path)] = anno
        # Keep only detections whose image has ground truth available.
        self.all_boxes = [b for b in all_boxes
                          if os.path.basename(b[0]) in self.path2anno]
        self.transforms = transforms

    def __getitem__(self, idx):
        """Return (crop, label, mask) for detection `idx`.

        label/mask are float32 arrays of length 7; mask is all-ones only
        when the detection matched a ground-truth box (IoU > 0.7).
        """
        name, x0, y0, x1, y1 = self.all_boxes[idx]
        box1 = mx.nd.array([[x0, y0, x1, y1]])
        box2_anno = self.path2anno[os.path.basename(name)]
        box2 = mx.nd.array(box2_anno)[:, :4]
        overlaps = mx.nd.contrib.box_iou(box1, box2).asnumpy()
        other_label = [0] * 6
        if np.max(overlaps) > .7:
            overlap_label = 1
            o_idx = overlaps.argmax(axis=1).squeeze()
            # Last 6 columns of the annotation row are the attribute labels
            # (customer, sit, gender, stand, play_with_phone, staff).
            other_label = box2_anno[o_idx, -6:].tolist()
        else:
            overlap_label = 0
        label = [overlap_label] + other_label
        mask = [overlap_label] * len(other_label)
        # NOTE(review): assumes the crop image exists under all_images;
        # cv2.imread returns None on a missing file — confirm upstream.
        ori_img = cv2.imread(os.path.join("/data1/zyx/yks/dataset/retail/all_images", os.path.basename(name)))
        img = ori_img[int(y0):int(y1), int(x0):int(x1)]
        if self.transforms is not None:
            img = self.transforms(img)
        return img, np.array(label).astype('f'), np.array(mask).astype('f')

    def __len__(self):
        return len(self.all_boxes)
if __name__ == '__main__':
    # Smoke test: build the classification dataset and iterate every sample.
    dataset = RetailClassficationDataset()
    for _sample in dataset:
        pass
# class retailCLassfication(object):
#     def __init__(self):
#         pass
#     def __getitem__(self, item):
#         pass
#     def __len__(self):
#         pass
