import os
import numpy as np
import random
import pickle

from easydict import EasyDict
from scipy.io import loadmat

# Fix RNG seeds for reproducibility.
# NOTE(review): nothing below actually draws random numbers — presumably kept
# for parity with sibling dataset scripts; confirm before removing.
np.random.seed(0)
random.seed(0)

# Natural-language prompts for the 14 WIDER pedestrian attributes, in the same
# order as the columns of the ground-truth label files (see the shape assert in
# generate_data_description). These strings are consumed downstream as-is, so
# they must not be edited casually.
# NOTE(review): phrasing is inconsistent (some entries drop the leading "A",
# one has a leading space) — looks unintentional, but changing any string
# changes the emitted dataset; confirm with consumers before normalizing.
attr_words = [
    'A pedestrian whose gender is male',
    'A pedestrian whose hair is long', 'pedestrian wearing sunglasses', ' pedestrian wearing a hat',
    'A pedestrian whose upper is t-shirt', 'A pedestrian whose upper is long sleeve', 'A pedestrian whose upper is formal suits', 
    'A pedestrian whose lower is shorts','A pedestrian whose lower is jeans', 'A pedestrian whose lower is long pants','A pedestrian whose lower is skirt',
    'A pedestrian who is face mask','A pedestrian who is logo','A pedestrian who is plaid'
]

def make_dir(path):
    """Create directory *path* (and any missing parents) if it does not exist.

    Replaces the original check-then-mkdir sequence, which was racy (the
    directory could appear/disappear between the check and the call) and
    failed when intermediate parent directories were missing.

    Args:
        path: Directory path to create.
    """
    os.makedirs(path, exist_ok=True)

def _read_lines(path):
    """Return the lines of a text file with trailing newlines stripped."""
    with open(path, 'r', encoding='utf8') as f:
        return [line.strip('\n') for line in f]


def _read_binary_labels(path):
    """Parse a ground-truth label file into a list of 0/1 attribute vectors.

    Each line looks like ``[v1, v2, ...]``; the ``[1:-2]`` slice drops the
    surrounding bracket and trailing ``]\\n``.  Values <= 0 (absent/unknown)
    map to 0, positive values map to 1.
    """
    labels = []
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            values = line[1:-2].strip().split(",")
            labels.append([0 if int(v) <= 0 else 1 for v in values])
    return labels


def generate_data_description(save_dir):
    """Build the WIDER attribute-dataset description and pickle it.

    Reads image-name lists, binarized ground-truth labels and attribute
    names from ``<save_dir>/Annotations``, assembles an EasyDict with the
    image names, the (N, 14) label matrix, the trainval/test partitions and
    per-attribute trainval frequencies, and dumps it to ``<save_dir>/pad.pkl``.

    Args:
        save_dir: Dataset root; must contain an ``Annotations`` directory
            with trainval_name.txt, test_name.txt, trainval_gt_label.txt,
            test_gt_label.txt and attr_name.txt.
    """
    # Annotation files live under the dataset root; the original code
    # hard-coded the absolute path, which for the visible caller resolves
    # to exactly this join.
    anno_dir = os.path.join(save_dir, 'Annotations')

    dataset = EasyDict()
    dataset.description = 'wider'
    dataset.root = os.path.join(save_dir, 'Pad_datasets')
    dataset.attributes = attr_words

    trainval_name = _read_lines(os.path.join(anno_dir, 'trainval_name.txt'))
    test_name = _read_lines(os.path.join(anno_dir, 'test_name.txt'))
    trainval_gt_list = _read_binary_labels(os.path.join(anno_dir, 'trainval_gt_label.txt'))
    test_gt_list = _read_binary_labels(os.path.join(anno_dir, 'test_gt_label.txt'))

    dataset.image_name = trainval_name + test_name
    dataset.attr_name = _read_lines(os.path.join(anno_dir, 'attr_name.txt'))

    dataset.label = np.concatenate((np.array(trainval_gt_list), np.array(test_gt_list)), axis=0)
    # Sanity check: 28330 trainval + 29161 test samples, 14 attributes each.
    assert dataset.label.shape == (28330 + 29161, 14)

    # Partition indices follow the concatenation order above:
    # [0, n_trainval) is trainval, the rest is test.
    n_trainval = len(trainval_name)
    dataset.partition = EasyDict()
    dataset.partition.trainval = np.arange(0, n_trainval)
    dataset.partition.test = np.arange(n_trainval, n_trainval + len(test_name))

    # Per-attribute positive frequency over the trainval split, used
    # downstream as class weights.
    dataset.weight_trainval = np.mean(dataset.label[dataset.partition.trainval], axis=0).astype(np.float32)

    with open(os.path.join(save_dir, 'pad.pkl'), 'wb+') as f:
        pickle.dump(dataset, f)


if __name__ == "__main__":
    # Dataset root containing the Annotations directory; the pickle is
    # written back into the same directory.
    generate_data_description('/data/jinjiandong/datasets/WIDER/')
