import os
import torch
import numpy as np
import itertools
from torch.utils.data import Dataset, DataLoader
import struct
import settings
import csv
import pickle as pkl
import cv2
from torch.autograd import Variable
from torch import nn
from torch.nn import functional
from PIL import Image


def csv2database(csvfile='train.csv', csvfile2="annotations_warmup.csv"):
    csvfile = settings.data_dir+'/train'+'/Annotations/'+csvfile
    csvfile2 = settings.data_dir+'/train'+'/Annotations/'+csvfile2
    class_name = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']

    database = {key: {'image_lst': [], 'annotation': [], 'visable': [], 'category': []} for key in class_name}

    keys = ['image_id', 'image_category', 'neckline_left', 'neckline_right', 'center_front', 'shoulder_left',
            'shoulder_right', 'armpit_left', 'armpit_right', 'waistline_left', 'waistline_right', 'cuff_left_in',
            'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'top_hem_left', 'top_hem_right', 'waistband_left',
            'waistband_right', 'hemline_left', 'hemline_right', 'crotch', 'bottom_left_in', 'bottom_left_out',
            'bottom_right_in', 'bottom_right_out']
    
    with open(csvfile, 'r') as f:
        f_csv = csv.DictReader(f)  # use DictReader can generate a Dictionary

        cnt = 0
        # row stands for each row of data in the csv file
        for row in f_csv:
            cnt += 1
            #print(cnt)
            if row['image_category'] not in class_name:
                print(row)

            # split('/')[2] will separate the third element (each picture's name), so the imgae_list is the name of each picture
            database[row['image_category']]['image_lst'].append(row['image_id'].split('/')[2]) 
            vis_array = np.zeros((24, 1))
            cor_array = np.zeros((24, 2))

            for i in range(2,26):
                point = row[keys[i]].split('_')
                x, y, visavle = int(point[0]), int(point[1]), int(point[2])
                cor_array[i-2] = np.array([x, y])
                vis_array[i-2][0] = visavle
                
           # if cnt == 2:
           #     print('cor_array = ', '\n', cor_array)
           #     print('vis_array = ', '\n', vis_array)

            cate_dic = {
                    'blouse'    : lambda: 1,
                    'dress'     : lambda: 2,
                    'outwear'   : lambda: 3,
                    'skirt'     : lambda: 4,
                    'trousers'  : lambda: 5
                    }
            cate = cate_dic[row['image_category']]()
            database[row['image_category']]['category'].append(cate)
                
          
          # database[row['image_category']]['category'].append(row['image_category'])
            database[row['image_category']]['annotation'].append(cor_array)
            database[row['image_category']]['visable'].append(np.array(vis_array))

    with open(csvfile2, 'r') as f:
        f_csv = csv.DictReader(f)
        cnt = 0
        for row in f_csv:
            cnt += 1
            #print(cnt)
            
            if row["image_category"] not in class_name:
                print(row)
                break
            
            database[row["image_category"]]['image_lst'].append(row['image_id'].split('/')[2])
            vis_array = np.zeros((24, 1))
            cor_array = np.zeros((24, 2))
            
            for i in range(2,26):
                point = row[keys[i]].split('_')
                x, y, visavle = int(point[0]), int(point[1]), int(point[2])
                cor_array[i-2] = np.array([x, y])
                vis_array[i-2][0] = visavle
            
            cate_dic = {
                    'blouse'    : lambda: 1,
                    'dress'     : lambda: 2,
                    'outwear'   : lambda: 3,
                    'skirt'     : lambda: 4,
                    'trousers'  : lambda: 5
                    }
            cate = cate_dic[row['image_category']]()
            database[row['image_category']]['category'].append(cate)
                
           # database[row['image_category']]['category'].append(row['image_category'])
            database[row['image_category']]['annotation'].append(cor_array)
            database[row['image_category']]['visable'].append(vis_array)

    if len(database[row["image_category"]]['annotation']) == len(database[row["image_category"]]['image_lst']):
        print("total num {} index create successful".format(len(database[row["image_category"]]['annotation'])))
    return database



class Fashiondataset(Dataset):
    """Torch Dataset over FashionAI keypoint images for one category.

    name              -- 'all', 'blouse', 'dress', 'outwear', 'skirt' or 'trousers'
                         (NOTE(review): the 'all' path returns the raw multi-category
                         dict, which the split code below cannot index -- it appears
                         unfinished; verify before using name='all')
    train_val_or_test -- 'train' -> first 70%, 'val' -> 70%..90%,
                         anything else -> last 10% (the original simply crashed
                         later for any value other than 'train'/'val')
    """

    def __init__(self, name, train_val_or_test):
        super(Fashiondataset, self).__init__()
        self.ann_dir = os.path.join(settings.data_dir, 'train',)
        self.image_dir = os.path.join(settings.data_dir, 'train', 'Images', name)
        self.genrate_ann_dir = settings.prepared_data_dir

        # Build the pickled annotation cache on first use, then reuse it.
        pkl_path = self.genrate_ann_dir + "/trainval.pkl"
        if not os.path.exists(pkl_path):
            alldata = csv2database()
            print("****************")
            with open(pkl_path, 'wb') as fw:
                pkl.dump(alldata, fw)
        else:
            with open(pkl_path, 'rb') as fr:      # open label file
                alldata = pkl.load(fr)
        database = alldata[name] if name != 'all' else alldata

        print(len(database['visable']))
        print(len(database['annotation']))
        self.name_database = name
        total = len(database['image_lst'])

        # Deterministic 70/20/10 split by position.
        if train_val_or_test == 'train':
            start, stop = 0, int(total * 0.7)
        elif train_val_or_test == 'val':
            start, stop = int(total * 0.7), int(total * 0.9)
        else:  # 'test' (or any other value): the remaining tail
            start, stop = int(total * 0.9), total

        self.img_lst = database['image_lst'][start:stop]
        self.ann_lst = database['annotation'][start:stop]
        self.visabel_lst = database['visable'][start:stop]
        self.category_lst = database['category'][start:stop]
        # Use the actual slice length; the original set int(total*0.2) for
        # 'val', which can disagree with the slice by one (IndexError risk).
        self.total_num = stop - start

    def __len__(self):
        return self.total_num

    def __getitem__(self, idx):
        """Return one sample dict: padded image (CHW float32), keypoints,
        visibility, numeric category and the original (h, w) shape."""
        im = cv2.imread(self.image_dir + '/' + self.img_lst[idx])
        im_pad = torch.from_numpy(self.img_padding(im).astype("float32"))
        cords = torch.from_numpy((self.ann_lst[idx]).astype('float32'))
        vis = torch.from_numpy((self.visabel_lst[idx]).astype('float32'))
        ctg = np.zeros((1, 1), dtype="float32")
        ctg[0][0] = self.category_lst[idx]
        category = torch.from_numpy(ctg)
        shape = torch.from_numpy(np.array([im.shape[0], im.shape[1]]))
        # HWC -> CHW. The original used view(3,512,512), which reinterprets
        # the buffer and scrambles channels/pixels instead of transposing.
        im_pad = im_pad.permute(2, 0, 1).contiguous()
        sample = {'data': im_pad, 'label': cords, 'visable': vis, 'category': category, 'shape': shape}
        return sample

    def img_padding(self, imgs, h_dst=settings.padding['height'], w_dst=settings.padding['width']):
        """Pad `imgs` with black borders to (h_dst, w_dst), anchored top-left
        so keypoint coordinates remain valid without an offset."""
        h, w = imgs.shape[0], imgs.shape[1]
        top, left = 0, 0
        # Honor the size parameters (the original hard-coded 512).
        bottom, right = h_dst - h, w_dst - w
        # copyMakeBorder's order is (top, bottom, left, right); the original
        # passed (top, bottom, right, left), padding the LEFT side and shifting
        # the image away from the annotated coordinates.
        return cv2.copyMakeBorder(imgs, top, bottom, left, right, cv2.BORDER_CONSTANT)


def showAnnotaion(img, ann, shape, w=3, h=3):
    """Draw a (2h x 2w) solid marker at each annotated keypoint of `img`.

    img   -- HxWx3 uint8 image (modified in place and returned); channel 0 is
             set to 255 and channels 1,2 to 0 at each marker
    ann   -- torch tensor of 24 (x, y) keypoints; x == -1 marks a missing point
    shape -- original (height, width) of the image before padding
    w, h  -- marker half-extents in x and y
    """
    height, width = int(shape[0]), int(shape[1])
    # NOTE(review): these offsets assume the image was padded CENTERED into a
    # 512x512 canvas; Fashiondataset.img_padding anchors top-left, where both
    # offsets would be 0 -- confirm which padding scheme the caller used.
    top, right = 256 - int(height / 2), 256 - int(width / 2)
    pts = ann.numpy().astype('int')
    for i in range(24):
        # Test the -1 sentinel BEFORE adding the offset; the original offset
        # first, so missing points were no longer -1 and got drawn anyway.
        if pts[i][0] != -1:
            x = pts[i][0] + right
            y = pts[i][1] + top
            img[y - h:y + h, x - w:x + w, 0] = 255
            img[y - h:y + h, x - w:x + w, 1] = 0
            img[y - h:y + h, x - w:x + w, 2] = 0
    return img


def critc(pred, gt, vis, kind):  # all inputs are torch Variables/tensors
    """Normalized-error metric: mean over the batch of the average normalized
    keypoint distance over visible points.

    pred, gt -- (B, 24, 2) keypoint coordinates
    vis      -- (B, 24, 1); a point counts only where vis == 1
    kind     -- category name; tops normalize by the distance between points
                5 and 6, bottoms by points 15 and 16
    """
    # per-point Euclidean distance
    dk = torch.sum((pred - gt) ** 2, 2) ** 0.5
    # per-sample normalizer
    if kind in ['blouse', 'dress', 'outwear']:
        sk = torch.sum((gt[:, 5, :] - gt[:, 6, :]) ** 2, 1, keepdim=True) ** 0.5
    else:
        sk = torch.sum((gt[:, 15, :] - gt[:, 16, :]) ** 2, 1, keepdim=True) ** 0.5
    dk = dk / sk
    # The original allocated accumulators with .cuda() (crashing on CPU-only
    # hosts) and summed with a Python double loop; a visibility mask computes
    # the same sums vectorized, on whatever device the inputs live on.
    mask = (vis[:, :, 0] == 1).type_as(dk)
    ans = torch.sum(dk * mask, 1, keepdim=True)
    ans_cnt = torch.sum(mask, 1, keepdim=True)
    return torch.sum(ans / ans_cnt) / (ans.size()[0])


class ccf_blouse():
    """Hand-rolled forward/backward pair computing per-point normalized
    distances for the blouse category (assumes batch size 1)."""

    def __init__(self):
        super(ccf_blouse, self).__init__()

    def forward(self, points, gt, vis):
        """Return a FloatTensor of normalized distances for visible points.

        points, gt -- (1, 24, 2) tensors; vis -- (1, 24, 1) visibility flags.
        Distances are normalized by the distance between gt points 5 and 6.
        """
        # (Debug prints from the original — several mislabeled — removed.)
        visible = vis.numpy()[0].reshape(-1) == 1
        tp = points.numpy()[0]
        tgt = gt.numpy()[0]
        tp1 = tp[visible, :]
        tgt1 = tgt[visible, :]
        dk = np.sum((tp1 - tgt1) ** 2, 1) ** 0.5
        sk = np.sum((tgt[5, :] - tgt[6, :]) ** 2) ** 0.5
        dk = dk / sk
        return torch.FloatTensor(dk)

    def backward(self, *grad_outputs):
        # The original omitted `self`, so the bound instance was captured as
        # grad_outputs[0] and returned along with the gradients.
        return grad_outputs

class ccfblouse(nn.Module):
    """Loss module: mean over the batch of summed per-point distance terms.

    NOTE(review): the 0.2 exponent looks like a typo for 0.5 (the Euclidean
    distance used everywhere else in this file) -- confirm before changing.
    """

    def __init__(self):
        super(ccfblouse, self).__init__()

    def forward(self, points, gt, vis):
        # (points - gt)^2 summed over the coordinate axis, raised to 0.2,
        # then summed over the 24 keypoints of each sample.
        squared_diff = (points - gt) ** 2
        per_point = torch.sum(squared_diff, 2) ** 0.2
        per_sample = torch.sum(per_point, 1)
        return torch.mean(per_sample)


if __name__ == '__main__':
    # Smoke test: build the blouse validation split, inspect one sample, and
    # construct a DataLoader. Relies on the dataset layout under
    # settings.data_dir and (below) on CUDA being available.
    btzise = 1  # batch size ("btzise" is a typo kept for consistency with uses below)
    
    # Hard-coded sample path from the original author's machine; only used to
    # report whether the file exists.
    if not os.path.exists(r'/data/Datasets/guanls/ccf/train/Images/blouse/2a2c7ecd6d96fbef463210f7bb48c7cc.jpg'):
        print("do not exits")
    
    # NOTE(review): `img` is loaded but never used afterwards.
    img = cv2.imread(r'/data/Datasets/guanls/ccf/train/Images/blouse/2a2c7ecd6d96fbef463210f7bb48c7cc.jpg')
    class_name = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']
    dataset = Fashiondataset(class_name[0], 'val')
   # dataset = Fashiondataset('all', 'train')
    row = dataset.__getitem__(1)
    print(row['category'])
    print(row['visable'].shape)
 #  print(row)

    dataloaders = \
            DataLoader(dataset, batch_size=btzise,
                       shuffle=True, num_workers=1)
    it = itertools.cycle(dataloaders)
    critc = ccfblouse().cuda()  # NOTE(review): shadows the module-level critc() function
    database = csv2database()
# NOTE(review): the triple-quoted block below is disabled code. Because it sits
# at module level, it is evaluated as a no-op string literal on every import.
''' 
    for i in range(10):
        batch = next(it)
        imgs = batch['data']
        ann = Variable(batch['label']).cuda()
        shape = batch['shape']
        vis = Variable(batch['visable']).cuda()
        pred = Variable(torch.zeros(btzise, 24, 2), requires_grad=True).cuda()
        loss = critc(pred,ann,vis)
        print("loss  ********",critc(pred, ann, vis))
        
        loss.backward()
        print(type(imgs))
        
        imgs = imgs[0]
        imgs = imgs.view(512,512,3)
        imgs = imgs.numpy()
        print(np.max(imgs), np.min(imgs))
        imgs = imgs.astype('uint8')
'''
