import math
import os
import random
import sys

import cv2
import numpy as np
import torch
import torch.utils.data as data

from utils.augment import *
from utils.utils import plot_gt
from utils.bbox import quad_2_rbox, constraint_theta

from PIL import Image, ImageDraw

class HRSID(data.Dataset):
    """HRSID SAR ship-detection dataset yielding CenterNet-style rotated-box targets.

    Each item is a dict with:
      'input'   : normalized float32 image, CHW layout;
      'hm'      : per-class center heatmap at 1/4 input resolution;
      'reg_mask': 1 for valid object slots, 0 for padding (up to max_objs);
      'ind'     : flattened heatmap index of each object center;
      'wh'      : (w, h) of each rotated box on the output grid;
      'ang'     : rotation angle of each box;
      'reg'     : sub-pixel center offset (always included, see reg_offset_flag).
    """

    def __init__(self, args, split):
        # NOTE(review): `split` is unused; the image list always comes from
        # ImageSets/train.txt — confirm whether a val split was intended.
        self.image_set_path = os.path.join(args.data_path, 'ImageSets', 'train.txt')
        img_path = os.path.join(args.data_path, 'JPEGImages')

        self.phase = args.phase
        self.image_list = self._load_image_names(img_path)
        # Optionally keep only the leading fraction of the image list.
        # Narrowed from a bare `except: pass` so unrelated bugs are not hidden:
        # only a missing/None split_ratio falls through to "use everything".
        try:
            if 0 < args.split_ratio < 1:
                split_size = int(np.clip(args.split_ratio * len(self.image_list),
                                         1, len(self.image_list)))
                self.image_list = self.image_list[:split_size]
        except (AttributeError, TypeError):
            pass

        self.classes = ('__background__', 'ship')
        self.num_classes = len(self.classes)
        # Map class name -> index ('__background__' -> 0, 'ship' -> 1).
        self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))

        self.in_h = args.input_h
        self.in_w = args.input_w
        self.max_objs = args.max_obj_per_img

        # Per-channel normalization statistics (HRSID-specific).
        self.mean = np.array([0.5194416012442385, 0.5378052387430711, 0.533462090585746],
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array([0.3001546018824507, 0.28620901391179554, 0.3014112676161966],
                            dtype=np.float32).reshape(1, 1, 3)
        # Random-rescale choices for augmentation; currently fixed at 1 (disabled).
        self.rand_scales = np.array([1])

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, index):
        im_path = self.image_list[index]
        img = cv2.imread(im_path, cv2.IMREAD_COLOR)
        if img is None:
            # cv2.imread silently returns None on a bad path; fail loudly instead.
            raise FileNotFoundError('Failed to read image: {}'.format(im_path))
        # roidb: {'boxes': (N, 8) corner quads, 'gt_classes': (N,) labels}
        roidb = self._load_annotation(im_path)

        gt_inds = np.where(roidb['gt_classes'] != 0)[0]  # background == 0
        bboxes = roidb['boxes'][gt_inds, :]
        classes = roidb['gt_classes'][gt_inds]

        gt_boxes = np.zeros((len(gt_inds), 5), dtype=np.float32)

        height, width = img.shape[0], img.shape[1]
        center = np.array([width / 2., height / 2.], dtype=np.float32)
        keep_res = False
        if keep_res:
            # Keep native resolution, padded up to the next multiple of 32.
            input_h = (height | 31) + 1
            input_w = (width | 31) + 1
            scale = np.array([input_w, input_h], dtype=np.float32)
        else:
            scale = max(height, width) * 1.0
            input_h, input_w = self.in_h, self.in_w

        scale = max(height, width) * 1.0  # longest image side

        # Random crop center inside a safe border.
        # NOTE(review): this randomization runs in every phase, not just
        # 'train' — confirm whether evaluation should use the image center.
        w_border = get_border(128, width)
        h_border = get_border(128, height)
        center[0] = np.random.randint(low=w_border, high=width - w_border)
        center[1] = np.random.randint(low=h_border, high=height - h_border)
        if self.phase == 'train':
            flipped_w = False
            flipped_h = False
            scale = scale * np.random.choice(self.rand_scales)
            if np.random.random() < 0.5:
                flipped_w = True
                img = img[:, ::-1, :]  # horizontal flip
                center[0] = width - center[0] - 1
            if np.random.random() < 0.5:
                flipped_h = True
                img = img[::-1, :, :]  # vertical flip
                center[1] = height - center[1] - 1

        trans_input = get_affine_transform(center, scale, 0, [input_w, input_h])
        inp = cv2.warpAffine(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)

        inp = inp.astype(np.float32) / 255.
        inp = (inp - self.mean) / self.std  # channel-wise normalization
        inp = inp.transpose(2, 0, 1)        # HWC -> CHW

        down_ratio = 4
        output_h = input_h // down_ratio
        output_w = input_w // down_ratio
        trans_output = get_affine_transform(center, scale, 0, [output_w, output_h])

        num_classes = self.num_classes - 1  # heatmap excludes background
        hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
        cxcy = np.zeros((self.max_objs, 2), dtype=np.float32)
        wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        ang = np.zeros((self.max_objs, 1), dtype=np.float32)
        reg = np.zeros((self.max_objs, 2), dtype=np.float32)
        ind = np.zeros((self.max_objs), dtype=np.int64)
        reg_mask = np.zeros((self.max_objs), dtype=np.uint8)

        draw_gaussian = draw_umich_gaussian

        for k, bbox_ in enumerate(bboxes):
            # Quadrilateral -> rotated box (x1, y1, x2, y2, angle).
            gt_boxes[k, :5] = quad_2_rbox(np.array(bbox_), inp)
            bbox = gt_boxes[k, :4]
            an = gt_boxes[k, 4]
            cls_id = int(classes[k]) - 1
            if self.phase == 'train':
                if flipped_w:
                    bbox[[0, 2]] = width - bbox[[2, 0]] - 1
                    an = 180 - an
                if flipped_h:
                    bbox[[1, 3]] = height - bbox[[3, 1]] - 1
                    an = 180 - an
            # Map box corners onto the output grid and clip to its bounds.
            bbox[:2] = affine_transform(bbox[:2], trans_output)
            bbox[2:] = affine_transform(bbox[2:], trans_output)
            bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
            bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
            h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]

            if h > 1 and w > 1:
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(1.5 * radius))  # 1.5x widening of the Gaussian stamp
                ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
                              dtype=np.float32)
                ct_int = ct.astype(np.int32)
                draw_gaussian(hm[cls_id], ct_int, radius)
                wh[k] = 1. * w, 1. * h
                ang[k] = 1. * an
                cxcy[k] = ct_int[0], ct_int[1]
                ind[k] = ct_int[1] * output_w + ct_int[0]  # flattened center index
                reg[k] = ct - ct_int                       # sub-pixel offset
                reg_mask[k] = 1

        ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind,
               'wh': wh, 'ang': ang}
        reg_offset_flag = True
        if reg_offset_flag:
            ret.update({'reg': reg})
        return ret

    def _load_image_names(self, img_path):
        """Return full .jpg paths for every image id listed in the image-set file."""
        image_set_file = self.image_set_path
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_list = [os.path.join(img_path, x.strip() + ".jpg") for x in f.readlines()]
        return image_list

    def _load_annotation(self, index):
        """Load quad annotations for one image path.

        Reads <root>/Annotations_txt/<image_id>.txt; each non-empty line starts
        with 8 numbers (quadrilateral corners).  Every object is labeled 'ship'.
        """
        root_dir, img_name = os.path.split(index)
        filename = os.path.join(root_dir.replace('JPEGImages', 'Annotations_txt'),
                                os.path.basename(img_name).split(".")[0] + '.txt')

        boxes, gt_classes = [], []
        with open(filename, 'r', encoding='utf-8-sig') as f:
            for obj in f.read().split('\n'):
                if len(obj) != 0:
                    class_name = 'ship'
                    box = obj.split()[0:8]
                    label = self.class_to_ind[class_name]
                    # float() instead of eval(): same result for numeric tokens,
                    # without executing arbitrary code from annotation files.
                    box = [float(x) for x in box]
                    boxes.append(box)
                    gt_classes.append(label)
        return {'boxes': np.array(boxes, dtype=np.float32),
                'gt_classes': np.array(gt_classes)}

    def display(self, boxes, img_path):
        """Debug helper: draw quad boxes on the image and show it in a window."""
        img = cv2.imread(img_path)
        for box in boxes:
            # cv2.polylines requires integer point arrays.
            coors = box.reshape(4, 2).astype(np.int32)
            img = cv2.polylines(img, [coors], True, (0, 0, 255), 2)
        cv2.imshow(img_path, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def return_class(self, id):
        """Map a numeric class id back to its class name."""
        id = int(id)
        return self.classes[id]
    

def quad_2_rbox(quads, img, mode='xyxya'):
    """Convert 8-point quadrilaterals to rotated boxes.

    Args:
        quads: (N, 8) or (8,) array of corner coordinates (x1..y4).
        img: unused; kept for interface compatibility with callers.
        mode: 'xyxya' -> (x1, y1, x2, y2, angle); 'xywha' -> (cx, cy, w, h, angle).

    Returns:
        (N, 5) float32 array. The box "width" slot holds the LONG side and the
        "height" slot the SHORT side; the OpenCV angle in (-90, 0] is shifted
        by +180 or +90 so the long side defines the orientation.
    """
    if len(quads.shape) == 1:
        quads = quads[np.newaxis, :]
    rboxes = np.zeros((quads.shape[0], 5), dtype=np.float32)
    for i, quad in enumerate(quads):
        # Minimum-area rotated rectangle around the four corners.
        (cx, cy), (w_, h_), t = cv2.minAreaRect(quad.reshape([4, 2]))
        long_side = max(w_, h_)
        short_side = min(w_, h_)
        # Normalize the angle so it always refers to the long side.
        if w_ == long_side:
            t = t + 180
        else:
            t = t + 90
        rboxes[i, :] = np.array([cx, cy, long_side, short_side, t])
    if mode == 'xyxya':
        # (cx, cy, w, h) -> (x1, y1, x2, y2)
        rboxes[:, 0:2] = rboxes[:, 0:2] - rboxes[:, 2:4] * 0.5
        rboxes[:, 2:4] = rboxes[:, 0:2] + rboxes[:, 2:4]
    return rboxes

def grayscale(image):
    """Convert a BGR image to a single-channel grayscale image."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray

def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """Add PCA-based lighting noise to `image` in place (AlexNet-style)."""
    weights = data_rng.normal(scale=alphastd, size=(3,))
    image += eigvec.dot(eigval * weights)
    
def blend_(alpha, image1, image2):
    """In-place blend: image1 <- alpha*image1 + (1-alpha)*image2.

    NOTE: image2 is also scaled in place by (1 - alpha) as a side effect.
    """
    np.multiply(image1, alpha, out=image1)
    np.multiply(image2, 1 - alpha, out=image2)
    np.add(image1, image2, out=image1)
    
def saturation_(data_rng, image, gs, gs_mean, var):
    """Randomly scale saturation in place by blending toward grayscale `gs`."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
    
def brightness_(data_rng, image, gs, gs_mean, var):
    """Randomly scale brightness in place; `gs` and `gs_mean` are unused here."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    np.multiply(image, factor, out=image)
    
def contrast_(data_rng, image, gs, gs_mean, var):
    """Randomly adjust contrast in place by blending toward the grayscale mean."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
# Color augmentation: brightness / contrast / saturation in random order,
# followed by PCA lighting noise.  Operates on `image` in place.
def color_aug(data_rng, image, eig_val, eig_vec):
    """Apply randomized photometric augmentation to `image` in place.

    Uses the module-level `random` import (previously this name was only
    reachable through `from utils.augment import *`, a latent NameError).
    """
    functions = [brightness_, contrast_, saturation_]
    random.shuffle(functions)
    gs = grayscale(image)
    gs_mean = gs.mean()
    for f in functions:
        f(data_rng, image, gs, gs_mean, 0.4)
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
    
def get_3rd_point(a, b):
    """Return the third point completing a right angle at `b` (rotate b->a by 90 deg)."""
    dx, dy = a[0] - b[0], a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)

def get_dir(src_point, rot_rad):
    """Rotate a 2-D point by `rot_rad` radians (counter-clockwise); returns a list."""
    sn = np.sin(rot_rad)
    cs = np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]
    
def get_affine_transform(center, scale, rot, output_size,
                         shift=np.array([0, 0], dtype=np.float32), inv=0):
    """Build the 2x3 affine matrix mapping a (center, scale, rot) crop onto an
    output_size = [w, h] canvas; pass inv=1 for the inverse mapping."""
    if not isinstance(scale, (np.ndarray, list)):
        # A scalar scale means a square crop.
        scale = np.array([scale, scale], dtype=np.float32)

    src_w = scale[0]
    dst_w, dst_h = output_size[0], output_size[1]

    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three point pairs fully determine the affine map: the center, a rotated
    # direction point, and a third point completing the right angle.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        return cv2.getAffineTransform(np.float32(dst), np.float32(src))
    return cv2.getAffineTransform(np.float32(src), np.float32(dst))

def gaussian2D(shape, sigma=1):
    """Return a 2-D Gaussian kernel of the given (h, w) shape with peak value 1."""
    half_h, half_w = ((s - 1.) / 2. for s in shape)
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # Zero out the numerically negligible tail.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel

def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Splat a 2-D Gaussian of the given radius onto `heatmap` at `center` in place.

    Overlapping stamps are merged with an element-wise max, so stronger peaks win.
    Returns the same heatmap for convenience.
    """
    diameter = 2 * radius + 1
    kernel = gaussian2D((diameter, diameter), sigma=diameter / 6)

    cx, cy = int(center[0]), int(center[1])
    h, w = heatmap.shape[0:2]

    # Clip the stamp so it stays inside the heatmap bounds.
    left = min(cx, radius)
    right = min(w - cx, radius + 1)
    top = min(cy, radius)
    bottom = min(h - cy, radius + 1)

    hm_patch = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    g_patch = kernel[radius - top:radius + bottom, radius - left:radius + right]
    if min(g_patch.shape) > 0 and min(hm_patch.shape) > 0:  # TODO debug
        np.maximum(hm_patch, g_patch * k, out=hm_patch)
    return heatmap
    
def coco_box_to_bbox(box):
    """Convert (cx, cy, w, h, angle) into a ((x1, y1, x2, y2) float32 array, angle)."""
    cx, cy, w, h = box[0], box[1], box[2], box[3]
    corners = np.array([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2],
                       dtype=np.float32)
    return corners, float(box[4])

def affine_transform(pt, t):
    """Apply a 2x3 affine matrix `t` to the 2-D point `pt`; returns the mapped (x, y)."""
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    return t.dot(homogeneous)[:2]

def gaussian_radius(det_size, min_overlap=0.7):
    """CenterNet heuristic: a radius keeping IoU >= min_overlap for a (h, w) box.

    NOTE(review): the quadratic roots are taken as (b + sqrt(disc)) / 2 rather
    than / (2a), matching the original CenterNet implementation; preserved here
    for training-compatibility.
    """
    height, width = det_size

    # Case 1: both corners shift inward together.
    b1 = height + width
    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
    r1 = (b1 + np.sqrt(b1 ** 2 - 4 * c1)) / 2

    # Case 2: one corner fixed, the other shifted outward.
    b2 = 2 * (height + width)
    c2 = (1 - min_overlap) * width * height
    r2 = (b2 + np.sqrt(b2 ** 2 - 16 * c2)) / 2

    # Case 3: one corner fixed, the other shifted inward.
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * width * height
    r3 = (b3 + np.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2

    return min(r1, r2, r3)

def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
    """Paint `value` into `regmap` wherever a new Gaussian stamp dominates `heatmap`.

    Args:
        regmap: (dim, H, W) dense regression map; updated in place and returned.
        heatmap: (H, W) map used to decide which pixels the new object owns.
        center: (x, y) center of the stamp.
        value: scalar or length-dim vector painted into regmap.
        radius: Gaussian radius; the stamp spans 2*radius+1 per side.
        is_offset: if True and dim == 2, paint per-pixel offsets back to the
            center instead of a constant value.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
    dim = value.shape[0]
    # NOTE(review): reg is allocated at (2*diameter+1) per side while the
    # gaussian is only `diameter` per side; the slices below never read past
    # the first diameter x diameter region — confirm the oversize is intentional.
    reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value
    if is_offset and dim == 2:
        # Per-pixel (dx, dy) distances from each stamp pixel to the center.
        delta = np.arange(diameter*2+1) - radius
        reg[0] = reg[0] - delta.reshape(1, -1)
        reg[1] = reg[1] - delta.reshape(-1, 1)
  
    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]
    
    # Clip the stamp so it stays inside the map boundaries.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)
    
    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom,
                             radius - left:radius + right]
    masked_reg = reg[:, radius - top:radius + bottom,
                      radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
        # Pixels where the new Gaussian beats the existing heatmap take the new value.
        idx = (masked_gaussian >= masked_heatmap).reshape(1, masked_gaussian.shape[0], masked_gaussian.shape[1])
        masked_regmap = (1-idx) * masked_regmap + idx * masked_reg
    regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
    return regmap


def get_border(border, size):
    """Shrink `border` by powers of two until a crop center can fit inside `size`.

    Returns the largest border // 2**k such that size > 2 * (border // 2**k).
    """
    divisor = 1
    # size - b <= b  is equivalent to  size <= 2 * b  (exact in integer math).
    while size <= 2 * (border // divisor):
        divisor *= 2
    return border // divisor
