"""
林业病虫害检测
"""
import os
import numpy as np
import xml.etree.ElementTree as ET
import cv2
import paddle.io
from PIL import Image,ImageEnhance
import random
import matplotlib.pyplot as plt
import time

from Darknet53 import YOLOv3

INSECT_NAMES = ['Boerner','Leconte','Linnaeus','acuminatus','armandi','coleoptera','linnaeus']

def get_insect_names():
    """
    Return a dict mapping each insect category name to an integer label:
    {'Boerner': 0, 'Leconte': 1, 'Linnaeus': 2, 'acuminatus': 3,
     'armandi': 4, 'coleoptera': 5, 'linnaeus': 6}

    The label is the index of the name in INSECT_NAMES.
    """
    # Dict comprehension replaces the manual loop-and-assign of the original.
    return {name: idx for idx, name in enumerate(INSECT_NAMES)}

# cname2cid = get_insect_names()
# print(cname2cid)

def get_annotations(cname2cid,datadir):
    """
    Parse every VOC-style XML annotation under <datadir>/annotations/xmls and
    return a list of per-image record dicts.

    :param cname2cid: dict mapping class name -> integer label (see get_insect_names)
    :param datadir: dataset root; images are expected at <datadir>/images/<id>.jpeg
    :return: list of dicts with keys im_file, im_id, h, w, is_crowd, gt_class,
             gt_bbox (xywh in absolute pixels), gt_poly, difficult
    """
    filenames = os.listdir(os.path.join(datadir,'annotations','xmls'))
    records = []
    ct = 0
    for fname in filenames:
        fid = fname.split('.')[0]
        fpath = os.path.join(datadir,'annotations','xmls',fname)
        img_file = os.path.join(datadir,'images',fid+'.jpeg')
        tree = ET.parse(fpath)

        # Fall back to the running file counter when the XML has no <id> element.
        if  tree.find('id') is None:
            im_id = np.array([ct])
        else:
            im_id = np.array([int(tree.find('id').text)])

        objs = tree.findall('object')
        im_w = float(tree.find('size').find('width').text)
        im_h = float(tree.find('size').find('height').text)
        gt_bbox = np.zeros((len(objs),4),dtype=np.float32)
        gt_class = np.zeros((len(objs),),dtype=np.int32)
        is_crowd = np.zeros((len(objs),),dtype=np.int32)
        difficult = np.zeros((len(objs),),dtype=np.int32)
        for i ,obj in enumerate(objs):
            cname = obj.find('name').text
            gt_class[i] = cname2cid[cname]
            _difficult = int(obj.find('difficult').text)
            x1 = float(obj.find('bndbox').find('xmin').text)
            y1 = float(obj.find('bndbox').find('ymin').text)
            x2 = float(obj.find('bndbox').find('xmax').text)
            y2 = float(obj.find('bndbox').find('ymax').text)
            # Clip the corner coordinates to the image bounds.
            x1 = max(0,x1)
            y1 = max(0,y1)
            x2 = min(im_w - 1,x2)
            y2 = min(im_h - 1,y2)

            # Ground-truth boxes are stored in xywh format
            # (center x, center y, width, height), in absolute pixels.
            gt_bbox[i] = [(x1+x2)/2.0,(y1+y2)/2.0,x2-x1+1.,y2-y1+1.]
            is_crowd[i] = 0
            difficult[i] = _difficult
        voc_rec = {
            'im_file':img_file,
            'im_id':im_id,
            'h':im_h,
            'w':im_w,
            'is_crowd':is_crowd,
            'gt_class': gt_class,
            'gt_bbox': gt_bbox,
            'gt_poly':[],
            'difficult': difficult
        }

        # Only keep images that contain at least one annotated object;
        # ct still advances so im_id fallbacks stay aligned with file order.
        if len(objs) != 0:
            records.append(voc_rec)
        ct += 1

    return records

# TRAINDIR = 'data/insects/train'
# TESTDIR = 'data/insects/test'
# VALIDDIR = 'data/insects/val'
# cname2cid = get_insect_names()
# records = get_annotations(cname2cid,TRAINDIR)

# print('records num: {}\n records[0]:{}'.format(len(records),records[0]))

def get_bbox(gt_bbox,gt_class):
    """
    Pad (or truncate) ground-truth boxes and classes to a fixed count.

    A detection image usually contains several objects; at most MAX_NUM = 50
    real boxes are kept per image.  When there are fewer than 50, the unused
    tail of gt_bbox2 / gt_class2 stays zero.

    :param gt_bbox: (N, 4) array of boxes in xywh format
    :param gt_class: (N,) array of integer class labels
    :return: ((50, 4) box array, (50,) class array)
    """
    MAX_NUM = 50
    gt_bbox2 = np.zeros((MAX_NUM,4))
    gt_class2 = np.zeros((MAX_NUM,))
    # Bound the loop by MAX_NUM up front: the original checked `i >= MAX_NUM`
    # only AFTER writing gt_bbox2[i], which raised IndexError whenever an
    # image carried more than 50 ground-truth boxes.
    for i in range(min(len(gt_bbox), MAX_NUM)):
        gt_bbox2[i,:] = gt_bbox[i,:]
        gt_class2[i] = gt_class[i]

    return gt_bbox2,gt_class2

def get_img_data_from_file(record):
    """
    Load the image for one annotation record and return it together with
    fixed-size, *relative* ground-truth boxes.

    record is a dict as produced by get_annotations:
        {
          'im_file': img_file, 'im_id': im_id, 'h': im_h, 'w': im_w,
          'is_crowd': is_crowd, 'gt_class': gt_class, 'gt_bbox': gt_bbox,
          'gt_poly': [], 'difficult': difficult
        }

    :return: (RGB image array, (50, 4) relative xywh boxes, (50,) labels, (h, w))
    """
    im_file = record['im_file']
    h = record['h']
    w = record['w']
    # NOTE(review): is_crowd and difficult are read here but never used.
    is_crowd = record['is_crowd']
    gt_class = record['gt_class']
    gt_bbox = record['gt_bbox']
    difficult = record['difficult']

    img = cv2.imread(im_file)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)

    # Sanity-check that the size recorded in the XML matches the actual file.
    # NOTE(review): asserts are stripped under `python -O`; consider raising.
    assert img.shape[0] == int(h), "image height of {} inconsistent in record({}) and img file({})".format(im_file,h,img.shape[0])

    assert img.shape[1] == int(w), "image width of {} inconsistent in record({}) and img file({})".format(im_file,w,img.shape[1])

    gt_boxes, gt_labels = get_bbox(gt_bbox,gt_class)

    # Convert gt_bbox from absolute pixels to values relative to image size.
    gt_boxes[:,0] = gt_boxes[:,0] / float(w)
    gt_boxes[:,1] = gt_boxes[:,1] / float(h)
    gt_boxes[:,2] = gt_boxes[:,2] / float(w)
    gt_boxes[:,3] = gt_boxes[:,3] / float(h)

    return img, gt_boxes,gt_labels,(h,w)

# record = records[0]
# img,gt_boxes,gt_labels,scales = get_img_data_from_file(record)
# print('img shape: {}, \n gt_labels: {},\n scales: {}\n'.format(img.shape,gt_labels,scales))


def random_distort(img):
    """Randomly jitter brightness, contrast and color, in a random order."""

    def _make_op(enhancer):
        # Build an op that scales one PIL enhancer by a random factor in [0.5, 1.5].
        def _op(image, lower=0.5, upper=1.5):
            factor = np.random.uniform(lower, upper)
            return enhancer(image).enhance(factor)
        return _op

    ops = [_make_op(ImageEnhance.Brightness),
           _make_op(ImageEnhance.Contrast),
           _make_op(ImageEnhance.Color)]
    np.random.shuffle(ops)

    result = Image.fromarray(img)
    for op in ops:
        result = op(result)

    return np.asarray(result)

def visualize(srcimg,img_enhance):
    """Show the source image and its augmented version side by side."""
    plt.figure(num=2,figsize=(6,12))

    panels = [(srcimg, 'Src Image'), (img_enhance, 'Enhance Image')]
    for pos, (image, title) in enumerate(panels, start=1):
        plt.subplot(1,2,pos)
        plt.title(title,color='#0000FF')
        plt.axis('off')  # hide the axes
        plt.imshow(image)

    plt.show()

# image_path = records[0]['im_file']
# print("read image from file {}".format(image_path))
# srcimg = Image.open(image_path)
# # 将PIL读取的图像转换成array类型
# srcimg = np.array(srcimg)
#
# # 对原图做随机改变亮暗、对比度和颜色等 数据增强
# img_enhance = random_distort(srcimg)
# visualize(srcimg,img_enhance)

def random_expand(img,
                  gtboxes,
                  max_ratio=4,
                  fill=None,
                  keep_ratio=True,
                  thresh=0.5):
    """
    Randomly paste the image onto a larger canvas ("expand" augmentation).

    With probability `thresh` (and whenever max_ratio < 1.0) the inputs are
    returned unchanged.  Otherwise a canvas up to `max_ratio` times the
    original size is created, optionally pre-filled with `fill`, and the image
    is placed at a random offset.  Boxes are given and returned as relative
    xywh.

    :param img: HxWxC image array
    :param gtboxes: (N, 4) relative xywh boxes; the input array is NOT mutated
    :param max_ratio: maximum expansion factor per axis
    :param fill: optional per-channel fill color, values in [0, 1]
    :param keep_ratio: expand x and y by the same ratio when True
    :param thresh: probability of skipping the expansion
    :return: (expanded image as uint8, adjusted relative boxes)
    """
    if random.random() > thresh:
        return img,gtboxes

    if max_ratio < 1.0:
        return img,gtboxes

    h,w,c = img.shape
    ratio_x = random.uniform(1,max_ratio)
    if keep_ratio:
        ratio_y = ratio_x
    else:
        ratio_y = random.uniform(1,max_ratio)

    oh = int(h * ratio_y)
    ow = int(w * ratio_x)
    off_x = random.randint(0,ow-w)
    off_y = random.randint(0,oh-h)

    out_img = np.zeros((oh,ow,c))
    if fill and len(fill) == c:
        for i in range(c):
            out_img[:,:,i] = fill[i] * 255.0

    out_img[off_y:off_y + h,off_x:off_x + w,:] = img

    # Work on a copy so the caller's array is not modified: the original
    # wrote the adjusted coordinates through the input argument in place.
    gtboxes = gtboxes.copy()
    gtboxes[:, 0] = ((gtboxes[:, 0] * w) + off_x) / float(ow)
    gtboxes[:, 1] = ((gtboxes[:, 1] * h) + off_y) / float(oh)
    gtboxes[:, 2] = gtboxes[:,2] / ratio_x
    gtboxes[:, 3] = gtboxes[:,3] / ratio_y

    return out_img.astype('uint8'), gtboxes

# srcimg_gtbox = records[0]['gt_bbox']
# srcimg_label = records[0]['gt_class']
#
# img_enhance,new_gtbox = random_expand(srcimg,srcimg_gtbox)
# visualize(srcimg,img_enhance)

def random_interp(img,size,interp=None):
    """
    Resize `img` to size x size, using a random interpolation method when
    none is specified.

    :param img: HxWxC image
    :param size: target side length in pixels
    :param interp: an OpenCV interpolation flag; if None (or not one of the
        supported flags) a random method is chosen
    :return: the resized image
    """
    interp_method = [
        cv2.INTER_NEAREST,
        cv2.INTER_LINEAR,
        cv2.INTER_AREA,
        cv2.INTER_CUBIC,
        cv2.INTER_LANCZOS4,
    ]
    # Compare against None explicitly: cv2.INTER_NEAREST == 0 is falsy, so
    # the original `if not interp` silently discarded an explicitly requested
    # nearest-neighbour interpolation and picked a random one instead.
    if interp is None or interp not in interp_method:
        interp = random.choice(interp_method)
    h, w, _ = img.shape
    im_scale_x = size / float(w)
    im_scale_y = size / float(h)
    img = cv2.resize(img,None,None,fx=im_scale_x,fy=im_scale_y,interpolation=interp)
    return img

#  对原图做 随机缩放
# random_size = 640
# img_enhance = random_interp(srcimg,random_size)
# visualize(srcimg,img_enhance)
# print('src image shape:{},resize image shape:{}'.format(srcimg.shape,img_enhance.shape))

def random_flip(img,gtboxes,thresh=0.5):
    """Randomly mirror the image left-right and adjust the box centers."""
    if random.random() <= thresh:
        # No flip this time: return the inputs untouched.
        return img,gtboxes
    mirrored = img[:,::-1,:]
    gtboxes[:, 0] = 1.0 - gtboxes[:,0]  # reflect relative center-x
    return mirrored,gtboxes

# 对原图做 随机改变亮暗，对比度和颜色等 数据增强
# img_enhance,box_enhance = random_flip(srcimg,srcimg_gtbox)
# visualize(srcimg,img_enhance)


# Randomly shuffle the order of the ground-truth boxes (labels move with them)
def shuffle_gtbox(gtbox,gtlabel):
    """Apply one random permutation to boxes and labels jointly."""
    combined = np.concatenate([gtbox,gtlabel[:,np.newaxis]],axis=1)
    order = np.arange(combined.shape[0])
    np.random.shuffle(order)
    shuffled = combined[order,:]
    return shuffled[:, :4],shuffled[:,4]

# Full image-augmentation pipeline, applied in sequence
def image_augment(img,gtboxes,gtlabels,size,means=None):
    """
    Run every augmentation step: color jitter, random canvas expansion,
    resize to `size`, random horizontal flip, and a shuffle of the
    ground-truth box order.
    """
    img = random_distort(img)                                # brightness / contrast / color
    img,gtboxes = random_expand(img,gtboxes,fill=means)      # random padding
    img = random_interp(img,size)                            # random-interpolation resize
    img,gtboxes = random_flip(img,gtboxes)                   # random horizontal mirror
    gtboxes,gtlabels = shuffle_gtbox(gtboxes,gtlabels)       # shuffle box order

    out_img = img.astype('float32')
    out_boxes = gtboxes.astype('float32')
    out_labels = gtlabels.astype('int32')
    return out_img, out_boxes, out_labels

# img_enhance, img_box, img_label = image_augment(srcimg,srcimg_gtbox,srcimg_label,size=320)
# visualize(srcimg, img_enhance)

# img,gt_boxes,gt_labels,scales = get_img_data_from_file(record)
# size = 512
# img, gt_boxes,gt_labels = image_augment(img,gt_boxes,gt_labels,size)
# print('img shape:{}\n gt_boxes shape:{}\n gt_labels shape:{}'.format(img.shape,gt_boxes.shape,gt_labels.shape))

def get_img_data(record,size=640):
    """Load one record, augment it, and normalize to a CHW float32 tensor."""
    img,gt_boxes,gt_labels,scales = get_img_data_from_file(record)
    img,gt_boxes,gt_labels = image_augment(img,gt_boxes,gt_labels,size)
    # ImageNet channel statistics used for normalization.
    mean = np.array([0.485,0.456,0.406]).reshape((1,1,-1))
    std = np.array([0.229,0.224,0.225]).reshape((1,1,-1))
    img = (img / 255.0 - mean) / std
    # HWC -> CHW for the network input.
    img = img.astype('float32').transpose((2,0,1))
    return img,gt_boxes,gt_labels,scales

# TRAINDIR = 'data/insects/train'
# TESTDIR = 'data/insects/test'
# VALIDDIR = 'data/insects/val'
# cname2cid = get_insect_names()
# records = get_annotations(cname2cid,TRAINDIR)

# record = records[0]
# img,gt_boxes,gt_labels,scales = get_img_data(record, size=480)
# print('img shape:{} \n gt_boxes shape:{} \n gt_bales:{}\n scales:{}'.format(img.shape,gt_boxes.shape,gt_labels,scales))

# Pick the (randomly scaled) input size for one batch of samples
def get_img_size(mode):
    """Return a random multiple-of-32 size in [320, 608] for train/valid, fixed 608 otherwise."""
    if mode in ('train', 'valid'):
        steps = np.arange(10)
        step = np.random.choice(steps)
        return 320 + step * 32
    return 608

# Convert a list-of-tuples batch into a tuple of numpy arrays
def make_array(batch_data):
    """Stack per-sample (img, gt_box, gt_label, scale) tuples into four arrays."""
    dtypes = ('float32', 'float32', 'int32', 'int32')
    img_array, gt_box_array, gt_labels_array, img_scale = (
        np.array([sample[k] for sample in batch_data], dtype=dt)
        for k, dt in enumerate(dtypes))
    return img_array,gt_box_array,gt_labels_array,img_scale

# Dataset reader class, subclassing paddle.io.Dataset
class TrainDataset(paddle.io.Dataset):
    def __init__(self,datadir,mode='train'):
        """Index every annotated sample found under `datadir`."""
        self.datadir = datadir
        self.records = get_annotations(get_insect_names(),datadir)
        # Fixed input size; could be randomized per batch via get_img_size(mode).
        self.img_size = 640

    def __getitem__(self,idx):
        """Load, augment and normalize the idx-th sample."""
        sample = self.records[idx]
        img,gt_bbox,gt_labels,im_shape = get_img_data(sample,size=self.img_size)
        return img,gt_bbox,gt_labels,np.array(im_shape)

    def __len__(self):
        """Number of annotated images."""
        return len(self.records)


# # 创建数据读取类
# train_dataset = TrainDataset(TRAINDIR,mode='train')
#
# # 使用paddle.io.DataLoader创建数据读取器，并设置batchsize,进程数量num_workers等参数
# train_loader = paddle.io.DataLoader(train_dataset,batch_size=2,shuffle=True,num_workers=2,drop_last=True)
#
# img,gt_boxes,gt_labels,im_shape = next(train_loader())
# print('img shape:{} \n gt_boxes shape:{} \n gt_labels shape: {}'.format(img.shape,gt_boxes.shape,gt_labels.shape))

def make_test_array(batch_data):
    """Stack per-sample (name, img, im_shape) tuples into three arrays."""
    names, imgs, shapes = [], [], []
    for name, img, shape in batch_data:
        names.append(name)
        imgs.append(img)
        shapes.append(shape)
    return (np.array(names),
            np.array(imgs,dtype='float32'),
            np.array(shapes,dtype='int32'))

def test_data_loader(datadir,batch_size=10,test_image_size=608,mode='test'):
    """
    Build a batched reader over the test images; test data carries no
    ground-truth labels.

    :param datadir: directory containing the test images
    :param batch_size: number of images per yielded batch
    :param test_image_size: square size every image is resized to
    :param mode: unused in the body; kept for symmetry with the train loader
    :return: a generator function yielding (names, images, original shapes)
    """
    image_names = os.listdir(datadir)
    def reader():
        batch_data = []
        img_size = test_image_size
        for image_name in image_names:
            file_path = os.path.join(datadir,image_name)
            img = cv2.imread(file_path)
            img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
            # Remember the original size before resizing to the network input.
            H = img.shape[0]
            W = img.shape[1]
            img = cv2.resize(img,(img_size,img_size))

            # Normalize with ImageNet channel statistics, then HWC -> CHW.
            mean = [0.485,0.456,0.406]
            std = [0.229,0.224,0.225]
            mean = np.array(mean).reshape((1,1,-1))
            std =  np.array(std).reshape((1,1,-1))
            out_img = (img / 255.0 - mean) / std
            out_img = out_img.astype('float32').transpose((2,0,1))
            img = out_img
            im_shape = [H,W]

            batch_data.append((image_name.split('.')[0],img,im_shape))
            if len(batch_data) == batch_size:
                yield make_test_array(batch_data)
                batch_data = []

        # Flush the final, possibly smaller, batch.
        if len(batch_data) > 0:
            yield make_test_array(batch_data)

    return reader

def get_loss(num_calsses,outputs,gtbox,gtlabel,gtscore=None,
             anchors = [10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326],
             anchor_masks = [[6,7,8],[3,4,5],[0,1,2]],
             ignore_thresh=0.7,
             use_label_smooth=False):
    """
    Compute the total YOLOv3 loss over the three pyramid levels with
    paddle.vision.ops.yolo_loss.

    NOTE(review): the parameter name `num_calsses` is a typo for
    `num_classes`, kept unchanged so existing keyword callers keep working.

    :param num_calsses: number of object classes
    :param outputs: the three level predictions [P0, P1, P2]
    :param gtbox: ground-truth box coordinates
    :param gtlabel: ground-truth class labels
    :param gtscore: ground-truth scores; needed for mixup training, otherwise
        pass ones shaped like gtlabel
    :param anchors: flat [w0,h0,...,w8,h8] sizes of the 9 anchor boxes.
        (The previous default was missing the value 33 and only contained
        17 numbers — i.e. 8.5 anchors — which breaks anchor pairing.)
    :param anchor_masks: which anchor indices each level uses
    :param ignore_thresh: predictions with IoU > this against any gt box get
        objectness = -1 (ignored)
    :param use_label_smooth: enable label smoothing inside yolo_loss
    :return: sum of the per-level mean losses
    """
    losses = []
    downsample = 32  # P0 is 1/32 of the input; halves for each finer level
    for i, out in enumerate(outputs): # one loss term per pyramid level
        anchor_mask_i = anchor_masks[i]
        loss = paddle.vision.ops.yolo_loss(
            x=out, # one of P0, P1, P2
            gt_box=gtbox,
            gt_label=gtlabel,
            gt_score=gtscore,
            anchors=anchors,
            anchor_mask=anchor_mask_i, # e.g. [3,4,5] selects that level's anchors
            class_num=num_calsses,
            ignore_thresh=ignore_thresh,
            downsample_ratio=downsample,
            # Forward the caller's flag: the original hard-coded False here
            # and silently ignored the use_label_smooth parameter.
            use_label_smooth=use_label_smooth)

        losses.append(paddle.mean(loss)) # mean over each image in the batch
        downsample = downsample//2 # next level's downsample ratio halves

    return sum(losses)

def get_lr(base_lr=0.0001,lr_decay=0.1):
    """
    Build a piecewise-constant learning-rate schedule: base_lr until 10k
    iterations, decayed once at 10k and again at 20k.
    """
    boundaries = [10000,20000]
    values = [base_lr,base_lr * lr_decay,base_lr * lr_decay * lr_decay]
    return paddle.optimizer.lr.PiecewiseDecay(boundaries=boundaries,values=values)

# def get_lr(base_lr=0.0001,lr_decay=0.1):
#     bd = [10000,20000]
#     lr = [base_lr,base_lr * lr_decay,base_lr * lr_decay * lr_decay]
#     learning_rate = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd,values=lr)
#     return learning_rate
#
#
# MAX_EPOCH = 1
#
# ANCHORS = [10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326]
#
# ANCHOR_MASKS = [[6,7,8],[3,4,5],[0,1,2]]
#
# IGNORE_THRESH = .7
#
# NUM_CLASS = 7
#
# TRAINDIR = 'data/insects/train'
# TESTDIR = 'data/insects/test'
# VALIDDIR = 'data/insects/val'
# paddle.set_device('gpu:0')
# # 创建数据读取类
# train_dataset = TrainDataset(TRAINDIR,mode='train')
# valid_dataset = TrainDataset(VALIDDIR,mode='valid')
# test_dataset = train_dataset(VALIDDIR,mode='valid')
#
# # 使用paddle.io.dataloader创建数据读取器，并设置batchsize,进程数量num_workers等参数
# train_loader = paddle.io.DataLoader(train_dataset,batch_size=10,shuffle=True,num_workers=0,drop_last=True,use_shared_memory=False)
# valid_loader = paddle.io.DataLoader(valid_dataset,batch_size=10,shuffle=False,num_workers=0,drop_last=False,use_shared_memory=False)
#
# model = YOLOv3(num_classes=NUM_CLASS) # 创建模型
#
# learning_rate = get_lr()
#
# opt = paddle.optimizer.Momentum(
#     learning_rate=learning_rate,
#     momentum=0.9,
#     weight_decay=paddle.regularizer.L2Decay(0.0005),
#     parameters=model.parameters()) # 创建优化器
#
# if __name__ == '__main__':
#     for epoch in range(MAX_EPOCH):
#         for i, data in enumerate(train_loader()):
#             img,gt_boxes,gt_labels,img_scale = data
#             gt_scores = np.ones(gt_labels.shape).astype('float32')
#             gt_scores = paddle.to_tensor(gt_scores)
#             img = paddle.to_tensor(img)
#             gt_boxes = paddle.to_tensor(gt_boxes)
#             gt_labels = paddle.to_tensor(gt_labels)
#             outputs = model(img) # 前向传播，输出[P0,P1,P2]
#             loss = get_loss(NUM_CLASS,outputs,gt_boxes,gt_labels,gtscore=gt_scores,
#                             anchors=ANCHORS,
#                             anchor_masks=ANCHOR_MASKS,
#                             ignore_thresh=IGNORE_THRESH,
#                             use_label_smooth=False) # 计算损失函数
#
#             loss.backward() # 反向传播计算梯度
#             opt.step() # 更新参数
#             opt.clear_grad()
#             if i % 10 == 0:
#                 timestring = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
#                 print('{}[TRAIN]epoch {},iter {},output loss: {}'.format(timestring,epoch,i,loss.numpy()))
#
#             # save params of model
#             if (epoch % 5 == 0) or (epoch == MAX_EPOCH -1):
#                 paddle.save(model.state_dict(),'yolo_epoch{}'.format(epoch))
#
#             # 每个epoch结束之后在验证集上进行测试
#             model.eval()
#             for i, data in enumerate(valid_loader()):
#                 img,gt_boxes,gt_labels,img_scale = data
#                 gt_scores = np.ones(gt_labels.shape).astype('float32')
#                 gt_scores = paddle.to_tensor(gt_scores)
#                 img = paddle.to_tensor(img)
#                 gt_boxes = paddle.to_tensor(gt_boxes)
#                 gt_labels = paddle.to_tensor(gt_labels)
#                 outputs = model(img)
#                 loss = get_loss(NUM_CLASS,outputs,gt_boxes,gt_labels,gtscore=gt_scores,
#                                 anchors=ANCHORS,
#                                 anchor_masks=ANCHOR_MASKS,
#                                 ignore_thresh=IGNORE_THRESH,
#                                 use_label_smooth=False)
#                 if i % 1 == 0:
#                     timestring = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
#                     print('{}[VALID]epoch {}, iter {}, output loss: {}'.format(timestring,epoch,i,loss.numpy()))
#
#                 model.train()

