import os
import os.path as osp
import numpy as np
import cv2 as cv
import logging
import glob
import torch.utils.data as data
import torch
import time
import shutil
import torch.nn as nn
from pathlib import Path
from shapely.geometry import Polygon
from utils.general import rotate_non_max_suppression

# Module-level logger; handlers/level are configured by the importing application.
logger = logging.getLogger(__name__)
# Compact numpy printing (4 decimals, no scientific notation) for debug output.
np.set_printoptions(precision=4, suppress=True)


class Ensemble(nn.ModuleList):
    """Mean-ensemble of detection models.

    Each appended module must accept ``(x, augment)`` and return a tuple
    whose first element is the prediction tensor; the ensemble averages
    those predictions element-wise.
    """

    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        """Run every member on x and return (mean prediction, None)."""
        outputs = [member(x, augment)[0] for member in self]
        # Mean ensemble. Alternatives kept for reference:
        #   torch.stack(outputs).max(0)[0]  -> max ensemble
        #   torch.cat(outputs, 1)           -> nms ensemble
        merged = torch.stack(outputs).mean(0)
        return merged, None  # inference output, train output placeholder


def attempt_load(weights, map_location=None):
    """Load model checkpoint(s) as a single model or an Ensemble.

    Accepts a single path (``weights=a``) or a list of paths
    (``weights=[a, b, c]``). Each checkpoint's 'model' entry is cast to
    FP32, fused, and put in eval mode.

    Returns the bare model when one weight file is given, otherwise an
    Ensemble carrying 'names' and 'stride' copied from the last member.
    """
    ensemble = Ensemble()
    weight_list = weights if isinstance(weights, list) else [weights]
    for path in weight_list:
        checkpoint = torch.load(path, map_location=map_location)
        ensemble.append(checkpoint['model'].float().fuse().eval())  # load FP32 model

    if len(ensemble) == 1:
        return ensemble[-1]  # single model: unwrap
    print('Ensemble created with %s\n' % weights)
    for attr in ['names', 'stride']:
        setattr(ensemble, attr, getattr(ensemble[-1], attr))
    return ensemble


# Image tiling helpers
def compute_split_num(w, small_size, mark_rate):
    """Work out how many overlapping tiles fit along one side of the image.

    Tiles of side ``small_size`` are placed with stride
    ``small_size * (1 - mark_rate)`` (i.e. neighbouring tiles overlap by
    ``mark_rate`` of a tile).

    Returns:
        (num, flag): ``num`` regular tile positions along the axis, and
        ``flag`` = 1 when an extra edge-aligned tile is needed to cover
        the remainder, 0 when the stride divides the side exactly.
    """
    stride = small_size * (1 - mark_rate)
    overlap = small_size * mark_rate
    full, remainder = divmod(w, stride)
    if remainder == overlap:
        return int(full), 0  # exact fit, no extra edge tile
    if remainder < overlap:
        return int(full) - 1, 1  # last regular tile would poke out; drop it
    return int(full), 1


def split(img, img_name, num_wh, flag, mark_rate, small_s):
    """Cut a big HxWx3 BGR image into overlapping small_s x small_s tiles.

    Keys are '<base>_<i>_<j>.<ext>' with 1-based row/column indices; the
    index -1 marks the extra bottom/right edge-aligned tiles emitted when
    ``flag`` is truthy. Values are contiguous CHW RGB uint8 arrays ready
    for ``torch.from_numpy``.
    """
    stride = int(small_s * (1 - mark_rate))
    name_parts = img_name.split('.')
    base, ext = name_parts[0], name_parts[1]

    def to_chw(crop):
        # BGR -> RGB, HWC -> CHW, contiguous copy
        return np.ascontiguousarray(crop[:, :, ::-1].transpose(2, 0, 1))

    tiles = {}
    for i in range(num_wh):
        for j in range(num_wh):
            crop = img[i * stride: small_s + i * stride, j * stride: small_s + j * stride, :]
            tiles['{}_{}_{}.{}'.format(base, i + 1, j + 1, ext)] = to_chw(crop)

    if flag:
        for k in range(num_wh):
            # right-edge tile of row k
            tiles['{}_{}_{}.{}'.format(base, k + 1, -1, ext)] = to_chw(
                img[k * stride: small_s + k * stride, -small_s:, :])
            # bottom-edge tile of column k
            tiles['{}_{}_{}.{}'.format(base, -1, k + 1, ext)] = to_chw(
                img[-small_s:, k * stride: small_s + k * stride, :])
        # bottom-right corner tile
        tiles['{}_{}_{}.{}'.format(base, -1, -1, ext)] = to_chw(img[-small_s:, -small_s:, :])

    return tiles


def split_one_image(img, img_name, small_size=480, mark_rate=0.25):
    """Split one image into overlapping tiles named after ``img_name``.

    NOTE(review): ``img.shape`` is (height, width, channels), so the value
    used below is actually the height; callers appear to pass square
    images resized to ``img_size``, where the distinction is moot — confirm.
    """
    side = img.shape[0]
    num_wh, flag = compute_split_num(side, small_size, mark_rate)
    return split(img, img_name, num_wh, flag, mark_rate, small_size)


# Device selection
def select_device(device='', batch_size=None):
    """Resolve the torch device to run on.

    Args:
        device: '' (auto), 'cpu', or a CUDA id string like '0' or '0,1,2,3'
            (exported via CUDA_VISIBLE_DEVICES before the check).
        batch_size: optional; with multiple GPUs it must be divisible by
            the GPU count.

    Returns:
        torch.device('cuda:0') when CUDA is usable and not refused,
        otherwise torch.device('cpu').
    """
    wants_cpu = device.lower() == 'cpu'
    if device and not wants_cpu:  # an explicit CUDA device was requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availablity

    cuda = False if wants_cpu else torch.cuda.is_available()
    if cuda:
        bytes_per_mb = 1024 ** 2
        n_gpu = torch.cuda.device_count()
        if n_gpu > 1 and batch_size:  # batch size must split evenly across GPUs
            assert batch_size % n_gpu == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, n_gpu)
        props = [torch.cuda.get_device_properties(i) for i in range(n_gpu)]
        prefix = 'Using CUDA '
        for i in range(n_gpu):
            if i == 1:
                prefix = ' ' * len(prefix)  # indent continuation lines under the first
            logger.info("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                        (prefix, i, props[i].name, props[i].total_memory / bytes_per_mb))
    else:
        logger.info('Using CPU')

    logger.info('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')


# Strip optimizer state and other training-only info from a checkpoint
def strip_optimizer(device_f, f='weights/best0.pt'):
    """Load checkpoint ``f`` and strip training-only state for inference.

    Clears 'optimizer' and 'training_results', resets 'epoch' to -1,
    converts the model to FP16, and freezes all parameters.

    Returns:
        The modified checkpoint dict.
    """
    checkpoint = torch.load(f, map_location=device_f)
    for key in ('optimizer', 'training_results'):
        checkpoint[key] = None
    checkpoint['epoch'] = -1
    checkpoint['model'].half()  # FP16 for inference
    for param in checkpoint['model'].parameters():
        param.requires_grad = False  # freeze: no gradients needed at inference
    return checkpoint


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize ``img`` into ``new_shape`` with aspect ratio kept, padding the rest.

    See https://github.com/ultralytics/yolov3/issues/232 for the rationale.

    Args:
        img: HxWxC BGR image.
        new_shape: target (height, width), or a single int for a square.
        color: padding color.
        auto: pad only up to the nearest 64-pixel multiple (minimum rectangle).
        scaleFill: stretch to fill instead of padding (overrides aspect ratio).
        scaleup: allow upscaling; when False images are only shrunk.

    Returns:
        (padded image, (w_ratio, h_ratio), (dw, dh)) where dw/dh are the
        per-side padding amounts.
    """
    src_h, src_w = img.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old), optionally capped at 1.0 for better test mAP
    gain = min(new_shape[0] / src_h, new_shape[1] / src_w)
    if not scaleup:
        gain = min(gain, 1.0)

    ratio = gain, gain  # width, height ratios
    new_unpad = int(round(src_w * gain)), int(round(src_h * gain))
    pad_w = new_shape[1] - new_unpad[0]
    pad_h = new_shape[0] - new_unpad[1]
    if auto:  # minimum rectangle: keep only the padding needed for a 64-multiple
        pad_w, pad_h = np.mod(pad_w, 64), np.mod(pad_h, 64)
    elif scaleFill:  # stretch, no padding at all
        pad_w, pad_h = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / src_w, new_shape[0] / src_h

    # Split the padding evenly between the two sides
    pad_w /= 2
    pad_h /= 2

    if (src_w, src_h) != new_unpad:  # resize only when the size actually changes
        img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)
    # +/-0.1 before rounding puts any odd pixel of padding on the bottom/right
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=color)
    return img, ratio, (pad_w, pad_h)


def padding_resize(img00, new_img_size):
    """Letterbox-resize a BGR image and convert it to a contiguous CHW RGB array."""
    padded = letterbox(img00, new_shape=new_img_size)[0]
    chw = padded[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
    return np.ascontiguousarray(chw)


# Image-loading dataset
class LoadImages(data.Dataset):
    """Dataset over image files found at a path, directory, or glob pattern.

    Each item yields the original filename, a 1024-letterboxed CHW copy
    for the 'big' model, the absolute file path, and a dict of overlapping
    tiles (from split_one_image) for the 'small' model.
    """

    def __init__(self, im_path, img_size=4096):
        """Collect image files under ``im_path``.

        Args:
            im_path: file path, directory, or glob pattern.
            img_size: square side length images are resized to before tiling.

        Raises:
            Exception: when the path does not exist.
            AssertionError: when no supported image files are found.
        """
        p = osp.abspath(str(Path(im_path)))  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob pattern
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # directory listing
        elif os.path.isfile(p):
            files = [p]  # single file
        else:
            raise Exception('ERROR: %s does not exist' % p)
        # Keep only files with a supported image extension
        img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
        self.img_size = img_size
        self.images = [[osp.basename(x), x] for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        self.num_f = len(self.images)
        assert self.num_f > 0, 'No images found in %s. Supported formats are:\n images: %s' % (p, img_formats)

    def __getitem__(self, index):
        """Load and preprocess one image.

        Returns:
            (name, big_resized_chw, absolute_path, tile_dict)
        """
        image_name, image_path = self.images[index]
        image = cv.imread(image_path)
        # fix: cv.imread returns None for unreadable/corrupt files; fail loudly
        # here instead of crashing later inside padding_resize with an opaque error
        if image is None:
            raise IOError('Failed to read image: %s' % image_path)
        image_big_resize = padding_resize(image, new_img_size=1024)
        if image.shape[0] != self.img_size:
            image = cv.resize(image, (self.img_size, self.img_size))
        im_dict = split_one_image(image, image_name)
        return image_name, image_big_resize, image_path, im_dict

    def __len__(self):
        return self.num_f


def convert2big(res, pic_name, img_size=4096, small_s=480, step=360):
    """Translate tile-local detection centers back into big-image coordinates.

    Generalized: the original hard-coded 4096/480/360 are now defaulted
    parameters, so other image/tile/stride sizes can reuse this function
    without changing existing callers.

    Args:
        res: iterable of mutable detections; res[k][0]/res[k][1] are the
            center x/y in tile coordinates (modified in place).
        pic_name: tile name '<base>_<i>_<j>.<ext>' as produced by split();
            i is the row index, j the column index, -1 marks an
            edge-aligned tile.
        img_size: side length of the full square image.
        small_s: tile side length.
        step: stride between tile origins (small_s * (1 - mark_rate)).

    Returns:
        res, with each detection's center translated in place.
    """
    parts = pic_name.split('_')
    i, j = int(parts[-2]), int(parts[-1].split('.')[0])
    edge = img_size - small_s  # origin of the -1 (edge-aligned) tiles
    for rr in res:
        # x offset is determined by the column index j, y by the row index i
        rr[0] = rr[0] + (edge if j == -1 else (j - 1) * step)
        rr[1] = rr[1] + (edge if i == -1 else (i - 1) * step)
    return res


def convert2org(res, ss):
    """Rescale box geometry from resized-image to original-image coordinates.

    Args:
        res: iterable of mutable detections; the first four entries of each
            (cx, cy, w, h) are multiplied in place by ss[0]/ss[1].
        ss: (original size, resized size) pair.

    Returns:
        res, modified in place.
    """
    scale = ss[0] / ss[1]
    for box in res:
        for k in range(4):  # only the geometric fields cx, cy, w, h
            box[k] = box[k] * scale
    return res


def xywha2xy4(res_one):
    """Convert a (cx, cy, w, h, angle, ...) detection into 4 corner points.

    NOTE(review): the angle at index 4 is converted from radians to degrees,
    which matches cv.boxPoints expecting a RotatedRect angle in degrees.
    """
    center = (res_one[0], res_one[1])
    size = (res_one[2], res_one[3])
    angle_deg = (res_one[4] / np.pi) * 180
    return cv.boxPoints((center, size, angle_deg))


def plot_one_img_box(results, in_img_path, out_img_path, img_name, names, colors):
    """Draw rotated boxes with class/score labels on an image and save it.

    Args:
        results: detections [..., cx, cy, ..., score, class_id].
        in_img_path: path of the source image to draw on.
        out_img_path: output directory for the annotated image.
        img_name: filename to save under.
        names: class-id -> class-name mapping.
        colors: class-id -> BGR color mapping.
    """
    canvas = cv.imread(in_img_path)
    for det in results:
        cls_id = int(det[-1])
        corners = np.int0(xywha2xy4(det))
        cv.drawContours(canvas, [corners], 0, colors[cls_id], thickness=2)
        label = names[cls_id] + ' ' + str(det[-2])[:4]  # class name + truncated score
        cv.putText(canvas, text=label, org=(int(det[0]), int(det[1])),
                   fontFace=0, fontScale=0.5, color=[0, 0, 255], thickness=2, lineType=cv.LINE_AA)
    cv.imwrite(os.path.join(out_img_path, img_name), canvas)
    print('{} has been saved in :{}'.format(img_name, out_img_path))


def write_txt(img_n, result_list, names, out_txtfile_path):
    """Append one image's detections to the result text file.

    Each line is: image name, class name, score, then the 8 corner
    coordinates (x1 y1 ... x4 y4) of the rotated box.

    Args:
        img_n: image filename written as the first field.
        result_list: detections [..., score, class_id] in xywha form.
        names: class-id -> class-name mapping.
        out_txtfile_path: file opened in append mode.
    """
    with open(out_txtfile_path, 'a+') as fp:
        for res_one in result_list:
            corners = xywha2xy4(res_one)
            line = '{} {} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}\n'.format(
                img_n, names[int(res_one[-1])], res_one[-2], corners[0][0], corners[0][1], corners[1][0], corners[1][1],
                corners[2][0], corners[2][1], corners[3][0], corners[3][1])
            fp.write(line)
    # fix: removed the redundant fp.close() — the with-statement already closes the file


def compute_one_ration_iou(x_box, y_box):
    """IoU of two rotated boxes given as flat 8-coordinate corner lists.

    Each box is reshaped to 4 (x, y) points; the convex hull is taken so
    corner ordering does not matter. Returns 0 when the boxes are disjoint.
    """
    poly_a = Polygon(np.array(x_box).reshape(4, 2)).convex_hull
    poly_b = Polygon(np.array(y_box).reshape(4, 2)).convex_hull

    inter_area = poly_a.intersection(poly_b).area
    if inter_area == 0:
        return 0
    union_area = poly_a.area + poly_b.area - inter_area
    return inter_area / union_area


def compute_rotation_ious(result, index):
    """IoUs of the top candidate (result[index[0]]) against all the others.

    Returns a numpy array of len(index) - 1 IoU values, ordered like
    index[1:].
    """
    top_corners = xywha2xy4(result[index[0]])
    return np.array([
        compute_one_ration_iou(top_corners, xywha2xy4(result[idx]))
        for idx in index[1:]
    ])


def hard_nms(pp, thresh=0.4):
    """Greedy rotated-box non-maximum suppression.

    Args:
        pp: list of detections [..., score, class_id] in xywha form.
        thresh: IoU above which a lower-scored box is suppressed.

    Returns:
        The surviving detections as a list of lists, highest score first.
    """
    boxes = np.array(pp)
    order = boxes[:, -2].argsort()[::-1]  # candidate indices, descending score

    keep = []
    while order.size > 0:
        keep.append(order[0])  # best remaining box always survives
        ious = compute_rotation_ious(boxes, order)
        survivors = np.where(ious <= thresh)[0]
        order = order[survivors + 1]  # +1: ious correspond to order[1:]

    return boxes[keep].tolist()


def inference(img_path, weight_path, out_txtfile_path, out_img_path=None, img_sz=4096, conf_thr=0.5, nms_thr=0.4, iou_thr=0.4):
    """Run two-scale rotated-box detection over every image under img_path.

    Each image is (a) split into overlapping tiles fed to the 'small' model
    and (b) letterbox-resized to 1024 and fed to the 'big' model; the two
    result sets are merged with rotated hard-NMS, written to
    out_txtfile_path, and optionally drawn into out_img_path.

    Args:
        img_path: image file, directory, or glob pattern to process.
        weight_path: directory containing 'small_weight.pt' and 'big_weight.pt'.
        out_txtfile_path: result text file (truncated at the start of each run).
        out_img_path: optional directory for annotated images (recreated each run).
        img_sz: square side length images are resized to before tiling.
        conf_thr: confidence threshold for the small model's NMS.
        nms_thr: IoU threshold for the small model's per-tile NMS.
        iou_thr: IoU threshold for the final cross-tile/cross-scale hard-NMS.
    """
    # Select the inference device (CUDA if available, else CPU)
    device = select_device()
    # Load the dataset
    print('Data Loading...')
    t0 = time.time()
    dataset = LoadImages(img_path, img_size=img_sz)
    print('Data Load Finished... Use Time: %.2f' % (time.time()-t0))
    # Recreate output files: truncate the result txt, reset the image directory
    with open(out_txtfile_path, 'w') as fp:
        fp.close()
    if out_img_path:
        if os.path.exists(out_img_path):
            shutil.rmtree(out_img_path)
        os.mkdir(out_img_path)

    # Load the two models (tile-scale and full-image-scale)
    model_small = attempt_load(weights=osp.join(weight_path, 'small_weight.pt'), map_location=device)
    model_big = attempt_load(weights=osp.join(weight_path, 'big_weight.pt'), map_location=device)

    # Class names (unwrap DataParallel-style .module if present)
    names = model_small.module.names if hasattr(model_small, 'module') else model_small.names

    # Per-class BGR drawing colors
    color_dict = {
        0: (255, 000, 000),
        1: (255, 128, 000),
        2: (255, 255, 000),
        3: (000, 255, 000),
        4: (000, 255, 255),
        5: (000, 000, 255),
        6: (128, 000, 255),
        7: (255, 000, 255),
        8: (128, 000, 000),
        9: (000, 128, 000),
        10: (000, 000, 128)
    }

    # Inference loop
    with torch.no_grad():
        for step, (img_n, resize_image_big, img_abs_path, img_splited) in enumerate(dataset):
            t1 = time.time()
            print('{}/{}: {}'.format(step+1, len(dataset), img_n))
            # Small model: run on each tile, map detections back to big-image coords
            result_list0 = []
            for img_small_n in img_splited:
                # normalize the uint8 CHW tile to float [0, 1]
                img_small = torch.from_numpy(img_splited[img_small_n]).to(device)
                img_small = img_small.float()
                img_small /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img_small.ndimension() == 3:
                    img_small = img_small.unsqueeze(0)  # add batch dimension
                pred = model_small(img_small, augment=False)[0]
                # Apply rotated NMS within the tile
                pred = rotate_non_max_suppression(pred, conf_thr, nms_thr, agnostic=True)[0]
                if pred is not None and len(pred):
                    pred = pred.cpu().detach().numpy()
                    # translate tile-local centers into big-image coordinates
                    pred = convert2big(pred, img_small_n)
                    for one in pred.tolist():
                        result_list0.append(one)
            if len(result_list0) == 0:
                pass
            else:
                # merge duplicate detections from overlapping tiles
                result_list0 = hard_nms(result_list0, thresh=iou_thr)
            print(len(result_list0))
            # Big model: run once on the 1024-letterboxed full image
            result_list1 = []
            img_big = torch.from_numpy(resize_image_big).to(device)
            img_big = img_big.float()
            img_big /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img_big.ndimension() == 3:
                img_big = img_big.unsqueeze(0)  # add batch dimension
            pred1 = model_big(img_big, augment=False)[0]
            # Apply rotated NMS (fixed thresholds for the big model)
            pred1 = rotate_non_max_suppression(pred1, 0.5, 0.1, agnostic=True)[0]
            if pred1 is not None and len(pred1):
                pred1 = pred1.cpu().detach().numpy()
                # scale 1024-space coordinates back up to the 4096 image
                pred1 = convert2org(pred1, (4096, 1024))
                result_list1 = pred1.tolist()

            print(len(result_list1))
            # Merge both scales and suppress cross-scale duplicates
            result_list = result_list0 + result_list1
            print(len(result_list))
            if len(result_list) == 0:
                pass
            else:
                result_list = hard_nms(result_list, thresh=iou_thr)
                if out_img_path:
                    plot_one_img_box(result_list, img_abs_path, out_img_path, img_n, names, color_dict)
                write_txt(img_n, result_list, names, out_txtfile_path)
            print('%d obj Done!  Use Time: %.2fs' % (len(result_list), time.time() - t1))


if __name__ == '__main__':
    # fix: guard the script entry point so importing this module (e.g. to reuse
    # hard_nms or LoadImages) no longer triggers a full inference run.
    inference(img_path='/data1/TZB/ro_yolov5/convertor/a_img01',
              weight_path='/data1/TZB/ro_yolov5/test',
              out_txtfile_path='/data1/TZB/ro_yolov5/test/result_05_500_multi.txt',
              out_img_path='/data1/TZB/ro_yolov5/test/output')