import argparse
import os
import glob
import random
import darknet
import time
import cv2
import numpy as np
import darknet

import json
from load import recognize_road
from river import recognition_river
from Utils.colors import ColorRange
from Utils.color_utils import Utils

import socket
import sys
import copy


def parser():
    """
    Build and parse the command-line arguments.

    :return: argparse.Namespace carrying the YOLO house-detection settings,
             the electronic-map recognition settings, and the socket settings
    """
    parser = argparse.ArgumentParser(description="YOLO Object Detection")
    # House detection (YOLO) settings
    parser.add_argument("--input", type=str, default="/home/zhaokaiyue/Desktop/ss",
                        help="image source. It can be a single image, a"
                        "txt with paths to them, or a folder. Image valid"
                        " formats are jpg, jpeg or png."
                        "If no input is given, ")
    parser.add_argument("--batch_size", default=1, type=int, help="number of images to be processed at the same time")
    parser.add_argument("--weights", default="./backup/one-416/yolov4-obj_5000.weights", help="yolo weights path")
    parser.add_argument("--dont_show", action='store_true', help="windown inference display. For headless systems")
    parser.add_argument("--ext_output", action='store_true', help="display bbox coordinates of detected objects")
    parser.add_argument("--save_labels", action='store_true', help="save detections bbox for each image in yolo format")
    parser.add_argument("--config_file", default="./backup/one-416/yolov4-obj.cfg", help="path to config file")
    parser.add_argument("--data_file", default="./backup/one-416/obj.data", help="path to data file")
    parser.add_argument("--thresh", type=float, default=.25, help="remove detections with lower confidence")
    parser.add_argument("--nms", type=float, default=.3, help="nms of detections")

    # Electronic-map recognition settings
    # NOTE(review): ``type=list`` splits a command-line string into single
    # characters, so the color-range options are only usable via their
    # defaults; left unchanged to avoid altering the CLI surface.
    parser.add_argument('--image_path', default="/home/zhaokaiyue/Desktop/tests/12.png", type=str, help="the path of image")
    parser.add_argument('--road_color', default=[(20, 26), (43, 255), (46, 255)], type=list, help="range of road color!")
    parser.add_argument('--village_road_color', default=[(0, 180), (0, 3), (254, 255)], type=list, help="the color of village")
    parser.add_argument('--river_color', default=[(100, 124), (43, 255), (46, 255)], type=list, help="the color of river")
    parser.add_argument('--road_edge_color', default=[(0, 180), (0, 43), (46, 225)], type=list, help="the edge of road")
    parser.add_argument('--around_distance', default=6, type=int, help="the distance of center point")
    parser.add_argument('--point_distance', default=50, type=int, help="the distance of point")
    # Bug fix: the default 0.3 is a fraction, so the type must be float —
    # ``type=int`` rejected every value actually passed on the command line.
    parser.add_argument('--area_value', default=0.3, type=float, help="the persent of area")
    parser.add_argument('--railway_search_range', default=6, type=int, help='')

    # Socket settings
    parser.add_argument('--host', default='192.168.1.10', type=str, help="the ip of host")
    parser.add_argument('--port', default=9090, type=int, help="the port of host")
    return parser.parse_args()


class MyEncoder(json.JSONEncoder):
    """
    JSON encoder that serializes numpy scalars and arrays.

    numpy integers become ints, numpy floats become floats, and arrays
    become nested lists; anything else falls back to the default encoder
    (which raises TypeError for unsupported types).
    """

    def default(self, obj):
        """Convert numpy types to JSON-serializable Python equivalents."""
        if isinstance(obj, np.integer):
            # Covers every numpy integer width, including np.int64 — the
            # original separate np.int64 branch was unreachable dead code.
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(MyEncoder, self).default(obj)


def road_recognization(image_path, args):
    """
    Recognize road-like features on an electronic map image.

    Extracts national/provincial highways, cart roads, rivers and railways
    by matching the HSV color ranges supplied on ``args``.

    :param image_path: path to the map image (jpg/png/jpeg/tif/tiff)
    :param args: parsed CLI arguments carrying the HSV color ranges and
                 the railway/road separation parameters
    :return: [{"cartway": ...}, {"river": ...}, {"railway": ...},
              {"highway": ...}], or None when the path is not a supported
              image or does not exist
    """
    valid_suffixes = ('.jpg', '.png', '.jpeg', '.tif', '.tiff')
    if not image_path.endswith(valid_suffixes):
        print("image is not Pic!")
        return
    if not os.path.exists(image_path):
        print("ele_path is not exited!")
        return

    color_config = ColorRange()
    bgr_image = cv2.imread(image_path)
    img_height, img_width, _ = bgr_image.shape
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

    # National/provincial highways (orange areas).
    # NOTE: color_config is mutated before each recognizer call, so the
    # call order below must be preserved.
    color_config.set_road_color(args.road_color)
    highway_contours = recognize_road.Road().read_road_contours(color_config, hsv_image)
    highway = Utils.contours_to_list(highway_contours, img_height)

    # Rivers (blue areas).
    color_config.set_river_color(args.river_color)
    river_contours = recognition_river.River().read_river_contours(color_config, hsv_image)
    river = Utils.contours_to_list(river_contours, img_height)

    # Edges shared by railways and cart roads (gray areas).
    color_config.set_road_color(args.road_edge_color)
    road_edge_contours = recognize_road.RoadEdge().read_road_edge_contours(color_config, hsv_image)

    # Cart roads (white areas).
    color_config.set_road_color(args.village_road_color)
    cartway_contours = recognize_road.Road().read_road_contours(color_config, hsv_image)
    cartway = Utils.contours_to_list(cartway_contours, img_height)

    # Railways: separate them from cart roads using the edge contours.
    color_config.set_road_color(args.village_road_color)
    road_thread = recognize_road.Road().read_road_thread(color_config, hsv_image)
    diff = recognize_road.DiffVillageOrRoad(road_edge_contours, road_thread,
                                            args.around_distance,
                                            args.point_distance, args.area_value)
    railway_point_contours, _road_point_contours = diff.get_diff_vilageroad_or_road_line()
    railway_object = Utils.contours_to_list(railway_point_contours, img_height)
    railway_builder = recognize_road.Railway(args.railway_search_range, road_thread)
    railway = railway_builder.point_connect_line(railway_object)

    return [{"cartway": cartway}, {"river": river},
            {"railway": railway}, {"highway": highway}]


def check_arguments_errors(args):
    """
    Validate the parsed arguments before loading the model.

    :param args: any object exposing ``thresh``, ``config_file``,
                 ``weights`` and ``data_file`` attributes
    :raises ValueError: when ``thresh`` is outside (0, 1) or any of the
                        referenced files does not exist
    """
    # ``raise`` instead of ``assert``: assertions are stripped under
    # ``python -O`` and must not be relied on for input validation.
    if not 0 < args.thresh < 1:
        raise ValueError("Threshold should be a float between zero and one (non-inclusive)")
    if not os.path.exists(args.config_file):
        raise ValueError("Invalid config path {}".format(os.path.abspath(args.config_file)))
    if not os.path.exists(args.weights):
        raise ValueError("Invalid weight path {}".format(os.path.abspath(args.weights)))
    if not os.path.exists(args.data_file):
        raise ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file)))
    # if args.input and not os.path.exists(args.input):
    #     raise(ValueError("Invalid image path {}".format(os.path.abspath(args.input))))


def check_batch_shape(images, batch_size):
    """
    Validate that a batch of images is homogeneous and fits the batch size.

    :param images: list of numpy images
    :param batch_size: maximum number of images allowed
    :return: the shared image shape (height, width, channels)
    :raises ValueError: when shapes differ or the batch is too large
    """
    unique_shapes = {img.shape for img in images}
    if len(unique_shapes) > 1:
        raise ValueError("Images don't have same shape")
    if len(images) > batch_size:
        raise ValueError("Batch size higher than number of images")
    return images[0].shape


def load_images(images_path):
    """
    Resolve an input specification into a list of image paths.

    A single jpg/jpeg/png path is returned as-is; a .txt file is read
    line-by-line as paths; anything else is treated as a folder whose
    jpg, png and jpeg files are collected.
    """
    suffix = images_path.split('.')[-1]
    if suffix in ('jpg', 'jpeg', 'png'):
        return [images_path]
    if suffix == "txt":
        with open(images_path, "r") as handle:
            return handle.read().splitlines()
    found = []
    for pattern in ("*.jpg", "*.png", "*.jpeg"):
        found.extend(glob.glob(os.path.join(images_path, pattern)))
    return found


def prepare_batch(images, network, channels=3):
    """
    Pack a list of BGR images into one darknet IMAGE batch.

    Each image is converted to RGB, resized to the network input size,
    transposed to channel-first order, then all images are flattened into
    a single contiguous float32 buffer scaled to [0, 1].

    :param images: list of BGR images (numpy arrays, as read by cv2)
    :param network: loaded darknet network handle
    :param channels: color channels per image
    :return: darknet.IMAGE wrapping the batch buffer
    """
    width = darknet.network_width(network)
    height = darknet.network_height(network)

    darknet_images = []
    for image in images:
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_resized = cv2.resize(image_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        # HWC -> CHW: darknet consumes channel-first pixel data.
        custom_image = image_resized.transpose(2, 0, 1)
        darknet_images.append(custom_image)

    batch_array = np.concatenate(darknet_images, axis=0)
    # Normalize pixel values to [0, 1] in one contiguous float32 buffer.
    batch_array = np.ascontiguousarray(batch_array.flat, dtype=np.float32)/255.0
    # NOTE(review): the returned IMAGE holds a raw pointer into
    # ``batch_array``; the caller must use the batch before the array is
    # garbage collected — verify the darknet binding copies if unsure.
    darknet_images = batch_array.ctypes.data_as(darknet.POINTER(darknet.c_float))
    return darknet.IMAGE(width, height, channels, darknet_images)


def image_detection(image_path, network, class_names, class_colors, thresh, nms):
    """
    Run darknet detection on a single image file.

    :param image_path: path to the input image
    :param network: loaded darknet network handle
    :param class_names: list of class labels
    :param class_colors: per-class colors used when drawing boxes
    :param thresh: confidence threshold for detections
    :param nms: non-maximum-suppression threshold
    :return: (annotated RGB image, detections); detections come straight
             from darknet.detect_image and their boxes refer to the
             network-input-sized image (callers rescale them themselves)
    """
    # Darknet doesn't accept numpy images.
    # Create one with image we reuse for each detect
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)

    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                               interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh, nms=nms)
    # Release the C-side image buffer; drawing happens on the numpy copy.
    darknet.free_image(darknet_image)
    image = darknet.draw_boxes(detections, image_resized, class_colors)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections


def batch_detection(network, images, class_names, class_colors,
                    thresh=0.25, hier_thresh=.5, nms=.45, batch_size=4):
    """
    Run darknet batch inference over a list of same-shaped images.

    :param network: darknet network (loaded with a matching batch size)
    :param images: images to predict; all must share one shape
    :param class_names: list of class labels
    :param class_colors: per-class colors for box drawing
    :param thresh: detection confidence threshold
    :param hier_thresh: hierarchical threshold forwarded to darknet
    :param nms: NMS threshold; a falsy value disables NMS
    :param batch_size: number of images predicted per batch
    :return: (images with boxes drawn, per-image prediction lists)
    """
    image_height, image_width, _ = check_batch_shape(images, batch_size)
    darknet_images = prepare_batch(images, network)
    batch_detections = darknet.network_predict_batch(network, darknet_images, batch_size, image_width,
                                                     image_height, thresh, hier_thresh, None, 0, 0)
    batch_predictions = []
    for idx in range(batch_size):
        num = batch_detections[idx].num
        detections = batch_detections[idx].dets
        if nms:
            darknet.do_nms_obj(detections, num, len(class_names), nms)
        predictions = darknet.remove_negatives(detections, class_names, num)
        images[idx] = darknet.draw_boxes(predictions, images[idx], class_colors)
        batch_predictions.append(predictions)
    # Release the C-side detection buffers allocated by predict_batch.
    darknet.free_batch_detections(batch_detections, batch_size)
    return images, batch_predictions


def image_classification(image, network, class_names):
    """
    Classify one image and rank all classes by score.

    :param image: BGR image (numpy array)
    :param network: loaded darknet classification network
    :param class_names: labels matching the network outputs by index
    :return: [(class name, score), ...] sorted by descending score
    """
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height),
                                interpolation=cv2.INTER_LINEAR)
    darknet_image = darknet.make_image(width, height, 3)
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.predict_image(network, darknet_image)
    # Pair each class label with its predicted score by index.
    predictions = [(name, detections[idx]) for idx, name in enumerate(class_names)]
    # Free the C-side buffer before returning.
    darknet.free_image(darknet_image)
    return sorted(predictions, key=lambda x: -x[1])


def convert2relative(image, bbox):
    """
    Convert an absolute (x, y, w, h) box to YOLO-style relative coordinates.

    :param image: image the box refers to (height/width taken from its shape)
    :param bbox: (center_x, center_y, width, height) in pixels
    :return: (x, y, w, h) as fractions of the image size
    """
    img_height, img_width, _ = image.shape
    center_x, center_y, box_w, box_h = bbox
    return (center_x / img_width, center_y / img_height,
            box_w / img_width, box_h / img_height)


def save_annotations(name, image, detections, class_names):
    """
    Write detections next to the image as a YOLO-format .txt file.

    Each line is "<class index> <x> <y> <w> <h> <confidence>" with
    coordinates relative to the image size.

    :param name: image file path; the output swaps its extension for .txt
    :param image: image the detections refer to (used for relative scaling)
    :param detections: iterable of (label, confidence, bbox) tuples
    :param class_names: label list; a label's index is its class id
    """
    # Bug fix: ``name.split(".")[:-1][0]`` kept only the text before the
    # FIRST dot, corrupting names like "img.v2.jpg" or dotted directories;
    # os.path.splitext strips just the final extension.
    file_name = os.path.splitext(name)[0] + ".txt"
    with open(file_name, "w") as f:
        for label, confidence, bbox in detections:
            x, y, w, h = convert2relative(image, bbox)
            label = class_names.index(label)
            f.write("{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(label, x, y, w, h, float(confidence)))


def batch_detection_example():
    """
    Demo: load the network with batch_size=2, run batch detection on two
    hard-coded image paths, and print the raw predictions.
    """
    args = parser()
    check_arguments_errors(args)
    batch_size = 2
    random.seed(3)  # deterministic bbox colors
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=batch_size
    )
    image_names = ['/home/zhaokaiyue/Desktop/ss/13.png', '/home/zhaokaiyue/Desktop/ss/111.jpg']
    images = [cv2.imread(image) for image in image_names]
    images, detections,  = batch_detection(network, images, class_names,
                                           class_colors, batch_size=batch_size)
    # for name, image in zip(image_names, images):
    #     cv2.imwrite(name.replace("data/", ""), image)
    print(detections)


def throught_image_list():
    """
    Demo: run house detection over every image in a hard-coded folder and
    print the resulting {path: detections} dict.
    """
    imagepath = "/home/zhaokaiyue/Desktop/ss/resize-416"
    args = parser()
    if not os.path.exists(imagepath):
        print("image path is not exited!")
        return

    # Collect every jpg/png/jpeg directly inside the folder.
    image_list = [os.path.join(imagepath, child_image) for child_image in os.listdir(imagepath) if child_image.endswith((".jpg", ".png", ".jpeg"))]

    modelconfig = ModelConfig(args)
    modelconfig.init_model()

    recognized_house_dict = modelconfig.recogniezed_house(image_list)
    print(recognized_house_dict)


class HandleData:
    """
    Post-processing helpers for detection boxes.

    1. conv_four_location: convert (x, y, w, h) into four corner points
    2. transform_coordinates: move the origin to the bottom-left corner
    3. change_single_box_size: rescale a box
    4. nms: drop boxes contained in one another
    """

    def conv_four_location(self, detection_location):
        """
        Convert a center/size box into its four corner points.

        :param detection_location: [x, y, w, h] (center point plus size)
        :return: [top-left, top-right, bottom-right, bottom-left] tuples
        :raises ValueError: when the input is not a 4-element list
        """
        if not isinstance(detection_location, list):
            raise ValueError("recognition result:{} is wrong!".format(detection_location))
        if not len(detection_location) == 4:
            # Fixed message: the expected length is four, not three.
            raise ValueError("result is :{} ,result should be four!".format(detection_location))

        center_x, center_y, box_w, box_h = detection_location
        left = round(center_x - box_w / 2)
        right = round(center_x + box_w / 2)
        top = round(center_y - box_h / 2)
        bottom = round(center_y + box_h / 2)

        # Order: top-left, top-right, bottom-right, bottom-left.
        return [(left, top), (right, top), (right, bottom), (left, bottom)]

    def transform_coordinates(self, location, image):
        """
        Convert corner points from a top-left origin to a bottom-left origin.

        :param location: [top-left, top-right, bottom-right, bottom-left]
        :param image: path of the image the points belong to (read only to
                      obtain its height)
        :return: [[x, height - y], ...] in the same corner order
        """
        height = cv2.imread(image).shape[0]
        return [[point_x, height - point_y] for point_x, point_y in location]

    def change_single_box_size(self, box, rate_w, rate_h):
        """
        Rescale an (x, y, w, h) box.

        :param box: [x, y, w, h]; entries may be numeric strings
        :param rate_w: horizontal scale factor
        :param rate_h: vertical scale factor
        :return: rescaled box as a list of ints
        """
        return [int(int(box[0]) * rate_w), int(int(box[1]) * rate_h),
                int(int(box[2]) * rate_w), int(int(box[3]) * rate_h)]

    def nms(self, bboxes):
        """
        Non-maximum suppression that removes boxes contained in one another.

        A pair counts as containment when the IoU equals the area ratio of
        the two boxes; only boxes not in a containment relation are kept.

        :param bboxes: candidate boxes as (center_x, center_y, w, h)
        :return: list of kept boxes (numpy rows)
        """
        if len(bboxes) == 0:
            # Bug fix: always return one list — the previous ``[], []``
            # shape differed from the non-empty return value.
            return []
        bboxes = np.array(bboxes)

        center_x = bboxes[:, 0]
        center_y = bboxes[:, 1]
        w = bboxes[:, 2]
        h = bboxes[:, 3]

        # Corner coordinates, clamped at zero.
        x1 = np.maximum(0.0, center_x - (w / 2))
        y1 = np.maximum(0.0, center_y - (h / 2))
        x2 = np.maximum(0.0, center_x + (w / 2))
        y2 = np.maximum(0.0, center_y + (h / 2))

        picked_boxes = []
        order = np.arange(len(bboxes))
        areas = (x2 - x1) * (y2 - y1)
        while order.size > 0:
            # Keep the last remaining candidate and test it against the rest.
            index = order[-1]
            picked_boxes.append(bboxes[index])

            # Intersection of the current box with every other candidate.
            x11 = np.maximum(x1[index], x1[order[:-1]])
            y11 = np.maximum(y1[index], y1[order[:-1]])
            x22 = np.minimum(x2[index], x2[order[:-1]])
            y22 = np.minimum(y2[index], y2[order[:-1]])
            inter_w = np.maximum(0.0, x22 - x11)
            inter_h = np.maximum(0.0, y22 - y11)
            intersection = inter_w * inter_h

            rate = areas[index] / areas[order[:-1]]
            rate1 = areas[order[:-1]] / areas[index]
            # IoU; equality with rate/rate1 indicates a containment relation.
            ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
            keep = np.where(ratio != rate)[0]
            keep1 = np.where(ratio != rate1)[0]

            # Bug fix: compare the number of kept indices. The original
            # compared ``len()`` of the tuple np.where returns (always 1),
            # so its first branch was unreachable.
            if len(keep) < len(keep1):
                order = order[keep]
            else:
                order = order[keep1]

        return picked_boxes


class ModelConfig:
    """
    Wrap darknet model configuration, loading, and inference.

    Construct from parsed CLI args, call ``init_model`` once to load the
    network, then ``recogniezed_house`` to detect houses on image paths.
    """

    def __init__(self, args):
        # Paths and inference settings copied from the parsed CLI args.
        self.config_file = args.config_file
        self.data_file = args.data_file
        self.weights = args.weights
        self.batch_size = args.batch_size
        self.thresh = args.thresh
        self.nms = args.nms
        self.input = args.input

        # Populated by init_model().
        self.network = None
        self.class_names = None
        self.class_colors = None

    def init_model(self):
        """Load the darknet network plus class names and box colors."""
        self.network, self.class_names, self.class_colors = darknet.load_network(
            self.config_file,
            self.data_file,
            self.weights,
            batch_size=self.batch_size
        )

    def recogniezed_house(self, image_list):
        """
        Run house detection over a list of image paths.

        :param image_list: paths to process; only used when ``self.input``
                           is truthy, otherwise paths are read from stdin
        :return: {image path: detections}, where each detection is a
                 (label, confidence, bbox) tuple from darknet
        """
        # ``self`` exposes the same attributes the argument checker reads.
        check_arguments_errors(self)

        random.seed(3)  # deterministic bbox colors
        index = 0
        temp_image_dict = {}
        image_index = 0
        while True:
            # loop asking for new image paths if no list is given
            if self.input:
                if index >= len(image_list):
                    break
                image_name = image_list[index]
            else:
                image_name = input("Enter Image Path: ")

            image, detections = image_detection(
                image_name, self.network, self.class_names, self.class_colors, self.thresh, self.nms
            )
            # darknet.print_detections(detections, args.ext_output)
            # fps = int(1/(time.time() - prev_time))
            # print("FPS: {}".format(fps))

            # Draw boxes (debugging aid, kept disabled):
            # image_index += 1
            # image = darknet.draw_boxes(detections, image, class_colors)
            # cv2.imwrite("/home/zhaokaiyue/Desktop/"+str(image_index)+".jpg", image)
            #
            temp_image_dict[image_name] = detections
            index += 1
        return temp_image_dict


def _collect_house_locations(satellite_picpath_list, house_union_dict, lasty, house_union_relate):
    """
    Map per-tile house detections into one global coordinate list.

    Detections come from 416x416 tiles that are upscaled to 800x800; each
    corner point is shifted by the tile's grid position and flipped to a
    bottom-left origin.

    :param satellite_picpath_list: 2D list of tile image paths
    :param house_union_dict: {tile path: detections} from the model
    :param lasty: vertical offset (pixels) subtracted from every y value
    :param house_union_relate: output list, appended to in place so partial
                               results survive an exception in the caller
    :return: the same ``house_union_relate`` list
    """
    handler = HandleData()
    for hor_index, horizontal_satellite in enumerate(satellite_picpath_list):
        ver_length = len(horizontal_satellite)
        for ver_index, vertical_satellite in enumerate(horizontal_satellite):
            if not os.path.exists(vertical_satellite):
                print("picpath:{} is not exited!".format(vertical_satellite))
                continue
            print(vertical_satellite)
            single_image_house_union = house_union_dict[vertical_satellite]
            if len(single_image_house_union) == 0:
                continue

            # Pull the (x, y, w, h) column out of the (label, conf, bbox) rows.
            location_array = np.array(single_image_house_union)[:, 2]
            remove_nest_house_data = [list(data) for data in location_array]
            # Drop overlapping / nested detections.
            single_image_house_union = handler.nms(remove_nest_house_data)

            for single_house_location in single_image_house_union:
                temp = []
                # Scale the 416-pixel detection back to the 800-pixel tile.
                rate_w = rate_h = 800 / 416
                box = handler.change_single_box_size(single_house_location, rate_w, rate_h)
                # Center/size box -> four corner points -> bottom-left origin.
                four_point_location = handler.conv_four_location(box)
                boxx = handler.transform_coordinates(four_point_location, vertical_satellite)

                for location in boxx:
                    house_relate_x, house_relate_y = location
                    single_house_relate = (house_relate_x + hor_index * 800,
                                           house_relate_y + (ver_length - 1 - ver_index) * 800 - lasty)
                    temp.append(copy.deepcopy(single_house_relate))

                house_union_relate.append(temp.copy())
    return house_union_relate


def main():
    """
    Entry point: load the YOLO network once, then serve recognition
    requests over a TCP socket.

    Each client message is a Python-literal dict with keys
    ``electronicsPath`` (str) and ``satellitePath`` (a dict holding
    ``satellitePathList``, ``lastX`` and ``lastY``). The reply is a JSON
    object ``{"house": ..., "roads": ...}`` terminated by a newline.
    """
    import ast  # stdlib; used to parse client payloads safely

    args = parser()

    try:
        modelconfig = ModelConfig(args)
        modelconfig.init_model()
        print("init network succeed!")

        socketserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        host, port = args.host, args.port
        socketserver.bind((host, port))
        socketserver.listen(5)

    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('waiting connectiion.....')

    while True:
        clientsocket, addr = socketserver.accept()
        try:
            recvmsg = clientsocket.recv(1024)
            if not recvmsg:
                print("client has lost...")
                continue

            strData = recvmsg.decode("utf-8")
            # Security fix: ast.literal_eval only accepts Python literals;
            # eval() would execute arbitrary code sent by the client.
            try:
                strData_dic = ast.literal_eval(strData)
            except (ValueError, SyntaxError):
                print("received data is error!")
                continue
            print(strData_dic)  # debug output
            if not isinstance(strData_dic, dict):
                print("received data is error!")
                continue
            # Bug fix: BOTH keys are required below, so reject the request
            # when either is missing — the original ``and`` let partially
            # formed dicts through and crashed on the following lookups.
            if "electronicsPath" not in strData_dic or "satellitePath" not in strData_dic:
                print("keys is not in cluded!")
                continue

            electronic_picpath_str = strData_dic["electronicsPath"]
            satellite_picpath_dict = strData_dic["satellitePath"]
            print("satellite_picpath_dict:{}".format(satellite_picpath_dict))

            satellite_picpath_list = satellite_picpath_dict["satellitePathList"]
            lastx = int(satellite_picpath_dict["lastX"])  # currently unused
            lasty = int(satellite_picpath_dict["lastY"])

            if not isinstance(electronic_picpath_str, str) or not isinstance(satellite_picpath_list, list):
                print("satellite_picpath_list is not right!")
                continue

            # Flatten the tile grid; skip the request when it is empty.
            data_list = []
            for ver_data in satellite_picpath_list:
                data_list.extend(ver_data)
            if len(data_list) == 0:
                continue

            # Detect houses on every tile, then map them into one global
            # coordinate system.
            house_union_dict = modelconfig.recogniezed_house(data_list)
            house_union_relate = []
            try:
                _collect_house_locations(satellite_picpath_list, house_union_dict,
                                         lasty, house_union_relate)
            except Exception as e:
                print(e)
            finally:
                # Recognize roads on the electronic map and reply with both
                # results as one newline-terminated JSON document.
                road_union = road_recognization(electronic_picpath_str, args)
                json_data = {"house": house_union_relate, "roads": road_union}
                json_data = json.dumps(json_data, cls=MyEncoder)
                clientsocket.send((str(json_data)+'\n').encode("utf-8"))
                print("send over!")
        finally:
            # Resource fix: always release the per-connection socket.
            clientsocket.close()

    socketserver.close()


if __name__ == "__main__":
    # Uncomment the next line for an example of batch processing.
    # batch_detection_example()
    main()
    # throught_image_list()

