import rospy
from sensor_msgs.msg import Image
#import cv2
from cv_bridge import CvBridge
#!/usr/bin/env python3
import logging
import threading
import os
import os.path
import sys
from collections import deque
from argparse import ArgumentParser, SUPPRESS
from math import exp as exp
from time import perf_counter
from enum import Enum
import time
from std_msgs.msg import String,Int32,Int32MultiArray,MultiArrayLayout,MultiArrayDimension
#import cv2
import numpy as np
from openvino.inference_engine import IECore
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'common'))
import monitors
from ruitu_pkg.msg import Alarm
import datetime
from std_msgs.msg import Float32
from ruitu_pkg.msg import robot_position
#from IntList.msg import IntList
import base64
import urllib
import _thread
import math
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()

sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'common'))
import monitors
#ros_path="/opt/ros/kinetic/lib/python2.7/dist-packages"
#if ros_path in sys.path:
#    sys.path.remove(ros_path)
#sys.path.append("/opt/intel/openvino_2020.4.287/python/python3")
sys.path.insert(0,"/opt/intel/openvino_2020.4.287/python/python3")
import cv2

class YoloParams:
    """Per-output-layer YOLO region parameters extracted from an IR layer's attributes.

    Missing attributes fall back to the standard YOLO defaults (the "magic
    numbers" from the reference yolo samples).
    """

    def __init__(self, param, side):
        # Scalar attributes with their conventional defaults.
        self.num = int(param.get('num', 3))
        self.coords = int(param.get('coords', 4))
        self.classes = int(param.get('classes', 80))
        self.side = side

        # Anchor boxes: parse from the layer attribute when present, otherwise
        # use the canonical 9-anchor (18-value) YOLO set.
        if 'anchors' in param:
            self.anchors = [float(a) for a in param['anchors'].split(',')]
        else:
            self.anchors = [10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0,
                            62.0, 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0,
                            373.0, 326.0]

        self.isYoloV3 = False

        mask_attr = param.get('mask')
        if mask_attr:
            # A mask selects which anchor pairs this output layer uses.
            mask = [int(idx) for idx in mask_attr.split(',')]
            self.num = len(mask)
            self.anchors = [coord
                            for idx in mask
                            for coord in (self.anchors[idx * 2], self.anchors[idx * 2 + 1])]

            self.isYoloV3 = True  # Weak way to determine but the only one.


class Modes(Enum):
    """Inference scheduling modes the pipeline can switch between."""
    # USER_SPECIFIED: throughput-oriented configuration (config_user_specified);
    # MIN_LATENCY: single-CPU-stream configuration (config_min_latency).
    USER_SPECIFIED = 0
    MIN_LATENCY = 1


class Mode():
    """Mutable holder for the currently selected Modes member."""

    def __init__(self, value):
        # `value` is a Modes member, e.g. Modes.USER_SPECIFIED.
        self.current = value

    def next(self):
        """Advance to the next mode, wrapping back to the first after the last."""
        self.current = Modes((self.current.value + 1) % len(Modes))


class ModeInfo():
    """Per-mode runtime statistics for FPS/latency reporting."""

    def __init__(self):
        self.last_start_time = perf_counter()  # when this mode became active
        self.last_end_time = None              # set once the mode finishes
        self.frames_count = 0                  # frames processed under this mode
        self.latency_sum = 0                   # accumulated per-frame latency


def scale_bbox(x, y, height, width, class_id, confidence, im_h, im_w, is_proportional):
    """Convert a normalized YOLO box (center x/y, width/height in [0,1]) to pixel corners.

    When `is_proportional` is set, the coordinates are first mapped back from
    letterboxed (aspect-preserving) space into the original image space.
    Returns a dict with xmin/xmax/ymin/ymax pixel ints plus class_id/confidence.
    """
    if is_proportional:
        # Undo the letterbox: rescale each axis and remove the centering offset.
        scale_x = min(im_w / im_h, 1)
        scale_y = min(im_h / im_w, 1)
        x = (x - 0.5 * (1 - scale_x)) / scale_x
        y = (y - 0.5 * (1 - scale_y)) / scale_y
        width = width / scale_x
        height = height / scale_y
    xmin = int((x - width / 2) * im_w)
    ymin = int((y - height / 2) * im_h)
    xmax = int(xmin + width * im_w)
    ymax = int(ymin + height * im_h)
    # .item() converts NumPy scalars to native Python types for compatibility
    # with functions that reject them (e.g. cv2.rectangle and int64 colors).
    return {
        'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax,
        'class_id': class_id.item(), 'confidence': confidence.item(),
    }


def parse_yolo_region(predictions, resized_image_shape, original_im_shape, params, threshold, is_proportional):
    """Decode one YOLO region output blob into a list of candidate detection dicts.

    `predictions` is an NCHW blob; `params` carries the layer's anchors/classes.
    Boxes whose objectness or best-class confidence falls below `threshold`
    are skipped. Returns the output of scale_bbox for each surviving box.
    """
    # The region output must be square (side x side cells).
    _, _, out_blob_h, out_blob_w = predictions.shape
    assert out_blob_w == out_blob_h, "Invalid size of output blob. It sould be in NCHW layout and height should " \
                                     "be equal to width. Current height = {}, current width = {}" \
                                     "".format(out_blob_h, out_blob_w)

    orig_im_h, orig_im_w = original_im_shape
    resized_image_h, resized_image_w = resized_image_shape
    detections = []
    # YOLOv3 normalizes box sizes by the input shape; older YOLOs by the grid side.
    normalizer = (resized_image_w, resized_image_h) if params.isYoloV3 else (params.side, params.side)
    stride = params.coords + 1 + params.classes
    for row in range(params.side):
        for col in range(params.side):
            for n in range(params.num):
                # Raw values for one anchor's bounding box at this grid cell.
                raw = predictions[0, n * stride:(n + 1) * stride, row, col]
                x, y, width, height, object_probability = raw[:5]
                class_probabilities = raw[5:]
                if object_probability < threshold:
                    continue
                # Cell-relative center -> image-relative center.
                x = (col + x) / params.side
                y = (row + y) / params.side
                # exp can overflow for large raw values; skip those boxes.
                try:
                    width = exp(width)
                    height = exp(height)
                except OverflowError:
                    continue
                width = width * params.anchors[2 * n] / normalizer[0]
                height = height * params.anchors[2 * n + 1] / normalizer[1]

                class_id = np.argmax(class_probabilities)
                confidence = class_probabilities[class_id] * object_probability
                if confidence < threshold:
                    continue
                detections.append(scale_bbox(x=x, y=y, height=height, width=width,
                                             class_id=class_id, confidence=confidence,
                                             im_h=orig_im_h, im_w=orig_im_w,
                                             is_proportional=is_proportional))
    return detections


def intersection_over_union(box_1, box_2):
    """Compute IoU of two boxes given as dicts with xmin/xmax/ymin/ymax keys."""
    overlap_w = min(box_1['xmax'], box_2['xmax']) - max(box_1['xmin'], box_2['xmin'])
    overlap_h = min(box_1['ymax'], box_2['ymax']) - max(box_1['ymin'], box_2['ymin'])
    # Negative extents mean the boxes do not intersect at all.
    overlap = overlap_w * overlap_h if overlap_w >= 0 and overlap_h >= 0 else 0
    area_1 = (box_1['ymax'] - box_1['ymin']) * (box_1['xmax'] - box_1['xmin'])
    area_2 = (box_2['ymax'] - box_2['ymin']) * (box_2['xmax'] - box_2['xmin'])
    union = area_1 + area_2 - overlap
    # Guard against two degenerate (zero-area) boxes.
    return overlap / union if union != 0 else 0


def resize(image, size, keep_aspect_ratio, interpolation=cv2.INTER_LINEAR):
    """Resize `image` to `size` (w, h); optionally letterbox to keep the aspect ratio.

    With `keep_aspect_ratio`, the image is scaled to fit inside `size` and
    centered on a gray (value 128) canvas, as YOLO letterboxing expects.
    """
    if not keep_aspect_ratio:
        return cv2.resize(image, size, interpolation=interpolation)

    src_h, src_w = image.shape[:2]
    target_w, target_h = size
    ratio = min(target_w / src_w, target_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    scaled = cv2.resize(image, (new_w, new_h), interpolation=interpolation)
    # Gray canvas at the full target size; paste the scaled image centered.
    canvas = np.full((target_h, target_w, 3), 128, dtype=np.uint8)
    x0 = (target_w - new_w) // 2
    y0 = (target_h - new_h) // 2
    canvas[y0:y0 + new_h, x0:x0 + new_w, :] = scaled
    return canvas


def preprocess_frame(frame, input_height, input_width, nchw_shape, keep_aspect_ratio):
    """Resize a frame to the network input size and add a leading batch dimension."""
    blob = resize(frame, (input_width, input_height), keep_aspect_ratio)
    if nchw_shape:
        # Reorder HWC -> CHW for channels-first networks.
        blob = blob.transpose((2, 0, 1))
    return blob[np.newaxis, ...]


def get_objects(output, net, new_frame_height_width, source_height_width, prob_threshold, is_proportional):
    """Decode every YOLO output layer in `output` into a flat list of detections."""
    detections = []

    for layer_name, out_blob in output.items():
        # Reshape the flat buffer to the producing layer's declared output shape.
        parent_name = net.layers[layer_name].parents[0]
        shaped = out_blob.buffer.reshape(net.layers[parent_name].out_data[0].shape)
        layer_params = YoloParams(net.layers[layer_name].params, shaped.shape[2])
        detections.extend(parse_yolo_region(shaped, new_frame_height_width,
                                            source_height_width, layer_params,
                                            prob_threshold, is_proportional))

    return detections


def filter_objects(objects, iou_threshold, prob_threshold):
    """Greedy non-maximum suppression followed by a confidence cut.

    Boxes are ranked by confidence; any lower-ranked box overlapping a kept
    box by more than `iou_threshold` has its confidence zeroed (the input
    dicts are mutated in place). Returns the survivors as a tuple.
    """
    ranked = sorted(objects, key=lambda det: det['confidence'], reverse=True)
    for idx, det in enumerate(ranked):
        if det['confidence'] == 0:
            continue
        for other in ranked[idx + 1:]:
            if intersection_over_union(det, other) > iou_threshold:
                other['confidence'] = 0

    return tuple(det for det in ranked if det['confidence'] >= prob_threshold)


def async_callback(status, callback_args):
    """Completion callback invoked by the IE runtime when an async infer request finishes.

    Records the request's outputs in `completed_request_results` keyed by
    frame id, recycles the request into `empty_requests` when the scheduling
    mode has not changed since submission, and wakes the waiting main loop via
    `event`. Errors are appended to `callback_exceptions` for the main thread
    to inspect, never raised out of the callback thread.
    """
    request, frame_id, frame_mode, frame, start_time, completed_request_results, empty_requests, \
    mode, event, callback_exceptions = callback_args
    # FIX: removed four leftover debug print("######") lines that spammed
    # stdout on every single inference completion.
    try:
        if status != 0:
            raise RuntimeError('Infer Request has returned status code {}'.format(status))

        completed_request_results[frame_id] = (frame, request.output_blobs, start_time, frame_mode == mode.current)

        # Only requeue the request if we are still in the mode it was issued for.
        if mode.current == frame_mode:
            empty_requests.append(request)
    except Exception as e:
        callback_exceptions.append(e)

    event.set()


def put_highlighted_text(frame, message, position, font_face, font_scale, color, thickness):
    """Draw `message` with a white outline so it stays readable on any background."""
    # First pass paints a one-pixel-thicker white border, second paints the text.
    for pass_color, pass_thickness in (((255, 255, 255), thickness + 1),
                                       (color, thickness)):
        cv2.putText(frame, message, position, font_face, font_scale, pass_color, pass_thickness)


def await_requests_completion(requests):
    """Block until every inference request in the iterable has finished."""
    for infer_request in requests:
        infer_request.wait()

#def tostring(threadName,delay):
    #os.system("cd /home/dji/catkin_trans/devel/lib/img2str && ./img2str")

#_thread.start_new_thread(tostring,("thread",2,))
# Create the OpenVINO Inference Engine core used for all network loading/inference.
ie = IECore()
#ie.add_extension(device_name="CPU")
print("启动ie推理器")
count=0
# Read the YOLOv3 IR network (topology .xml plus weights .bin).
# NOTE(review): hard-coded absolute paths — breaks on any other machine; confirm deployment layout.
net = ie.read_network(model = "/home/dji/openvino_yolov3/src/yolov3_crack_detection/scripts/frozen_darknet_yolov3_model.xml", weights="/home/dji/openvino_yolov3/src/yolov3_crack_detection/scripts/frozen_darknet_yolov3_model.bin")
print("读入神经网络")

# Three empty dictionaries: per-mode plugin configs plus stream counts.
config_user_specified = {}
config_min_latency = {}
devices_nstreams = {}

# Min-latency mode pins CPU execution to a single throughput stream.
config_min_latency['CPU_THROUGHPUT_STREAMS'] = '1'
#print(config_min_latency)

# Resolve the input blob name and the input height/width the network expects.
# Layout is inferred from where the 3-channel dim sits: NCHW when dim 1 == 3, else NHWC.
input_blob = next(iter(net.input_info))
#print(input_blob)
if net.input_info[input_blob].input_data.shape[1] == 3:
     input_height, input_width = net.input_info[input_blob].input_data.shape[2:]
     nchw_shape = True
     #print("#######################################")
else:
     input_height, input_width = net.input_info[input_blob].input_data.shape[1:3]
     nchw_shape = False
print("调整输入通道")

# Placeholder; the real label list is loaded from aa.txt below.
labels_map = None



#cap=cv2.VideoCapture("000001block.jpg")
wait_key_time = 1

print("加载神经网络")
# Load the network on CPU once per scheduling mode; exec_nets maps each Modes
# member (USER_SPECIFIED / MIN_LATENCY) to its own ExecutableNetwork with one
# inference request.
mode = Mode(Modes.USER_SPECIFIED)
exec_nets = {}
exec_nets[Modes.USER_SPECIFIED] = ie.load_network(network=net, device_name="CPU",
                                                  config=config_user_specified,
                                                  num_requests=1)
exec_nets[Modes.MIN_LATENCY] = ie.load_network(network=net, device_name="CPU",
                                               config=config_min_latency,
                                               num_requests=1)

print("准备就绪,开始推理")

bridge = CvBridge()
# Class label names, one per line in aa.txt.
with open("/home/dji/openvino_yolov3/src/yolov3_crack_detection/scripts/aa.txt", 'r') as f:
    labels_map = [x.strip() for x in f]

def yolo3_go(img):
    """Run one asynchronous YOLOv3 inference on `img` and return the annotated frame.

    Relies on module-level globals prepared at import time: exec_nets, mode,
    net, input_blob, input_height/input_width, nchw_shape, labels_map.
    Class-0 detections are drawn on the frame in green, and the annotated
    frame is also stored into the global result_image. Thresholds are
    hard-coded: objectness/probability 0.5, NMS IoU 0.4.
    """
    #path="/home/g/存根/tuxiang5/"+filename2
    #print("读入图像为",path)
    # Pool of free inference requests for the currently selected mode.
    print("执行了一次推理")
    empty_requests = deque(exec_nets[mode.current].requests)


    # Results produced by async_callback, keyed by frame id.
    completed_request_results = {}


    next_frame_id = 0
    next_frame_id_to_show = 0
    mode_info = { mode.current: ModeInfo() }
    event = threading.Event()   # signaled by async_callback on completion
    callback_exceptions = []    # errors captured inside the callback thread

    time1=time.time()
    start_time = perf_counter()
    frame=img
    #ret,frame = cap.read()
    # NOTE(review): `ret` is a leftover from the VideoCapture path and is unused.
    ret=1
    request = empty_requests.popleft()
    # Resize/transpose the frame into the network's input layout (no letterboxing).
    in_frame = preprocess_frame(frame, input_height, input_width, nchw_shape,False)
    request.set_completion_callback(py_callback=async_callback,
                                    py_data=(request,
                                             next_frame_id,
                                             mode.current,
                                             frame,
                                             start_time,
                                             completed_request_results,
                                             empty_requests,
                                             mode,
                                             event,
                                             callback_exceptions))


    # Kick off the asynchronous inference for this frame.
    request.async_infer(inputs={input_blob: in_frame})
    #print(in_frame.shape)



    #completed_request_results[frame_id] = (frame, request.output_blobs, start_time, frame_mode == mode.current)
    next_frame_id += 1
    time101=time.time()
    # Block until async_callback signals that the request has finished.
    event.wait()
    time102=time.time()
    print("等待时间",time102-time101)
    #print("completed_request_results",len(completed_request_results))

    time2=time.time()
    print("推理时间为",time2-time1)
    # Pop the completed result for this frame id.
    frame, output, start_time, is_same_mode = completed_request_results.pop(next_frame_id_to_show)


    next_frame_id_to_show += 1


    mode_info[mode.current].frames_count += 1
    # Decode the raw output blobs into boxes, then apply NMS-style filtering.
    objects = get_objects(output, net, (input_height, input_width), frame.shape[:-1], 0.5,
                          False)
    objects = filter_objects(objects, 0.4, 0.5)
    print("最终结果为",objects)
    time3=time.time()
    origin_im_size = frame.shape[:-1]
    #presenter.drawGraphs(frame)
    crack=[]                                # flat list of class-0 box coords/metadata
    for obj in objects:
        if obj['class_id'] == 0:
            # Clamp the detected bbox to the image bounds.
            obj['xmax'] = min(obj['xmax'], origin_im_size[1])
            obj['ymax'] = min(obj['ymax'], origin_im_size[0])
            obj['xmin'] = max(obj['xmin'], 0)
            obj['ymin'] = max(obj['ymin'], 0)
            # NOTE(review): for class_id 0 this color is always (0, 0, 0) and is
            # unused below — the drawing calls use hard-coded green.
            color = (min(obj['class_id'] * 12.5, 255),
                     min(obj['class_id'] * 7, 255),
                     min(obj['class_id'] * 5, 255))
            #det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
            #    str(obj['class_id'])
            """
            if args.raw_output_message:
                log.info(
                    "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".format(det_label, obj['confidence'],
                                                                          obj['xmin'], obj['ymin'], obj['xmax'],
                                                                          obj['ymax'],
                                                                          color))
            """
            det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                str(obj['class_id'])
            # Draw the label text and bounding box in green on the frame.
            cv2.putText(frame,
                        "#" + det_label + ' ' + str(round(obj['confidence'] * 100, 1)) + ' %',
                        (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX,2.0, (0,255,0), 4)
            cv2.rectangle(frame, (obj['xmin'], obj['ymin']), (obj['xmax'], obj['ymax']), (0,255,0), 10)
            #cv2.putText(frame,
            #            "#" + det_label + ' ' + str(round(obj['confidence'] * 100, 1)) + ' %',
            #            (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
            # Record corners, class and confidence for this detection.
            crack.append(obj['xmax'])
            crack.append(obj['ymax'])
            crack.append(obj['xmin'])
            crack.append(obj['ymin'])
            crack.append(obj["class_id"])
            crack.append(obj["confidence"])
    #cv2.namedWindow("img",0)
    #cv2.resizeWindow("img", 2000, 2000)
    #cv2.imshow('img',frame)
    #cv2.waitKey(300)
    # Expose the annotated frame via a module-level global.
    global result_image
    result_image = frame
    time4=time.time()
    ##############pub.publish(bridge.cv2_to_imgmsg(frame, "rgb8"))
    if objects !=():
        # Flatten all detections for the (currently disabled) ROS publisher.
        pub_error=[]
        for i in objects:
            pub_error.append(i["xmin"])
            pub_error.append(i["xmax"])
            pub_error.append(i["ymin"])
            pub_error.append(i["ymax"])
            pub_error.append(i["class_id"])
            pub_error.append(i["confidence"])
        # NOTE(review): built but never published — pub2.publish is commented out.
        my_array_for_publishing = Int32MultiArray(data=pub_error)
        #a.data=pub_error
        ######################pub2.publish(my_array_for_publishing)
    else:
        ######################pub3.publish("normal")
        # NOTE(review): this timing print only runs when nothing was detected.
        print("画图时间为",time4-time3)
    return frame



def line_detect(image):
    """Find horizontal white seam lines in `image` and bucket their endpoints by row band.

    The image is thresholded in HSV for white pixels, cleaned with a
    close/open morphology pass, edge-detected, and run through a
    probabilistic Hough transform. Endpoints of horizontal segments are
    grouped into three bands by their y coordinate: <1000 (up), 1000-2000
    (medium), 2000-3000 (down).

    Returns:
        (uplinex, upliney, mediumlinex, mediumliney, downlinex, downliney) —
        six lists of endpoint coordinates; all empty when nothing is detected.
    """
    # Convert to HSV so "white" can be expressed as low saturation / high value.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Threshold bounds for white-ish pixels.
    lowera = np.array([0, 0, 221])
    uppera = np.array([180, 30, 255])
    mask1 = cv2.inRange(hsv, lowera, uppera)
    kernel = np.ones((3, 3), np.uint8)

    # Morphological close (dilate then erode) followed by open (erode then
    # dilate) to remove speckle noise while keeping the line mask solid.
    mask = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    # Edge map feeding the Hough transform.
    edges = cv2.Canny(mask, 50, 150, apertureSize=3)
    # Debug preview window; requires a GUI session.
    cv2.namedWindow("edges", 0)
    cv2.resizeWindow("edges", 640, 480)
    cv2.imshow("edges", edges)
    # Probabilistic Hough transform tuned for short white segments.
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 40, minLineLength=10, maxLineGap=10)
    print("lines=", lines)
    print("========================================================")

    uplinex = []
    upliney = []
    mediumlinex = []
    mediumliney = []
    downlinex = []
    downliney = []
    # BUG FIX: HoughLinesP returns None (not an empty array) when no line is
    # found; iterating None raised TypeError and crashed the whole pipeline.
    if lines is None:
        return uplinex, upliney, mediumlinex, mediumliney, downlinex, downliney

    for i, line in enumerate(lines):
        print("line[" + str(i) + "]=", line)
        # Each Hough segment is defined by its two endpoints (x1,y1)-(x2,y2).
        x1, y1, x2, y2 = line[0]
        y1 = float(y1)
        y2 = float(y2)
        if x2 - x1 == 0:
            # Vertical segment. NOTE(review): `result` is computed but never
            # used or returned — kept for parity with the original code.
            result = 90
        elif y2 - y1 == 0:
            print("直线是水平的")
            print("x1=%s,x2=%s,y1=%s,y2=%s" % (x1, x2, y1, y2))
            result = 0
            # Bucket the horizontal segment's endpoints by row band.
            # NOTE(review): rows exactly at 1000/2000 fall through all buckets
            # — confirm whether that is intended.
            if y2 < 1000:
                uplinex.append(x1)
                upliney.append(y1)
                uplinex.append(x2)
                upliney.append(y2)
            if 1000 < y2 < 2000:
                mediumlinex.append(x1)
                mediumliney.append(y1)
                mediumlinex.append(x2)
                mediumliney.append(y2)
            if 2000 < y2 < 3000:
                downlinex.append(x1)
                downliney.append(y1)
                downlinex.append(x2)
                downliney.append(y2)
        else:
            # Slope (y axis flipped to image coordinates) -> tilt angle in degrees.
            k = -(y2 - y1) / (x2 - x1)
            result = np.arctan(k) * 57.29577
            #print("line tilt angle: " + str(result) + " deg")
    return uplinex, upliney, mediumlinex, mediumliney, downlinex, downliney


def linefit(x, y):
    """Ordinary least-squares fit of y = a*x + b over paired samples.

    Returns (a, b, r): slope, intercept, and the absolute value of the
    Pearson correlation coefficient.
    """
    n = len(x)
    sx = sum(x)
    sy = sum(y)
    sxx = sum(xi * xi for xi in x)
    syy = sum(yi * yi for yi in y)
    sxy = sum(xi * yi for xi, yi in zip(x, y))
    a = (sy * sx / n - sxy) / (sx * sx / n - sxx)
    b = (sy - a * sx) / n
    r = abs(sy * sx / n - sxy) / math.sqrt((sxx - sx * sx / n) * (syy - sy * sy / n))
    return a, b, r

# Sequence number used to name the stitched output images (1.jpg, 2.jpg, ...).
NA=1

# NOTE(review): `dir` shadows the builtin of the same name; rename on refactor.
dir = "/home/dji/openvino_yolov3/src/yolov3_crack_detection/scripts/crack/pic"

# Walk the crack-image directory and run the full pipeline on every file.
for root, dirs, files in os.walk(dir):
    for file in files:
        print(os.path.join(root,file))

        print("###############")

        crack_image=cv2.imread(os.path.join(root,file))
        #######################################################
        time_BG=time.time()

        #(b, g, r) = cv2.split(image)
        #bH = cv2.equalizeHist(b)
        #gH = cv2.equalizeHist(g)
        #rH = cv2.equalizeHist(r)
        # Merge the channels back together (histogram-equalization path, disabled).
        #crack_image = cv2.merge((bH, gH, rH))
        ######################################################
        # Detect the horizontal seam lines and fit a line through each band to
        # obtain the row boundaries b (top), b1 (middle), b2 (bottom).
        uplinex, upliney, mediumlinex, mediumliney, downlinex, downliney = line_detect(crack_image)
        a,b,r = linefit(uplinex,upliney)
        a1,b1,r1 = linefit(mediumlinex, mediumliney)
        a2,b2,r2 = linefit(downlinex, downliney)
        # Clamp fitted intercepts to plausible rows; the frame is assumed 3000x4096.
        if b<0:
            b=0
        b=b+100

        if b2>3000:
            b2=2700
        print(a,b,r)
        print(a1,b1,r1)
        print(a2,b2,r2)
        # Split the frame into 8 tiles: full-width top/bottom strips plus a
        # 2x3 grid (left / medium / right over two rows) between the seams.
        image_up = crack_image[0:int(b)]
        image_left = crack_image[int(b):int(b1),0:1500]
        image_medium = crack_image[int(b):int(b1),1500:2700]
        image_right = crack_image[int(b):int(b1),2700:4096]
        image_left2 = crack_image[int(b1):int(b2),0:1500]
        image_medium2 = crack_image[int(b1):int(b2),1500:2700]
        image_right2 = crack_image[int(b1):int(b2),2700:4096]
        image_down = crack_image[int(b2):3000]

        # Run crack detection on each of the six interior tiles.
        image_left_result = yolo3_go(image_left)
        image_medium_result = yolo3_go(image_medium)
        image_right_result = yolo3_go(image_right)
        image_left2_result = yolo3_go(image_left2)
        image_medium2_result = yolo3_go(image_medium2)
        image_right2_result = yolo3_go(image_right2)

        imagefinal=np.zeros((3000,4096,3),np.uint8)

        #print(image_right_result.shape())
        #print(image_right.shape())
        #cv2.imwrite("re2.jpg", image_left_result)
    #cv2.imwrite("re1.jpg", image_medium_result)
    #cv2.imwrite("re4.jpg", image_right_result)
    #cv2.imwrite("re5.jpg", image_left2_result)
    #cv2.imwrite("re3.jpg", image_medium2_result)
    #cv2.imwrite("re6.jpg", image_right2_result)


        # Reassemble the annotated tiles into a single full-resolution image.
        imagefinal[0:int(b)] = image_up
        imagefinal[int(b):int(b1),0:1500] = image_left_result
        imagefinal[int(b):int(b1),1500:2700] = image_medium_result
        imagefinal[int(b):int(b1),2700:4096] = image_right_result

        imagefinal[int(b1):int(b2),0:1500] = image_left2_result
        imagefinal[int(b1):int(b2),1500:2700] = image_medium2_result
        imagefinal[int(b1):int(b2),2700:4096] = image_right2_result
        imagefinal[int(b2):3000]=image_down

        time_fn=time.time()

        print(">>>>>>>>>>>>>>>>>> time all <<<<<<<<<<<<<<<<<<")
        print(time_fn - time_BG)
        print(">>>>>>>>>>>>>>>>>>#########<<<<<<<<<<<<<<<<<<")
        # NOTE(review): NA_STRING is an int; the str() below does the conversion.
        NA_STRING=NA
        cv2.imwrite(str(NA_STRING)+".jpg", imagefinal)
        NA=NA+1
    #img1=crack_image[500:1600,2000:4096]

    #obj=yolo3_go(img1)

    #obj=yolo3_go(crack_image)



