import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
import collections
import copy

def findNoneZero(rgb_image):
    """Count the pixels of a (rows, cols, channels) image whose channel
    values do not all sum to zero.

    Vectorised replacement for the original per-pixel Python double loop
    (O(rows*cols) interpreter iterations).  ``ndarray.sum`` promotes uint8
    to a wider integer before accumulating, so there is no wrap-around and
    the count matches the original loop exactly.
    """
    # Sum across the channel axis, then count positions with a non-zero sum.
    channel_sums = rgb_image.sum(axis=2)
    return int(np.count_nonzero(channel_sums))


def red_green_yellow(rgb_image, display):
    '''
    Determines the red, green and yellow content of a BGR image using HSV and
    experimentally determined thresholds, and returns the classification
    ('red', 'yellow' or 'green') with the largest count of matching pixels.
    Ties: red beats yellow and green; yellow beats green.

    Args:
        rgb_image: BGR image (despite the name -- it is converted with
            COLOR_BGR2HSV below).
        display: when truthy, show the intermediate masks with matplotlib.
    '''
    hsv = cv.cvtColor(rgb_image, cv.COLOR_BGR2HSV)

    sum_saturation = np.sum(hsv[:, :, 1])  # sum of the saturation channel
    # NOTE(review): area is hard-coded for 50x90 crops; any other input size
    # skews the average saturation -- confirm against the callers.
    area = 50 * 90
    avg_saturation = sum_saturation / area  # find average

    sat_low = int(avg_saturation * 1.3)  # 1.3x the mean saturation (empirical)
    val_low = 140

    # Green
    lower_green = np.array([70, sat_low, val_low])
    upper_green = np.array([100, 255, 255])
    green_mask = cv.inRange(hsv, lower_green, upper_green)
    green_result = cv.bitwise_and(rgb_image, rgb_image, mask=green_mask)

    # Yellow
    lower_yellow = np.array([20, sat_low, val_low])
    upper_yellow = np.array([40, 255, 255])
    yellow_mask = cv.inRange(hsv, lower_yellow, upper_yellow)
    yellow_result = cv.bitwise_and(rgb_image, rgb_image, mask=yellow_mask)

    # Red
    lower_red = np.array([150, sat_low, val_low])
    upper_red = np.array([180, 255, 255])
    red_mask = cv.inRange(hsv, lower_red, upper_red)
    red_result = cv.bitwise_and(rgb_image, rgb_image, mask=red_mask)

    if display:
        # Lazy import: matplotlib was referenced here but never imported at
        # module level, which made display=True raise NameError.
        import matplotlib.pyplot as plt
        _, ax = plt.subplots(1, 5, figsize=(20, 10))
        ax[0].set_title('rgb image')
        ax[0].imshow(cv.cvtColor(rgb_image, cv.COLOR_BGR2RGB))
        ax[1].set_title('red result')
        ax[1].imshow(red_result)
        ax[2].set_title('yellow result')
        ax[2].imshow(yellow_result)
        ax[3].set_title('green result')
        ax[3].imshow(green_result)
        ax[4].set_title('hsv image')
        ax[4].imshow(hsv)
        plt.show()

    # Classify by whichever mask kept the most non-black pixels.
    sum_green = findNoneZero(green_result)
    sum_red = findNoneZero(red_result)
    sum_yellow = findNoneZero(yellow_result)
    if sum_red >= sum_yellow and sum_red >= sum_green:
        return 'red'
    if sum_yellow >= sum_green:
        return 'yellow'
    return 'green'


def show_image(img):
    """Display ``img`` in a blocking window; any key press dismisses it."""
    window_title = "Image"
    cv.imshow(window_title, img)
    cv.waitKey(0)

def getColorList(sat_low, val_low):
    """Build the HSV threshold table used by get_color().

    Each entry maps a color name to ``[lower, upper]`` HSV bound arrays.  Red
    needs two ranges ('红色' and 'red2') because red hue wraps around 0/180 in
    OpenCV's HSV representation.

    Args:
        sat_low: lower saturation bound (typically adaptive; see get_color()).
        val_low: lower value/brightness bound.

    Returns:
        collections.defaultdict(list) preserving insertion order.  The keys
        are the exact classification labels get_color() returns.
    """
    # (name, lower HSV, upper HSV).  Do NOT rename the keys -- they are the
    # labels get_color() hands back to its callers.
    ranges = [
        ('红色', [156, sat_low, val_low], [180, 255, 255]),  # red (high hue)
        ('red2', [0, sat_low, val_low], [10, 255, 255]),     # red (hue wrap)
        ('黄色', [20, sat_low, val_low], [40, 255, 255]),    # yellow
        ('绿色', [50, sat_low, val_low], [100, 255, 255]),   # green
    ]
    # Keep the defaultdict return type of the original implementation, but
    # avoid shadowing the builtin `dict` and the copy-pasted append blocks.
    color_table = collections.defaultdict(list)
    for name, lower, upper in ranges:
        color_table[name] = [np.array(lower), np.array(upper)]
    return color_table


def get_color(frame):
    """Classify the dominant color of a BGR frame (e.g. a traffic-light crop).

    Thresholds the frame against every HSV range from getColorList(), dilates
    each mask and sums its external-contour areas; the color whose mask covers
    the largest area wins.

    Returns:
        One of the getColorList() keys, or '黑色' ("black") when no range
        matched any contour.
    """
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # Saturation-adaptive lower bound: 1.3x the mean saturation (empirical),
    # computed over the actual frame area.
    sum_saturation = np.sum(hsv[:, :, 1])
    area = frame.shape[0] * frame.shape[1]
    avg_saturation = sum_saturation / area
    sat_low = int(avg_saturation * 1.3)
    val_low = 140

    score = 0
    traffic_color = '黑色'  # fallback: nothing detected
    for color_name, (lower, upper) in getColorList(sat_low, val_low).items():
        mask = cv.inRange(hsv, lower, upper)
        binary = cv.threshold(mask, 127, 255, cv.THRESH_BINARY)[1]
        binary = cv.dilate(binary, None, iterations=2)
        # NOTE(review): two-value unpacking assumes OpenCV 4.x findContours.
        cnts, _ = cv.findContours(binary.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        total_area = sum(cv.contourArea(c) for c in cnts)
        # Strict > keeps the first color on ties, matching insertion order.
        if total_area > score:
            score = total_area
            traffic_color = color_name
    return traffic_color


def draw_labels_and_boxes(img, detected_objects, colors, labels):
    """Draw one labelled rectangle per detection onto ``img`` (in place).

    Each element of ``detected_objects`` is a dict with 'class_id',
    'confidence' and 'box' (x, y, w, h); traffic-light detections may carry an
    extra 'color' entry that is appended to the caption.  Returns ``img``.
    """
    # An empty list simply draws nothing; no explicit length guard needed.
    for obj_info in detected_objects:
        class_id = obj_info['class_id']
        confidence = obj_info['confidence']
        x, y, w, h = obj_info['box']

        # One stable color per class.
        box_color = [int(c) for c in colors[class_id]]

        cv.rectangle(img, (x, y), (x + w, y + h), box_color, 2)

        caption = f"{labels[class_id]}: {confidence:.2f}"
        # Append the light color for traffic lights when it was classified.
        if labels[class_id] == 'traffic light' and 'color' in obj_info:
            caption += f" ({obj_info['color']})"

        cv.putText(img, caption, (x, y-5), cv.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)
    return img


def get_outputs_names(net):
    """Return the names of the network's output layers.

    Output layers are those with unconnected outputs; OpenCV reports their
    indices 1-based, hence the ``- 1`` when indexing the name list.
    """
    all_names = net.getLayerNames()
    out_names = []
    for layer_idx in net.getUnconnectedOutLayers():
        out_names.append(all_names[layer_idx - 1])
    return out_names

def generate_boxes_confidences_classids(outs, height, width, tconf):
    """Convert raw YOLO output vectors into pixel-space boxes, scores and ids.

    Each detection vector is [cx, cy, w, h, objectness, class scores...]; the
    objectness value (index 4) is not used.  Coordinates are normalised, so
    they are scaled by the image size and converted to a top-left-corner box.

    Returns:
        (boxes, confidences, classids) -- parallel lists, one entry per
        detection whose best class score exceeds ``tconf``.
    """
    boxes = []
    confidences = []
    classids = []

    # Scale factor from normalised coords to pixels, hoisted out of the loop.
    scale = np.array([width, height, width, height])

    for layer_out in outs:
        for detection in layer_out:
            class_scores = detection[5:]
            best_class = np.argmax(class_scores)
            best_score = class_scores[best_class]

            # Guard clause: skip weak detections.
            if best_score <= tconf:
                continue

            center_x, center_y, box_w, box_h = (detection[0:4] * scale).astype('int')
            # Center coords -> top-left corner.
            top_left_x = int(center_x - (box_w / 2))
            top_left_y = int(center_y - (box_h / 2))

            boxes.append([top_left_x, top_left_y, int(box_w), int(box_h)])
            confidences.append(float(best_score))
            classids.append(best_class)

    return boxes, confidences, classids

def infer_image(net, layer_names, height, width, img, colors, labels, infer=True):
    """Run one YOLO forward pass over ``img`` and post-process the detections.

    Args:
        net: cv.dnn network.
        layer_names: output layer names (see get_outputs_names()).
        height, width: original image size, used to rescale the boxes.
        img: input BGR image.
        colors, labels: unused here; kept for signature compatibility.
        infer: when False nothing is computed and ValueError is raised.

    Returns:
        (img, boxes, confidences, classids, idxs) where ``idxs`` are the
        indices surviving non-maxima suppression.

    Raises:
        ValueError: if the detection variables were never produced.
    """
    # Initialise so the sanity check below fails cleanly instead of raising
    # NameError when infer is False (the original referenced these names
    # before assignment in that path).
    boxes = confidences = classids = idxs = None

    if infer:
        # Constructing a blob from the input image (YOLO's 416x416, RGB).
        blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416),
                        swapRB=True, crop=False)

        # Perform a forward pass of the YOLO object detector.
        net.setInput(blob)
        outs = net.forward(layer_names)

        # Generate the boxes, confidences, and classIDs.
        boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, 0.5)

        # Apply Non-Maxima Suppression to suppress overlapping bounding boxes.
        idxs = cv.dnn.NMSBoxes(boxes, confidences, 0.2, 0.2)

    if boxes is None or confidences is None or idxs is None or classids is None:
        # The original `raise '<str>'` is invalid in Python 3 (it raises
        # TypeError instead of the intended message).
        raise ValueError('[ERROR] Required variables are set to None before drawing boxes on images.')

    return img, boxes, confidences, classids, idxs

def infer_image_feature(net, layer_names, height, width, img, colors, labels, feature=None, infer=True):
    """Like infer_image(), but also extracts a feature map when the network
    exposes one as an extra (fourth) output.

    Args:
        feature: passed through unchanged when the network does not provide
            more than three outputs.
        infer: when False nothing is computed and ValueError is raised.

    Returns:
        (img, boxes, confidences, classids, idxs, feature).

    Raises:
        ValueError: if the detection variables were never produced.
    """
    # Initialise so the sanity check below fails cleanly instead of raising
    # NameError when infer is False (the original referenced these names
    # before assignment in that path).
    boxes = confidences = classids = idxs = None

    if infer:
        # Constructing a blob from the input image (YOLO's 416x416, RGB).
        blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416),
                        swapRB=True, crop=False)

        # Perform a forward pass of the YOLO object detector.
        net.setInput(blob)
        outs = net.forward(layer_names)

        # More than three outputs means the last one is a feature tensor and
        # only the preceding ones are detection layers.
        if len(outs) > 3:
            feature = outs[-1]
            outs = outs[0:-1]
            # Take the middle slice along axis 1 as the feature representation
            # (a mean over that axis was tried previously and abandoned).
            feature = feature[0, int(feature.shape[1] / 2), :, :]

        # Generate the boxes, confidences, and classIDs.
        boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, 0.5)

        # Apply Non-Maxima Suppression to suppress overlapping bounding boxes.
        idxs = cv.dnn.NMSBoxes(boxes, confidences, 0.2, 0.3)

    if boxes is None or confidences is None or idxs is None or classids is None:
        # The original `raise '<str>'` is invalid in Python 3 (it raises
        # TypeError instead of the intended message).
        raise ValueError('[ERROR] Required variables are set to None before drawing boxes on images.')

    return img, boxes, confidences, classids, idxs, feature





def light_flow(prev_rgb,current_rgb, show_hsv=1, compress=1, step=10):
    """Visualise dense (Farneback) optical flow between two consecutive frames.

    Draws sparse flow vectors onto ``current_rgb`` (mutated in place when
    ``compress`` is falsy) and optionally builds an HSV-encoded flow image.
    Despite the ``_rgb`` names, the frames are converted with COLOR_BGR2GRAY,
    so they are assumed to be BGR -- TODO confirm against the callers.

    Args:
        prev_rgb: previous frame.
        current_rgb: current frame; flow vectors are drawn onto it.
        show_hsv: when truthy, also return the flow rendered as an HSV image
            (hue = direction, value = magnitude).
        compress: when truthy, process at half resolution and upscale the
            annotated result back to (roughly) the original size.
        step: pixel spacing of the sampled flow-vector grid.

    Returns:
        (shown_img, flow_rgb) -- the annotated frame and the HSV flow
        visualisation (None when show_hsv is falsy).
    """
    flow_rgb=None

    # Optionally halve the resolution to speed up the flow computation.
    prev_rgb = cv.resize(prev_rgb,
                         (int(prev_rgb.shape[1] / 2), int(prev_rgb.shape[0] / 2))) if compress else prev_rgb
    prev = cv.cvtColor(prev_rgb, cv.COLOR_BGR2GRAY)
    current_rgb = cv.resize(current_rgb,
                            (int(current_rgb.shape[1] / 2),
                             int(current_rgb.shape[0] / 2))) if compress else current_rgb
    current = cv.cvtColor(current_rgb, cv.COLOR_BGR2GRAY)
    # HSV canvas for the optional flow rendering; full saturation throughout.
    hsv = np.zeros_like(current_rgb)
    hsv[..., 1] = 255
    # Dense flow field, one (dx, dy) per pixel.
    flow = cv.calcOpticalFlowFarneback(prev, current, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # flow = cv.calcOpticalFlowFarneback(prvs,current,None,0.5,3,15,3,5,1.2,0)
    mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
    h, w = current.shape[:2]
    # Sample the flow on a regular grid, `step` pixels apart.
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    # Each entry: [[x, y], [x + fx, y + fy]] -- a start/end segment.
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines)
    line = []
    # NOTE(review): this keeps only vectors whose signed displacement is more
    # than 3 px leftward or upward (start - end > 3); abs() may have been
    # intended to keep large motions in any direction -- confirm.
    for l in lines:
        if l[0][0] - l[1][0] > 3 or l[0][1] - l[1][1] > 3:
            line.append(l)
    cv.polylines(current_rgb, line, 0, (0, 255, 255), 1)
    shown_img = current_rgb

    if show_hsv:
        # Hue encodes direction (radians -> [0, 180)), value encodes magnitude.
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
        flow_rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
        # shown_img = np.concatenate((shown_img, flow_rgb), axis=1)

    # Undo the downscaling so the annotated frame matches the input size.
    shown_img = cv.resize(shown_img,
                          (shown_img.shape[1] * 2, shown_img.shape[0] * 2)) if compress else shown_img

    #print(prev_rgb.shape)
    return shown_img,flow_rgb