"""
@breif 交通灯识别
@description
    交通灯亮时特征:
        - 中间大块区域亮度高, 饱和度低
    stage 1. 将图像二值化，提取出交通灯的中心区域
    stage 2. 从上述区域中筛选出一定尺寸范围内的图形，并求出中心
    stage 3. 以中心为圆心画圆，取与stage 1中心不相交区域， 统计色调（Hue）的值
"""
import logging
import os

import cv2
import numpy as np

from constants import *


logger = logging.getLogger(__name__)


# in a 320x240 image: r_light_min ~ 8 px, r_light_max ~ 16 px
# TODO beware of other bright, low-saturation sources on site
#      (fluorescent tubes, large screens, ...) causing false positives
def detect_traffic_light(bgr_img, r_light_min, r_light_max, seg_y=None,
                        # h_red =(0, 15), h_yellow=(16, 20), h_green=(30, 60),
                        hsv_lower=None, hsv_upper=None):
    """
    Detect the presence of a traffic light.

    A traffic light is represented as a circle: ((x, y), r).

    NOTE: ``bgr_img`` is modified in place -- horizontal bands at the top and
    bottom of the frame are blacked out before thresholding.

    :param bgr_img: BGR image (numpy array); mutated by this function
    :param r_light_min: minimum accepted light radius, in pixels (exclusive)
    :param r_light_max: maximum accepted light radius, in pixels (exclusive)
    :param seg_y: height of the top/bottom bands to black out; ``None`` and
                  negative values behave like 0, values >= image height are
                  clamped to ``im_h - 1``
    :param hsv_lower: lower HSV bound of the bright/low-saturation core;
                      defaults to ``[0, 0, 240]`` when ``None``
    :param hsv_upper: upper HSV bound; defaults to ``[255, 40, 255]``
    :return: ``((x, y), r)`` of the best candidate, or ``None`` if no light
             is found
    """
    # Mutable (ndarray) defaults are shared across calls; build them per call.
    if hsv_lower is None:
        hsv_lower = np.array([0, 0, 240])
    if hsv_upper is None:
        hsv_upper = np.array([255, 40, 255])

    im_h, im_w = bgr_img.shape[:2]
    ctr_x, ctr_y = int(im_w / 2), int(im_h / 2)  # image center
    if seg_y is None or seg_y < 0:
        seg_y = 0
    elif seg_y >= im_h:
        seg_y = im_h - 1

    # Black out bands at the top and bottom of the image:
    # the top avoids strong overhead light, the bottom avoids reflections.
    y1, y2 = int(seg_y), int(im_h - seg_y)
    bgr_img[0:y1] = 0
    bgr_img[y2:im_h] = 0

    # stage 1. binarize: a lit core is bright (high V) with low saturation
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    bw = cv2.inRange(hsv, hsv_lower, hsv_upper)
    bw = cv2.erode(bw, None, iterations=1)

    # stage 2. filter candidates by size & position.
    # findContours returns (img, contours, hierarchy) on OpenCV 3 but
    # (contours, hierarchy) on OpenCV 4; indexing [-2] works on both.
    contours = cv2.findContours(bw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not contours:
        # logger.debug('traffic light not detected, no contours found.')
        return None

    circles = [cv2.minEnclosingCircle(c) for c in contours]
    circles = [c for c in circles if r_light_min < c[1] < r_light_max]  # keep circles within the size range
    if not circles:
        # logger.debug('traffic light not detected, no circles in size found.')
        return None
    # TODO when ctr_x lies between the red and the yellow light centers and
    #      both are lit, the reported light is ambiguous (flickers between them)
    circle = min(circles, key=lambda c: abs(c[0][0] - ctr_x))  # closest to image center along x

    if __debug__:
        bgr_to_show = bgr_img.copy()
        for c in circles:
            cv2.circle(bgr_to_show, (int(c[0][0]), int(c[0][1])), int(c[1]), [0, 0, 255], 1)
        cv2.circle(bgr_to_show, (int(circle[0][0]), int(circle[0][1])), int(circle[1]), [0, 255, 0], 2)
        cv2.circle(bgr_to_show, (int(ctr_x), int(ctr_y)), int(3), [0, 255, 0], -1)

        cv2.imshow('bw', bw)
        cv2.imshow('bgr', bgr_to_show)
        cv2.waitKey(1)

    return circle
    
    
    
"""
    # 所用摄像头图像由于交通灯亮度高而饱和, 不易(不可能)判断颜色. 故略去此部分.
    # stage 3. 判断颜色
    # experimental version
    # mask = bw*0  # initial mask 
    
    # regions = []  # roi of traffic light
    # for c in circles:
        # (x, y), r = c
        # thickness = int(r*r_ratio)
        # r = int(r + thickness/2)
        # r = int(r)
        # mask = cv2.circle(mask, (int(x), int(y)), r, [1,1,1], thickness)
        
        # x1, x2 = int(x -r -thickness/2), int(x +r +thickness/2)
        # y1, y2 = int(y -r -thickness/2), int(y +r +thickness/2)
        # regions.append((x1, x2, y1, y2))
    
    # bgr_img[:,:,0] *= mask
    # bgr_img[:,:,1] *= mask
    # bgr_img[:,:,2] *= mask
    
    # lights = []
    # width, height = bgr_img.shape[:2]
    # for x1, x2, y1, y2 in regions:
        # if 0<x1<width and 0<x2<width and 0<y1<height and 0<y2<height:
            # roi = bgr_img[y1:y2, x1:x2]
            # hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            
            # h_avg = np.average(hsv[:,:,0])
            # print(h_avg, end=', ')  # debug
            # if h_red[0] <= h_avg <= h_red[1]:
                # lights.append(TF_RED)
            # elif h_yellow[0] <= h_avg <= h_yellow[1]:
                # lights.append(TF_YELLOW)
            # elif h_green[0] <= h_avg <= h_green[1]:
                # lights.append(TF_GREEN)
    
    # if __debug__:
        # font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        # font_scale = 2
        # color = {TF_RED:'R', TF_YELLOW:'Y', TF_GREEN:'G'}
        # for i, j in enumerate(lights):
            # cv2.putText(bgr_img, color[j], (0, int(height/3)+i*20), font, font_scale, [0,255,255], 1)
        # cv2.imshow('masked raw', bgr_img)
        # cv2.imshow('mask', mask*255)
    
    # return lights
 """   

if __name__ == '__main__':
    # Demo/driver: replay a recorded video and run detection per frame.
    dir_ = os.path.dirname(__file__)
    dir_ = os.path.join(dir_, r'../../../img_and_videos')

    # pth_vid = os.path.join(dir_, r'tf-light-noon.mp4')
    pth_vid = os.path.join(dir_, r'11-28-0522.avi')
    cap = cv2.VideoCapture(pth_vid)
    # cap = cv2.VideoCapture(1)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    try:
        while True:
            ret, raw = cap.read()
            # BUG FIX: check ret BEFORE using the frame -- on a failed read
            # `raw` is None and imshow would crash before the error message.
            if not ret:
                print('camera fail')
                break
            cv2.imshow('raw', raw)

            # img_size: 320x240
            lights = detect_traffic_light(raw, seg_y=240/4, r_light_min=8, r_light_max=20)
            print(lights)

            if cv2.waitKey(20) & 0xFF == 27:  # Esc to quit
                break
    finally:
        # Release the capture device and close windows even on error/exit.
        cap.release()
        cv2.destroyAllWindows()