# -- coding:UTF-8 --
import cv2
import numpy as np
import time
import vibe_me
import pyrealsense2 as rs
import find_transformation as ft
import extract_auxiliary_area as ea

# Directory where snapshot images are written when the user presses ENTER.
IMG_PATH='./result_image/'
# Physical size of the working surface — presumably millimetres
# (210 x 297 matches an A4 sheet) — TODO confirm units.
WORKING_AREA_SIZE=(210,297)

# ViBe background-subtraction model shared by all functions below.
vibe = vibe_me.ViBe()
# RealSense camera setup: 1280x720 BGR colour stream at 30 fps.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
pipeline.start(config)

def get_video_frame():
    """Grab the next colour frame from the RealSense pipeline as a numpy array."""
    global pipeline
    frame_set = pipeline.wait_for_frames()
    colour = frame_set.get_color_frame()
    return np.asanyarray(colour.get_data())

def find_diff(img, bgsize, scale=2, circle_size=0, threshold_value=70, offset=5):
    """Detect foreground objects in *img* against the ViBe background model.

    Args:
        img: current BGR frame (already cropped to the working area).
        bgsize: shape tuple of the reference image (height, width, channels).
        scale: down-scale factor used for contour-area thresholds downstream.
        circle_size: kernel diameter for the (currently disabled) watershed split.
        threshold_value, offset: unused here; kept for interface compatibility.

    Returns:
        The tuple from generate_box on success, or (False, 0, 0, 0, 0, 0) when
        the foreground mask covers more than 2/3 of the frame (treated as a
        global change such as lighting, not a real object).
    """
    wide, high, xxx = bgsize  # NOTE: bgsize comes from img.shape, so `wide` is actually the height
    global vibe
    kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    result = vibe.Diff(img).astype(np.uint8)
    # Closing then opening: fill small holes, then remove speckle noise.
    result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel1)
    result = cv2.morphologyEx(result, cv2.MORPH_OPEN, kernel1)
    kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    result = cv2.morphologyEx(result, cv2.MORPH_OPEN, kernel2)

    foreground_pixels = np.sum(result == 255)
    if foreground_pixels > (result.shape[0] * result.shape[1] * 2 / 3):
        return False, 0, 0, 0, 0, 0
    # BUG FIX: cv2.resize requires integer dimensions; `/` yields a float in
    # Python 3, so the original calls raised a TypeError. Use floor division.
    new_width = wide * result.shape[1] // result.shape[0]
    result = cv2.resize(result, (new_width, wide))
    img = cv2.resize(img, (new_width, wide))
    cv2.imshow("result ", result)

    # if circle_size != 0:
    #     result = separate(result, circle_size, scale)

    return generate_box(result, img, scale)


def generate_box(binary_image, image, scale):
    """Fit a rotated minimum-area box to every large contour in *binary_image*,
    crop/deskew each boxed region out of *image*, and annotate *image* with the
    box outline plus the mapped centre coordinates and angle.

    Args:
        binary_image: 8-bit single-channel foreground mask.
        image: colour frame aligned with the mask; drawn on in place.
        scale: factor by which the mask was down-scaled relative to *image*.

    Returns:
        (True, annotated image, attitudes [cx, cy, angle] per object,
         cropped object images, box corner arrays, binary_image)

    Side effect: clears and redraws vibe.foreground_area with the detected
    boxes (filled), so the ViBe model knows which pixels are foreground.
    """
    global vibe,WORKING_AREA_SIZE
    contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cnts = imutils.grab_contours(cnts)
    expansion = 0  # number of pixels by which to enlarge each box (currently disabled)
    expansion = int(expansion / scale)
    images = []  # the cropped image of every detected object
    boxs = []  # the box corners matching each cropped image
    attitudes = []  # the pose (centre + angle) matching each cropped image
    vibe.foreground_area[:, :] = 0  # reset the foreground mask before redrawing
    for i in range(len(contours)):
        if cv2.contourArea(contours[i]) > 1000 / scale:
            # minAreaRect returns ((centre_x, centre_y), (w, h), angle)
            ((x1, y1), (x2, y2), angl) = cv2.minAreaRect(contours[i])
            attitudes.append([x1, y1, angl])
            # Scale the rect back up to full-image coordinates.
            x1 *= scale
            y1 *= scale
            x2 *= scale
            y2 *= scale
            rect = ((x1, y1), (x2, y2), angl)
            # print(rect)
            # Corner coordinates of the minimum-area rectangle.
            box = cv2.boxPoints(rect)

            # Round the corner coordinates to integers.
            box = np.int0(box)
            # Push each corner outward by `expansion` pixels (no-op while 0).
            box[2][0] += expansion
            box[2][1] -= expansion
            box[1][0] -= expansion
            box[1][1] -= expansion
            box[0][0] -= expansion
            box[0][1] += expansion
            box[3][0] += expansion
            box[3][1] += expansion

            # NOTE(review): x2/y2 are the rect's side lengths here, so the
            # names hight/width are swapped-looking but used consistently below.
            hight, width = x2 + 2 * expansion, y2 + 2 * expansion

            aim_size = np.float32([[0, 0], [width, 0], [width, hight], [0, hight]])
            raw_size = []

            for x, y in box:
                raw_size.append([x, y])

            raw_size = np.float32(raw_size)
            # Deskew: map the rotated box onto an axis-aligned crop.
            translate_map = cv2.getPerspectiveTransform(raw_size, aim_size)
            translate_img = cv2.warpPerspective(image, translate_map, (int(width), int(hight)))
            images.append(translate_img)
            # cv2.imshow("translate_img"+str(x1),translate_img)  # show each cropped object
            boxs.append(box)
            cv2.drawContours(image, [box], 0, (0, 0, 255), 3)  # draw the box outline
            cv2.drawContours(vibe.foreground_area, [box], 0, 255, -1)
            # Convert the pixel centre into working-area coordinates.
            x1,y1=coordinate_mapping(x1, y1, WORKING_AREA_SIZE, binary_image.shape)
            # Normalise the reported angle when the rect is taller than wide.
            if x2<=y2:
                angl=angl-90

            cv2.putText(image, '({},{}),{}'.format(int(x1), int(y1),int(-angl)), (box[2][0], box[2][1]), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 255, 255), 1)
    return True, image, attitudes, images, boxs, binary_image


def separate(result, circle_size, scale):
    """Split touching blobs in the binary mask *result* with the watershed
    transform, seeded by a circular convolution that finds each blob's core.

    Returns the mask with dilated watershed cut lines removed, so touching
    objects become separate connected components.
    """
    diameter = int(circle_size / scale)
    round_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                             (diameter, diameter))
    # Convolve the inverted mask with a slightly smaller circle to locate
    # each object's core, then invert back to obtain the seed regions.
    seeds = cv2.filter2D(cv2.bitwise_not(result), -1, round_kernel)
    # cv2.imshow("centre", seeds)
    seeds = cv2.bitwise_not(seeds)
    # The band between the full mask and the seeds is the uncertain zone.
    uncertain = cv2.subtract(result, seeds)
    ret, markers = cv2.connectedComponents(seeds)
    markers = markers + 1
    cut_lines = np.zeros(result.shape, np.uint8)
    markers[uncertain == 255] = 0
    # markers = cv2.watershed(image_B, markers)
    markers = cv2.watershed(cv2.cvtColor(result, cv2.COLOR_GRAY2BGR), markers)
    cut_lines[markers == -1] = 255
    cut_lines = cv2.dilate(cut_lines, np.ones((3, 3), np.uint8), iterations=1)
    # Remove the (widened) watershed boundaries from the original mask.
    separated = cv2.bitwise_and(cv2.bitwise_not(cut_lines), result)
    # cv2.imshow("image2", separated)
    return separated

def coordinate_mapping(x, y, physical_size, pixel_size):
    """Linearly map a pixel coordinate onto the physical working area.

    x is scaled by pixel_size[0] -> physical_size[0] and y by
    pixel_size[1] -> physical_size[1]. Returns the mapped (x, y) pair.
    """
    physical_a, physical_b = physical_size
    pixels_a, pixels_b = pixel_size
    return x * physical_a / pixels_a, y * physical_b / pixels_b

def create_background(background_img_ori):
    """Extract the working area from the raw frame and initialise the ViBe
    background model with it.

    Returns (cropped background image, success flag from extract_focus).
    """
    cropped, ok = ea.extract_focus(background_img_ori, 1)
    vibe.ProcessFirstFrame(cropped)
    return cropped, ok

def foreground_detection():
    """Main interactive loop.

    Shows the live camera feed; ENTER captures the current frame as the
    background, then enters a detection loop that waits for the scene to
    settle (5 consecutive stable, extractable frames) before running the
    ViBe diff and drawing detections. Inside the detection loop, ENTER saves
    the current image set to IMG_PATH and ESC closes all windows and returns.
    """
    global vibe
    while True:
        img = get_video_frame()
        cv2.imshow('Please press ENTER to create background and start detection', img)
        key = cv2.waitKey(30)

        if key == 13:  # ENTER
            background_img, suc = create_background(img)
            print(suc)
            if suc == False:
                pass  # background extraction failed; keep showing the live feed
            else:
                print("Background created successfully")
                background_img_ori = img
                cv2.imshow("background_img_ori", background_img_ori)
                pre_img = img
                count = 0
                # BUG FIX: res_img/thresh were referenced on the ENTER-save
                # path before the first detection produced them (NameError).
                res_img = None
                thresh = None
                while True:
                    cur_img = get_video_frame()
                    cv2.imshow('cur_img_ori', cur_img)
                    key = cv2.waitKey(10)
                    diff = cv2.absdiff(cur_img, pre_img)
                    # BUG FIX: renamed from `max`, which shadowed the builtin.
                    max_diff = np.max(diff)
                    print(max_diff)
                    pre_img = cur_img
                    if max_diff < 70:  # frame is stable (little inter-frame change)
                        print("_____________________________")
                        foreground_image, res = ea.extract_focus(cur_img, 1)
                        if res:
                            count += 1
                        else:
                            count = 0
                    else:
                        count = 0
                    if count > 4:  # 5 consecutive good frames: run detection
                        foreground_image = cv2.resize(
                            foreground_image,
                            (int(background_img.shape[1]), int(background_img.shape[0])))
                        if ft.ORB_Feature(background_img, foreground_image):
                            sul, res_img, T, images, boxs, thresh = find_diff(
                                foreground_image.copy(), img.shape, 1,
                                circle_size=40, threshold_value=70)
                            if sul:
                                cv2.imshow('Please press ESC to create background and start detection', res_img)
                                vibe.Update()

                    if key == 13 and res_img is not None:  # ENTER: save snapshots
                        ticks = time.time()
                        print("Image saved " + str(ticks))
                        # NOTE(review): ':' in these filenames is invalid on
                        # Windows — works on POSIX only; confirm target OS.
                        cv2.imwrite(IMG_PATH + str(ticks) + 'a:' + '.png', background_img_ori)
                        cv2.imwrite(IMG_PATH + str(ticks) + 'b:' + '.png', cur_img)
                        cv2.imwrite(IMG_PATH + str(ticks) + 'c:' + '.png', thresh)
                        cv2.imwrite(IMG_PATH + str(ticks) + 'd:' + '.png', res_img)

                    if key == 27:  # ESC
                        cv2.destroyAllWindows()
                        return

# Entry point: start the interactive background/foreground detection loop.
if __name__ == '__main__':
    foreground_detection()
