from abc import ABC, ABCMeta, abstractmethod
from pyzbar import pyzbar
import cv2
import numpy as np
import serial, time
import pyrealsense2 as rs


class AbstractDetection(ABC):
    """Interface for frame-based detectors.

    Lifecycle: precondition(frame) ingests a frame and prepares any
    intermediate images, detect() runs the detection and returns its
    results, view() shows the stored frames for debugging.
    """

    @abstractmethod
    def precondition(self, frame):
        """Ingest *frame* and prepare whatever detect() needs."""

    @abstractmethod
    def detect(self):
        """Run detection on the prepared frame(s) and return the results."""

    @abstractmethod
    def view(self):
        """Display the stored frames (e.g. via cv2.imshow)."""


class QRCodeDetection(AbstractDetection):
    """Decode QR/barcode payloads from a single stored frame."""

    def __init__(self):
        super().__init__()
        # Named frames shown by view(); precondition() fills 'frame'.
        self.frames = {}

    def precondition(self, frame):
        """Store the raw frame for the next detect()/view() call."""
        self.frames['frame'] = frame

    def detect(self):
        """Return the UTF-8 payload of every barcode found in the frame."""
        payloads = []
        for code in pyzbar.decode(self.frames['frame']):
            payloads.append(code.data.decode('utf-8'))
        return payloads

    def view(self):
        """Show each stored frame in its own OpenCV window."""
        for name, image in self.frames.items():
            cv2.imshow(name, image)

    
class ObjectDetection(AbstractDetection):
    """Detect large rectangular blobs of one colour in a frame.

    Pipeline: Gaussian blur -> inRange threshold -> morphological open/
    close -> dilate -> Canny edges -> contour search -> minAreaRect
    area filter.
    """

    RED_MODE = 0
    GREEN_MODE = 1
    BLUE_MODE = 2

    # Per-mode channel thresholds: (low_h, high_h, low_s, high_s, low_v, high_v).
    # NOTE(review): the names suggest HSV, but precondition() thresholds the
    # frame without any cvtColor, so these apply to the raw (presumably BGR)
    # channels — confirm against the capture pipeline.
    _THRESHOLDS = {
        RED_MODE: (0, 65, 18, 75, 88, 179),
        GREEN_MODE: (0, 78, 75, 201, 0, 62),
        BLUE_MODE: (88, 158, 37, 96, 0, 40),
    }

    def __init__(self, mode: int):
        """Configure thresholds and kernel sizes for *mode*.

        Raises:
            ValueError: if *mode* is not one of the three mode constants.
                (The original silently left the threshold attributes unset,
                failing later with AttributeError.)
        """
        super().__init__()
        # Named frames shown by view(); precondition() fills 'frame' and 'mask'.
        self.frames = {}
        try:
            (self.low_h, self.high_h,
             self.low_s, self.high_s,
             self.low_v, self.high_v) = self._THRESHOLDS[mode]
        except KeyError:
            raise ValueError(f"unknown mode: {mode}") from None
        # Kernel half-sizes; actual kernels are (k*2+1, k*2+1) pixels.
        self.ksize = 10
        self.kernel1 = 10
        self.kernel2 = 10
        self.kernel3 = 10
        # Minimum rectangle area (px^2) for detect() to report a hit.
        self.area = 100000

    def precondition(self, frame):
        """Build the binary edge mask for *frame* and store both images."""
        image = frame.copy()
        image = cv2.GaussianBlur(
            src=image,
            ksize=(self.ksize * 2 + 1, self.ksize * 2 + 1),
            sigmaX=0
        )
        # Keep pixels whose channels all fall inside the configured bounds.
        mask = cv2.inRange(
            src=image,
            lowerb=np.array([
                self.low_h, self.low_s, self.low_v
            ], dtype=np.uint8),
            upperb=np.array([
                self.high_h, self.high_s, self.high_v
            ], dtype=np.uint8)
        )
        # Open to drop speckle noise, close to fill holes, dilate to merge
        # neighbouring fragments.
        mask = cv2.morphologyEx(
            src=mask,
            op=cv2.MORPH_OPEN,
            kernel=cv2.getStructuringElement(
                shape=cv2.MORPH_RECT,
                ksize=(self.kernel1 * 2 + 1, self.kernel1 * 2 + 1),
                anchor=(-1, -1)
            ),
            anchor=(-1, -1)
        )
        mask = cv2.morphologyEx(
            src=mask,
            op=cv2.MORPH_CLOSE,
            kernel=cv2.getStructuringElement(
                shape=cv2.MORPH_RECT,
                ksize=(self.kernel2 * 2 + 1, self.kernel2 * 2 + 1),
                anchor=(-1, -1)
            ),
            anchor=(-1, -1)
        )
        mask = cv2.dilate(
            src=mask,
            kernel=cv2.getStructuringElement(
                shape=cv2.MORPH_RECT,
                ksize=(self.kernel3 * 2 + 1, self.kernel3 * 2 + 1),
                anchor=(-1, -1)
            ),
            anchor=(-1, -1)
        )
        mask = cv2.Canny(
            image=mask,
            threshold1=30,
            threshold2=60,
            apertureSize=5
        )
        self.frames['frame'] = frame
        self.frames['mask'] = mask

    def detect(self):
        """Find big rectangles in the edge mask, outline them, return vertices.

        Returns a list of (N, 1, 2) int32 vertex arrays, one per rectangle
        whose minAreaRect area exceeds self.area; the rectangles are also
        drawn onto self.frames['frame'].
        """
        contours, _ = cv2.findContours(
            image=self.frames['mask'],
            mode=cv2.RETR_EXTERNAL,
            method=cv2.CHAIN_APPROX_SIMPLE
        )
        rectangles = []
        for contour in contours:
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)
            rectangle = cv2.minAreaRect(approx)
            # BUG FIX: the original read rectangle[1][0] twice, so the area
            # test compared width*width instead of width*height.
            width, height = rectangle[1]
            vertices = cv2.boxPoints(rectangle)
            vertices = vertices.astype(np.int32)
            if width * height > self.area:
                rectangles.append(vertices.reshape((-1, 1, 2)))
        for vertex in rectangles:
            cv2.polylines(
                img=self.frames['frame'],
                pts=[vertex],
                isClosed=True,
                color=(255, 0, 255),
                thickness=3
            )
        return rectangles

    def view(self):
        """Show the input frame and the edge mask in OpenCV windows."""
        for name, image in self.frames.items():
            cv2.imshow(name, image)

        
class ControllerModel:
    """Thin wrapper around the serial link to the motion controller."""

    def __init__(self, name):
        # 115200 baud, 2 s read timeout on port *name*.
        self.interface = serial.Serial(name, baudrate=115200, timeout=2)

    def read(self):
        """Pause briefly, then drain and return every buffered byte."""
        time.sleep(0.1)
        return self.interface.read_all()

    def write(self, data):
        """Send *data* one character at a time, pacing each byte by 0.1 s."""
        for character in data:
            time.sleep(0.1)
            payload = ord(character).to_bytes(1, 'big')
            self.interface.write(payload)
    

class TaskModel:
    """Top-level task runner: RealSense capture, detectors, serial control.

    automate() drives a small state machine (self.status 0..10); each
    status value maps to one handler in its status_function table.
    """

    def __init__(self):

        # RealSense pipeline: depth (z16) and colour (bgr8), 1280x720 @ 30 fps.
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        self.pipeline.start(config)

        # Sensor index [1] is presumably the RGB sensor on this device —
        # TODO confirm for the specific camera model.
        self.sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
        self.sensor.set_option(rs.option.auto_exposure_priority, True)
        self.sensor.set_option(rs.option.exposure, 106)

        # NOTE(review): the serial controller is disabled here, but automate()
        # still references self.device in wait_for() and status_1(); any status
        # that touches the device will raise AttributeError until this line is
        # restored.
        # self.device = ControllerModel(name="/dev/ttyUSB0")

        # Index of the current state in automate()'s status_function table.
        self.status = 0

        # QR payload(s); status_1() replaces this with a list of 7-char strings.
        self.data = ""

        self.code = QRCodeDetection()

        # NOTE(review): red/green/blue appear to be unused duplicates of the
        # instances stored in self.detections below.
        self.red = ObjectDetection(ObjectDetection.RED_MODE)
        self.green = ObjectDetection(ObjectDetection.GREEN_MODE)
        self.blue = ObjectDetection(ObjectDetection.BLUE_MODE)

        # Colour detectors indexed by payload digit ('1' -> 0, '2' -> 1, ...).
        self.detections = [
            ObjectDetection(ObjectDetection.RED_MODE),
            ObjectDetection(ObjectDetection.GREEN_MODE),
            ObjectDetection(ObjectDetection.BLUE_MODE)
        ]

    def fetch_frame(self):
        """Block for the next frameset; return (color, depth) numpy arrays."""
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        return np.asanyarray(color_frame.get_data()), np.asanyarray(depth_frame.get_data())

    def automate(self):
        """Run the detection state machine with live trackbar tuning."""

        def wait_for(byte):
            # Advance the state machine once *byte* shows up on the serial link.
            data = self.device.read()
            if data is not None and len(data) and byte in data:
                self.status += 1

        def status_0():
            # Wait for the controller's 'B' (begin) handshake byte.
            wait_for(b'B')
        
        def status_1():
            # Scan QR codes until a 7-char payload appears, then forward the
            # payload (minus the separator at index 3) to the controller.
            # NOTE(review): fetch_frame() returns a (color, depth) tuple but
            # precondition() expects a single image — probably img[0] was
            # meant; confirm.
            img = self.fetch_frame()
            self.code.precondition(img)
            data = self.code.detect()
            data = [i for i in data if len(i) == 7]
            self.code.view()
            if len(data):
                self.data = data
                self.status += 1
                self.device.write(f"{data[0][0:3]}{data[0][4:7]}")
                self.sensor.set_option(rs.option.exposure, 300)
            cv2.waitKey(1)
        
        def status_2_6():
            # Wait for the controller's 'K' (acknowledge) byte.
            wait_for(b'K')
        
        def status_3_4_5_7_8_9():
            # Statuses 3-5 read payload chars 0-2 and statuses 7-9 read chars
            # 4-6 (char 3 is the separator skipped by status_1's write); each
            # char '1'..'3' selects a colour detector. Advance once the chosen
            # detector reports a rectangle.
            # NOTE(review): precondition() is fed the whole (color, depth)
            # tuple from fetch_frame(), not a single image — confirm intent.
            index = ord(self.data[0][self.status - 3]) - ord('1')
            self.detections[index].precondition(self.fetch_frame())
            res = self.detections[index].detect()
            self.detections[index].view()
            if len(res):
                # self.device.write('1')
                self.status += 1
            if cv2.waitKey(1) & 0xff == ord('q'):
                # Manual abort: release the camera and quit.
                self.pipeline.stop()
                cv2.destroyAllWindows()
                exit(0)
        
        def status_10():
            # Final state: release the camera and quit.
            self.pipeline.stop()
            cv2.destroyAllWindows()
            exit(0)
        
        # One handler per status value 0..10.
        status_function = [
            status_0,
            status_1,
            status_2_6,
            status_3_4_5_7_8_9,
            status_3_4_5_7_8_9,
            status_3_4_5_7_8_9,
            status_2_6,
            status_3_4_5_7_8_9,
            status_3_4_5_7_8_9,
            status_3_4_5_7_8_9,
            status_10
        ]

        exposure = 300

        cv2.namedWindow("Control")

        def nothing(x):
            # Trackbar callback placeholder; values are polled in the loop below.
            pass

        # Live-tuning trackbars for the active detector's thresholds/kernels.
        cv2.createTrackbar("low_h", "Control", 0, 255, nothing)
        cv2.createTrackbar("high_h", "Control", 0, 255, nothing)
        cv2.createTrackbar("low_s", "Control", 0, 255, nothing)
        cv2.createTrackbar("high_s", "Control", 0, 255, nothing)
        cv2.createTrackbar("low_v", "Control", 0, 255, nothing)
        cv2.createTrackbar("high_v", "Control", 0, 255, nothing)
        cv2.createTrackbar("ksize", "Control", 0, 10, nothing)
        cv2.createTrackbar("kernel1", "Control", 0, 10, nothing)
        cv2.createTrackbar("kernel2", "Control", 0, 10, nothing)
        cv2.createTrackbar("exposure", "Control", 10, 600, nothing)

        # NOTE(review): debug overrides — a fake payload and a pinned status
        # force one specific detector so its thresholds can be tuned live;
        # remove these to run the real state machine.
        self.data=["123"]

        self.status = 5
        index = ord(self.data[0][self.status - 3]) - ord('1')

        # Seed the trackbars from the selected detector's current settings.
        cv2.setTrackbarPos("low_h", "Control", self.detections[index].low_h)
        cv2.setTrackbarPos("high_h", "Control", self.detections[index].high_h)
        cv2.setTrackbarPos("low_s", "Control", self.detections[index].low_s)
        cv2.setTrackbarPos("high_s", "Control", self.detections[index].high_s)
        cv2.setTrackbarPos("low_v", "Control", self.detections[index].low_v)
        cv2.setTrackbarPos("high_v", "Control", self.detections[index].high_v)
        cv2.setTrackbarPos("ksize", "Control", self.detections[index].ksize)
        cv2.setTrackbarPos("kernel1", "Control", self.detections[index].kernel1)
        cv2.setTrackbarPos("kernel2", "Control", self.detections[index].kernel2)
        cv2.setTrackbarPos("exposure", "Control", exposure)
        
        while True:

            # Debug override: re-pin the state to 5 every iteration (see the
            # NOTE above), so the pinned detector is tuned continuously.
            self.status = 5
            index = ord(self.data[0][self.status - 3]) - ord('1')

            # Pull the latest trackbar values into the active detector.
            self.detections[index].low_h = cv2.getTrackbarPos("low_h", "Control")
            self.detections[index].high_h = cv2.getTrackbarPos("high_h", "Control")
            self.detections[index].low_s = cv2.getTrackbarPos("low_s", "Control")
            self.detections[index].high_s = cv2.getTrackbarPos("high_s", "Control")
            self.detections[index].low_v = cv2.getTrackbarPos("low_v", "Control")
            self.detections[index].high_v = cv2.getTrackbarPos("high_v", "Control")
            self.detections[index].ksize = cv2.getTrackbarPos("ksize", "Control")
            self.detections[index].kernel1 = cv2.getTrackbarPos("kernel1", "Control")
            self.detections[index].kernel2 = cv2.getTrackbarPos("kernel2", "Control")
            exposure = cv2.getTrackbarPos('exposure', 'Control')

            # Push the exposure to the sensor only when it actually changed.
            if self.sensor.get_option(rs.option.exposure) != exposure:
                self.sensor.set_option(rs.option.exposure, exposure)
            

            print(self.status)
            status_function[self.status]()


def main():
    """Entry point: build the task model and run its state machine."""
    task = TaskModel()
    task.automate()


if __name__ == "__main__":
    main()

