#!/usr/bin/python3
# -*- encoding:UTF-8 -*-
import sys

sys.path.append("../")

from vision.Pretreat import Pretreat
from vision.ClassifierVersion import Img2Status
from ui.cube_ui import *

import os
import cv2
import time
import serial
import pickle
import configparser
from datetime import datetime

from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtGui import QPixmap


class MyWindow(QMainWindow, Ui_MainWindow):
    """Main application window: a QMainWindow with the generated cube UI
    (Ui_MainWindow from ui.cube_ui) applied to it."""

    def __init__(self, parent=None):
        # Zero-argument super() is equivalent to super(MyWindow, self) in Py3.
        super().__init__(parent)
        self.setupUi(self)


def rotateFR():
    """Send the 'rotate front-right' command frame to the motor controller.

    Opens /dev/ttyS0 at 115200 baud, writes a fixed 8-byte frame, and
    guarantees the port is closed even if the write fails (the original
    leaked the port on a write error).
    """
    ser = serial.Serial("/dev/ttyS0", 115200)
    try:
        # NOTE(review): this frame is byte-identical to the one sent by
        # rotateFL() — looks like a copy-paste; confirm the real FR bytes.
        hex_str = bytes.fromhex('ff ff 03 59 6d ae 74 00')
        ser.write(hex_str)
    finally:
        ser.close()


def rotateFL():
    """Send the 'rotate front-left' command frame to the motor controller.

    Opens /dev/ttyS0 at 115200 baud, writes a fixed 8-byte frame, and
    guarantees the port is closed even if the write fails (the original
    leaked the port on a write error).
    """
    ser = serial.Serial("/dev/ttyS0", 115200)
    try:
        # NOTE(review): this frame is byte-identical to the one sent by
        # rotateFR() — looks like a copy-paste; confirm the real FL bytes.
        hex_str = bytes.fromhex('ff ff 03 59 6d ae 74 00')
        ser.write(hex_str)
    finally:
        ser.close()


def make_sure_open(cp, idx, max_retries=10):
    """Read from capture `cp` until a frame arrives, reporting while waiting.

    The original retried exactly once, so a slow camera could still be
    unopened afterwards; this version retries up to `max_retries` times
    (default 10 — backward-compatible new parameter) and gives up after
    that rather than hanging forever.

    cp  -- an opened cv2.VideoCapture (anything with a .read() -> (ok, frame))
    idx -- camera identifier, used only in the status messages
    """
    ok, frame = cp.read()
    for _ in range(max_retries):
        if ok:
            return
        print("waiting for cam {} open...".format(idx))
        w.output_text.append("Waiting for cam {} open".format(idx))
        ok, frame = cp.read()


def getFinalResult(str_result):
    """Run the min2phase Java solver on `str_result` and return its output.

    `str_result` is the cube status string produced by the classifier.
    Returns the pipeline's stdout as a string; os.popen does not raise on
    command failure, so a broken pipeline yields "".
    """
    # SECURITY NOTE: str_result is interpolated into a shell command line.
    # It comes from the internal vision classifier, but never feed this
    # function untrusted input.
    cmd = ("cd ..\n cd min2phase-dev-min\n"
           " java demo {0} | ../transform/build/transform".format(str_result))
    # `with` closes the popen file object (the original leaked it).
    with os.popen(cmd) as java_res:
        return java_res.read()


def collectPoint():
    """Launch the color-sample collection script in the vision training dir."""
    cmd = "cd ./../vision/ClfTrain && python3 GetData.py"
    os.system(cmd)


def trainData():
    """Launch the classifier training script in the vision training dir."""
    cmd = "cd ./../vision/ClfTrain && python3 Train.py"
    os.system(cmd)


def program_exit():
    # Close the main window (bound to the Exit button in main()).
    w.close()


def program_start():
    """Run one full solve cycle (bound to the Start button in main()).

    Steps: load the saved camera index list, capture four images of the
    cube while the fixture rotates it, classify the sticker colors, feed
    the resulting status string to the min2phase solver, and display the
    move sequence in the UI.

    Relies on the module-level window `w`, and blocks on console input()
    between stages, so the program must be driven from a terminal
    alongside the GUI.
    """

    # take-lots-time part
    # Camera index list was pickled by an earlier calibration step.
    # NOTE(review): cam_idx_file is never closed — wrap in `with`.
    cam_idx_file = open("../configs/cam_idx.bin", 'rb')
    cam_idx_list = pickle.load(cam_idx_file)
    print("Cam List: {}".format(cam_idx_list))
    w.output_text.setText("Cam List: {}".format((cam_idx_list)))

    # # just for debugging
    # TODO
    # NOTE(review): debug override left live — the first camera is replaced
    # by a still image, so VideoCapture below reads a file, not hardware.
    cam_idx_list[0] = "../configs/0.jpg"
    up_cam_cp = cv2.VideoCapture(cam_idx_list[0])
    make_sure_open(up_cam_cp, cam_idx_list[0])

    # Build the color pre-treatment stage and the sticker classifier from
    # the saved config and model files.
    config = configparser.ConfigParser()
    config.read("../configs/vision_pretreat.ini")
    pretreater = Pretreat(config)
    solver = Img2Status("../vision/ClfTrain/now.model")

    ################
    # Ready Signal #
    ################
    w.output_text.append("Start Rotate&cam:")

    pics = []
    print("Ready?")
    input()  # wait for operator confirmation on the console

    start_time = datetime.now()
    print("--- get first image ---")
    input()
    __, frame = up_cam_cp.read()
    pics.insert(0, frame)

    print("--- Rotate FR ---")
    rotateFR()
    time.sleep(1)  # give the fixture time to finish the physical rotation
    print("--- get second image ---")
    input()
    __, frame = up_cam_cp.read()
    # NOTE(review): insert(2, ...) on a 1-element list clamps to the end
    # (index 1). The insert indices below reorder the captures so that
    # `pics` ends up in face order [1st, 3rd, 2nd, 4th] — presumably
    # deliberate to match the pretreater's expected layout; confirm.
    pics.insert(2, frame)

    print("--- Rotate FL ---")
    rotateFL()
    time.sleep(1)
    print("--- Rotate FR ---")
    rotateFR()
    time.sleep(1)
    print("--- get third image ---")
    input()
    __, frame = up_cam_cp.read()
    pics.insert(1, frame)

    print("--- Rotate FL ---")
    rotateFL()
    time.sleep(1)
    print("--- get forth image ---")
    input()
    __, frame = up_cam_cp.read()
    pics.insert(3, frame)

    # back to start
    print("--- Rotate FR ---")
    rotateFR()
    time.sleep(1)
    print("--- Rotate FL ---")
    rotateFL()
    time.sleep(1)
    print("--- Rotate FR ---")
    rotateFR()
    time.sleep(1)

    # NOTE(review): label says ms but .microseconds is the sub-second
    # component of the timedelta in µs (wraps every second) — not a total.
    w.output_text.append("End Rotate&cam Cost: {}ms".format(
        (datetime.now() - start_time).microseconds))

    # TODO
    #pics[0] = cv2.imread("../configs/0.jpg")
    #pics[1] = cv2.imread("../configs/1.jpg")
    #pics[2] = cv2.imread("../configs/2.jpg")
    #pics[3] = cv2.imread("../configs/3.jpg")

    # NOTE(review): debug leftover — all four camera captures above are
    # discarded here and replaced by fixed files from ./image/.
    pics[0] = cv2.imread("./image/0.jpg")
    pics[1] = cv2.imread("./image/1.jpg")
    pics[2] = cv2.imread("./image/2.jpg")
    pics[3] = cv2.imread("./image/3.jpg")

    # for i in range(0, 4):
    #     cv2.imwrite("./tmp_image/%d.jpg" % (i), pics[i])

    # Show the four face images in the UI (from the same debug files).
    w.lab_image1.setPixmap(QPixmap("./image/0.jpg"))
    w.lab_image2.setPixmap(QPixmap("./image/1.jpg"))
    w.lab_image3.setPixmap(QPixmap("./image/2.jpg"))
    w.lab_image4.setPixmap(QPixmap("./image/3.jpg"))

    # w.lab_image1.setPixmap(QPixmap("./tmp_image/0.jpg"))
    # w.lab_image2.setPixmap(QPixmap("./tmp_image/1.jpg"))
    # w.lab_image3.setPixmap(QPixmap("./tmp_image/2.jpg"))
    # w.lab_image4.setPixmap(QPixmap("./tmp_image/3.jpg"))

    ################
    # Process Part #
    ################

    # image process part
    # Classify sticker colors; solver.status holds the cube status string.
    w.output_text.append("Start Image Process Part")
    start_time = datetime.now()
    color_vectors = pretreater.GetResult(pics)
    solver.GetResult(color_vectors)
    w.vision_seq.setText(solver.status)
    # NOTE(review): same µs-vs-ms mislabel as above.
    w.output_text.append("End Process Color Cost: {}ms".format(
        (datetime.now() - start_time).microseconds))

    # min2phase part
    # Shell out to the Java solver and show the resulting move sequence.
    w.output_text.append("Start Min2phase Part")
    start_time = datetime.now()
    cube_status = solver.status
    moves = getFinalResult(cube_status)
    w.output_text.append("Result: \n{}".format(moves))
    print("Result: \n{}".format(moves))
    w.output_text.append("End Min2phase Cost: {}ms".format(
        (datetime.now() - start_time).microseconds))

    # Persist the (debug-file) images for later inspection/calibration.
    for i in range(4):
        cv2.imwrite("../configs/%d.jpg" % (i + 4), pics[i])


def main():
    """Wire up the UI signals, configure the image labels, and enter the
    Qt event loop (never returns; exits the process with Qt's status)."""
    w.but_start.clicked.connect(program_start)
    w.but_exit.clicked.connect(program_exit)
    w.act_collectPoint.triggered.connect(collectPoint)
    w.act_trainData.triggered.connect(trainData)

    # Each image label scales its pixmap and is sized to its host widget.
    label_widget_pairs = (
        (w.lab_image1, w.widget),
        (w.lab_image2, w.widget_2),
        (w.lab_image3, w.widget_3),
        (w.lab_image4, w.widget_4),
    )
    for label, host in label_widget_pairs:
        label.setScaledContents(True)
        label.resize(host.size())

    sys.exit(app.exec_())


# Module-level UI bootstrap: the QApplication and main window are created
# and shown at import time (before main() runs), so the handler functions
# above can reference `w` as a global. NOTE(review): this runs even when
# the module is merely imported — consider moving under the __main__ guard.
app = QtWidgets.QApplication(sys.argv)
w = MyWindow()
w.show()

if __name__ == '__main__':
    main()
