# Untitled - By: luodongxu - Mon May 29 2023
import sensor, image, time ,math,seekfree
import os, tf
from ulab import numpy as np
from DataCommunicate import *
import openmv_numpy as Onp
import Anti
from pyb import LED #导入LED

# On-board white LED (pyb LED channel 4); created but left off by default.
white = LED(4)
#white.on()

# Camera brightness setting; overwritten at runtime by the "init" command (see Init()).
brintness = 500

# Map/grid configuration defaults; also overwritten by Init().
MapMaxX = 6
MapMaxY = 5
CoodSize = 12  # number of waypoint circles expected on the map
#CoodSize = 16

# Flag meant to mark that a point list was computed.
# NOTE(review): PointListFun assigns this without a `global` declaration,
# so this module-level value stays 0 — confirm intent.
PointOK = 0

# Classifier labels in protocol order; index == numeric id sent to the host.
_CLASS_LABELS = (
    "apple", "banana", "bean", "cabbage", "chili", "corn", "cucumber",
    "durian", "eggplant", "grape", "orange", "peanut", "potato", "radish",
    "rice", "noneCres", "ing",
)

# Map each classifier label to the numeric id used by the serial protocol.
IamgeClassifyRes_enum = {label: code for code, label in enumerate(_CLASS_LABELS)}

# Boundary-detection result codes, in protocol order (index == numeric id).
SearchBoundariesRes_enum = {
    side: code
    for code, side in enumerate(("NoneBoundar", "Left", "Right", "Top", "Bottom"))
}

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA) # we run out of memory if the resolution is much bigger...
sensor.set_brightness(brintness)
sensor.skip_frames(time = 20)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...


# Serial link to the host controller at 921600 baud.
DataCommu = DataCommunicate(921600)

net_path = "mobilenet.tflite"                                  # path of the TFLite classification model
labels = [line.rstrip() for line in open("/sd/lebal_eiq.txt")]   # load class labels from the SD card
net = tf.load(net_path, load_to_fb=True)

# 2-inch IPS LCD for on-device debug preview.
lcd = seekfree.IPS200(2)
lcd.full()

def ImageClassiflyFun():
    """
    Classify the contents of a detected square and report the result.

    Takes one snapshot, searches for an 80-120 px square, runs the TFLite
    classifier on the cropped square and sends the best-scoring label and
    its confidence over the serial link; the preview is shown on the LCD.

    NOTE(review): after the rect loop this always sends a trailing
    "noneCres" frame even when a classification was already sent — the
    `return` that would prevent this is commented out. Confirm the host
    protocol expects the extra frame.
    """
    img = sensor.snapshot()
    for r in img.find_rects(threshold = 3000):             # search the image for rectangles
        if r.w() > 80 and r.w() < 120 and r.h() > 80 and r.h() < 120:
            img.draw_rectangle(r.rect(), color = (255,0, 0))   # outline the rect so it is visible in the IDE preview

            img1 = img.copy(1,1,r.rect())                           # copy the image inside the rect
            for obj in tf.classify(net , img1, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
                 # print("**********\nTop 1 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
                # sort (label, score) pairs by descending confidence
                sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
                DataCommu.SendImageClassifyRes(IamgeClassifyRes_enum[sorted_list[0][0]],sorted_list[0][1])
                lcd.show_image(img, 320, 240, zoom=0)
                lcd.show_str(sorted_list[0][0],0, 44, lcd.RED, 1)
            # print the highest-confidence result
                #for i in range(1):
                    #print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
                #return
    DataCommu.SendImageClassifyRes(IamgeClassifyRes_enum["noneCres"],0)



def GetTransformM(A, B):
    """
    Compute the 3x3 perspective (homography) matrix that maps the four
    source points A onto the four destination points B.

    A and B are each sequences of four [x, y] points.

    Example:
        A_ = [[75, 0], [137, 0], [22, 89], [189, 89]]
        B_ = [[41, 0], [158, 0], [41, 89], [158, 89]]
        res = GetTransformM(A_, B_)
    """
    # Assemble the standard 8x8 DLT system: two equations per point pair.
    rows = []
    rhs = []
    for i in range(4):
        ax, ay = A[i][0], A[i][1]
        bx, by = B[i][0], B[i][1]
        rows.append([ax, ay, 1, 0, 0, 0, -ax * bx, -ay * bx])
        rows.append([0, 0, 0, ax, ay, 1, -ax * by, -ay * by])
        rhs.append([bx])
        rhs.append([by])

    # Solve for the 8 free homography coefficients (h33 is fixed at 1).
    sol = Onp.solve(Onp.array(rows), Onp.array(rhs))

    return Onp.array([
        [sol[0][0], sol[1][0], sol[2][0]],
        [sol[3][0], sol[4][0], sol[5][0]],
        [sol[6][0], sol[7][0], 1.0],
    ])






def PointListFun():
    """
    Detect the playfield border rectangle, find the waypoint circles inside
    it, solve a visiting order with the ant-colony algorithm and send the
    ordered list of map coordinates to the host.

    Side effects: temporarily raises sensor brightness, draws debug overlays
    on the LCD, and transmits the result via DataCommu (an empty list when
    detection fails).
    """
    global PointOK  # fix: was assigned as a local before, so the flag never changed

    sensor.set_brightness(900)  # brighter frame makes the dots easier to detect

    img = sensor.snapshot()

    # Destination coordinate frame for the perspective transform, chosen by
    # map width. NOTE(review): only widths 6 and 7 are handled — for any
    # other width B stays empty and GetTransformM below would fail;
    # presumably unreachable given Init()'s callers.
    B = []
    if MapMaxX == 6:
        B = [[-1, 25], [30, 25], [30, -1], [-1, -1]]
    elif MapMaxX == 7:
        B = [[-1, 25], [35, 25], [35, -1], [-1, -1]]

    pointlist = []  # pixel coordinates: start corner + detected circles
    mat = []        # perspective-transform matrix
    Antires = []    # ant-colony result: visiting order (indices into pointlist)
    res = []        # final ordered map coordinates to transmit

    for r in img.find_rects(threshold = 3000):  # locate the playfield border
        if r.w() > 160 and r.w() < 320 and r.h() > 120 and r.h() < 240:
            RectCorners = r.corners()
            img.draw_line(RectCorners[0][0], RectCorners[0][1], RectCorners[3][0], RectCorners[3][1], (255, 255, 0))
            img.draw_line(RectCorners[0][0], RectCorners[0][1], RectCorners[1][0], RectCorners[1][1], (255, 0, 255))
            mat = GetTransformM(r.corners(), B)  # pixel -> map homography
            pointlist.append([r.corners()[0][0], r.corners()[0][1]])  # start point
            cir = img.find_circles(roi = r.rect(), threshold = 1800, x_margin = 5, y_margin = 5, r_margin = 5, r_min = 1, r_max = 5, r_step = 1)
            # Only accept the frame when exactly the expected number of
            # waypoint circles is visible.
            if len(cir) == CoodSize:
                for c in cir:
                    img.draw_circle(c.x(), c.y(), c.r(), color = (0, 255, 0))
                    pointlist.append([c.x(), c.y()])
            lcd.show_image(img, 320, 240, zoom=0)

    if len(pointlist) == CoodSize + 1:
        tmp = []
        # NOTE(review): 12, 8, 0.4, 3, 1.1 look like solver tuning
        # parameters; confirm whether the 12 should track CoodSize.
        Antires = Anti.AntsAlg(pointlist, 12, 8, 0.4, 3, 1.1)
        # Draw the solved tour on the preview image.
        for i in range(CoodSize):
            img.draw_line(pointlist[Antires[i]][0], pointlist[Antires[i]][1],
                          pointlist[Antires[(i + 1) % (CoodSize + 1)]][0],
                          pointlist[Antires[(i + 1) % (CoodSize + 1)]][1], color = (0, 0, 255))

        # Convert every pixel point into map coordinates via the homography
        # (divide by the homogeneous coordinate; the y axis is flipped).
        intdex = 0
        for cood in pointlist:
            test = Onp.array([[cood[0]], [cood[1]], [1]])
            point = mat * test
            tmp.append([round(point[0][0] / point[2][0]), round(24 - point[1][0] / point[2][0])])
            img.draw_string(cood[0] - 50, cood[1], "(%.1f,%.1f)" % (tmp[intdex][0], tmp[intdex][1]), (255, 0, 0))
            intdex = intdex + 1

        lcd.show_image(img, 320, 240, zoom=0)
        print(tmp)

        # Rotate the tour so it starts just after the start point (index 0),
        # then reverse it for the host's expected order.
        for index in range(CoodSize + 1):
            if Antires[index] == 0:
                break
        while len(res) != CoodSize:
            index = (index + 1) % (CoodSize + 1)
            res.append(tmp[Antires[index]])
        res.reverse()
        PointOK = 1
        print(res)

    lcd.show_image(img, 320, 240, zoom=0)
    sensor.set_brightness(brintness)  # restore normal brightness
    DataCommu.SendPointList(res)





def SearchBoundariesFun():
    """
    Look for a yellow boundary strip in the frame and report which edge of
    the field it is (Left/Right/Top/Bottom), or NoneBoundar when nothing
    qualifies. The classified side is printed, shown on the LCD and sent
    over the serial link.
    """
    yellow = (63, 100, -49, 68, 24, 106)  # LAB threshold for the yellow strip
    Auto = [0, 0, 320, 240]               # search the whole QVGA frame
    img = sensor.snapshot()
    for blob in img.find_blobs([yellow], roi=Auto, x_stride=20, y_stride=20):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
        lcd.show_image(img, 320, 240, zoom=0)

        # Classify by blob orientation and which half of the frame holds
        # its centroid; exact ties (square blob, centroid on the midline)
        # are skipped, matching the original fall-through behavior.
        side = None
        if blob.w() > blob.h():          # wide blob -> horizontal edge
            if blob.cy() > 120:
                side = "Bottom"
            elif blob.cy() < 120:
                side = "Top"
        elif blob.w() < blob.h():        # tall blob -> vertical edge
            if blob.cx() > 160:
                side = "Right"
            elif blob.cx() < 160:
                side = "Left"

        if side is not None:
            print(side)
            DataCommu.SendSearchBoundariesRes(SearchBoundariesRes_enum[side])
            return

    lcd.show_image(img, 320, 240, zoom=0)
    DataCommu.SendSearchBoundariesRes(SearchBoundariesRes_enum["NoneBoundar"])



def AdjFun():
    """
    Locate a roughly square target (80-160 px, aspect 0.7-1.3), classify its
    contents and send an alignment correction to the host.

    The correction (dx, dy) is the offset of the square's centre from the
    image centre, scaled to metres using the known 0.12 m edge length of
    the target. The final flag is 1 when the offset magnitude is below
    0.02 m (aligned), else 0. Sends all zeros when no target is found.
    """
    img = sensor.snapshot()
    for r in img.find_rects(threshold = 2000):
        aspect = r.h() / r.w()
        if r.w() > 80 and r.w() < 160 and r.h() > 80 and r.h() < 160 and aspect > 0.7 and aspect < 1.3:
            img.draw_rectangle(r.rect(), color = (255, 0, 0))
            img1 = img.copy(1, 1, r.rect())  # crop the square's contents

            sorted_list = None
            for obj in tf.classify(net , img1, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
                # keep the (label, score) ranking of the last classification
                sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
            if sorted_list is None:
                # fix: previously an empty classify() result left
                # sorted_list unbound and raised NameError below
                continue

            center_x = int(r.rect()[0] + r.rect()[2] / 2)
            center_y = int(r.rect()[1] + r.rect()[3] / 2)
            p = r.corners()
            # pixels -> metres, from the 0.12 m length of one square edge
            dxgain = 0.12 / math.sqrt((p[1][0] - p[0][0])**2 + (p[1][1] - p[0][1])**2)
            dx = (160 - center_x) * dxgain
            dy = (center_y - 120) * dxgain

            # aligned flag: 1 when within 2 cm of the image centre
            aligned = 1 if math.sqrt(dx * dx + dy * dy) < 0.02 else 0
            DataCommu.SendAdjInfo(-dx, -dy, IamgeClassifyRes_enum[sorted_list[0][0]], sorted_list[0][1], aligned)
            lcd.show_image(img, 320, 240, zoom=0)
            lcd.show_str("%.3f,%.3f,%s,%.3f"%(dx,dy,sorted_list[0][0],sorted_list[0][1]),0, 44, lcd.RED, 1)
            return

    # No target: draw centre crosshairs and report a zero correction.
    img.draw_line(0, 120, 319, 120, (255, 0, 0))
    img.draw_line(160, 0, 160, 239, (255, 0, 0))
    lcd.show_image(img, 320, 240, zoom=0)
    DataCommu.SendAdjInfo(0, 0, 0, 0, 0)


def SerchGarageFun():
    """Report to the host that the garage was found (always sends 1 —
    NOTE(review): detection appears stubbed out; confirm whether real
    search logic is still pending)."""
    DataCommu.SendSerchGarageRes(1)


def Init(data):
    """
    Apply an "init" command from the host.

    `data` has the shape "init:<brightness>,<width>,<height>,<citysize>\\n";
    the four integers update the module configuration and the new
    brightness is pushed to the sensor immediately.
    """
    global MapMaxX, MapMaxY, CoodSize, brintness

    # Payload sits between the ':' and the trailing newline.
    payload = data[data.find(":") + 1:data.find("\n")]
    fields = [int(v) for v in payload.split(",")]
    brintness, MapMaxX, MapMaxY, CoodSize = fields

    sensor.set_brightness(brintness)













# Command dispatch table: host command string -> handler.
_HANDLERS = {
    "GCR\n": ImageClassiflyFun,      # get classification result
    "GPL\n": PointListFun,           # get point list
    "GSBR\n": SearchBoundariesFun,   # get search-boundaries result
    "GAI\n": AdjFun,                 # get adjustment info
    "GSGR\n": SerchGarageFun,        # get search-garage result
}

# Main loop: block for one command from the host and dispatch it.
while True:
    cmd = DataCommu.GetOneCommand()


    print(cmd)
    handler = _HANDLERS.get(cmd)
    if handler is not None:
        handler()
    elif cmd.startswith("init"):
        Init(cmd)
