#...............................Imports..................................................................
import serial
import os
import torch
import math
import numpy as np
import FCN_NetModel as FCN # The net Class
import CategoryDictionary as CatDic
import cv2
from picamera2 import Picamera2, Preview
#import scipy.misc as misc


def detclenth(InputDir):
    """Return the vertical extent, in pixels, of the target-colored region in an image.

    InputDir: path to an image file (an overlay produced by the segmentation loop).
    Returns the pixel height (max_y - min_y) of the largest matching contour.
    Raises ValueError if the image cannot be read or no matching region exists.
    """
    # Read image
    image = cv2.imread(InputDir)
    if image is None:
        raise ValueError("Could not read image: " + InputDir)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Exact-match HSV value of the overlay color painted by the main loop
    # (BGR 255,0,255 -> HSV 150,255,255); lower == upper is intentional.
    lower_color = np.array([150, 255, 255])  # lowest HSV value
    upper_color = np.array([150, 255, 255])  # highest HSV value

    # Create color mask; erode+dilate removes small speckles
    mask = cv2.inRange(hsv, lower_color, upper_color)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # Find contours (handle both 2- and 3-tuple OpenCV return signatures)
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]

    if not contours:
        # BUG FIX: previously this fell through and raised UnboundLocalError
        # on `pixel_diff`; fail with an explicit, descriptive error instead.
        raise ValueError("No target-colored region found in: " + InputDir)

    # Largest contour is assumed to be the region of interest
    c = max(contours, key=cv2.contourArea)

    # Contours are (N, 1, 2) arrays of (x, y); take the vertical span directly
    # instead of scanning point-by-point for the top and bottom points.
    ys = c[:, 0, 1]
    return int(ys.max() - ys.min())

def caculenth(Liuqiddir, Vessldir):
    """Compute and return the angular velocity w (rad/s) from two overlay images.

    Liuqiddir: path to the liquid-region overlay image.
    Vessldir:  path to the vessel-region overlay image.
    """
    # Pixel heights of the liquid column and of the whole vessel.
    liquid_px = detclenth(Liuqiddir)
    vessel_px = detclenth(Vessldir)

    vessel_height_mm = 102   # vessel height, millimetres
    initial_level_mm = 51    # initial liquid surface height, millimetres
    radius = 0.0375          # vessel radius, metres

    # Convert the liquid pixel height to millimetres using the vessel as scale,
    # then take the absolute displacement from the initial level in metres.
    level_mm = liquid_px / vessel_px * vessel_height_mm
    displacement_m = abs((level_mm - initial_level_mm) * 0.002)

    # w = sqrt(2*g*h) / r — presumably the rotating-liquid surface relation;
    # verify against the experiment's derivation.
    w = math.sqrt(displacement_m * 2 * 9.8 / radius / radius)
    print("w={:.2f}rad/s".format(w))
    return w
####serial####
# Serial link to the external display/controller at 9600 baud; 5 s read timeout.
ser=serial.Serial(port="/dev/ttyUSB0",baudrate=9600,timeout=5)
############################################camera###################################################################################
# Raspberry Pi camera; NULL preview because this runs headless.
picam2 = Picamera2()
picam2.start_preview(Preview.NULL)
############################################Input parameters###################################################################################
#-------------------------------------Input parameters-----------------------------------------------------------------------
InputDir="/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/InputImages/" # Folder of input images
OutDir="/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/Out/" # Folder of output
 
UseGPU=False # Use GPU or CPU  for prediction (GPU faster but demend nvidia GPU and CUDA installed else set UseGPU to False)
FreezeBatchNormStatistics=False # wether to freeze the batch statics on prediction  setting this true or false might change the prediction mostly False work better
OutEnding="" # Add This to file name
if not os.path.exists(OutDir): os.makedirs(OutDir) # Create folder for trained weight

#-----------------------------------------Location of the pretrain model-----------------------------------------------------------------------------------
Trained_model_path ="/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/logs//TrainedModelWeiht1m_steps_Semantic_TrainedWithLabPicsAndCOCO_AllSets.torch"
##################################Load net###########################################################################################
#---------------------Create and Initiate net and load the pretrained weights----------------------------------------------------------
Net = FCN.Net(CatDic.CatNum)  # Create net and load pretrained encoder path
# Load the checkpoint once, mapping to CPU when no CUDA device is used.
if UseGPU:
    print("USING GPU")
    state_dict = torch.load(Trained_model_path)
else:
    print("USING CPU")
    state_dict = torch.load(Trained_model_path, map_location=torch.device('cpu'))
Net.load_state_dict(state_dict)
#--------------------------------------------------------------------------------------------------------------------------
# Main loop: capture a frame, run segmentation on every image in InputDir,
# save per-category magenta overlays, then compute the rotation speed from
# the Liquid/Vessel overlays and send it to the display over serial.
while 1:
    picam2.start_and_capture_file('/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/InputImages/test2.jpg')
    for name in os.listdir(InputDir): # Main read and predict results for all files

    #..................Read and resize image...............................................................................
        print(name)
        InPath=InputDir+"/"+name
        Im=cv2.imread(InPath)
        # BUG FIX: the readability check used to run only AFTER rotate() and
        # .shape had already dereferenced Im; an unreadable/non-image file
        # would crash before being skipped. Check immediately after imread.
        if Im is None: continue
        Im=cv2.rotate(Im,cv2.ROTATE_90_CLOCKWISE)
        h,w,d=Im.shape
        r=np.max([h,w])
        if r>840: # Images larger than 840x840 are shrunk (not essential, but net results may degrade on very large images)
            fr=840/r
            Im=cv2.resize(Im,(int(w*fr),int(h*fr)))
        Imgs=np.expand_dims(Im,axis=0)
    #................................Make Prediction.............................................................................................................
        with torch.autograd.no_grad():
              OutProbDict,OutLbDict=Net.forward(Images=Imgs,TrainMode=False,UseGPU=UseGPU, FreezeBatchNormStatistics=FreezeBatchNormStatistics) # Run net inference and get prediction
    #...............................Save prediction to file......................................................................................................
        print("Saving output to: " + OutDir)
        for nm in OutLbDict:
            Lb=OutLbDict[nm].data.cpu().numpy()[0].astype(np.uint8)
            if Lb.mean()<0.001: continue  # skip near-empty masks
            if nm=='Ignore': continue
            # Paint predicted pixels magenta (B=255, G=0, R=255); this exact
            # color is what detclenth() matches in HSV space later.
            ImOverlay1 = Im.copy()
            ImOverlay1[:, :, 0][Lb==1] = 255
            ImOverlay1[:, :, 1][Lb==1] = 0
            ImOverlay1[:, :, 2][Lb==1] = 255
            OutPath = OutDir + "/" + nm + "/"
            if not os.path.exists(OutPath): os.makedirs(OutPath)
            OutName=OutPath+name[:-4]+OutEnding+".png"
            cv2.imwrite(OutName,ImOverlay1)

    # Measure liquid level vs. vessel height from the saved overlays.
    Liuqiddir='/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/Out/Liquid GENERAL/test2.png'
    Vessldir='/home/pi/Desktop/Semantic_Segmentation_Materials_In_VesselLabPics_PreTrained_WITH_WEIGHT/Out/Vessel/test2.png'
    num=caculenth(Liuqiddir,Vessldir)
    # Display command: t0.txt="<value>" followed by the 0xFF 0xFF 0xFF terminator
    # (Nextion-style protocol — confirm against the display's instruction set).
    a="t0.txt=\""
    b="%.2f"%num
    c="\""
    ser.write((a+b+c).encode("GB2312"))
    # Send end-of-command terminator
    ser.write(bytes.fromhex('ff ff ff'))
