import sys
sys.path.append('/project/')
import json
#from train.src_repo.deeplab import DeeplabV3  
from PIL import Image
import numpy as np
import cv2
import copy
from  train.models.deeplabv3_plus import DeepLab
import torch
import torch.nn.functional as F
import logging
def resize_image(image, size):
    """Letterbox-resize a PIL image to `size` without distortion.

    The image is scaled to fit inside `size` while keeping its aspect
    ratio, then pasted centered on a gray (128, 128, 128) canvas.

    Args:
        image: source PIL.Image.
        size: (width, height) of the output canvas.

    Returns:
        (canvas, new_w, new_h): the padded image plus the width/height of
        the scaled content actually pasted onto it.
    """
    src_w, src_h = image.size
    dst_w, dst_h = size

    # One scale factor for both axes keeps the aspect ratio intact.
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)

    scaled = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    # Center the scaled content; the surrounding gray bars are the padding.
    canvas.paste(scaled, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))

    return canvas, new_w, new_h
def preprocess_input(image):
    """Scale pixel values from [0, 255] to [0.0, 1.0].

    Bug fix: the original used `image /= 255.0`, which mutated the caller's
    array in place and raised TypeError for integer dtypes. Returning a new
    array leaves the input untouched and accepts any numeric dtype.

    Args:
        image: numpy array of pixel values in [0, 255].

    Returns:
        A new float array with values in [0.0, 1.0].
    """
    return image / 255.0


def cvtColor(image):
    """Return the image as 3-channel RGB, converting only when necessary.

    An input that already has shape (H, W, 3) is returned unchanged;
    anything else (grayscale, palette, RGBA, ...) is converted via
    PIL's `convert('RGB')`.
    """
    shape = np.shape(image)
    if len(shape) == 3 and shape[2] == 3:
        return image
    return image.convert('RGB')




def init():
    """Build the DeepLabV3+ model, load the trained weights, and return it in eval mode.

    Returns:
        The DeepLab network on the best available device (CUDA if present,
        else CPU), with gradients disabled via `.eval()`.
    """
    num_classes = 5
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    backbone = "xception"
    model_path = "/project/train/models/best_epoch_weights.pth"
    net = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=16, pretrained=False)
    # Bug fix: map_location lets a GPU-saved checkpoint load on CPU-only hosts;
    # without it torch.load raises when CUDA is unavailable.
    net.load_state_dict(torch.load(model_path, map_location=device))
    net.to(device)
    net.eval()
    return net
def process_image(deeplab, input_image, args=None, **kwargs):
    """Run semantic segmentation on one frame and return a result JSON string.

    Args:
        deeplab: loaded segmentation model in eval mode (see `init`).
        input_image: HxWx3 uint8 numpy array (assumed RGB channel order —
            NOTE(review): the __main__ path feeds cv2.imread BGR; confirm).
        args: optional JSON string; when it contains 'mask_output_path',
            the class-id mask is saved there and reported under
            model_data["mask"].
        **kwargs: ignored (kept for caller compatibility).

    Returns:
        JSON string with "algorithm_data" (alert flag, garbage boxes) and
        "model_data" (one largest bounding box per detected class).
    """
    input_image = Image.fromarray(input_image)
    label_id = ["background", "algae", "dead_twigs_leaves", "garbage", "water"]
    num_classes = 5
    input_shape = [512, 512]
    flag = 0                # becomes 1 when a garbage alert should fire
    segmented_boxes = []    # one largest bounding box per class present
    alert_size = 1          # garbage area ratio (%) above which we alert
    garbage_box = []
    n = 0                   # garbage contour count reported on alert
    # Class id -> gray value; pixel value of the saved mask equals the class id.
    colors = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)]

    image = cvtColor(input_image)
    orininal_h = np.array(image).shape[0]
    orininal_w = np.array(image).shape[1]
    # Letterbox resize so the network sees an undistorted 512x512 input.
    image_data, nw, nh = resize_image(image, (512, 512))
    # HWC -> CHW and add the batch dimension.
    image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)

    with torch.no_grad():
        images = torch.from_numpy(image_data)
        # Bug fix: was an unconditional .cuda(), which crashed on CPU-only hosts.
        if torch.cuda.is_available():
            images = images.cuda()
        pr = deeplab(images)[0]

    # Per-pixel class probabilities, shape (H, W, num_classes).
    pr = F.softmax(pr.permute(1, 2, 0), dim=-1).cpu().detach().numpy()
    # Crop away the gray letterbox bars, then resize back to the source frame.
    pr = pr[int((input_shape[0] - nh) // 2): int((input_shape[0] - nh) // 2 + nh),
            int((input_shape[1] - nw) // 2): int((input_shape[1] - nw) // 2 + nw)]
    pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation=cv2.INTER_LINEAR)
    # Per-pixel class ids.
    pr = pr.argmax(axis=-1)

    total_points_num = orininal_h * orininal_w
    for i in range(num_classes):
        num = np.sum(pr == i)
        ratio = num / total_points_num * 100
        # Binary mask for this class, then its external contours.
        mask = np.zeros_like(pr, dtype=np.uint8)
        mask[pr == i] = 255
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Bug fix: max_rect must restart per class; it previously carried the
        # largest rectangle over from earlier classes, corrupting the boxes.
        max_rect = None
        for contour in contours:
            rect = cv2.boundingRect(contour)
            if max_rect is None or rect[2] * rect[3] > max_rect[2] * max_rect[3]:
                max_rect = rect
        if max_rect is None:
            # Bug fix: class absent from the image — unpacking None crashed here.
            continue
        x, y, w, h = max_rect
        if i == 3 and ratio > alert_size:
            n = len(contours)
            flag = 1
            garbage_box.append({'x': x, 'y': y, 'height': h, 'weight': w, "name": "garbage", "area_ratio": ratio})
        segmented_boxes.append({'x': x, 'y': y, 'height': h, 'weight': w, "name": label_id[i],
                                "area_ratio": ratio, })

    # Grayscale mask image whose pixel value equals the class id.
    seg_img = np.reshape(np.array(colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])
    img = Image.fromarray(np.uint8(seg_img)).convert('L')

    if args is not None:
        args = json.loads(args)
        mask_output_path = args['mask_output_path']
        img.save(mask_output_path, format='PNG')
        if flag == 0:
            return json.dumps({
                "algorithm_data": {
                    "is_alert": "false",
                    "target_count": 0,
                    "target_info": []
                },
                "model_data": {
                    "objects": segmented_boxes,
                    "mask": mask_output_path
                }
            }, indent=4)
        return json.dumps({
            "algorithm_data": {
                "is_alert": "true",
                "target_count": n,
                "target_info": garbage_box
            },
            "model_data": {
                "objects": segmented_boxes,
                "mask": mask_output_path
            }
        }, indent=4)
    if flag == 0:
        return json.dumps({
            "algorithm_data": {
                "is_alert": "false",
                "target_count": 0,
                "target_info": []
            },
            "model_data": {
                "objects": [],
            }
        }, indent=4)
    return json.dumps({
        "algorithm_data": {
            "is_alert": "true",
            "target_count": n,
            "target_info": garbage_box
        },
        "model_data": {
            "objects": segmented_boxes,
        }
    }, indent=4)
   
if __name__ =="__main__":
    net=init()
    img=cv2.imread("/home/data/1945/ZDSfloating_objects20230206_V3_train_sea_1_011769.jpg")
    #img.show()
    res=process_image(net,img)
    #print(res)