'''
Author: goog
Date: 2021-12-15 12:31:41
LastEditTime: 2022-01-13 20:15:15
LastEditors: goog
Description: LS screw localization / presence check
FilePath: /chengdu/TensorRT20220110/DetectionFM/LS/LS.py
Time Limit Exceeded!
'''

import cv2
import json
import numpy as np
import os
import torch
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet18
import uuid

def result_cls(cfg, net, roi):
    """Classify a cropped screw ROI with the given network.

    Args:
        cfg: config dict; only cfg['device'] is used (e.g. 'cpu', 'cuda:0').
        net: classification network; called as ``net(img)`` and expected to
            return a (1, C) logit tensor.
        roi: image crop as a numpy uint8 array of shape (H, W, 3) — assumed
            RGB (callers pass crops of a BGR->RGB converted image).

    Returns:
        int: index of the highest-probability class.
    """
    device = torch.device(cfg['device'])
    # NOTE: resizing with OpenCV is intentional — the original author found
    # that resizing with PIL produced wrong classification results.
    roi = cv2.resize(roi, (100, 100))
    imgtransform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    img = imgtransform(Image.fromarray(roi)).unsqueeze(0).to(device)
    with torch.no_grad():
        logits = net(img)                      # shape (1, C)
        probs = torch.softmax(logits, dim=1)
        return int(probs.argmax())

def findLsPoints(points, xs_near_ls, thresh):
    """Remove the easily-occluded screw next to the handle box.

    A detection is dropped when the L1 distance between its four box
    corners and the reference box *xs_near_ls* is at most *thresh*.

    Args:
        points: iterable of [xmin, ymin, xmax, ymax, conf] rows.
        xs_near_ls: reference box (xmin, ymin, xmax, ymax) of the screw
            to mask out.
        thresh: boxes strictly farther than this (summed over the four
            corner coordinates) are kept.

    Returns:
        np.ndarray of the surviving rows (empty 1-D array when none survive).
    """
    ref_x0, ref_y0, ref_x1, ref_y1 = xs_near_ls
    kept = [
        [x0, y0, x1, y1, score]
        for x0, y0, x1, y1, score in points
        if (abs(ref_x0 - x0) + abs(ref_y0 - y0)
            + abs(ref_x1 - x1) + abs(ref_y1 - y1)) > thresh
    ]
    return np.array(kept)

def findByPoints(points, tp_near_ls1, tp_near_ls2, thresh):
    """Check whether the two faint screws left of the iron plate were detected.

    Each detection's box center is compared (L1 distance) against the two
    expected screw centers; a screw counts as present when some center lies
    strictly within *thresh*.

    Note: the two checks are chained with if/elif, so a single box can only
    confirm the first reference point it matches.

    Args:
        points: iterable of [xmin, ymin, xmax, ymax, conf] rows.
        tp_near_ls1: expected [cx, cy] of the first screw.
        tp_near_ls2: expected [cx, cy] of the second screw.
        thresh: maximum L1 center distance to count as a match.

    Returns:
        (exist_ls1, exist_ls2): booleans for the two reference screws.
    """
    ref1_x, ref1_y = tp_near_ls1
    ref2_x, ref2_y = tp_near_ls2
    found1, found2 = False, False
    for x0, y0, x1, y1, _score in points:
        cx = (x0 + x1) / 2
        cy = (y0 + y1) / 2
        if abs(ref1_x - cx) + abs(ref1_y - cy) < thresh:
            found1 = True
        elif abs(ref2_x - cx) + abs(ref2_y - cy) < thresh:
            found2 = True
    return found1, found2

def ls(cfg, image1, item1, barcode, cam_ip, det_net, visualization=False):
    """Decide whether all LS screws are present on a door-panel image.

    Detected screw boxes (item1['ls']) are filtered to the camera-specific
    side of the iron-plate ('tp') reference line, screws occluded by the
    wire harness are masked per door type, each remaining crop is classified
    with ``det_net`` via :func:`result_cls`, and the count of normal screws
    is compared with the expected count from the LS.json config file.

    Bug fixes vs. the previous revision:
      * ``img`` was only assigned inside the visualization branch but was
        returned unconditionally -> NameError when visualization=False.
      * classification/counting only ran inside the visualization branch,
        so the non-visualization path always compared 0 screws against the
        expected count.  Counting now runs unconditionally; only drawing
        and image writing remain behind ``visualization``.

    Args:
        cfg: config dict; uses cfg['abs_dir'] and (via result_cls) cfg['device'].
        image1: BGR camera image as a numpy array.
        item1: detection dict; needs keys 'tp' and 'ls', optionally 'xs'
            (wire harness present).
        barcode: part barcode; expected to contain FR-L/FR-R/RR-L/RR-R.
        cam_ip: camera IP, "192.168.7.7" or "192.168.8.8".
        det_net: screw-state classification network.
        visualization: when True, draw results and write a debug image.

    Returns:
        (ok, img): ok is True when the normal-screw count equals the
        configured expected count (or when no iron plate was found);
        img is the annotated image when visualization is on, else None.
    """
    image = image1.copy()
    image_roi = cv2.cvtColor(image1.copy(), cv2.COLOR_BGR2RGB)
    defect = "LS"

    # Per-door-type pixel bias applied to the tp reference line
    # (bias8 for camera .8.8, bias7 for camera .7.7).
    if 'RR-L' in barcode:
        bias8 = -350
        bias7 = -300
    elif 'FR-R' in barcode:
        bias8 = 150
        bias7 = 100
    elif 'RR-R' in barcode:
        bias8 = 100
        bias7 = 100
    elif 'FR-L' in barcode:
        bias8 = 100
        bias7 = 100
    else:
        bias8 = 100
        bias7 = 100
        print('ls error')

    # Expected-count config for this defect type.
    abs_dir = cfg['abs_dir']
    config_path = os.path.join(abs_dir, 'DetectionFM/' + defect + '/' + defect + '.json')

    # Judge relative to the iron plate; without a 'tp' detection the screws
    # cannot be localized, so report OK and no image.
    item_tp = item1['tp']
    if len(item_tp) > 0:
        img = None  # annotated image; stays None unless visualization runs

        item_defect = item1[defect.lower()]
        defect_np = np.array(item_defect)
        # Keep only boxes on the camera-specific side of the tp line.
        if cam_ip == "192.168.8.8":
            ind = np.where((defect_np[:, 2] < item_tp[0][2] + bias8) & (defect_np[:, 2] > 1000))
            plot_bias = bias8
        else:
            ind = np.where((defect_np[:, 2] > (item_tp[0][2] + bias7)) & (defect_np[:, 2] < 4000))
            plot_bias = bias7
        defect_np_lt = defect_np[ind[0], :]

        # Keep only confident boxes.
        conf_ind = np.where(defect_np_lt[:, -1] > 0.2)
        defect_np_lt = defect_np_lt[conf_ind[0], :]

        # Mask the screw hidden by the wire harness ('xs'; front doors only)
        # and check the two faint screws left of the iron plate.
        # Reference centers [cx, cy]:
        #   FR-L: none
        #   FR-R: [3283, 2429], [3649, 2040]  (cam .8.8)
        #   RR-L: [2090, 1900], [2458, 2286]  (cam .7.7)
        #   RR-R: [2301, 2288], [2661, 1910]  (cam .8.8)
        exist_ls1, exist_ls2 = True, True
        thresh = 400

        if 'FR-R' in barcode and cam_ip == "192.168.7.7":
            if 'xs' in item1.keys():
                defect_np_lt = findLsPoints(defect_np_lt,
                                            (567, 2734, 629, 2802), thresh)
        elif 'FR-R' in barcode and cam_ip == "192.168.8.8":
            if 'xs' in item1.keys():
                defect_np_lt = findLsPoints(defect_np_lt,
                                            (3686, 2770, 3723, 2813), thresh)
            exist_ls1, exist_ls2 = findByPoints(defect_np_lt, [3283, 2429], [3649, 2040], 200)
        elif 'FR-L' in barcode and cam_ip == "192.168.7.7":
            if 'xs' in item1.keys():
                defect_np_lt = findLsPoints(defect_np_lt,
                                            (1149, 2755, 1215, 2823), thresh)
        elif 'FR-L' in barcode and cam_ip == "192.168.8.8":
            if 'xs' in item1.keys():
                defect_np_lt = findLsPoints(defect_np_lt,
                                            (4162, 2753, 4238, 2818), thresh)
        # Rear doors have no harness; only the two faint screws matter.
        elif 'RR-L' in barcode and cam_ip == "192.168.7.7":
            exist_ls1, exist_ls2 = findByPoints(defect_np_lt, [2090, 1900], [2458, 2286], 200)
        elif 'RR-L' in barcode and cam_ip == "192.168.8.8":
            pass
        elif 'RR-R' in barcode and cam_ip == "192.168.7.7":
            pass
        elif 'RR-R' in barcode and cam_ip == "192.168.8.8":
            exist_ls1, exist_ls2 = findByPoints(defect_np_lt, [2301, 2288], [2661, 1910], 200)
        else:
            print('error 203')

        with open(config_path, 'r') as fp:
            config = json.load(fp)
            if barcode not in config.keys():
                # Fall back to the generic C50 entry for unknown barcodes.
                barcode = 'C50' + barcode[3:]
            defect_count = config[barcode][cam_ip]

        object_count = 0         # crops classified as screw present/normal
        defect_object_count = 0  # crops classified as screw missing/abnormal

        if visualization:
            img = image
            # Mark the expected positions of any missing faint screws.
            # NOTE: tp_point1/2 are only defined for these three door types;
            # for FR-L exist_ls1/exist_ls2 stay True so they are never drawn.
            if 'RR-L' in barcode:
                tp_point1, tp_point2 = [2090, 1900], [2458, 2286]
            elif 'RR-R' in barcode:
                tp_point1, tp_point2 = [2301, 2288], [2661, 1910]
            elif 'FR-R' in barcode:
                tp_point1, tp_point2 = [3283, 2429], [3649, 2040]
            print(exist_ls1, exist_ls2)
            if not exist_ls1:
                img = cv2.rectangle(img,
                                    (tp_point1[0] - 50, tp_point1[1] - 50),
                                    (tp_point1[0] + 50, tp_point1[1] + 50),
                                    color=(0, 0, 255),
                                    thickness=18)
            if not exist_ls2:
                img = cv2.rectangle(img,
                                    (tp_point2[0] - 50, tp_point2[1] - 50),
                                    (tp_point2[0] + 50, tp_point2[1] + 50),
                                    color=(0, 0, 255),
                                    thickness=18)
            # Draw the tp reference line used for the left/right split.
            img = cv2.line(image,
                           (item_tp[0][2] + plot_bias, 0),
                           (item_tp[0][2] + plot_bias, 3647),
                           color=(255, 0, 255),
                           thickness=10)

        # Second-stage check: classify every surviving crop to confirm the
        # screw is really present.  This must run even without visualization.
        for v in defect_np_lt:
            xmin, ymin, xmax, ymax = int(v[0]), int(v[1]), int(v[2]), int(v[3])
            state = result_cls(cfg, net=det_net,
                               roi=image_roi[ymin - 10:ymax + 10, xmin - 10:xmax + 10, :])
            if state:
                object_count += 1
                if visualization:
                    img = cv2.rectangle(img,
                                        (xmin, ymin),
                                        (xmax, ymax),
                                        color=(0, 255, 0),
                                        thickness=6)
            else:
                defect_object_count += 1
                if visualization:
                    # Dump the abnormal crop for offline inspection.
                    cv2.imwrite(os.path.join('./DetectionFM/LS', str(1) + '.png'),
                                cv2.resize(image_roi[ymin - 10:ymax + 10, xmin - 10:xmax + 10, :], (100, 100)))
                    img = cv2.rectangle(img,
                                        (xmin, ymin),
                                        (xmax, ymax),
                                        color=(0, 0, 255),
                                        thickness=18)

        if visualization:
            cv2.imwrite(os.path.join(abs_dir, 'Test', defect + cam_ip + '.jpg'), img=img)
            print("{}_{}\tyolo:{}\\{}\t\tcls:normal[{}]\tabnormal[{}]:".format(
                cam_ip, defect, defect_np_lt.shape[0], defect_count,
                object_count, defect_object_count))

        # The harness hides one screw on these camera/door combinations,
        # so expect one fewer detection.
        if 'FR-R' in barcode and 'xs' in item1.keys() and cam_ip == "192.168.8.8":
            defect_count -= 1
        elif 'FR-L' in barcode and 'xs' in item1.keys() and cam_ip == "192.168.7.7":
            defect_count -= 1

        return object_count == defect_count, img
    else:
        # No iron plate detected: cannot evaluate this image, assume OK.
        return True, None