# -*- coding: utf-8 -*-
# -------------------------------
# @项目：机器视觉模型
# @文件：jietu_shuimian.py
# @时间：2024/4/3 10:41
# @作者：AnFany
# -------------------------------
import ast
import configparser
import os
import shutil

# 实现基于标注文件的图片特定区域选取
# 特定区域内的颜色空间聚类
# 实现问题区域的划分
import cv2
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans

class SX:
    """Flag a polygon-masked water-surface region of a test image as sewage.

    The polygon comes from an annotation CSV; every pixel inside it is
    compared against the corresponding pixel of a reference image using the
    CIEDE2000 colour difference.  When the share of differing pixels reaches
    the configured rate, the pixels are marked, alarm images are saved and
    '异常' (abnormal) is returned; otherwise '正常' (normal) is returned.
    """

    def __init__(self, orifig, labcsv, config_ini, testfig, nc=2, colo=(0, 0, 255), per=0.1):
        self.orifig = orifig    # reference (baseline) image path
        self.labcsv = labcsv    # CSV file holding the polygon annotation
        self.testfig = testfig  # image to compare against the reference
        self.nc = nc            # number of clusters (kept for backward compat; unused here)
        self.colo = colo        # BGR colour used to mark the flagged pixels
        self.per = per          # cluster-share threshold (kept for backward compat; unused here)

        # Annotated image size and polygon vertex lists.
        self.Y, self.X, self.xp, self.yp = self.read_lab()
        # Decision thresholds from the ini file.
        self.rate, self.distance_error = self.read_ini(config_ini)

    def readimg(self, filename, mode=-1):
        """Read an image from a possibly non-ASCII (e.g. Chinese) path.

        cv2.imread cannot handle such paths on some platforms, so the raw
        bytes are loaded with numpy and decoded with cv2.imdecode instead.
        """
        raw_data = np.fromfile(filename, dtype=np.uint8)
        return cv2.imdecode(raw_data, mode)

    def _writeimg(self, filename, img):
        """Write an image to a possibly non-ASCII path (mirror of readimg)."""
        ext = os.path.splitext(filename)[1] or '.jpg'
        ok, buf = cv2.imencode(ext, img)
        if not ok:
            raise IOError('cv2.imencode failed for %s' % filename)
        buf.tofile(filename)

    def read_ini(self, config_ini):
        """Read the two decision thresholds from the config file.

        Returns:
            (rate, distance_error): minimum fraction of polygon pixels that
            must differ to raise an alarm, and the CIEDE2000 distance above
            which a single pixel counts as differing.
        """
        config = configparser.ConfigParser()
        config.read(config_ini, encoding='utf8')
        # getfloat replaces the original eval(): same numeric result without
        # executing arbitrary expressions read from a config file.
        rate = config.getfloat('Section1', 'rate')
        distance_error = config.getfloat('Section1', 'distance_error')
        return rate, distance_error

    # Read the annotation file.
    def read_lab(self):
        """Read the polygon annotation and the annotated image size.

        Returns:
            (Y, X, x_points, y_points): height/width of the reference image
            and the polygon vertex coordinate lists.
        """
        # region_shape_attributes holds a dict literal (VIA-style export);
        # ast.literal_eval parses it without eval's code-execution risk.
        shape = ast.literal_eval(pd.read_csv(self.labcsv)['region_shape_attributes'].values[0])
        x_p = shape['all_points_x']
        y_p = shape['all_points_y']
        # Size of the image the annotation was made on.
        Y, X, _ = self.readimg(self.orifig).shape
        return Y, X, x_p, y_p

    # RGB to LAB: LAB better matches human colour perception, so distances in
    # LAB agree better with subjective impressions. Range: L [0,100], AB [-128,127].
    def RGBtoLAB(self, rgb):
        """Convert an [0-255, 0-255, 0-255] triple to CIE LAB (D65 white).

        NOTE(review): cv2.imdecode yields BGR channel order, while this
        conversion assumes RGB; both images go through the same conversion,
        but the absolute LAB values may be channel-swapped — confirm intent.
        """
        rgb = [k / 255 for k in rgb]
        # ---------------- RGB to XYZ ----------------
        # Inverse sRGB gamma (linearisation).
        rgb = [k / 12.92 if k <= 0.04045 else np.power((k + 0.055) / 1.055, 2.4) for k in rgb]
        trans_m = [[0.412453, 0.357580, 0.180423], [0.212671, 0.715160, 0.072169], [0.019334, 0.119193, 0.950227]]
        XYZ = np.matmul(trans_m, np.array(rgb).reshape(-1, 1)) * 100
        # ---------------- XYZ to LAB ----------------
        # Normalise by the D65 reference white.
        XYZ = XYZ.flatten() / [95.0456, 100, 108.8754]
        Y1 = XYZ[1]
        XYZ = [7.787 * k + 16 / 116 if k <= 0.008856 else np.power(k, 1 / 3) for k in XYZ]
        X, Y, Z = XYZ
        # Round to 4 decimals.
        lab_L = round(116 * np.power(Y1, 1 / 3) - 16 if Y1 > 0.008856 else 903.3 * Y1, 4)
        lab_A = round(500.0 * (X - Y), 4)
        ab_B = round(200.0 * (Y - Z), 4)
        return [lab_L, lab_A, ab_B]

    # CIEDE2000 distance between two LAB colours.
    def com_dis(self, lab1, lab2, kl=1, kc=1, kh=1):
        """Return the CIEDE2000 colour difference between lab1 and lab2.

        Equation numbers refer to:
        https://hajim.rochester.edu/ece/sites/gsharma/ciede2000/ciede2000noteCRNA.pdf
        """
        l1, a1, b1 = lab1
        l2, a2, b2 = lab2
        c_mean = (np.sqrt(a1 ** 2 + b1 ** 2) + np.sqrt(a2 ** 2 + b2 ** 2)) / 2  # (3)
        G = 0.5 * (1 - np.sqrt((c_mean ** 7) / (c_mean ** 7 + 25 ** 7)))  # (4)

        a_1 = (1 + G) * a1
        a_2 = (1 + G) * a2  # (5)

        C_1 = np.sqrt(a_1 ** 2 + b1 ** 2)
        C_2 = np.sqrt(a_2 ** 2 + b2 ** 2)  # (6)

        h_1 = 0 if ((b1 == a_1) and (a_1 == 0)) else np.degrees(np.arctan2(b1, a_1))
        h_2 = 0 if ((b2 == a_2) and (a_2 == 0)) else np.degrees(np.arctan2(b2, a_2))  # (7) degrees
        if h_1 < 0:
            h_1 += 360
        if h_2 < 0:
            h_2 += 360

        l_d = l2 - l1  # (8)
        c_d = C_2 - C_1  # (9)
        # (10) hue difference, wrapped into (-180, 180].
        if C_1 * C_2 == 0:
            h_d = 0
        else:
            if abs(h_1 - h_2) <= 180:
                h_d = h_2 - h_1
            elif (h_2 - h_1) > 180:
                h_d = h_2 - h_1 - 360
            else:
                h_d = h_2 - h_1 + 360
        H_d = 2 * np.sqrt(C_2 * C_1) * np.sin(np.radians(h_d / 2))  # (11)
        L_D = (l1 + l2) / 2  # (12)
        C_D = (C_2 + C_1) / 2  # (13)
        # (14) mean hue, with wrap-around handling.
        if C_2 * C_1 == 0:
            h_heng = h_2 + h_1
        else:
            if abs(h_1 - h_2) <= 180:
                h_heng = (h_2 + h_1) / 2
            else:
                if h_1 + h_2 >= 360:
                    h_heng = (h_1 + h_2 - 360) / 2
                else:
                    h_heng = (h_1 + h_2 + 360) / 2

        T = 1 - 0.17 * np.cos(np.radians(h_heng - 30)) + 0.24 * np.cos(np.radians(2 * h_heng)) + \
            0.32 * np.cos(np.radians(3 * h_heng + 6)) - 0.2 * np.cos(np.radians(4 * h_heng - 63))  # (15)

        theta_d = 30 * np.exp(-((h_heng - 275) / 25) ** 2)  # (16)
        R_c = 2 * np.sqrt((C_D ** 7) / (C_D ** 7 + 25 ** 7))  # (17)
        Sl = 1 + (0.015 * (L_D - 50) ** 2) / (np.sqrt(20 + (L_D - 50) ** 2))  # (18)
        Sc = 1 + 0.045 * C_D  # (19)
        Sh = 1 + 0.015 * C_D * T  # (20)
        RT = - np.sin(np.radians(2 * theta_d)) * R_c  # (21)
        # (22) final distance.
        delta_e = np.sqrt(
            (l_d / (kl * Sl)) ** 2 + (c_d / (kc * Sc)) ** 2 + (H_d / (kh * Sh)) ** 2 +
            RT * ((c_d / (kc * Sc)) * (H_d / (Sh * kh))))
        return delta_e

    def ColourDistance(self, rgb_1, rgb_2):
        """CIEDE2000 distance between two 0-255 colour triples."""
        lab1 = self.RGBtoLAB(rgb_1)
        lab2 = self.RGBtoLAB(rgb_2)
        return self.com_dis(lab1, lab2)

    def _region_pixels(self, figdata):
        """Collect the pixels strictly inside the annotated polygon.

        The polygon is rescaled from the annotated image size (self.X,
        self.Y) to the size of figdata.

        Returns:
            (pixels, coords, polygon): per-pixel colour lists, the matching
            [y, x] coordinates, and the rescaled polygon vertex array.
        """
        ty, tx, *_ = figdata.shape
        # Scale the annotation to this image's resolution.
        per_x = tx / self.X
        per_y = ty / self.Y
        polygon = np.array(
            [[int(x * per_x), int(y * per_y)] for x, y in zip(self.xp, self.yp)],
            dtype=np.int32)
        pixels = []
        coords = []
        for y2 in range(ty):
            for x2 in range(tx):
                # pointPolygonTest > 0 means strictly inside the polygon.
                if cv2.pointPolygonTest(polygon, (x2, y2), False) > 0:
                    pixels.append(list(figdata[y2, x2]))
                    coords.append([y2, x2])
        return pixels, coords, polygon

    # Compare the test image against the reference.
    def select_fig(self):
        """Compare test vs reference inside the polygon and report.

        Marks differing pixels on the test image, saves alarm copies and
        returns '异常' (abnormal) or '正常' (normal).
        """
        figdata_testfig = self.readimg(self.testfig)
        test_pixels, _, duobian = self._region_pixels(figdata_testfig)

        ori_pixels, ori_coords, _ = self._region_pixels(self.readimg(self.orifig))

        error_lst = []
        # zip guards against the two images yielding different pixel counts
        # (the original indexed one list with the other list's length).
        for ori_rgb, test_rgb, coord in zip(ori_pixels, test_pixels, ori_coords):
            if self.ColourDistance(ori_rgb, test_rgb) >= self.distance_error:
                error_lst.append(coord)

        if len(error_lst) >= len(ori_pixels) * self.rate:
            # Paint every differing pixel with the alarm colour.
            for yy, xx in error_lst:
                figdata_testfig[yy, xx] = self.colo
            # Outline the (test-image-scaled) polygon.
            cv2.polylines(figdata_testfig, [duobian], True, (103, 230, 183), thickness=2)
            imgPath = './images_ready/images_alarm/alarm/sewage_%s' % os.path.basename(self.testfig)
            # Unicode-safe write (mirrors readimg's workaround).
            self._writeimg(imgPath, figdata_testfig)
            # Keep a copy of the marked alarm image next to the model files.
            shutil.copyfile(imgPath, self.orifig.replace('ori.jpg', 'sewage_' + os.path.basename(self.testfig)))
            # Keep a copy of the captured original next to the model files.
            shutil.copyfile(self.testfig, self.orifig.replace('ori.jpg', 'ori_' + os.path.basename(self.testfig)))

            print('异常 图片保存为result_%s' % os.path.basename(self.testfig))
            return '异常'

        print('正常')
        return '正常'


if __name__ == '__main__':
    # Build the detector from the sample annotation/config and run the check.
    detector = SX(
        '徒骇河-桂松路/tuhaihe_guisonglu.jpg',
        '徒骇河-桂松路/tuhai_guisong.csv',
        'config_CNN.ini',
        '徒骇河-桂松路/tuhaihe_guisonglu.jpg',
    )
    result = detector.select_fig()
    print(result)
