# -*- coding: utf-8 -*-
"""
图像定位功能
@author:   XueJunJie
@file:     ImageLocation.py
@date:     2024-9-20
@version:  1.0
"""
import time
import numpy
import warnings
import threading
from PIL import Image

# Grid split count: the body image is partitioned into an N x N grid of
# regions that are matched concurrently (one worker thread per region).
GRID_CUT: int = 3


def set_grid_cut(n: int):
    """
    Set the grid split count used by png_location_element.

    The search area is divided into n x n regions that are matched in
    parallel threads.

    :param n: number of grid rows/columns; coerced to int and clamped to >= 1
              (all values <= 1 mean "no grid split", as before)
    """
    global GRID_CUT
    # Clamp so downstream grid arithmetic (range(GRID_CUT - 1), // GRID_CUT)
    # always receives a usable integer count.
    GRID_CUT = max(1, int(n))


def png_location_element(target_png: str, body_png: str, cut_count=10, vague=1,
                         confidence_rate=0.9, colourful=False, shadow=False, timeout=30,
                         farm=None):
    """
    Locate a target (template) image inside a body image.

    :param target_png:      path of the target image file
    :param body_png:        path of the body image file
    :param cut_count:       number of feature slices, default 10; higher values are
                            more accurate at exponentially higher cost (the more
                            distinctive the target, the fewer slices are needed)
    :param vague:           fuzziness, default 1; higher values are less accurate but
                            exponentially cheaper (smaller targets -> smaller value)
    :param confidence_rate: confidence threshold, default 0.9; affects how many
                            candidates are kept (lower it for images with large
                            uniform-colour areas)
    :param colourful:       compare full RGB instead of grayscale (~+30% cost); use
                            only when grayscale matching is ambiguous
                            [mutually exclusive with shadow]
    :param shadow:          compare black/white binarized values (~+30% cost); needs a
                            cut_count large enough that slice spacing <= 3 px; use for
                            shape-only matching [mutually exclusive with colourful]
    :param timeout:         max seconds to wait for the search, default 30
    :param farm:            search region [(x0, y0), (x1, y1)]; negative values count
                            from the end of the axis; None (default) = whole image
    :return: (center (x, y) or None,
              [start (x, y) or None, end (x, y) or None],
              confidence as float, 0 when there is no match)
    """
    assert not all([colourful, shadow]), 'colourful 和 shadow 不可同时为真'
    # None sentinel instead of a mutable default argument; the concrete value
    # still means "whole image".
    if farm is None:
        farm = [(0, 0), (-1, -1)]
    # ========= initial image processing =========
    body_img = Image.open(body_png)
    target_img = Image.open(target_png)
    # convert to nested Python lists
    if colourful:
        body_seed_map = numpy.asarray(body_img).tolist()
        target_seed_map = numpy.asarray(target_img).tolist()
    else:
        # grayscale conversion
        body_l = body_img.convert('L')
        target_l = target_img.convert('L')
        # numpy.array (not asarray): asarray on a PIL image may return a
        # read-only buffer, and the shadow branch mutates these in place.
        body_l_num = numpy.array(body_l)
        target_l_num = numpy.array(target_l)
        if shadow:
            # binarize: dark pixels (<240) -> 0, light pixels (>=240) -> 1
            body_l_num[body_l_num < 240] = 0
            body_l_num[body_l_num >= 240] = 1
            target_l_num[target_l_num < 240] = 0
            target_l_num[target_l_num >= 240] = 1
        body_seed_map = body_l_num.tolist()
        target_seed_map = target_l_num.tolist()

    # ===== crop the body image when a search region is specified =====
    # Bug fix: the old check compared farm against (9999, 9999), which never
    # matched the real default, so the default path silently sliced off the
    # last row and column of the image.
    if farm != [(0, 0), (-1, -1)]:
        _body_seed_map = []
        for y in body_seed_map[farm[0][1]: farm[1][1]]:
            _body_seed_map.append(y[farm[0][0]: farm[1][0]])
        body_seed_map = _body_seed_map

    # ========= feature sampling =========
    t1 = time.time()
    # extract the target feature matrix
    target_size_y = len(target_seed_map)
    target_size_x = len(target_seed_map[0])
    cut_count_y = cut_count
    cut_count_x = cut_count
    if shadow:
        # shadow matching needs denser sampling (slice spacing <= 3 px)
        if target_size_y // cut_count_y > 3:
            warnings.warn(
                f'投影模式下,y轴切片间隔不应超过3,当前{target_size_y // cut_count_y}')
            cut_count_y = (target_size_y // cut_count_y) + 1
        if target_size_x // cut_count_x > 3:
            warnings.warn(
                f'投影模式下,x轴切片间隔不应超过3,当前{target_size_x // cut_count_x}')
            cut_count_x = (target_size_x // cut_count_x) + 1
    # distinct, sorted y sample offsets within the target
    features_y_list = sorted(set(
        i * (target_size_y - 1) // cut_count_y for i in range(0, cut_count_y + 1)
    ))
    # distinct, sorted x sample offsets within the target
    features_x_list = sorted(set(
        i * (target_size_x - 1) // cut_count_x for i in range(0, cut_count_x + 1)
    ))
    # sample the feature value matrix
    features_map = []
    for j in features_y_list:
        features_map.append([target_seed_map[j][i] for i in features_x_list])

    # candidate matches collected by the workers: [(x, y, rate)]
    ok_target = []
    # shared flag so every worker stops once a perfect match is found
    res = {'stop': False}
    if GRID_CUT <= 1:
        # single worker over the whole (cropped) body
        t = threading.Thread(
            target=_image_matching,
            args=(
                body_seed_map,
                features_map,
                features_x_list,
                features_y_list,
                target_size_x,
                target_size_y,
                [0, 3000],
                [0, 3000],
                vague, confidence_rate, colourful, shadow,
                ok_target, res
            )
        )
        t.start()
        # NOTE: join(timeout) only stops waiting; the worker thread itself
        # keeps running in the background after a timeout.
        t.join(timeout=timeout)
    else:
        # split the feasible start-point area into GRID_CUT x GRID_CUT
        # regions, one worker thread per region
        _x_seed = (len(body_seed_map[0]) - target_size_x) // GRID_CUT
        _y_seed = (len(body_seed_map) - target_size_y) // GRID_CUT
        _ranges_x = []
        _ranges_y = []
        for i in range(GRID_CUT - 1):
            _ranges_x.append([i * _x_seed, (i + 1) * _x_seed])
            _ranges_y.append([i * _y_seed, (i + 1) * _y_seed])
        # the last region absorbs the division remainder
        _ranges_x.append(
            [(GRID_CUT - 1) * _x_seed, len(body_seed_map[0]) - target_size_x])
        _ranges_y.append(
            [(GRID_CUT - 1) * _y_seed, len(body_seed_map) - target_size_y])
        _runner = []
        for seed_y in _ranges_y:
            for seed_x in _ranges_x:
                t = threading.Thread(
                    target=_image_matching,
                    args=(
                        body_seed_map,
                        features_map,
                        features_x_list,
                        features_y_list,
                        target_size_x,
                        target_size_y,
                        seed_x,
                        seed_y,
                        vague, confidence_rate, colourful, shadow,
                        ok_target, res
                    )
                )
                t.start()
                _runner.append(t)
        for t in _runner:
            t.join(timeout=timeout)

    # ===== vote: pick the highest-confidence candidate =====
    ok_target.sort(key=lambda x: x[2])
    if not ok_target:
        print(f'无目标结果,耗时{round(time.time()-t1, 2)}秒')
        return None, [None, None], 0
    # translate the x coordinate back to the uncropped image
    last_start_target_x = ok_target[-1][0] + farm[0][0]
    if last_start_target_x < 0:
        # a negative farm start counts from the end of the axis
        last_start_target_x = last_start_target_x + body_img.size[0]
    # translate the y coordinate back to the uncropped image
    last_start_target_y = ok_target[-1][1] + farm[0][1]
    if last_start_target_y < 0:
        # a negative farm start counts from the end of the axis
        last_start_target_y = last_start_target_y + body_img.size[1]
    last_start_target = (
        last_start_target_x,
        last_start_target_y
    )
    # best confidence
    last_rate = ok_target[-1][2]
    last_end_target = (
        last_start_target[0] + target_size_x,
        last_start_target[1] + target_size_y
    )
    print(
        f'最终选定区域为({last_start_target[0]},{last_start_target[1]}),({last_end_target[0]},{last_end_target[1]}),置信度{last_rate},耗时{round(time.time()-t1, 2)}秒'
    )
    return ((last_start_target[0] + last_end_target[0]) // 2, (last_start_target[1] + last_end_target[1]) // 2), \
        [last_start_target, last_end_target], \
        last_rate


def _image_matching(body_seed_map: list, features_map: list, features_x_list: list, features_y_list: list,
                    target_size_x: int, target_size_y: int, body_x_range=[0, 3000], body_y_range=[0, 3000],
                    vague=1, confidence_rate=0.9, colourful=False, shadow=False,
                    ok_target=[], res={}):
    """
    图像匹配逻辑
    :body_seed_map     list        图像矩阵值,[[],[]]
    :features_map      list        目标特征矩阵值,[[],[]]
    :features_x_list   list        目标特征x坐标偏移
    :features_y_list   list        目标特征y坐标偏移
    :target_size_x     int         目标尺寸x
    :target_size_y     int         目标尺寸y
    :body_x_range      [int, int]  x轴起始点区间,默认0~3000
    :body_y_range      [int, int]  y轴起始点区间,默认0~3000
    :vague             int         模糊度，默认1，数值越大，定位越不准确，慢指数级减少耗时。
                                  （图越小，建议数值越小）
    :confidence_rate   float       置信度，默认0.9，影响可能结果数
                                  （连续同色区域越多，建议置信度越低）
    :colourful         bool        彩色匹配，默认False使用灰度比较，全彩耗时+30%
                                   （建议在灰度匹配有歧义时才使用）【shadow不可同时为真】
    :shadow            bool        投影匹配，默认False不使用投影。投影则黑白二项值比较，
                                   投影需要较大的cut_count以保证切片间隔小于3像素。耗时+30%
                                   （建议仅匹配图形形状时使用）【colourful不可同时为真】
    :ok_target         list        回传结果，结果写入此处
    :res               dict        同享回传结果
    """
    agree = 0
    disagree = 0
    # 受体起始点不会超越目标极限可能位置
    _end_x = len(body_seed_map[0]) - target_size_x if len(body_seed_map) - \
        target_size_x < body_x_range[1] else body_x_range[1]
    _end_y = len(body_seed_map) - target_size_y if len(body_seed_map) - \
        target_size_y < body_y_range[1] else body_y_range[1]
    for j in range(body_y_range[0], _end_y):
        for i in range(body_x_range[0], _end_x):
            agree = 0
            disagree = 0
            _is_pass = False
            if i % vague or j % vague:
                continue
            # 判断是否置信
            for _j, d_j in enumerate(features_y_list):
                for _i, d_i in enumerate(features_x_list):
                    if colourful:
                        # 彩色判定法则
                        if abs(body_seed_map[j + d_j][i + d_i][0] - features_map[_j][_i][0]) < 5 and \
                           abs(body_seed_map[j + d_j][i + d_i][1] - features_map[_j][_i][1]) < 5 and \
                           abs(body_seed_map[j + d_j][i + d_i][2] - features_map[_j][_i][2]) < 5:
                            agree += 1
                        else:
                            disagree += 1
                    else:
                        if shadow:
                            # 投影判断法则
                            if body_seed_map[j + d_j][i + d_i] == features_map[_j][_i]:
                                agree += 1
                            else:
                                disagree += 1
                        else:
                            # 灰度判定法则
                            if abs(body_seed_map[j + d_j][i + d_i] - features_map[_j][_i]) < 5:
                                agree += 1
                            else:
                                disagree += 1
                    # 不置信，否决
                    if agree / (agree + disagree) < confidence_rate:
                        _is_pass = True
                        break
                if _is_pass:
                    break
            # 收集比较认同的区域
            if not _is_pass:
                ok_target.append((i, j, round(agree / (agree + disagree), 2)))
                if round(agree / (agree + disagree), 2) >= (1 - 0.02 * (vague-1) - 1):
                    # 完全匹配项，不继续
                    res['stop'] = True
                    break
        if res['stop']:
            break
    return ok_target
