import shutil
import uuid
import simplejson
import os
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import urllib3
import requests
from urllib.request import urlretrieve

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def aHash(img):
    """Average hash: return a 64-char '0'/'1' string for a BGR image.

    The image is shrunk to 8x8, converted to grayscale, and each pixel is
    compared against the mean brightness ('1' when brighter).

    Bug fix: the original accumulated pixels with ``s = s + gray[i, j]``;
    under numpy's promotion rules that sum stays uint8 and wraps past 255,
    so the "average" threshold was computed on an overflowed sum.
    ``gray.mean()`` computes the true floating-point mean instead.
    """
    img = cv2.resize(img, (8, 8))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # float64 mean of all 64 pixels -- no integer overflow
    avg = gray.mean()
    # Pixel brighter than the mean -> '1', otherwise '0'
    return ''.join(
        '1' if gray[i, j] > avg else '0'
        for i in range(8)
        for j in range(8)
    )


def dHash(img):
    """Difference hash: return a 64-char '0'/'1' string for a BGR image.

    Shrinks the image to 9x8 grayscale; each bit is '1' when a pixel is
    brighter than its right-hand neighbour, giving 8 bits per row.
    """
    thumb = cv2.resize(img, (9, 8))
    gray = cv2.cvtColor(thumb, cv2.COLOR_BGR2GRAY)
    bits = []
    for row in range(8):
        for col in range(8):
            bits.append('1' if gray[row, col] > gray[row, col + 1] else '0')
    return ''.join(bits)


def pHash(img):
    """Perceptual hash: return a list of 64 ints (0/1) for a BGR image.

    Resizes to 32x32, takes the DCT of the grayscale image, and thresholds
    the 8x8 low-frequency corner against its mean.

    NOTE: aHash/dHash return '0'/'1' strings while this returns ints;
    cmpHash compares element-wise so either representation works, as long
    as both compared hashes come from the same function.

    Fixes: renamed ``hash`` (shadowed the builtin) to ``bits`` and the
    ``avreage`` typo to ``average``; behavior is unchanged.
    """
    img = cv2.resize(img, (32, 32))  # default interpolation; INTER_CUBIC also common
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # DCT requires float input; keep only the 8x8 low-frequency block.
    dct = cv2.dct(np.float32(gray))
    dct_roi = dct[0:8, 0:8]

    average = np.mean(dct_roi)
    bits = []
    for i in range(dct_roi.shape[0]):
        for j in range(dct_roi.shape[1]):
            bits.append(1 if dct_roi[i, j] > average else 0)
    return bits


def calculate(image1, image2):
    """Grayscale-histogram similarity of two images, in [0, 1].

    Builds a 256-bin histogram of channel 0 for each image and averages the
    per-bin overlap (1 for identical bins, scaled difference otherwise).
    NOTE(review): cv2.calcHist returns (256, 1) arrays, so the result is a
    1-element numpy array rather than a plain float -- callers unwrap it.
    """
    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])
    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])
    degree = 0
    for bin1, bin2 in zip(hist1, hist2):
        if bin1 != bin2:
            # Overlap of this bin: 1 minus the normalized difference.
            degree += 1 - abs(bin1 - bin2) / max(bin1, bin2)
        else:
            degree += 1
    return degree / len(hist1)


def classify_hist_with_split(image1, image2, size=(256, 256)):
    """Per-channel histogram similarity of two BGR images, in [0, 1].

    Resizes both images to *size*, splits them into their three color
    channels, and averages calculate() over the channel pairs.
    """
    resized1 = cv2.resize(image1, size)
    resized2 = cv2.resize(image2, size)
    channel_scores = [
        calculate(ch1, ch2)
        for ch1, ch2 in zip(cv2.split(resized1), cv2.split(resized2))
    ]
    return sum(channel_scores) / 3


def cmpHash(hash1, hash2):
    """Hamming distance between two equal-length hash sequences.

    The hashes are sequences of bits (strings of '0'/'1' or lists of ints);
    both must come from the same hash function so the bit order matches.
    A smaller distance means more similar images; 0 means identical hashes.

    Returns -1 when the lengths differ (kept from the original as the
    caller-facing error signal).
    """
    if len(hash1) != len(hash2):
        return -1
    # Idiomatic count of differing positions (replaces the manual index loop).
    return sum(1 for a, b in zip(hash1, hash2) if a != b)


def getImageByUrl(url):
    """Fetch *url* and return the body as a PIL Image.

    TLS verification is disabled (the matching warning is silenced at module
    import time via urllib3.disable_warnings).
    """
    response = requests.get(url, verify=False)
    return Image.open(BytesIO(response.content))


def bytes_to_cvimage(filebytes):
    """Convert an image byte stream (file-like object) to a cv2 BGR array."""
    pil_image = Image.open(filebytes)
    # PIL gives RGB ordering; OpenCV expects BGR.
    return cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)


def encodeChName(path):
    """Normalize *path* via os.path.join, print it, and return it."""
    normalized = os.path.join(path)
    print(normalized)
    return normalized


# Read an image whose path may contain non-ASCII (e.g. Chinese) characters;
# cv2.imread cannot open such paths, so decode the raw file bytes instead.
def cv_imread(file_path):
    raw_bytes = np.fromfile(file_path, dtype=np.uint8)
    return cv2.imdecode(raw_bytes, -1)


def cv_imread2(file_path):
    """Read an image whose directory may contain non-ASCII characters.

    Works around cv2.imread's path limitation by chdir-ing into the file's
    directory and reading by bare file name, then restoring the original
    working directory. Falls back to cv_imread (imdecode) when imread still
    returns None.

    Fix: the chdir back to the original directory now runs in a ``finally``
    block, so an exception from cv2.imread can no longer leave the process
    in the wrong working directory.
    NOTE(review): chdir is process-global -- this helper is not thread-safe.
    """
    root_dir, file_name = os.path.split(file_path)
    pwd = os.getcwd()
    try:
        if root_dir:
            os.chdir(root_dir)
        cv_img = cv2.imread(file_name)
    finally:
        # Always restore the caller's working directory.
        os.chdir(pwd)

    if cv_img is None:
        return cv_imread(file_path)
    return cv_img


def loadImage(source):
    """Load *source* (URL or local path) as a cv2 BGR ndarray."""
    if not source.startswith("http"):
        # Local file; cv_imread2 handles non-ASCII paths.
        return cv_imread2(source)
    # Download, then convert PIL's RGB ordering to OpenCV's BGR.
    pil_img = getImageByUrl(source)
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)


def urllib_download(link, dir, file_name):
    """Download *link* to ``dir/file_name``.

    Tries urlretrieve first and falls back to requests on failure
    (e.g. certificate or header issues).

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    it now catches ``Exception`` only.
    """
    dest = dir + '/' + file_name
    print(link + "; to -> " + dest)
    try:
        urlretrieve(link, dest)
    except Exception:
        # Fallback path: fetch with requests and write the bytes ourselves.
        r = requests.get(link)
        with open(dest, 'wb') as f:
            f.write(r.content)


def image_diff_main(img1, img2, para1, para2):
    """Compare two images (cv2 BGR arrays) with five similarity measures.

    para1/para2 are the cache keys (paths or links) used by adp_hash to
    memoize the hash computations. Returns (n1, n2, n3, n4, n5).
    """
    # aHash / dHash / pHash distances: smaller = more similar, 0 for identical images.
    # Histogram measures are in 0-1: larger = more similar, 1 for identical images.

    hash_info1 = adp_hash(img1, para1)
    hash_info2 = adp_hash(img2, para2)

    # aHash and dHash distances rescaled to a 0-100 similarity percentage.
    n1 = cmpHash(hash_info1.get('aHash'), hash_info2.get('aHash'))
    n1 = (1 - float(n1 / 64)) * 100
    n2 = cmpHash(hash_info1.get('dHash'), hash_info2.get('dHash'))
    n2 = (1 - float(n2 / 64)) * 100
    # NOTE(review): n3 stays the raw pHash Hamming distance (0-64) and is NOT
    # rescaled like n1/n2 -- confirm this asymmetry is intended before
    # comparing it against percentage thresholds.
    n3 = cmpHash(hash_info1.get('pHash'), hash_info2.get('pHash'))

    # classify_hist_with_split/calculate return 1-element numpy arrays unless
    # the value is exactly 1.0; the != branch unwraps with [0] before scaling.
    n4 = classify_hist_with_split(img1, img2)
    if n4 != 1.0:
        n4 = round(n4[0] * 100, 2)
    else:
        n4 = round(n4 * 100, 2)
    n5 = calculate(img1, img2)
    if n5 != 1.0:
        n5 = round(n5[0] * 100, 2)
    else:
        n5 = round(n5 * 100, 2)
    return n1, n2, n3, n4, n5


def adp_hash(img, link):
    """Return the three hashes of *img*, memoized by *link*.

    Caches {'aHash', 'dHash', 'pHash'} per link in the module-global
    glob_image_diff_cache so repeated comparisons skip recomputation.

    Fix: the original did a double lookup (``cache.get(link)`` in the
    condition, then again for the return); a single ``.get`` suffices
    (``dict.get`` on an empty dict is already safe).
    """
    cached = glob_image_diff_cache.get(link)
    if cached:
        return cached
    hash_info = {
        "aHash": aHash(img),
        "dHash": dHash(img),
        "pHash": pHash(img),
    }
    glob_image_diff_cache[link] = hash_info
    return hash_info


def loadImagePath(files, path):
    """Recursively collect jpg/jpeg/png file paths under *path*.

    *files* is the directory listing of *path*; subdirectories are
    descended into recursively. Returns a flat list of '/'-joined paths.

    Fix: the original called ``open(full_path)`` just to read ``f.name``
    (which equals full_path) and never closed the handle -- one leaked
    file descriptor per image. The path is now appended directly.
    """
    image_path_list = []
    for file in files:
        full_path = path + '/' + file
        if os.path.isfile(full_path):
            suffix = file.split('.')[-1].lower()
            if suffix in ['jpg', 'jpeg', 'png']:
                image_path_list.append(full_path)
        else:
            # Directory: recurse and splice in its results.
            image_path_list.extend(loadImagePath(os.listdir(full_path), full_path))
    return image_path_list


def writeJsonToFile(name, data):
    """Serialize *data* as JSON and write it to the file *name* (overwrites).

    Fix: uses a ``with`` block so the handle is closed even when dumps or
    the write raises (the original only closed on the success path).
    """
    with open(name, "w") as fp:
        fp.write(simplejson.dumps(data))


# Load the source images that will take part in the comparison.
def load_dir_image(path):
    """Return every image file path found (recursively) under *path*."""
    return loadImagePath(os.listdir(path), path)


def loadCacheImage(link, cache=False):
    """Load the image at *link*, optionally via the global image cache.

    With cache=False the image is always (re)loaded; otherwise a hit in
    glob_image_cache is returned directly and a miss is loaded and stored.
    """
    if cache is False:
        return loadImage(link)

    cached = glob_image_cache.get(link)
    if cached is not None:
        return cached

    image_data = loadImage(link)
    glob_image_cache[link] = image_data
    print('图片写入缓存......')
    return image_data


def diffSameImage(para1, para2, cache=True):
    """Load both images (optionally cached) and return the five similarity
    scores from image_diff_main as a list [n1, n2, n3, n4, n5]."""
    img1 = loadCacheImage(para1, cache)
    img2 = loadCacheImage(para2, cache)
    return list(image_diff_main(img1, img2, para1, para2))


def log_diff_text(diff_info, path, datum, file_name=None):
    """Build the diff report for *path* and write it to a JSON file.

    When *file_name* is empty/None, the output name is derived from *path*
    (folder + stem). '-diff_info.json' is appended either way. Returns the
    report dict.
    """
    diff_text = get_diff_json(diff_info, path, datum)

    if not file_name:
        info = getFileNameByPath(path)
        file_name = "{}/{}".format(info.get('file_folder'), info.get("file_name"))

    writeJsonToFile(file_name + '-diff_info.json', diff_text)
    return diff_text


def get_diff_json(diff_info, path, datum):
    """Build the report dict for one image comparison.

    *diff_info* is the 5-element score list [aHash, dHash, pHash,
    threeHistogram, oneHistogram]; *datum* is the similarity threshold.
    'diffed_datum_count' is how many of the five scores meet the threshold.

    Idiom fix: ``sum(...)`` replaces the original
    ``len(list(filter(lambda f: f, map(lambda y: y >= datum, ...))))``.
    """
    return {
        'imagePath': path,
        'aHash': diff_info[0],
        'dHash': diff_info[1],
        'pHash': diff_info[2],
        'threeHistogram': diff_info[3],
        'oneHistogram': diff_info[4],
        'checkList': diff_info,
        "datum": datum,
        "diffed_datum_count": sum(1 for score in diff_info if score >= datum),
    }


def check_diffed_datum_count(item, datum=100):
    """Evaluate one comparison record against the threshold *datum*.

    *item* is {'diff_info': [five scores], 'path': str}. Returns a dict with
    the per-score pass/fail list and how many scores met the threshold
    (datum defaults to 100, i.e. exact-duplicate detection).

    Idiom fix: a list comprehension and ``sum`` replace the original
    map/filter/len chain; the returned values are identical.
    """
    diff_info = item.get("diff_info")
    diffed_datum_info = [score >= datum for score in diff_info]
    diffed_datum_count = sum(diffed_datum_info)

    return {
        "path": item.get("path"),
        "datum": datum,
        "diff_info": diff_info,
        "diffed_datum_info": diffed_datum_info,
        "diffed_datum_count": diffed_datum_count
    }


def recursionRemoveSameImage():
    """Remove exact-duplicate images from source_before_dir.

    Each pass takes the first image, deletes every other image that matches
    it at 100% similarity, moves the first image into source_clean_diff_dir
    (renaming with a uuid on name collision), and repeats until at most one
    image remains, at which point source_before_dir is removed.

    Fix: rewritten as a ``while`` loop -- the original recursed once per
    kept image, which can exhaust the call stack on large folders. Each
    iteration is otherwise identical to one recursive call (including the
    prints).
    """
    while True:
        if not os.path.exists(source_before_dir):
            print('没有这个目录,无需清理;')
            return
        print("开始清理重复图片,迁移图片到新目录;")
        image_list = load_dir_image(source_before_dir)

        if len(image_list) <= 1:
            # NOTE(review): as in the original, a final remaining image is
            # deleted together with the folder -- confirm this is intended.
            if os.path.exists(source_before_dir):
                shutil.rmtree(source_before_dir)
            return

        first_image = image_list[0]

        # Compare the first image against every other image (no caching).
        diff_info_list = [
            {"diff_info": diffSameImage(first_image, image, False), "path": image}
            for image in image_list
            if image != first_image
        ]

        # Paths whose similarity hit the default 100% threshold.
        same_image = map(
            lambda r: r.get("path"),
            filter(
                lambda x: x.get("diffed_datum_count") > 0,
                map(check_diffed_datum_count, diff_info_list)
            )
        )

        # Delete the duplicates found in the folder.
        for to_be_del in same_image:
            if os.path.exists(to_be_del):
                os.unlink(to_be_del)

        # Move the first image to the cleaned folder, renaming on collision.
        file_info = getFileNameByPath(first_image)
        new_file_name = file_info.get('file_name')
        new_file_suffix = file_info.get('file_suffix')
        if os.path.exists(source_clean_diff_dir + "/" + new_file_name + "." + new_file_suffix):
            shutil.move(first_image,
                        source_clean_diff_dir + "/" + new_file_name + "_" + str(uuid.uuid1()) + "." + new_file_suffix)
        else:
            shutil.move(first_image, source_clean_diff_dir)


def getFileNameByPath(target):
    """Split a '/'-separated path or URL into folder, full name, stem, suffix.

    Returns a dict with keys 'file_full_name', 'file_name' (stem),
    'file_folder', and 'file_suffix'.

    Fix: the original rebuilt the stem with ``"".join(...)`` which silently
    dropped interior dots ("x.tar.gz" produced stem "xtar"); ``rpartition``
    keeps them ("x.tar"). Behavior for single-dot names is unchanged,
    including the empty folder for bare names and the no-dot edge case
    (stem "", suffix = whole name).
    """
    target_folder, _, target_full_name = target.rpartition("/")
    target_name, _, target_suffix = target_full_name.rpartition(".")

    return {
        # file name including the suffix
        "file_full_name": target_full_name,
        # file name without the suffix (the stem)
        "file_name": target_name,
        # containing folder ("" when the path has no '/')
        "file_folder": target_folder,
        # suffix without the dot
        "file_suffix": target_suffix
    }


def diffImages(target_diff, source_clean_diff, datum, gen_diff_log):
    """For every tagged image, find and export similar images from the bank.

    target_diff: list of tagged image paths/links to search for.
    source_clean_diff: list of deduplicated bank image paths/links.
    datum: similarity threshold (percent) a score must reach to count.
    gen_diff_log: when True, also write a JSON diff report per match.

    Side effects: creates one folder per tagged image under the global
    target_same_dir and copies each bank image that meets the threshold
    into it, named "<match_count>_<index>.<suffix>".
    """
    for targetIndex, target in enumerate(target_diff):
        print("开始第" + str(targetIndex + 1) + "张图片相似度匹配;精度: " + str(datum) + ";共: " + str(len(target_diff)) + "张;")
        print("缓存图片数：" + str(len(glob_image_cache)) + ";缓存图片哈希数：" + str(len(glob_image_diff_cache)))

        # Compare this tagged image against every bank image (with caching,
        # so repeated targets reuse decoded images and hashes).
        sameList = []
        for source in source_clean_diff:
            diff_info = diffSameImage(target, source, True)
            sameList.append({
                "diff_info": diff_info,
                "path": source
            })
        for index, same in enumerate(sameList):
            same_path = same.get("path")
            same_diff_info = same.get("diff_info")

            log_diff_json = get_diff_json(same_diff_info, same_path, datum)
            diffed_datum_count = log_diff_json.get('diffed_datum_count')

            # At least one of the five measures met the requested threshold.
            if diffed_datum_count > 0:
                # Name the output folder after the tagged image's stem.
                target_file_info = getFileNameByPath(target)
                same_folder_name = target_file_info.get("file_name")
                # Create the per-target output folder on first match.
                now_same_image_folder_path = target_same_dir + same_folder_name
                if not os.path.exists(now_same_image_folder_path):
                    os.mkdir(now_same_image_folder_path)

                same_file_name = str(diffed_datum_count) + "_" + str(index)

                # Optionally write the JSON diff report next to the copy.
                if gen_diff_log:
                    writeJsonToFile(
                        now_same_image_folder_path + "/" + same_file_name + '_diff_info.json',
                        log_diff_json
                    )

                # Copy the matching bank image into the per-target folder.
                shutil.copyfile(
                    same_path,
                    now_same_image_folder_path + "/" + same_file_name + "." + target_file_info.get("file_suffix")
                )
        print("第" + str(targetIndex + 1) + "张图片相似度匹配完成")


def intiDir(dirs):
    """Reset each directory in *dirs* to an empty folder.

    Existing directories are wiped (rmtree) and recreated; missing ones are
    created. Falsy entries (e.g. "") are skipped.
    """
    for directory in dirs:
        if not directory:
            continue
        if os.path.exists(directory):
            # Wipe previous contents before recreating.
            shutil.rmtree(directory)
        os.mkdir(directory)


def loadOlineImages():
    """Download the hard-coded demo image sets into their local folders.

    target_diff goes to target_diff_dir (tagged images to search for);
    source_clean_diff goes to source_before_dir (the raw image bank, whose
    second half repeats earlier links to simulate exact duplicates).
    Name collisions in the destination get a uuid appended.

    Fix: the collision check previously always tested target_diff_dir, even
    for the batch downloaded into source_before_dir, so duplicates in the
    bank silently overwrote each other instead of being kept under uuid
    names; it now checks each batch's own destination directory.
    """
    # Tagged/main images
    target_diff = [
        "https://ae01.alicdn.com/kf/H0dddea94f03943a8885d9eaf688fe7318.jpg",
        "https://ae04.alicdn.com/kf/Hbe370cd7f7514b80bacb883b0532672d5.jpg",
        "https://ae04.alicdn.com/kf/Hc78be9f8d936415dabe652516f7389f42.jpg",
        "https://ae04.alicdn.com/kf/Hba60ec0c206d4eefb8455a3a87b56b8da.jpg",
        "https://ae04.alicdn.com/kf/H3bc6dbb98cfb48298f00e0c4fd8c554eB.jpg",
        "https://ae04.alicdn.com/kf/Ha62c99aeadef4aa49b54a9b1a614ff05h.jpg",
        "https://ae01.alicdn.com/kf/H47a581e8af5d4988bc1cd54552c3b759V.jpg",
        "https://ae04.alicdn.com/kf/H3defea5ff2c24773a561498572ccc12bS.jpg",
        "https://ae04.alicdn.com/kf/H5803809607ff4b78bd243e17ff13ce97O.jpg",
        "https://ae04.alicdn.com/kf/H1a5fb5b8da2e4378af4d564a73c0f8c4t.jpg",
        "https://ae04.alicdn.com/kf/H0f4d43b1af4946edb8e8d74a20724574x.jpg",
        "https://ae04.alicdn.com/kf/H26323069ddd44c95ad9763bc48ac5280W.jpg",
        "https://ae01.alicdn.com/kf/Hf9b0b15c8c7d4b58887ef4e9aa454e2aR.jpg",
        "https://ae04.alicdn.com/kf/H3cef04ea964142daa06dbc0b98741f6dp.jpg",
        "https://ae04.alicdn.com/kf/Hc18c01438b2a4803a7940ce4248c7296U.jpg",
        "https://ae04.alicdn.com/kf/Hfb9236f891484f37bf210aa684257131f.jpg",
        "https://ae04.alicdn.com/kf/H0db0547262124bea8649cdcf72afa8b70.jpg",
        "https://ae04.alicdn.com/kf/H60449298ff684260b082ace8cb879c30E.jpg"
    ]
    # Base image bank to match against
    source_clean_diff = [
        "https://ae04.alicdn.com/kf/H1dc122b0d10a4ec28141a57b344c77f83.jpg",
        "https://ae04.alicdn.com/kf/Hbe370cd7f7514b80bacb883b0532672d5.jpg",
        "https://ae04.alicdn.com/kf/Hc78be9f8d936415dabe652516f7389f42.jpg",
        "https://ae04.alicdn.com/kf/Hba60ec0c206d4eefb8455a3a87b56b8da.jpg",
        "https://ae04.alicdn.com/kf/H3bc6dbb98cfb48298f00e0c4fd8c554eB.jpg",
        "https://ae04.alicdn.com/kf/Ha62c99aeadef4aa49b54a9b1a614ff05h.jpg",
        "https://ae04.alicdn.com/kf/Hc302c2cc118b48b99907fdcc42e4c3d17.jpg",
        "https://ae04.alicdn.com/kf/H3defea5ff2c24773a561498572ccc12bS.jpg",
        "https://ae04.alicdn.com/kf/H5803809607ff4b78bd243e17ff13ce97O.jpg",
        "https://ae04.alicdn.com/kf/H1a5fb5b8da2e4378af4d564a73c0f8c4t.jpg",
        "https://ae04.alicdn.com/kf/H0f4d43b1af4946edb8e8d74a20724574x.jpg",
        "https://ae04.alicdn.com/kf/H26323069ddd44c95ad9763bc48ac5280W.jpg",
        "https://ae04.alicdn.com/kf/H2af25156e4204fa78447a53e35b68db6s.jpg",
        "https://ae04.alicdn.com/kf/H3cef04ea964142daa06dbc0b98741f6dp.jpg",
        "https://ae04.alicdn.com/kf/Hc18c01438b2a4803a7940ce4248c7296U.jpg",
        "https://ae04.alicdn.com/kf/Hfb9236f891484f37bf210aa684257131f.jpg",
        "https://ae04.alicdn.com/kf/H0db0547262124bea8649cdcf72afa8b70.jpg",
        "https://ae04.alicdn.com/kf/H60449298ff684260b082ace8cb879c30E.jpg",

        # Simulated duplicates: these repeat the links above verbatim
        # (100% similarity expected).
        "https://ae04.alicdn.com/kf/H1dc122b0d10a4ec28141a57b344c77f83.jpg",
        "https://ae04.alicdn.com/kf/Hbe370cd7f7514b80bacb883b0532672d5.jpg",
        "https://ae04.alicdn.com/kf/Hc78be9f8d936415dabe652516f7389f42.jpg",
        "https://ae04.alicdn.com/kf/Hba60ec0c206d4eefb8455a3a87b56b8da.jpg",
        "https://ae04.alicdn.com/kf/H3bc6dbb98cfb48298f00e0c4fd8c554eB.jpg",
        "https://ae04.alicdn.com/kf/Ha62c99aeadef4aa49b54a9b1a614ff05h.jpg",
        "https://ae04.alicdn.com/kf/Hc302c2cc118b48b99907fdcc42e4c3d17.jpg",
        "https://ae04.alicdn.com/kf/H3defea5ff2c24773a561498572ccc12bS.jpg",
        "https://ae04.alicdn.com/kf/H5803809607ff4b78bd243e17ff13ce97O.jpg",
        "https://ae04.alicdn.com/kf/H1a5fb5b8da2e4378af4d564a73c0f8c4t.jpg",
        "https://ae04.alicdn.com/kf/H0f4d43b1af4946edb8e8d74a20724574x.jpg",
        "https://ae04.alicdn.com/kf/H26323069ddd44c95ad9763bc48ac5280W.jpg",
        "https://ae04.alicdn.com/kf/H2af25156e4204fa78447a53e35b68db6s.jpg",
        "https://ae04.alicdn.com/kf/H3cef04ea964142daa06dbc0b98741f6dp.jpg",
        "https://ae04.alicdn.com/kf/Hc18c01438b2a4803a7940ce4248c7296U.jpg",
        "https://ae04.alicdn.com/kf/Hfb9236f891484f37bf210aa684257131f.jpg",
        "https://ae04.alicdn.com/kf/H0db0547262124bea8649cdcf72afa8b70.jpg",
        "https://ae04.alicdn.com/kf/H60449298ff684260b082ace8cb879c30E.jpg"
    ]

    download_list = [
        {
            "image_list": target_diff,
            "dir": target_diff_dir
        },
        {
            "image_list": source_clean_diff,
            "dir": source_before_dir
        },
    ]

    for itemInfo in download_list:
        dest_dir = itemInfo.get("dir")
        for image_link in itemInfo.get("image_list"):
            file_info = getFileNameByPath(image_link)
            file_full_name = file_info.get('file_full_name')
            # BUG FIX: check the batch's own destination directory
            # (the original always tested target_diff_dir here).
            if os.path.exists(dest_dir + '/' + file_full_name):
                new_file_name = file_info.get('file_name')
                new_file_suffix = file_info.get('file_suffix')
                file_full_name = new_file_name + "_" + str(uuid.uuid1()) + "." + new_file_suffix
            urllib_download(image_link, dest_dir, file_full_name)


def runningDiffImages():
    """Run similarity matching of the tagged images against the cleaned bank."""
    if not os.path.exists(target_same_dir):
        os.mkdir(target_same_dir)

    tagged_images = load_dir_image(target_diff_dir)
    # Image bank after duplicate removal.
    bank_images = load_dir_image(source_clean_diff_dir)

    diffImages(tagged_images, bank_images, 80, False)


if __name__ == "__main__":
    # In-memory cache: link/path -> decoded BGR ndarray (used by loadCacheImage).
    glob_image_cache = {}
    # In-memory cache: link/path -> {'aHash','dHash','pHash'} (used by adp_hash).
    glob_image_diff_cache = {}

    # Folder holding the tagged images to search for.
    target_diff_dir = "E:/target_diff"

    # Raw image-bank folder.
    # If the bank is large and may contain duplicates, images go here first.
    source_before_dir = "E:/source_before"
    # Image-bank folder after duplicate removal.
    source_clean_diff_dir = "E:/source_clean_diff"
    # Output folder for matched (similar) images; note the trailing slash is
    # relied on by diffImages when building per-target paths.
    target_same_dir = "E:/target_same/"

    # Folders to (re)initialize; NOTE: intiDir wipes any existing contents.
    intiDir([
        source_before_dir,
        target_diff_dir,
        source_clean_diff_dir,
        target_same_dir
    ])

    # Seed the folders from online image links (optional step).
    loadOlineImages()

    # Remove 100%-duplicate images from the bank; kept images are moved to
    # source_clean_diff_dir and source_before_dir is deleted afterwards.
    recursionRemoveSameImage()

    # Run the actual similarity matching.
    runningDiffImages()
