import sys
import os
import pandas as pd
import numpy as np

# Project root = parent of this file's directory; added to sys.path so the
# sibling packages (main_v2, main_v3) below can be imported.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)

from main_v2 import tools
import time
import cv2
from main_v3.utils import *

# Base directory holding the evaluation datasets.
data = os.path.join(root,"datasets")

# Registry of focus/sharpness measures, keyed by method name.  Each value is
# a callable from main_v2.tools taking a grayscale image and returning a score.
funcs = {
        "entropy": tools.entropy,  # image entropy
        "EAV": tools.EAV,  # EAV point-sharpness algorithm
        "SMD": tools.SMD,  # SMD (grayscale variance)
        "SMD2": tools.SMD2,  # SMD2 (product of grayscale differences)
        "variance": tools.variance,  # variance
        "tenengrad": tools.tenengrad,  # Tenengrad gradient
        "tenengrad2": tools.tenengrad2, # Tenengrad gradient as implemented in the original project
        "vollath": tools.vollath, # Vollath function
        "laplacian": tools.laplacian, # Laplacian gradient
        "brenner": tools.brenner, # Brenner gradient
        "nrss": tools.nrss, # NRSS gradient structural similarity
        "energy": tools.energy, # energy-gradient function
        "NR_IQA": tools.NR_IQA, # No-Reference Image Quality Assessment  using Blur and Noise
        "JPEQ": tools.JPEQ, # No-Reference Perceptual Quality Assessment of JPEG Compressed Images
        "JPEQ2": tools.JPEQ2 # No-Reference Image Quality Assessment forJPEG/JPEG2000 Coding
    }

def calcul_gradient_score(gray_image,method=None):
    """Compute a sharpness score for a grayscale image with the named measure.

    Args:
        gray_image: single-channel (grayscale) image array.
        method: key into the ``funcs`` registry selecting the measure.

    Returns:
        The score produced by the selected measure.

    Raises:
        ValueError: if ``method`` is not a registered measure name
            (the original code raised an opaque ``KeyError: None``).
    """
    try:
        scorer = funcs[method]
    except KeyError:
        raise ValueError(
            "unknown method {!r}; expected one of {}".format(method, sorted(funcs))
        )
    return scorer(gray_image)

def gradient_assessment(gray_image, bgr_image, maser_th, method=None):
    """Score sharpness over MSER-detected text regions.

    Detects candidate text regions with MSER, rasterizes their convex hulls
    into a mask, extracts connected components, and averages the gradient
    score over each component's bounding box.

    Args:
        gray_image: single-channel image used for detection and scoring.
        bgr_image: colour image, used only for the (unreturned) debug overlay.
        maser_th: minimum MSER region area in pixels.
        method: name of the scoring function in ``funcs``.

    Returns:
        Mean sharpness score over the regions, or 0.0 when no region
        produced a score.
    """
    mser = cv2.MSER_create(_min_area=maser_th, _max_variation=0.7)
    regions, boxes = mser.detectRegions(gray_image)
    hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

    # Debug visualisation of the detected hulls (drawn but not returned).
    vis = bgr_image.copy()
    cv2.polylines(vis, hulls, 1, (0, 255, 0))

    height, width = gray_image.shape
    mask = np.zeros((height, width), np.uint8)
    for contour in hulls:
        cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

    cc_info = find_connection_region(mask)
    bbox_list = cc_info['CC_POS']
    if len(bbox_list) == 0:
        return 0.0

    scores = []
    for box in bbox_list:
        xmin, ymin = box[0], box[1]
        xmax, ymax = box[2], box[3]
        crop_text_img = gray_image[ymin:ymax, xmin:xmax]
        try:
            scores.append(calcul_gradient_score(crop_text_img, method))
        except Exception:
            # A measure may fail on degenerate (empty/tiny) crops; skip
            # that region instead of aborting the whole assessment.
            # (Was a bare ``except:``, which also swallowed SystemExit
            # and KeyboardInterrupt.)
            continue

    # The original divided unconditionally and raised ZeroDivisionError
    # when every crop failed to score.
    if not scores:
        return 0.0
    return sum(scores) / len(scores)

def image_quality_assessment(image_ori,method = None):
    """Estimate overall image sharpness from text-region gradient scores.

    Large images are downscaled first for speed, then the MSER-based
    gradient assessment is run.  Note: two grid-searchable parameters
    exist (score threshold and MSER area threshold); the area threshold
    is chosen from the ORIGINAL (pre-resize) image size.

    Args:
        image_ori: BGR colour image (e.g. the result of ``cv2.imread``).
        method: name of the scoring function in ``funcs``.

    Returns:
        Mean sharpness score for the image.

    Raises:
        ValueError: if ``image_ori`` is None (a failed ``cv2.imread``
            returns None; the original code crashed with an opaque
            AttributeError on ``.shape``).
    """
    if image_ori is None:
        raise ValueError("image_ori is None - the image failed to load")
    # shape[:2] instead of a 3-way unpack: does not assume a 3-channel array.
    height, width = image_ori.shape[:2]
    max_size_th = 1500
    if max(height, width) > max_size_th:
        ratio = float(max_size_th) / max(height, width)
        image_ori = cv2.resize(image_ori, None, fx=ratio, fy=ratio,
                               interpolation=cv2.INTER_NEAREST)
    image_gray = cv2.cvtColor(image_ori, cv2.COLOR_BGR2GRAY)
    # MSER area threshold keyed off the pre-resize size, as before.
    mser_area_th = 100 if max(height, width) >= 1000 else 80
    return gradient_assessment(image_gray, image_ori, mser_area_th, method)

def get_clarity(input_file_list):
    """Score every listed image with every registered measure and save a CSV.

    The list file contains one image path per line (first whitespace-
    separated token).  For each image and each measure the score and the
    wall-clock time are recorded; results are written to
    ``outputs/blur_test_iqa_.csv`` under the project root.

    Fixes over the original:
      * blank lines in the list file no longer raise IndexError;
      * the results variable no longer shadows the builtin ``dict``;
      * a failure mid-image previously left the score columns shorter than
        the ``images`` column, making ``pd.DataFrame`` raise ValueError —
        failed images are now skipped as whole rows instead.

    Args:
        input_file_list: path to the text file listing the images.
    """
    with open(input_file_list, "r") as op:
        images = [line.split()[0] for line in op if line.strip()]

    out_dir = os.path.join(root, "outputs")
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    rows = []
    for image in images:
        # Build the whole row first; only keep it if every measure succeeds.
        row = {"images": os.path.split(image)[-1]}
        try:
            for key in funcs:
                start = time.time()
                score = image_quality_assessment(cv2.imread(image), method=key)
                t = time.time() - start

                print(image, key, score, t)
                row[key] = score
                row["{}_{}".format(key, "time")] = t
        except Exception as e:
            print(e)
            continue
        rows.append(row)

    df = pd.DataFrame(rows)
    df.to_csv(os.path.join(out_dir, "blur_test_iqa_.csv"), index=False)

if __name__ == '__main__':
    # Entry point: score the blur test set listed under datasets/blur.
    get_clarity(os.path.join(data,"blur","images_list.txt"))