import argparse

import numpy as np
from PIL import Image, ImageDraw
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input

import cfg
from label import point_inside_of_quad
from network import East
from preprocess import resize_image
from nms import nms
import math
import os
import glob
import cv2
import matplotlib.pyplot as plt
def sigmoid(x):
    """Elementwise logistic function `y = 1 / (1 + exp(-x))`."""
    return np.reciprocal(np.exp(-x) + 1)


def cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):
    """Crop text quad *s* out of the full-size image and save it.

    Pixels outside the quad are whitened so the crop contains only the
    text line. The crop is saved as ``<img_path>_subim<s>.jpg``.

    Args:
        geo: 4x2 quad vertices in resized-image coordinates.
        scale_ratio_w, scale_ratio_h: resized / original size ratios.
        im_array: original (full-size) image as an array.
        img_path: source image path, used as the output-name prefix.
        s: quad index, used in the output file name.
    """
    # BUGFIX: use a non-in-place divide. The old ``geo /= [...]`` mutated
    # the caller's array, so predict() rescaled the same quad twice when
    # computing rescaled_geo after this call.
    geo = geo / [scale_ratio_w, scale_ratio_h]
    p_min = np.amin(geo, axis=0)
    p_max = np.amax(geo, axis=0)
    min_xy = p_min.astype(int)
    max_xy = p_max.astype(int) + 2
    sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()
    # Whiten everything outside the quad within the bounding box.
    for m in range(min_xy[1], max_xy[1]):
        for n in range(min_xy[0], max_xy[0]):
            if not point_inside_of_quad(n, m, geo, p_min, p_max):
                sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255
    sub_im = image.array_to_img(sub_im_arr, scale=False)
    sub_im.save(img_path + '_subim%d.jpg' % s)


def predict(east_detect, img_path, pixel_threshold, quiet=False):
    """Detect text in one image and save visualisations next to it.

    Saves ``<img_path>_act.jpg`` (activation-pixel grid) and
    ``<img_path>_predict.jpg`` (quads after NMS). When
    ``cfg.predict_write2txt`` is on, also writes one comma-joined
    8-coordinate quad per line to ``<img_path minus extension>.txt``.

    Args:
        east_detect: compiled AdvancedEAST model.
        img_path: input image path.
        pixel_threshold: minimum inside-text score for a pixel to activate.
        quiet: suppress the per-quad warning about invalid quads.
    """
    img = image.load_img(img_path)
    d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
    img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
    img = image.img_to_array(img)
    img = preprocess_input(img, mode='tf')
    x = np.expand_dims(img, axis=0)
    y = east_detect.predict(x)

    y = np.squeeze(y, axis=0)
    # Channels 0..2 are logits (inside-text, side-vertex, head/tail);
    # squash them to probabilities before thresholding.
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)
    with Image.open(img_path) as im:
        im_array = image.img_to_array(im.convert('RGB'))
        d_wight, d_height = resize_image(im, cfg.max_predict_img_size)
        scale_ratio_w = d_wight / im.width
        scale_ratio_h = d_height / im.height
        im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
        quad_im = im.copy()
        draw = ImageDraw.Draw(im)
        # Draw one small square per activated pixel; colour encodes the
        # side-vertex prediction (yellow = head, green = tail).
        for i, j in zip(activation_pixels[0], activation_pixels[1]):
            px = (j + 0.5) * cfg.pixel_size
            py = (i + 0.5) * cfg.pixel_size
            line_width, line_color = 1, 'red'
            if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:
                if y[i, j, 2] < cfg.trunc_threshold:
                    line_width, line_color = 2, 'yellow'
                elif y[i, j, 2] >= 1 - cfg.trunc_threshold:
                    line_width, line_color = 2, 'green'
            draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size)],
                      width=line_width, fill=line_color)
        im.save(img_path + '_act.jpg')
        quad_draw = ImageDraw.Draw(quad_im)
        txt_items = []
        for s, (score, geo) in enumerate(zip(quad_scores, quad_after_nms)):
            if np.amin(score) > 0:
                quad_draw.line([tuple(geo[0]),
                                tuple(geo[1]),
                                tuple(geo[2]),
                                tuple(geo[3]),
                                tuple(geo[0])], width=2, fill='red')
                if cfg.predict_cut_text_line:
                    # BUGFIX: pass a copy — cut_text_line rescales geo with
                    # an in-place divide, which would otherwise corrupt the
                    # rescaled_geo computed just below.
                    cut_text_line(geo.copy(), scale_ratio_w, scale_ratio_h,
                                  im_array, img_path, s)
                # Map the quad back from the resized image to original coords.
                rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
                rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
                txt_item = ','.join(map(str, rescaled_geo_list))
                txt_items.append(txt_item + '\n')
            elif not quiet:
                print('quad invalid with vertex num less than 4.')
        quad_im.save(img_path + '_predict.jpg')
        if cfg.predict_write2txt and len(txt_items) > 0:
            with open(img_path[:-4] + '.txt', 'w') as f_txt:
                f_txt.writelines(txt_items)


def predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):
    """Detect text in one image and write quad coordinates to *txt_path*.

    Like predict() but without saving any visualisation images.

    Args:
        east_detect: compiled AdvancedEAST model.
        img_path: input image path.
        txt_path: output text file; one comma-joined 8-coordinate quad per line.
        pixel_threshold: minimum inside-text score for a pixel to activate.
        quiet: suppress the per-quad warning about invalid quads.
    """
    img = image.load_img(img_path)
    d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
    scale_ratio_w = d_wight / img.width
    scale_ratio_h = d_height / img.height
    img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
    img = image.img_to_array(img)
    img = preprocess_input(img, mode='tf')
    x = np.expand_dims(img, axis=0)
    y = east_detect.predict(x)

    y = np.squeeze(y, axis=0)
    # Channels 0..2 are logits; squash to probabilities before thresholding.
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)

    txt_items = []
    for score, geo in zip(quad_scores, quad_after_nms):
        if np.amin(score) > 0:
            # Map the quad back from the resized image to original coords.
            rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
            rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
            txt_item = ','.join(map(str, rescaled_geo_list))
            txt_items.append(txt_item + '\n')
        elif not quiet:
            print('quad invalid with vertex num less than 4.')
    if cfg.predict_write2txt and len(txt_items) > 0:
        with open(txt_path, 'w') as f_txt:
            f_txt.writelines(txt_items)


def parse_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with:
            path: input image path.
            threshold: pixel activation threshold. ``type=float`` ensures a
                command-line value arrives as a float (previously it stayed a
                string and every caller had to cast it; ``float(...)`` on the
                result remains harmless, so callers are unaffected).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', '-p',
                        default='demo/012.png',
                        help='image path')
    parser.add_argument('--threshold', '-t',
                        type=float,
                        default=cfg.pixel_threshold,
                        help='pixel activation threshold')
    return parser.parse_args()

# Build the AdvancedEAST model.
def east_detect():
    """Build the AdvancedEAST network and load the saved weights."""
    model = East().east_network()
    model.load_weights(cfg.saved_model_weights_file_path)
    return model

# Text detection based on AdvancedEAST.
# Inputs: AdvancedEAST model, image path, pixel classification threshold.
# Returns: position info of the detected text boxes.
def text_detect(east_detect, img_path, pixel_threshold=0.9):
    """Detect text boxes in one image with an AdvancedEAST model.

    Args:
        east_detect: compiled AdvancedEAST model.
        img_path: path of the image to run detection on.
        pixel_threshold: minimum inside-text score for a pixel to activate.

    Returns:
        List of 8-float lists ``[x1, y1, ..., x4, y4]`` — one quad per
        detected text line, in original-image coordinates.
    """
    pil_img = image.load_img(img_path)
    target_w, target_h = resize_image(pil_img, cfg.max_predict_img_size)
    ratio_w = target_w / pil_img.width
    ratio_h = target_h / pil_img.height
    resized = pil_img.resize((target_w, target_h), Image.NEAREST).convert('RGB')
    arr = preprocess_input(image.img_to_array(resized), mode='tf')

    prediction = east_detect.predict(np.expand_dims(arr, axis=0))
    prediction = np.squeeze(prediction, axis=0)
    # First three channels are logits; squash to probabilities.
    prediction[:, :, :3] = sigmoid(prediction[:, :, :3])
    activation_pixels = np.where(
        np.greater_equal(prediction[:, :, 0], pixel_threshold))
    quad_scores, quad_after_nms = nms(prediction, activation_pixels)

    # Keep only fully valid quads and map them back to original coordinates.
    return [
        np.reshape(geo / [ratio_w, ratio_h], (8,)).tolist()
        for score, geo in zip(quad_scores, quad_after_nms)
        if np.amin(score) > 0
    ]

def dealResult(bboxes, image_path=None):
    """Convert detected quads into ``"name,x_max,y_max,x_min,y_min"`` rows.

    Each bbox is an 8-float list ``[x1, y1, x2, y2, x3, y3, x4, y4]`` as
    produced by text_detect(). The axis-aligned extents are taken from the
    vertex pairs the original code compared: x from indices 0/2 and 4/6,
    y from indices 1/7 and 3/5 (presumably a clockwise-from-top-left quad
    — confirm against nms()'s output order).

    Args:
        bboxes: list of 8-coordinate quads.
        image_path: source image path; the predict_imgs directory prefix is
            stripped to obtain the name field. Defaults to the module-level
            ``img_path`` global set in ``__main__`` (kept for backward
            compatibility with the original implicit-global behavior).

    Returns:
        List of comma-joined strings, one per bbox.
    """
    if image_path is None:
        # Historical behavior: fall back to the global assigned in __main__.
        image_path = img_path
    name = image_path.replace("/workspace/cjone/predict_imgs/", "")
    rows = []
    for quad in bboxes:
        x_min = min(quad[0], quad[2])
        y_min = min(quad[1], quad[7])
        x_max = max(quad[4], quad[6])
        y_max = max(quad[3], quad[5])
        # Round outward (ceil the maxima, floor the minima) so the crop
        # never clips the detected text.
        rows.append(",".join([
            name,
            str(math.ceil(x_max)),
            str(math.ceil(y_max)),
            str(math.floor(x_min)),
            str(math.floor(y_min)),
        ]))
    return rows
def compare(v):
    """Return *v* converted to int, clamped to a minimum of zero."""
    return max(0, int(v))

def cut(img, oneLine):
    """Crop one detected text block out of an image and save it.

    Args:
        img: image file name relative to the predict_imgs directory.
            NOTE(review): this parameter is immediately shadowed below by
            the loaded cv2 array.
        oneLine: comma-joined record ``"cut_path,x_max,y_max,x_min,y_min,..."``
            as built in ``__main__``; only the first five fields are read.

    Writes the crop to ``/workspace/wzwork/image_data/cut_pic/<cut_path>``.
    """
    arr1 = oneLine.strip(',').split(',')
#     print(arr1)
    # Output file name for the crop (already carries the per-box index).
    cut_path = arr1[0]
    # print(cut_path)
    
#     print(img)
    img = cv2.imread('/workspace/cjone/predict_imgs/' + img)

    # compare() clamps each coordinate to a non-negative int.
    x_max = compare(arr1[1])
    y_max = compare(arr1[2])
    x_min = compare(arr1[3])
    y_min = compare(arr1[4])

    # plt.imshow(img)
    # plt.axis('off')
    # plt.show()
#     print(cut_path)
    cropped = img[y_min:y_max, x_min:x_max]  # crop region is [ymin:ymax, xmin:xmax]
    cv2.imwrite("/workspace/wzwork/image_data/cut_pic/"+ cut_path, cropped)

    
    
# Prepare the output directory for the generated results.
def checkRetDir():
    """Ensure the output directory for cropped text images exists.

    Uses os.makedirs with exist_ok=True: it also creates missing parent
    directories and is race-free, unlike the previous exists()+mkdir pair,
    which failed when the parent was absent or the dir appeared between
    the check and the call.
    """
    os.makedirs('/workspace/wzwork/image_data/cut_pic/', exist_ok=True)
        
        
if __name__ == '__main__':
    # Batch pipeline: detect text boxes in every .jpg of the input folder,
    # then write their coordinates to a txt file and crop each block.
    checkRetDir()
    args = parse_args()

    WSI_MASK_PATH = '/workspace/cjone/predict_imgs/'  # folder holding the input images
    paths = glob.glob(os.path.join(WSI_MASK_PATH, '*.jpg'))

    # Build the model once and reuse it for every image.
    east = East()
    east_detect = east.east_network()
    east_detect.load_weights(cfg.saved_model_weights_file_path)

    threshold = float(args.threshold)  # loop-invariant, hoisted
    total_line = []
    for index, path in enumerate(paths):
        if os.path.exists(path):
            img_path = path  # also read as a global fallback by dealResult()
            bboxes = text_detect(east_detect, img_path, threshold)
            total_line += dealResult(bboxes)
            print("正在识别第"+str(index)+"张图片")

    # Write the records and crop the blocks once every image is processed.
    # (Previously this ran inside the loop on the last index, behind the
    # os.path.exists() check, so it was silently skipped when the last
    # path was missing; the output file was also never closed.)
    if paths:
        print("-----------------------")
        print("开始写入和裁剪标识块图")
        print("-----------------------")
        txtPath = '/workspace/wzwork/image_data/cj_test.txt'
        # Mode 'w' truncates any existing file, so no pre-delete is needed.
        with open(txtPath, 'w') as file_write_obj:
            for index1, val in enumerate(total_line):
                arr1 = val.strip(',').split(',')
                # Make the crop name unique per detected box.
                cut_path = arr1[0].replace(".jpg", "_" + str(index1) + ".jpg")
                new_arr = [cut_path,
                           str(compare(arr1[1])),
                           str(compare(arr1[2])),
                           str(compare(arr1[3])),
                           str(compare(arr1[4])),
                           arr1[0]]
                oneLine = ",".join(new_arr)
                print(oneLine)
                cut(arr1[0], oneLine)
                file_write_obj.writelines(oneLine)
                file_write_obj.write('\n')
    print("恭喜！！定位及裁剪工作完成^-^")

