# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time

__dir__ = os.path.dirname(os.path.abspath(__file__))

# Make the script directory and the repository root importable BEFORE any
# `tools.*` / `ppocr.*` import runs.  The original appended these paths
# after `from tools.infer... import ...`, which only worked when the
# process was already launched from a directory that had `tools` on the
# default path.
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../../../..')))

os.environ["FLAGS_allocator_strategy"] = 'auto_growth'

import cv2
import numpy as np
from tqdm import tqdm

import tools.infer.__utility__.utility_fileocr as utility
from tools.infer.predict_rec import TextRecognizer
from tools.infer.toolfunc import cv2ImgAddText
from ppocr.utils.logging import get_logger
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.data import create_operators, transform
from ppocr.postprocess import build_post_process

logger = get_logger()


class TextDetector(object):
    """Text-detection inference wrapper around a Paddle predictor.

    Builds the pre-processing operator list and the post-processing module
    for the configured detection algorithm (DB, EAST or SAST), creates the
    underlying predictor via ``utility.create_predictor``, and exposes
    ``__call__`` to detect text boxes in a single image.
    """

    def __init__(self, args):
        """Configure pipelines and create the predictor.

        Args:
            args: parsed command-line namespace (see ``utility.parse_args``);
                must provide ``det_algorithm`` plus the per-algorithm
                threshold options read below.  Exits the process for an
                unknown ``det_algorithm``.
        """
        self.args = args
        self.det_algorithm = args.det_algorithm
        # Shared pre-processing: resize for test, ImageNet-style
        # normalization, HWC -> CHW, then keep only the keys the
        # predictor consumes.
        pre_process_list = [{
            'DetResizeForTest': {
                'limit_side_len': args.det_limit_side_len,
                'limit_type': args.det_limit_type
            }
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        postprocess_params = {}
        if self.det_algorithm == "DB":
            postprocess_params['name'] = 'DBPostProcess'
            postprocess_params["thresh"] = args.det_db_thresh
            postprocess_params["box_thresh"] = args.det_db_box_thresh
            postprocess_params["max_candidates"] = 1000
            postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio
            postprocess_params["use_dilation"] = args.use_dilation
            # score_mode is optional on older argument parsers, so probe
            # for it instead of reading it unconditionally.
            if hasattr(args, "det_db_score_mode"):
                postprocess_params["score_mode"] = args.det_db_score_mode

        elif self.det_algorithm == "EAST":
            postprocess_params['name'] = 'EASTPostProcess'
            postprocess_params["score_thresh"] = args.det_east_score_thresh
            postprocess_params["cover_thresh"] = args.det_east_cover_thresh
            postprocess_params["nms_thresh"] = args.det_east_nms_thresh
        elif self.det_algorithm == "SAST":
            # SAST resizes by the long side rather than by a side-length
            # limit, so replace the first pre-processing op.
            pre_process_list[0] = {
                'DetResizeForTest': {
                    'resize_long': args.det_limit_side_len
                }
            }
            postprocess_params['name'] = 'SASTPostProcess'
            postprocess_params["score_thresh"] = args.det_sast_score_thresh
            postprocess_params["nms_thresh"] = args.det_sast_nms_thresh
            self.det_sast_polygon = args.det_sast_polygon
            if self.det_sast_polygon:
                postprocess_params["sample_pts_num"] = 6
                postprocess_params["expand_scale"] = 1.2
                postprocess_params["shrink_ratio_of_width"] = 0.2
            else:
                postprocess_params["sample_pts_num"] = 2
                postprocess_params["expand_scale"] = 1.0
                postprocess_params["shrink_ratio_of_width"] = 0.3
        else:
            logger.info("unknown det_algorithm:{}".format(self.det_algorithm))
            sys.exit(0)

        self.preprocess_op = create_operators(pre_process_list)
        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.input_tensor, self.output_tensors = utility.create_predictor(
            args, 'det', logger)  # paddle.jit.load(args.det_model_dir)
        # self.predictor.eval()

    def order_points_clockwise(self, pts):
        """Order 4 points as [top-left, top-right, bottom-right, bottom-left].

        reference from:
        https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py
        """
        # sort the points based on their x-coordinates
        xSorted = pts[np.argsort(pts[:, 0]), :]

        # grab the left-most and right-most points from the sorted
        # x-roodinate points
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]

        # now, sort the left-most coordinates according to their
        # y-coordinates so we can grab the top-left and bottom-left
        # points, respectively
        leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
        (tl, bl) = leftMost

        rightMost = rightMost[np.argsort(rightMost[:, 1]), :]
        (tr, br) = rightMost

        rect = np.array([tl, tr, br, bl], dtype="float32")
        return rect

    def clip_det_res(self, points, img_height, img_width):
        """Clamp every (x, y) point of a box into the image bounds, in place."""
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Order corners clockwise, clip to the image, and drop tiny boxes.

        Boxes whose width or height is 3 pixels or less are discarded.
        Returns an ``(n, 4, 2)`` array of the surviving boxes.
        """
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes to image bounds without reordering or size filtering.

        Used for SAST polygon output, where points are not a simple quad.
        """
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.clip_det_res(box, img_height, img_width)
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def __call__(self, img):
        """Detect text boxes in one image.

        Args:
            img: HxWxC numpy image (as produced by ``cv2.imread``).

        Returns:
            Tuple ``(dt_boxes, elapse)``: the post-processed box corner
            points (filtered/clipped per algorithm) and the elapsed time
            in seconds.
        """
        ori_im = img.copy()
        data = {'image': img}
        data = transform(data, self.preprocess_op)
        # NOTE(review): if transform() returns None, the unpack below
        # raises before the `img is None` guard is reached — confirm
        # whether transform can return None for this operator list.
        img, shape_list = data
        if img is None:
            return None, 0
        # Add the batch dimension expected by the predictor.
        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        img = img.copy()
        starttime = time.time()

        self.input_tensor.copy_from_cpu(img)
        self.predictor.run()
        outputs = []
        for output_tensor in self.output_tensors:
            output = output_tensor.copy_to_cpu()
            outputs.append(output)

        # Map raw predictor outputs to the names each post-processor expects.
        preds = {}
        if self.det_algorithm == "EAST":
            preds['f_geo'] = outputs[0]
            preds['f_score'] = outputs[1]
        elif self.det_algorithm == 'SAST':
            preds['f_border'] = outputs[0]
            preds['f_score'] = outputs[1]
            preds['f_tco'] = outputs[2]
            preds['f_tvo'] = outputs[3]
        elif self.det_algorithm == 'DB':
            preds['maps'] = outputs[0]
        else:
            raise NotImplementedError
        self.predictor.try_shrink_memory()
        post_result = self.postprocess_op(preds, shape_list)
        dt_boxes = post_result[0]['points']
        if self.det_algorithm == "SAST" and self.det_sast_polygon:
            dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_im.shape)
        else:
            dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
        elapse = time.time() - starttime
        return dt_boxes, elapse

def process(img, sorts):
    """Crop the axis-aligned region spanned by a box's first and third corners.

    Args:
        img: HxWxC (or HxW) numpy image.
        sorts: list of [x, y] corner points; only ``sorts[0]`` (top-left)
            and ``sorts[2]`` (bottom-right) are used.

    Returns:
        The cropped view ``img[y0:y1, x0:x1]``, with coordinates clamped
        into the image bounds.  May be empty if the corners are inverted.
    """
    height, width = img.shape[0], img.shape[1]

    x0, y0 = sorts[0]
    x1, y1 = sorts[2]

    # Clamp the corner coordinates so the slice stays inside the image.
    x0 = max(x0, 0)
    y0 = max(y0, 0)
    x1 = min(x1, width)
    y1 = min(y1, height)

    return img[y0:y1, x0:x1]

def juan(image_dir='E:/data/BaiduNetdiskDownload/0201_7.2_xsz/test'):
    """Run text detection + recognition over a directory and visualize results.

    For every image: detect boxes, crop each box, recognize the crops in a
    batch, draw the recognized text onto the detection visualization, save
    it to ``./results`` and show it in an OpenCV window.

    Args:
        image_dir: directory scanned for input images.  Defaults to the
            original hard-coded path for backward compatibility.
    """
    args = utility.parse_args()
    print(args)
    image_file_list = get_image_file_list(image_dir)

    text_detector = TextDetector(args)
    text_recognizer = TextRecognizer(args)
    count = 0
    total_time = 0
    draw_img_save = "./inference_results"
    os.makedirs(draw_img_save, exist_ok=True)
    results_dir = "./results"
    # The original wrote here without creating the directory first and
    # silently produced nothing (or crashed) when it was missing.
    os.makedirs(results_dir, exist_ok=True)
    for image_file in tqdm(image_file_list):
        # Portable name extraction; the original split on "\\" and took
        # index 1, which breaks on POSIX paths and on Windows paths with
        # more than one separator.
        image_name = os.path.splitext(os.path.basename(image_file))[0]

        img, flag = check_and_read_gif(image_file)
        if not flag:
            img = cv2.imread(image_file)

        if img is None:
            logger.info("error in loading image:{}".format(image_file))
            continue
        dt_boxes, elapse = text_detector(img)
        # Skip the first (warm-up) prediction when averaging times.
        if count > 0:
            total_time += elapse
        count += 1
        logger.info("Predict time of {}: {}".format(image_file, elapse))
        src_im = utility.draw_text_det_by_cv2img(dt_boxes, img)

        test_img_list = []
        rem_sorts = []
        for box in dt_boxes:
            # Quad corners as plain [x, y] int lists.
            corners = np.array(box).astype(np.int32).reshape(-1, 2)
            sorts = [value.tolist() for value in corners]

            cut_img = process(img, sorts)
            if cut_img.size > 0:  # keep only non-degenerate crops
                rem_sorts.append(sorts)
                test_img_list.append(cut_img)

        rec_rec, predict_time = text_recognizer(test_img_list)
        # Draw each recognized string just above its box's first corner.
        for p in range(len(rem_sorts)):
            src_im = cv2ImgAddText(src_im, rec_rec[p][0],
                                   rem_sorts[p][0][0] - 5,
                                   rem_sorts[p][0][1] - 5, (0, 255, 0), 20)
        cv2.imwrite(os.path.join(results_dir, image_name + ".jpg"), src_im)
        cv2.imshow("2", src_im)
        cv2.waitKey()
        cv2.destroyAllWindows()

'''
        times = 0
        lines = []
        line = []
        basic = get_center_point(rem_sorts[0])
        print(basic)
        for po in rem_sorts:
            #print("basic:",basic)
            #print(basic[1]-get_center_point(po)[1],(po[2][1]-po[1][1])*0.95)
            if (basic[1]-get_center_point(po)[1])> int((po[2][1]-po[1][1])*0.95):
                lines.append(line)
                line = []
                basic = get_center_point(po)
                #print("basic1:", basic)
                line.append(po)
            else:
                line.append(po)
        print(lines)

        for line_ in lines:
            if len(line_)> 2:
                for li in range(len(line_)):
                    if li + 1< len(line_):
                        cv2.line(src_im, tuple(get_center_point(line_[li])), tuple(get_center_point(line_[li+1])),(0, 255, 0), 1, 4)


        def procee_jybg(points, words):
           result_loc = []
           standard_loc = []
           ex_result_loc = []
           for i in range(len(words)):
               if words[i][0] == '结果判定':
                   result_loc.append(points[i])
               if words[i][0] == '标准限值':
                   standard_loc.append(points[i])
               if words[i][0] == '检验结果':
                   ex_result_loc.append(points[i])
           print(result_loc, standard_loc, ex_result_loc)
           return result_loc, standard_loc, ex_result_loc

        result_loc, standard_loc, ex_result_loc = procee_jybg(rem_sorts,rec_rec)

        def get_result(results,points):
            final_res = []
            for i in results:
                may_point = []
                w = i[1][0]-i[0][0]
                h = i[2][1]-i[1][1]
                i_center =get_center_point(i)
                for j in points:
                    if abs(get_center_point(j)[0]-i_center[0])< w*0.3 and get_center_point(j)[1]-i_center[1] > 0 and get_center_point(j)[1]-i_center[1]< 8*h:
                        may_point.append(j)
                if len(may_point)>3:
                    final_res.append(may_point)
            for po1 in final_res:
                for po in po1:
                  cv2.circle(src_im, tuple(get_center_point(po)), 2, (255, 0, 0), 0)
            return final_res


        print(get_result(result_loc, rem_sorts))
        print(get_result(standard_loc, rem_sorts))
        print(get_result(ex_result_loc, rem_sorts))





        #for p in range(len(rem_sorts)):
            #times +=1

            #src_im = cv2ImgAddText(src_im, str(times)+" "+rec_rec[p][0], rem_sorts[p][0][0] - 5, rem_sorts[p][0][1] - 5, (0, 255, 0), 20)
            #cv2.putText(src_im, str(rec_rec[p][0]), (rem_sorts[p][0][0] - 5, rem_sorts[p][0][1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,0), 1)
        cv2.imwrite(os.path.join("./results",image_name+".jpg"), src_im)
        cv2.imshow("2", src_im)
        cv2.waitKey()

        cv2.destroyAllWindows()

        # rec_rec, predict_time = text_recognizer(test_img_list)
        # print(rec_rec)

        # cut_img = process(img, sorts)
        # rec_rec,predict_time = text_recognizer([cut_img])
        # print(rec_rec)
        # print(os.path.splitext(image_file))
        # cv2.imwrite(os.path.join("./temp",os.path.splitext(image_file)))
        # cv2.imshow("cut_img", cut_img)

        # cv2.waitKey()
        # cv2.destroyAllWindows()

        print(type(dt_boxes))

        img_name_pure = os.path.split(image_file)[-1]
        img_path = os.path.join(draw_img_save,
                                "det_res_{}".format(img_name_pure))

        # cv2.imwrite(img_path, src_im)
        logger.info("The visualized image saved in {}".format(img_path))
    if count > 1:
        logger.info("Avg Time: {}".format(total_time / (count - 1)))

'''
def test(img_dir='../../../PaddleOCR/tools/infer/barcode'):
    """Batch text recognition over every image in a directory.

    Images are accumulated into batches of ``args.rec_batch_num`` (flushing
    on the last image), recognized, and the per-image predictions plus the
    total time are logged.

    Args:
        img_dir: directory scanned for input images.  Defaults to the
            original hard-coded path for backward compatibility.
    """
    # Local import: the original called traceback.format_exc() without
    # importing traceback anywhere, raising NameError inside the handler.
    import traceback

    args = utility.parse_args()
    # image_file_list = get_image_file_list(args.image_dir)
    image_file_list = get_image_file_list(img_dir)
    text_recognizer = TextRecognizer(args)
    total_run_time = 0.0
    total_images_num = 0
    valid_image_file_list = []
    img_list = []

    for idx, image_file in enumerate(image_file_list):
        img, flag = check_and_read_gif(image_file)
        if not flag:
            img = cv2.imread(image_file)
        if img is None:
            logger.info("error in loading image:{}".format(image_file))
            continue
        valid_image_file_list.append(image_file)
        img_list.append(img)
        # Run a batch when it is full, or when the last image arrives.
        if len(img_list) >= args.rec_batch_num or idx == len(
                image_file_list) - 1:
            try:
                rec_res, predict_time = text_recognizer(img_list)
                total_run_time += predict_time
            except Exception:
                # The original bare `except:` also swallowed SystemExit
                # and KeyboardInterrupt; catch only real errors.
                logger.info(traceback.format_exc())
                logger.info(
                    "ERROR!!!! \n"
                    "Please read the FAQ：https://github.com/PaddlePaddle/PaddleOCR#faq \n"
                    "If your model has tps module:  "
                    "TPS does not support variable shape.\n"
                    "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' "
                )
                # Non-zero exit code so callers/scripts can detect failure.
                sys.exit(1)
            for ino in range(len(img_list)):
                logger.info("Predicts of {}:{}".format(valid_image_file_list[
                                                           ino], rec_res[ino]))
            total_images_num += len(valid_image_file_list)
            valid_image_file_list = []
            img_list = []
    logger.info("Total predict time for {} images, cost: {:.3f}".format(
        total_images_num, total_run_time))

# Guard the entry point so importing this module (e.g. for its
# TextDetector class) does not immediately run the full pipeline.
if __name__ == "__main__":
    juan()



