import os, cv2
import numpy as np
import tensorflow as tf
from networks.detectors import TextDetector
from networks.text_connector.text_connect_cfg import Config as TextLineCfg
from networks.network import Network
from networks.cfg.config import cfg
from networks.ctpn_test import ctpn_detector
from networks.ctpn_train import ctpn_train
from imgs2blob import ctpn_text_detector
from networks.get_network import get_network

def resize_im(im, scale, max_scale=None):
    """Resize *im* so its shorter side equals *scale*, capped by *max_scale*.

    Args:
        im:        input image as an (H, W[, C]) array.
        scale:     target length for the shorter image side.
        max_scale: if given, the factor is reduced so the longer side does
                   not exceed this value.

    Returns:
        (resized_image, f) where f is the scale factor actually applied.
    """
    f = float(scale) / min(im.shape[0], im.shape[1])
    # compare against None with `is not`, never `!=` (PEP 8)
    if max_scale is not None and f * max(im.shape[0], im.shape[1]) > max_scale:
        f = float(max_scale) / max(im.shape[0], im.shape[1])
    return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f
    

def draw_boxes(img, boxes, scale):
    """Draw quadrilateral text-line boxes on a copy of *img*.

    Args:
        img:   BGR image at detection resolution (not modified in place).
        boxes: iterable of 9-element rows (x1, y1, x2, y2, x3, y3, x4, y4, score).
        scale: resize factor previously applied to the image; the annotated
               image is scaled back by 1/scale before being returned.

    Returns:
        (text_recs, img): the kept boxes' 8 coordinates as an int array of
        shape (n_kept, 8), and the annotated, rescaled image.
    """
    box_id = 0
    img = img.copy()
    # np.int was removed in NumPy 1.24 — use the builtin int as the dtype
    text_recs = np.zeros((len(boxes), 8), int)
    for box in boxes:
        # skip degenerate boxes. NOTE(review): this compares scalar
        # coordinates (|x1 - y1|, |y2 - x1|), not corner points — presumably
        # intended as a width/height proxy; confirm against the box layout.
        if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
            continue

        # NOTE: in OpenCV's BGR order (255, 0, 0) renders blue, (0, 255, 0) green
        color = (255, 0, 0) if box[8] >= 0.8 else (0, 255, 0)

        # draw the four edges of the quadrilateral
        cv2.line(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)
        cv2.line(img, (int(box[0]), int(box[1])), (int(box[4]), int(box[5])), color, 2)
        cv2.line(img, (int(box[6]), int(box[7])), (int(box[2]), int(box[3])), color, 2)
        cv2.line(img, (int(box[4]), int(box[5])), (int(box[6]), int(box[7])), color, 2)

        for i in range(8):
            text_recs[box_id, i] = box[i]
        box_id += 1

    # drop rows reserved for boxes that were skipped above, so callers
    # don't iterate over all-zero records
    text_recs = text_recs[:box_id]

    img = cv2.resize(img, None, None, fx=1.0 / scale, fy=1.0 / scale,
                     interpolation=cv2.INTER_LINEAR)
    return text_recs, img

def text_detect(img):
    """Run a detector on *img* and return (text_recs, annotated_img, img).

    NOTE(review): `ctpn` is not defined or imported anywhere in this module
    (only `ctpn_detector` and `ctpn_text_detector` are imported), so calling
    this function raises NameError as written — confirm the intended callee.
    The `__main__` block below does not use this function.
    """
    scores, boxes, img, scale = ctpn(img)
    text_recs, img_drawed = draw_boxes(img, boxes, scale)
    return text_recs, img_drawed, img

def draw_detected_boxes(image, boxes, scale=1):
    """Draw axis-aligned proposal boxes on a copy of *image*.

    Args:
        image: BGR image to annotate (not modified in place).
        boxes: iterable of rows whose first four entries are
               (xmin, ymin, xmax, ymax) corner coordinates.
        scale: unused; kept for backward compatibility with existing callers.

    Returns:
        The annotated copy of the image.
    """
    img = image.copy()
    color = (0, 255, 0)  # green in BGR
    for box in boxes:
        xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
        # one rectangle call replaces four individual edge lines
        cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, 2)
    return img
    
def drwa_text_lines(img, boxes, scale):
    """Draw connected text-line quadrilaterals on a copy of *img*.

    NOTE: this is a duplicate of `draw_boxes`; the misspelled name is kept
    because the `__main__` block calls it under this name.

    Args:
        img:   BGR image at detection resolution (not modified in place).
        boxes: iterable of 9-element rows (x1, y1, x2, y2, x3, y3, x4, y4, score).
        scale: resize factor previously applied to the image; the annotated
               image is scaled back by 1/scale before being returned.

    Returns:
        (text_recs, img): the kept boxes' 8 coordinates as an int array of
        shape (n_kept, 8), and the annotated, rescaled image.
    """
    box_id = 0
    img = img.copy()
    # np.int was removed in NumPy 1.24 — use the builtin int as the dtype
    text_recs = np.zeros((len(boxes), 8), int)
    for box in boxes:
        # skip degenerate boxes. NOTE(review): this compares scalar
        # coordinates, not corner points — presumably a width/height proxy;
        # confirm against the box layout.
        if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
            continue

        # NOTE: in OpenCV's BGR order (255, 0, 0) renders blue, (0, 255, 0) green
        color = (255, 0, 0) if box[8] >= 0.8 else (0, 255, 0)

        # draw the four edges of the quadrilateral
        cv2.line(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)
        cv2.line(img, (int(box[0]), int(box[1])), (int(box[4]), int(box[5])), color, 2)
        cv2.line(img, (int(box[6]), int(box[7])), (int(box[2]), int(box[3])), color, 2)
        cv2.line(img, (int(box[4]), int(box[5])), (int(box[6]), int(box[7])), color, 2)

        for i in range(8):
            text_recs[box_id, i] = box[i]
        box_id += 1

    # drop rows reserved for boxes that were skipped above
    text_recs = text_recs[:box_id]

    img = cv2.resize(img, None, None, fx=1.0 / scale, fy=1.0 / scale,
                     interpolation=cv2.INTER_LINEAR)
    return text_recs, img
    
if __name__ == "__main__":
    
    out_path = "./data/outputs"
    in_path = "./data/inputs"
    
    net = get_network("ctpn_test")
    
    # init session 
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    # config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    # sess = tf.Session(config=config)
    sess = tf.Session()
    
    saver = tf.train.Saver()
    rpn = tf.train.get_checkpoint_state("./data/checkpoints/")
    saver.restore(sess, rpn.model_checkpoint_path)
    
    print(rpn)
    
    for imgn in os.listdir(in_path):
        print(os.path.join(in_path, imgn))
        img = cv2.imread(os.path.join(in_path, imgn))
        img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        scores, boxes1 = ctpn_text_detector(sess, net, img)
        img_drawed1 = draw_detected_boxes(img, boxes1, scale=scale)
        
        textdetector = TextDetector()
        boxes2 = textdetector.detect(boxes1, scores[:, np.newaxis], img.shape[:2])
        text_recs, img_drawed2 = drwa_text_lines(img, boxes2, scale=scale)
        
        # _________ save _________
        save_path = os.path.join(out_path, imgn.split(".")[0])
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        cv2.imwrite( os.path.join(save_path, "propoal_{}.png".format(imgn)), img_drawed1)
        cv2.imwrite( os.path.join(save_path, "boxes_text_{}.png".format(imgn)), img_drawed2)
        # cropped boxes
        if not os.path.exists( os.path.join(save_path, "cropped") ):
            os.makedirs( os.path.join(save_path, "cropped") )
        cnt = 0
        for b in text_recs:
            cnt+=1
            cv2.imwrite(os.path.join(save_path, "cropped", "{}.png".format(cnt)), img[b[1]:b[7], b[0]:b[2], :])
        
             
             
         
         
         
         
