import tensorflow as tf
import numpy as np
import os, sys, cv2
import glob
import shutil
import time
import datetime
import uuid
import json
import functools
import logging
import collections
import model
from icdar import restore_rectangle
import lanms
from eval import resize_image, sort_poly, detect
# Module-level logger; handler/formatter wiring is left to the application
# entry point — this module only emits records.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Directory holding the TF checkpoint files restored by EASTSERVER.
checkpoint_path = './model'
class EASTSERVER(object):
    '''
    EAST text-detection server.

    ``register_mtp`` builds the inference graph and restores weights from
    ``checkpoint_path``; ``predictor`` runs one image through the network
    and returns axis-aligned text boxes.
    '''
    def register_mtp(self):
        """Build the inference graph, open a session and restore weights.

        Side effects: sets ``self.input_images``, ``self.global_step``,
        ``self.f_score``, ``self.f_geometry`` and ``self.sess``.

        Raises:
            FileNotFoundError: when no checkpoint exists under
                ``checkpoint_path`` (previously this failed with an opaque
                ``AttributeError`` on ``None``).
        """
        config = tf.ConfigProto()
        # Cap per-process GPU memory so several workers can share one card.
        config.gpu_options.per_process_gpu_memory_fraction = 0.3
        self.input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
        self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        self.f_score, self.f_geometry = model.model(self.input_images, is_training=False)

        # Restore the EMA shadow variables (decay 0.997) instead of the raw
        # weights, matching how the model was trained.
        variable_averages = tf.train.ExponentialMovingAverage(0.997, self.global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())
        self.sess = tf.Session(config=config)

        ckpt_state = tf.train.get_checkpoint_state(checkpoint_path)
        if ckpt_state is None or not ckpt_state.model_checkpoint_path:
            raise FileNotFoundError(
                'No checkpoint found in {}'.format(checkpoint_path))
        model_path = os.path.join(checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
        logger.info('Restore from {}'.format(model_path))
        saver.restore(self.sess, model_path)


    def predictor(self, img):
        """Detect text regions in one image.

        Args:
            img: HxWx3 image array; assumes BGR channel order (OpenCV) —
                the channel flip below feeds the network RGB. TODO confirm
                against the caller.

        Returns:
            list of ``[x_min, y_min, x_max, y_max]`` int boxes, clipped to
            the image bounds; empty list when nothing is detected.
        """
        start_time = time.time()
        timer = collections.OrderedDict([
            ('net', 0),
            ('restore', 0),
            ('nms', 0)
        ])
        im_resized, (ratio_h, ratio_w) = resize_image(img)

        start = time.time()
        # [:, :, ::-1] flips BGR -> RGB on the channel axis.
        score, geometry = self.sess.run(
            [self.f_score, self.f_geometry],
            feed_dict={self.input_images: [im_resized[:, :, ::-1]]})
        timer['net'] = time.time() - start
        boxes, timer = detect(score_map=score, geo_map=geometry, timer=timer)
        logger.info('net {:.0f}ms, restore {:.0f}ms, nms {:.0f}ms'.format(
            timer['net'] * 1000, timer['restore'] * 1000, timer['nms'] * 1000))

        if boxes is not None:
            # Column 8 holds the score (unused here); columns 0-7 are the
            # quad corner coordinates, reshaped to (N, 4, 2).
            boxes = boxes[:, :8].reshape((-1, 4, 2))
            # Map coordinates from the resized working image back to the
            # original image.
            boxes[:, :, 0] /= ratio_w
            boxes[:, :, 1] /= ratio_h

        duration = time.time() - start_time
        timer['overall'] = duration
        logger.info('[timing] {}'.format(duration))

        text_lines = []
        if boxes is not None:
            for box in boxes:
                # Collapse the quad to an axis-aligned rectangle using
                # corners 0 (top-left), 1 (top-right), 2 (bottom-right) —
                # presumably the output order of detect(); verify there.
                x_min = max(int(box[0][0]), 0)
                y_min = max(int(box[0][1]), 0)
                x_max = min(int(box[1][0]), img.shape[1])
                y_max = min(int(box[2][1]), img.shape[0])
                text_lines.append([x_min, y_min, x_max, y_max])
        return text_lines
