
# coding: utf-8

from __future__ import absolute_import, with_statement
from __future__ import division
from __future__ import print_function
from functools import partial
from typing import List, Text
from image_tagging import image_tagging
import math
import os
import re
import sys
from numpy.core.fromnumeric import ptp
from numpy.lib.npyio import load

from pdfminer.pdfdocument import PDFDestinationNotFound
sys.path.append('../')



import socket



import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
import tensorflow as tf
from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary


from abstract.text import load_stopwords ,filter_keys
from abstract.image import clear_temp

from abstract.extract_infov3 import get_image , abstract_file
from abstract.extract_infov3 import extract_keys 
from abstract.extract_infov4 import  extract_keys_lac 
from abstract.translate import translate_zh 

from paddleocr import PaddleOCR, draw_ocr


# ======================
# Path to the trained im2txt checkpoint
checkpoint_path = "./model/model.ckpt-3000000"
# Vocabulary file mapping word ids back to words
vocab_file = "./im2txt/data/word_counts.txt"
# Build the inference graph once at import time; sessions are created
# per request inside abstract_image() and reuse this graph.
g = tf.Graph()
with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(), checkpoint_path)

    # Load the vocabulary (module-level, shared by all requests)
vocab = vocabulary.Vocabulary(vocab_file)
# Scratch directory for images extracted from pdf/word documents
tmp_dir = './tmp'
# Stop-word list, loaded once globally
stopwords_path = './im2txt/stopwords.copy.txt'
stopwords = load_stopwords(stopwords_path)


# ===================

def abstract_image(image_path):
    """Caption one image and derive bilingual keywords from the caption.

    Runs im2txt beam search on the image at ``image_path``, then extracts
    keywords from both the English caption and its Chinese translation.

    Args:
        image_path: filesystem path of the image to caption.

    Returns:
        (keys, sents): ``keys`` is "zh_keys##en_keys" (each part is a
        "key1##key2##..." string) and ``sents`` is the Chinese-translated
        caption text.
    """
    with tf.Session(graph=g) as sess:
        # Restore the trained weights into this fresh session.
        restore_fn(sess)
        searcher = caption_generator.CaptionGenerator(model, vocab)

        # Model input is the raw encoded image bytes.
        raw_image = tf.gfile.FastGFile(image_path, 'rb').read()
        # Best beam-search hypothesis; [1:-1] drops the <S>/</S> markers.
        best = searcher.beam_search(sess, raw_image)[0]
        caption = " ".join(vocab.id_to_word(w) for w in best.sentence[1:-1])

        keys_en = extract_keys(caption)
        # Keywords from the Chinese translation, e.g. "key1##key2##key3".
        keys_zh = extract_keys(translate_zh(caption))
        keys = keys_zh + "##" + keys_en
        sents = translate_zh(caption, english=True)

        return keys, sents

def get_images_keys(file_path, tmp_dir):
    """Extract caption keywords and sentences for every image in a document.

    Args:
        file_path: path of the source document (pdf/docx) to scan.
        tmp_dir: scratch directory where extracted images are written;
            it is deleted before returning.

    Returns:
        (doc_keys, doc_sents): "##"-joined keyword and sentence strings.
        When the document contains no images, returns
        (" ", "No Images in document!") so callers can always unpack two
        values, consistent with get_ocr_tag_images_keys().
    """
    paths = get_image(file_path, tmp_dir)

    if not paths:
        clear_temp(tmp_dir)  # delete the temporary image directory
        # BUG FIX: previously returned a bare string here, which crashed
        # callers that unpack "keys, sents"; mirror the 2-tuple shape of
        # get_ocr_tag_images_keys instead.
        return " ", "No Images in document!"

    doc_keys = []
    doc_sents = []

    for image_file in paths:
        keys, sents = abstract_image(image_file)
        doc_keys.append(keys)
        doc_sents.append(sents)

    clear_temp(tmp_dir)  # delete the temporary image directory

    return "##".join(doc_keys), "##".join(doc_sents)

# Module-level OCR engine (Chinese model, angle classifier on, CPU only).
# Constructing PaddleOCR downloads/loads its models, so do it exactly once.
ocr = PaddleOCR(use_angle_cls=True, lang="ch" ,use_gpu=False)  
# need to run only once to download and load model into memory

def get_image_content(img_path: str):
    """Run OCR on a single image and return its text joined by '##'.

    Recognized lines whose text is a single character are discarded
    (they are usually noise).

    Args:
        img_path: path of one image file to OCR.

    Returns:
        The recognized text fragments joined with "##".
    """
    detected = ocr.ocr(img_path, cls=True)
    # Each result line ends with a (text, confidence) pair; keep the text
    # when it is longer than one character.
    fragments = [item[-1][0] for item in detected if len(item[-1][0]) > 1]
    return '##'.join(fragments)

def get_ocr_tag_images_keys(file_path, tmp_dir):
    """Combine caption, OCR, and tagging keywords for a document's images.

    For every image extracted from ``file_path``, gathers im2txt caption
    keywords, OCR text, and classifier tags into one "T##"-prefixed,
    "##"-joined keyword string.

    Args:
        file_path: path of the source document (pdf/docx) to scan.
        tmp_dir: scratch directory for extracted images; deleted on return.

    Returns:
        (doc_keys, doc_sents), or (" ", "No Images in document!") when the
        document contains no images.
    """
    paths = get_image(file_path, tmp_dir)

    if not paths:
        clear_temp(tmp_dir)  # delete the temporary image directory
        return " ", "No Images in document!"

    # NOTE: the old "ocr_keys = []" / "tag_keys = []" initializers were
    # dead code (immediately overwritten by strings in the loop) — removed.
    doc_keys = []
    doc_sents = []

    for image_file in paths:
        keys, sents = abstract_image(image_file)
        ocr_keys = get_image_content(image_file)  # OCR text from the image
        tag_keys = image_tagging(image_file)      # classifier tags
        # "T##" marks document-embedded image keywords for the client side.
        doc_keys.append("T##" + tag_keys + "##" + keys + "##" + ocr_keys)
        doc_sents.append(sents)

    clear_temp(tmp_dir)  # delete the temporary image directory

    return "##".join(doc_keys), "##".join(doc_sents)


 

#  =====================

class SocketServer:
    """Unix-domain socket server answering abstract/keyword requests.

    Protocol: each client message is a utf-8 file path. The reply is
    "<keys>$$$<abstract>" for supported file types, or "No abstract".
    """

    # Single source of truth for the socket path (was duplicated in
    # __init__ and __del__).
    SERVER_ADDRESS = '/tmp/66'

    def __init__(self):
        server_address = self.SERVER_ADDRESS

        # Remove a stale socket file left by a previous run, otherwise
        # bind() raises "Address already in use" on restart.
        if os.path.exists(server_address):
            os.remove(server_address)

        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.sock.bind(server_address)
        self.sock.listen(2048)
        print(f"本地服务器启动.listening on '{server_address}'.")

    def wait_and_deal_client_connect(self):
        """Accept clients forever, handling one request per message.

        BUG FIXES vs. the original:
        - accept() is now inside the loop, so the server can serve a new
          client after the previous one disconnects (it used to accept
          exactly once).
        - recv() returning b'' (peer closed) now breaks out instead of
          busy-looping on empty data forever.
        - The no-extension check used "== None", which os.path.splitext
          never returns; an empty-string suffix is tested instead.
        """
        while True:
            connection, client_address = self.sock.accept()
            try:
                while True:
                    data = connection.recv(2048)
                    if not data:
                        break  # peer closed this connection
                    file_path = data.decode()
                    print(f".recv data from client '{client_address}': {file_path}")

                    file_suffix = os.path.splitext(file_path)[-1]
                    if not file_suffix:
                        # Path has no extension — nothing we can process.
                        connection.sendall("No abstract".encode())
                    elif file_suffix in ['.txt', '.pdf', '.docx']:
                        keys, abstract = abstract_file(file_path)
                        keys = filter_keys(keys, stopwords)  # drop stop words
                        if file_suffix == '.txt':
                            info = "T##" + keys + '$$$' + abstract
                        else:
                            # pdf/docx may embed images: add their keywords.
                            images_keys, _ = get_ocr_tag_images_keys(file_path, tmp_dir=tmp_dir)
                            images_keys = filter_keys(images_keys, stopwords)
                            info = "T##" + keys + "##" + images_keys + '$$$' + abstract

                        print("files :{}\ninfo:{}".format(file_path, info))
                        connection.sendall(info.encode())
                    elif file_suffix in ['.png', '.jpg', '.jpeg']:
                        keys, abstract = abstract_image(file_path)
                        ocr_keys = get_image_content(file_path)
                        tag_keys = image_tagging(file_path)
                        # "P##" marks stand-alone picture keywords.
                        keys = "P##" + tag_keys + "##" + keys + "##" + ocr_keys
                        keys = filter_keys(keys, stopwords)  # drop stop words

                        info = keys + '$$$' + abstract
                        print("files :{}\ninfo:{}".format(file_path, info))
                        connection.sendall(info.encode())
                    else:
                        connection.sendall("No abstract".encode())
            finally:
                connection.close()

    def __del__(self):
        # Best-effort cleanup; __del__ can run during interpreter shutdown
        # when attributes/globals may already be gone.
        try:
            self.sock.close()
            os.remove(self.SERVER_ADDRESS)
        except (OSError, AttributeError):
            pass


if __name__ == '__main__':

    # Start the Unix-socket server and block forever serving clients.
    socket_server_obj = SocketServer()
    socket_server_obj.wait_and_deal_client_connect()

    
    # def test_ocr(img_path):
    #     print("Testing OCR")
    #     print(get_image_content(img_path))
    #     print("Ocr PASS")

    # test_ocr(img_path='/home/ultraman/Desktop/2.jpg')


    # def test_textrank(test_path):
    #     print("Testing text4rank")
    #     keys,abstract = abstract_file(test_path)
    #     images_keys,_ = get_images_keys(test_path,tmp_dir=tmp_dir)
    #     print(images_keys)
    #     keys = filter_keys(keys,stopwords) # 过滤停词 
    #     images_keys = filter_keys(images_keys , stopwords) # 过滤停词 

    #     info = keys + "##" + images_keys+'$$$'+ abstract
    #     fmt = "files :{}\ninfo:{}".format(test_path , info)   
    #     print(fmt)    
    #     print("text4rank PASS")

    # test_textrank('./test-example/test.docx') 


    # def test_image():
    #     print("="*10)
    #     fmt = translate_zh("a street sign that reads " , english=True)
    #     print(fmt)
    #     fmt = translate_zh("a street sign that reads `` <UNK> <UNK> '' on the side of a building" , english=True)
    #     print(fmt)
        

    # def test_textrankkey():
    #     fmt =  extract_keys_lac("当日上午，在黄山之巅的玉屏楼广场、西海广场、排云楼广场、天海广场等地，黄山风景区举行升国旗仪式")
    #     print(fmt)

    

