#!/home/data/liuchang/anaconda3/envs/py3/bin/python
# -*- coding:utf-8 -*-
import os 
import re
import sys
import time
import jieba
import gensim   
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
import multiprocessing
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.optimizers import Adam
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')# 忽略警告

from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.regularizers import l2
from keras.callbacks import *
from keras.optimizers import *
from keras.callbacks import *
from keras import backend as K

# Build the BiLSTM punctuation-tagging model and restore its trained weights.
# NOTE(review): paths are hard-coded relative to the working directory —
# the script must be launched from the project root for these to resolve.
from src.model.txt2p import txt2p_bilstm_model
model = txt2p_bilstm_model()
model.load_weights('./best_model.weights')
# Pretrained word2vec embedding used to vectorize each segmented word.
# Its vector size is presumably 256 (matches the zero vector in print_punc) — TODO confirm.
w2v_model = gensim.models.Word2Vec.load('./data/word2vec_modelfile/word2vec_wx')
def print_punc(sentence):
    """Predict and print a punctuated version of *sentence*.

    The sentence is segmented with jieba, each word is embedded via the
    module-level ``w2v_model`` (out-of-vocabulary words fall back to a zero
    vector), and the BiLSTM ``model`` tags each word with a punctuation
    class, which is appended after the word in the printed output.

    Args:
        sentence: Raw (unpunctuated) Chinese text.

    Returns:
        None. The punctuated sentence is written to stdout.
    """
    # Tokenize once and reuse the list (the original joined then re-split twice).
    words = jieba.lcut(sentence, cut_all=False)
    # Fallback embedding for OOV words; 256 assumes the word2vec vector
    # size — TODO confirm against the loaded model.
    zero_vec = np.zeros(256)
    xpara = []
    for w in words:
        try:
            xpara.append(w2v_model[w])
        except KeyError:  # word absent from the word2vec vocabulary
            xpara.append(zero_vec)
    x = np.array([xpara])  # shape: (1, n_words, 256)
    tag_scores = model.predict(x, verbose=0)
    # Class index -> punctuation mark (0 means "no punctuation after this word").
    punc_dict = {0: '', 1: '，', 2: '。', 3: '！', 4: '？', 5: '、'}
    # zip truncates safely if the model emits more timesteps than words,
    # where the original's index loop could raise IndexError.
    pieces = [w + punc_dict[int(np.argmax(scores))]
              for w, scores in zip(words, tag_scores[0])]
    print(''.join(pieces))


# Interactive REPL: read a sentence, print its punctuated form, repeat.
# Ctrl+C / Ctrl+D now exit cleanly; the original bare `except:` swallowed
# KeyboardInterrupt raised inside print_punc, making the loop hard to kill,
# and EOF at input() crashed with a traceback.
while True:
    try:
        sentence = input("输入待断句的部分：\n")
    except (EOFError, KeyboardInterrupt):
        break
    try:
        print_punc(sentence)
    except Exception as exc:
        # Keep the REPL alive on prediction errors, but report them
        # instead of silently dropping the input.
        print(f"Error: {exc}", file=sys.stderr)
        continue



  
