# -*- encoding:utf-8 -*-
from __future__ import print_function
import tensorflow as tf
import keras
import os
import pickle as pkl
from collections import Counter
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
from stanfordcorenlp import StanfordCoreNLP
import jieba
import sys
import importlib

# Py2 holdover: reload(sys) was paired with sys.setdefaultencoding('utf8')
# back then; under Python 3 this reload has no useful effect.
importlib.reload(sys)
# NOTE(review): rebinds the imported `stopwords` corpus module to a plain
# list of English stop words — the module object is shadowed from here on.
stopwords = stopwords.words('english')
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
# Treat punctuation as stop words as well.
stopwords += english_punctuations
# Sequence-length / embedding / vocabulary hyperparameters.
# NOTE(review): only raw_data_path is consumed in this file — the rest are
# presumably read by the model-building code elsewhere; confirm.
max_src_len = 40
max_tgt_len = 40
src_embedding_size = 200
tgt_embedding_size = 200
train_size = 0.8
max_src_vocab_size = 60000
max_tgt_vocab_size = 5000
# Root directory holding the raw 'Bilingual' and 'Testing' corpora.
raw_data_path = '../data'

def padding(data, max_len):
    """Pad or truncate every sequence in `data` to length `max_len`.

    Both padding and truncation happen at the tail ('post'); pad value is
    the Keras default 0, which matches the <PAD> id of the vocabulary.
    """
    pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
    return pad_sequences(data, max_len, padding='post', truncating='post')

# Reads the raw corpora from disk and returns four newline-joined strings
# (one sentence per line): train/test English and Chinese halves.
def data_preprocess():
    """Load the raw English-Chinese parallel corpora from disk.

    The source files interleave the two languages line by line: even lines
    (0-based) are English, odd lines are the Chinese translations.  The
    'Bilingual' directory holds the training domains, 'Testing' holds the
    held-out data.

    Returns:
        Four newline-joined strings:
        (train_en, train_ch, test_en, test_ch).
    """
    train_en, train_ch = [], []
    test_en, test_ch = [], []

    def read_pairs(path, en_list, ch_list):
        # Even lines go to the English list, odd lines to the Chinese one.
        with open(path, encoding='utf-8') as fr:
            for i, line in enumerate(fr):
                (en_list if i % 2 == 0 else ch_list).append(line.strip())

    sub_dir_names = ['Education', 'Laws', 'Microblog', 'News',
                     'Science', 'Spoken', 'Subtitles', 'Thesis']
    for name in sub_dir_names:
        read_pairs(os.path.join(raw_data_path, 'Bilingual', name, 'Bi-' + name + '.txt'),
                   train_en, train_ch)
    read_pairs(os.path.join(raw_data_path, 'Testing', 'Testing-Data.txt'),
               test_en, test_ch)

    return ('\n'.join(train_en), '\n'.join(train_ch),
            '\n'.join(test_en), '\n'.join(test_ch))

def vocab(data, topK=None):
    """Build word<->id mappings from whitespace-tokenized sentences.

    Ids 0-3 are reserved for <PAD>, <UNK>, <S>, </S>; real words get ids
    from 4 upward, ordered by descending frequency.

    Args:
        data: iterable of sentences (strings of space-separated tokens).
        topK: if given, keep only the topK most frequent words.

    Returns:
        (word2id, id2word) dicts mapping word -> id and id -> word.
    """
    counts = Counter()
    for sentence in data:
        counts.update(sentence.split())
    # most_common(None) returns all entries, so topK=None needs no branch.
    # Building the dict directly also fixes the original, which crashed on
    # an empty corpus (`zip(*[])` cannot be unpacked into two values).
    word2id = {word: i + 4 for i, (word, _) in enumerate(counts.most_common(topK))}
    word2id['<PAD>'] = 0
    word2id['<UNK>'] = 1
    word2id['<S>'] = 2
    word2id['</S>'] = 3
    id2word = {i: word for word, i in word2id.items()}
    return word2id, id2word
# Vocabulary generation based on word-frequency counts: the most frequent
# words receive the smallest ids (after the four reserved tokens).

# Segment each sentence of a corpus into space-joined tokens.
def segment(corpus, tokenizer, savepath=None):
    """Tokenize a newline-separated corpus into space-joined token strings.

    Fixes two defects in the original: the loop body was tab-indented
    inside a space-indented suite (a TabError on Python 3), and when
    `savepath` was given the function implicitly returned None.

    Args:
        corpus: a single string; sentences are separated by '\\n'.
        tokenizer: callable mapping one sentence to an iterable of tokens
            (e.g. jieba.cut, or str for character-level segmentation).
        savepath: optional path; if given, the result is also pickled there.

    Returns:
        list of tokenized sentences, one ' '-joined string per sentence
        (now returned in both branches, which is backward-compatible).
    """
    tokenized_corpus = [' '.join(tokenizer(sentence)) for sentence in corpus.split('\n')]
    if savepath:
        with open(savepath, 'wb') as fw:
            pkl.dump(tokenized_corpus, fw)
    return tokenized_corpus

def transform(data, word2id):
    """Convert space-separated sentences into lists of token ids.

    Tokens missing from `word2id` fall back to id 1, the <UNK> token.

    Args:
        data: iterable of tokenized sentences (space-joined strings).
        word2id: dict mapping token -> integer id.

    Returns:
        list of id lists, one per input sentence.
    """
    unk_id = 1
    return [
        [word2id.get(token, unk_id) for token in sentence.split()]
        for sentence in data
    ]

# ---- Pipeline driver (runs at import time) ----
# segment() pickles its output and returns None when a savepath is given,
# so the four variables below become None; the pickle reloads further down
# restore them (and make the script restartable from that point).
# NOTE(review): jieba is a *Chinese* word segmenter but is applied to the
# English corpus here, while the Chinese corpus is split per character —
# confirm this pairing is intentional (nltk's word_tokenize is imported
# above but never used).
train_en_corpus, train_ch_corpus, test_en_corpus, test_ch_corpus = data_preprocess()
train_en_corpus = segment(train_en_corpus, jieba.cut, './data/preprocess/train_en_segment.pkl')
train_ch_corpus = segment(train_ch_corpus, lambda k: iter(k.strip()), './data/preprocess/train_ch_segment.pkl')
test_en_corpus = segment(test_en_corpus, jieba.cut, './data/preprocess/test_en_segment.pkl')
test_ch_corpus = segment(test_ch_corpus, lambda k: iter(k.strip()), './data/preprocess/test_ch_segment.pkl')



# Reload the segmented corpora from the pickles just written above.
with open('./data/preprocess/train_en_segment.pkl', 'rb') as fr:
    train_en_corpus = pkl.load(fr)
with open('./data/preprocess/train_ch_segment.pkl', 'rb') as fr:
    train_ch_corpus = pkl.load(fr)


with open('./data/preprocess/test_en_segment.pkl', 'rb') as fr:
    test_en_corpus = pkl.load(fr)
with open('./data/preprocess/test_ch_segment.pkl', 'rb') as fr:
    test_ch_corpus = pkl.load(fr)

# Vocabularies are built from the training corpora only.
# NOTE(review): no topK is passed, so max_src_vocab_size /
# max_tgt_vocab_size defined above are not applied here — confirm.
en_word2id, en_id2word = vocab(train_en_corpus)
ch_word2id, ch_id2word = vocab(train_ch_corpus)


# Map training sentences to id sequences; OOV tokens become <UNK> (id 1).
# NOTE(review): the test corpora stay as token strings, not id lists —
# presumably transformed by downstream code; verify.
train_en_corpus = transform(train_en_corpus, en_word2id)
train_ch_corpus = transform(train_ch_corpus, ch_word2id)



# Persist: vocabularies alone, the full dataset, and a 100-sentence demo
# slice for quick experiments.
with open('./data/preprocess/vocab_dict.pkl', 'wb') as fw:
    pkl.dump([en_word2id, en_id2word, ch_word2id, ch_id2word], fw)
with open('./data/preprocess/vocab_dict_and_corpus.pkl', 'wb') as fw:
    pkl.dump([en_word2id, en_id2word, ch_word2id, ch_id2word, train_en_corpus, train_ch_corpus, test_en_corpus,
              test_ch_corpus], fw)
with open('./data/preprocess/demo_vocab_dict_and_corpus.pkl', 'wb') as fw:
    pkl.dump([en_word2id, en_id2word, ch_word2id, ch_id2word, train_en_corpus[:100], train_ch_corpus[:100],
              test_en_corpus[:100], test_ch_corpus[:100]], fw)

print("Finished!")
