# coding: utf-8
import glob
import io
import json
import re
from collections import Counter, defaultdict

import numpy as np
import tensorflow as tf
from gensim.models import Word2Vec
from keras.backend.tensorflow_backend import set_session
from keras.layers import *
from keras.layers.wrappers import *
from keras.models import Sequential, Model
from keras.optimizers import SGD
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
# Cap this process at 40% of GPU memory so other jobs can share the card,
# and register the session with Keras (TF1-era
# keras.backend.tensorflow_backend API -- incompatible with TF2/tf.keras).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))

# ---- Load annotated EMR files and convert inline "[{...}]" annotations
# ---- into per-character tags.
# Each line of each file becomes one sample: pure_texts[k] holds the raw
# character sequence, pure_tags[k] the parallel tag string where 'b'/'m'/'e'
# mark begin/middle/end of an annotated entity and 'n' marks non-entity.
txt_names = glob.glob('./*/EMRdata.txt')

pure_texts = []
pure_tags = []
# Hoisted out of the loop: these patterns are searched on every iteration.
entity_open = re.compile(r'\[\{')
entity_close = re.compile(r'\}\]')
for name in txt_names:
    # io.open decodes while reading on both Python 2 and 3 and closes the
    # handle; the original open(name).read().decode(...) leaked the file.
    with io.open(name, encoding='utf-8', errors='ignore') as f:
        txt = f.read().strip('\n')
    for origin_text in txt.split('\n'):
        pure_texts.append(u'')
        pure_tags.append(u'')
        # Strip "/xxx" annotation suffixes and the letters after a closing ']'.
        # NOTE(review): [a-z1-9] skips '0' -- possibly meant [a-z0-9]; kept
        # as-is to preserve behavior, confirm against the annotation scheme.
        origin_text = re.sub(r'/[a-z1-9]+', '', origin_text)
        origin_text = re.sub(r'\][a-z1-9]+', ']', origin_text)
        for text in origin_text.split(' '):
            text = list(text)
            tag = list('n' * len(text))
            # Repeatedly remove one "[{ ... }]" wrapper and tag the span it
            # enclosed. Deletions run from the highest index down so earlier
            # deletes cannot shift the later positions.
            while entity_open.search(''.join(text)):
                startIndex = entity_open.search(''.join(text)).span()[0]
                endIndex = entity_close.search(''.join(text)).span()[0]
                del text[endIndex + 1]
                del text[endIndex]
                del text[startIndex + 1]
                del text[startIndex]
                endIndex = endIndex - 2  # two chars before it were removed
                for i in range(startIndex, endIndex):
                    tag[i] = 'b' if i == startIndex else 'e' if i == (endIndex - 1) else 'm'
            pure_texts[-1] += ''.join(text)
            pure_tags[-1] += ''.join(tag)

# ---- Build the character vocabulary and id mappings. ----
min_count = 2  # characters seen fewer than this many times become OOV
word_count = Counter(''.join(pure_texts))
samples_per_epoch = sum(word_count.values())  # total character count
# Keep only frequent characters. .items() works on both Python 2 and 3;
# the original .iteritems() is Python-2 only.
word_count = Counter({w: c for w, c in word_count.items() if c >= min_count})
# Any unseen character maps to the single OOV id len(word_count)+1.
# Id 0 is never assigned: it is reserved for padding (the Embedding layer
# below uses mask_zero=True).
word2id = defaultdict(lambda: len(word_count) + 1)
# Ids 1..N in descending frequency order.
for rank, (char, _) in enumerate(word_count.most_common(), start=1):
    word2id[char] = rank

# Persist the mapping for inference-time lookups; close the handle
# (the original passed an anonymous open() to json.dump and leaked it).
with open('word2id.json', 'w') as fp:
    json.dump(word2id, fp)
# One-hot tag targets: n(one), b(egin), m(iddle), e(nd).
tag2id = {'n':[1, 0, 0, 0], 'b':[0, 1, 0, 0], 'm':[0, 0, 1, 0], 'e':[0, 0, 0, 1]}
nb_word = len(word2id) + 2  # vocabulary + padding id 0 + OOV id
embedding_size = 64
sentence_length = 150

# ---- Character embeddings + BiGRU sequence tagger. ----
# Skip-gram (sg=1) embeddings trained on the raw lines; since each element
# of pure_texts is a plain string, gensim iterates it per character, so the
# "words" here are single characters.
word2vec = Word2Vec(pure_texts, window=7, sg=1, workers=12)
# NOTE(review): np is only in scope via `from keras.layers import *`, not an
# explicit numpy import -- fragile; confirm and import numpy explicitly.
embedding_matrix = np.zeros((nb_word, word2vec.vector_size))
# word2vec.vocab / word2vec[i] is the pre-1.0 gensim access pattern (newer
# versions use word2vec.wv) -- assumes an old gensim install; TODO confirm.
for i in tqdm(word2vec.vocab) :
    embedding_matrix[word2id[i]] = word2vec[i]
model = Sequential()
# mask_zero=True: pad id 0 is masked through the recurrent layers.
model.add(Embedding(nb_word, word2vec.vector_size, weights=[embedding_matrix], mask_zero=True))
model.add(Bidirectional(GRU(32, return_sequences=True)))
# Per-timestep softmax over the 4 tag classes (n/b/m/e).
model.add(TimeDistributed(Dense(4, activation='softmax')))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()

# ---- Encode samples to id sequences, pad, and train. ----
# Characters go through word2id (unseen characters hit the defaultdict
# factory and share one OOV id); tag letters become one-hot rows via tag2id.
coded_text = [[word2id[ch] for ch in sent] for sent in pure_texts]
coded_tags = [[tag2id[t] for t in tag_seq] for tag_seq in pure_tags]
# pad_sequences left-pads/truncates to sentence_length with 0 -- the same
# value the Embedding layer masks out.
sentences = pad_sequences(coded_text, maxlen=sentence_length)
tags = pad_sequences(coded_tags, maxlen=sentence_length)

# nb_epoch is the Keras 1.x spelling of epochs -- kept because this file
# targets old Keras; rename when upgrading.
model.fit(sentences, tags, validation_split=0.3, nb_epoch=30, verbose=1, batch_size=1024)
np.save('ner_weights', model.get_weights())
model.save('ner_model', overwrite=True)