# Simple usage
import codecs
import json
import os
import sys
import subprocess
import tempfile
import time
from multiprocessing import Pool, Lock, Value

from nltk.internals import config_java, _java_options
from stanfordcorenlp import StanfordCoreNLP

# Placeholder -> real-name mapping: tokens '<NE>1' .. '<NE>9' in the raw
# text are rewritten to concrete character names before parsing.
name_list = ['Clancy', 'Frank', 'Sally', 'Marcus', 'Jess', 'Thomas', 'Rosie', 'Alan', 'Alex']
name_list = [('<NE>' + str(i + 1), name) for i, name in enumerate(name_list)]
name_dict = dict(name_list)

# Bypass any HTTP proxy for the local CoreNLP server.
os.environ['NO_PROXY'] = 'localhost'
stanford_dir = '/home/rickwwang/project_research/stanford-corenlp-full-2018-10-05'
# nlp = StanfordCoreNLP(r'/home/rickwwang/project_research/stanford-corenlp-full-2018-10-05')
# Use an existing server
nlp = StanfordCoreNLP('http://localhost', port=9000)
# Annotation request properties: tokenize/ssplit/pos/lemma/ner/coref pipeline,
# coarse 3-class NER only (fine-grained, SUTime and numeric classifiers are
# disabled for speed), whitespace tokenization (input is pre-tokenized),
# compact JSON output.
props = {'annotators': 'tokenize,ssplit,pos,lemma,ner,coref',
         'ner.model': 'edu/stanford/nlp/models/ner/english.all.3class.distsim.crf.ser.gz',
         'ner.applyFineGrained': 'false',
         'ner.useSUTime': 'false', 'ner.applyNumericClassifiers': 'false',
         'output.prettyPrint': 'false',
         # 'output.printSingletonEntities': 'true',
         # 'coref.removeSingletonClusters': 'false',
         'tokenize.whitespace': 'true',
         'pipelineLanguage': 'en', 'outputFormat': 'json', 'encoding': 'utf8', 'threads': '32'}

# Texts are truncated to at most this many whitespace-separated tokens.
MAX_SEN_LEN = 250


def contain_sep(item):
    """Return True when the line contains the ' <SEP> ' prompt/story delimiter."""
    return item.find(' <SEP> ') != -1


def clear_text(text):
    """Normalise one raw line: optionally drop the prompt before ' <SEP> ',
    substitute '<NE>k' placeholders with real names, truncate to
    MAX_SEN_LEN tokens, and trim to the last full sentence (final '.')."""
    global is_filter
    cleaned = text.strip()
    if is_filter == 'true':
        # Keep only the story part that follows the separator.
        cleaned = cleaned.split(' <SEP> ')[1]
    for placeholder, real_name in name_dict.items():
        cleaned = cleaned.replace(placeholder, real_name)
    tokens = cleaned.split(' ')
    cleaned = ' '.join(tokens[:MAX_SEN_LEN])
    if not cleaned.endswith('.'):
        # Truncation may have cut mid-sentence; drop the dangling fragment.
        cut = cleaned.rfind('.')
        if cut != -1:
            cleaned = cleaned[:cut + 1]
    return cleaned


def parse_one(text):
    result = nlp.annotate(text, properties=props)
    result = result.strip()
    try:
        r_dict = json.loads(result)
        del r_dict['sentences']
        # for s in r_dict['sentences']:
        #     del s['basicDependencies']
        #     del s['enhancedDependencies']
    except Exception as e:
        print e
        r_dict = {'data': 'null'}
    result = json.dumps(r_dict, encoding='utf-8')
    return result


def parse_texts(texts):
    """Sequentially annotate every text; print average latency every 10 items.

    Returns the list of JSON-string results in input order.
    """
    start = time.time()
    outputs = []
    for idx, text in enumerate(texts, 1):
        outputs.append(parse_one(text))
        if idx % 10 == 0:
            print (idx, (time.time() - start) / idx)
    return outputs


# Shared progress state, inherited by pool workers (fork): `lock` guards
# `counter`, the count of texts parsed so far across all processes.
lock = Lock()
counter = Value('i', 0)


def parse_one_wrap(text, start):
    result = parse_one(text)
    global lock, counter
    with lock:
        counter.value += 1
    if counter.value % 1000 == 0:
        print counter.value, (time.time() - start) / counter.value
    return result


def multi_parse_texts(texts):
    """Annotate texts in parallel with a 32-process pool; results keep input order."""
    start = time.time()
    pool = Pool(processes=32)
    pending = [pool.apply_async(parse_one_wrap, (text, start, )) for text in texts]
    pool.close()
    pool.join()
    return [job.get() for job in pending]


input_file = sys.argv[1]
is_filter = sys.argv[2]
# input_file = '/home/rickwwang/project_own/story_generation/data/test.wp_target_500'
output_file = input_file + '.parsedtxt'
# output_file = 'parsed.txt'
texts = open(input_file).readlines()

print('total number of test example', len(texts))
if is_filter == 'true':
    texts = filter(contain_sep, texts)
texts = map(clear_text, texts)
print('correct number of test example', len(texts))
print('hyp')
print(texts[0])

batch_size = 1000
begin = time.time()
for i in range(0, len(texts), batch_size):
    # parsed_texts = parse_texts(texts)
    batch = texts[i:i + batch_size]
    parsed_texts = multi_parse_texts(batch)

    with codecs.open(output_file, 'ab', encoding='utf8') as fout:
        fout.write('\n'.join(parsed_texts))
        fout.write('\n')
print 'all done', time.time() - begin

nlp.close()  # Do not forget to close! The backend server will consume a lot memory.
