#encoding:utf-8
import argparse
import os
import shutil
import threading
import time
from datetime import datetime, timedelta
#import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
from time import ctime

from model.configner import ConfigNer
from model.ner_model import NERModel
from ner import Ner_recognition, product_line, get_tags_dict
from train_data import genera_data
#data_root_path = r'/home/yzfu/PycharmProjects/tread/report_data/'
#data_output_path=r'/home/yzfu/PycharmProjects/tread/result/'
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"

parser = argparse.ArgumentParser()
parser.add_argument('--analysed_html', dest='analysed_html', type=bool, default=True,
                    help='whether html text is analysed or not')
parser.add_argument('--home_dir_', dest='home_dir_', type=str, default='/home/yzfu/nlp/kg_abc/fyz_kg_nlp/abc_project_data',
                    help='path to the root dir')
parser.add_argument('--html_dataset', dest='html_dataset', type=str, default='/analysed_html/',
                    help='path to the html_dataset')
parser.add_argument('--result_dataset', dest='result_dataset', type=str, default='/ner_html_result/',
                    help='path to the result_dataset')
parser.add_argument('--tags_path', dest='tags_path', type=str, default='/tags.txt',
                    help='path to the tags dict')
parser.add_argument('--mark_path_', dest='mark_path_', type=str, default='mark.txt',
                        help='path to the mark tags')

args = parser.parse_args()
params = vars(args)
home_dir_ = params['home_dir_']
analysed_html = params['analysed_html']
html_dataset = home_dir_+params['html_dataset']
result_dataset = home_dir_+params['result_dataset']
tags_path = home_dir_+params['tags_path']
mark_path_ = params['mark_path_']
mark_path = home_dir_+mark_path_
#creat model
config = ConfigNer()
model = NERModel(config)
model.build()
model.restore_session(config.dir_model)
ner = Ner_recognition()

print ("model restore complete")

mark_dict = genera_data.fyz_get_mark_info(mark_path)

def get_file_name(data_root_path,file_type):
    file_name=[]
    for root ,dir, files in os.walk(data_root_path):
        for file in files :
            if file_type in file :
                file_name.append(file)
    return file_name

def product_str(line):
    line = line.strip()
    if line != "":
        tag_result = product_line(ner=ner, model=model, line=line)
        return tag_result

def product_file(data_file):
    print ("The current threading {Thread} start at {time}").format(Thread=threading.currentThread(), time=ctime())
    tags_file = open(tags_path, 'r')
    tags_dict = get_tags_dict(tags_file)
    input_file = html_dataset+data_file
    print input_file
    output_file = result_dataset+data_file
    tags_result = []
    output=""
    input_file = open(input_file, 'r')
    output_file = open(output_file, 'w')
    for line in input_file.readlines():
        line = line.strip()
        if len(line)>25:#句子长度小于25过滤
            try:
                if line != "":
                    tag_result = product_line(ner=ner,model=model,line=line,mark_dict=mark_dict)
                    #print tags_result
                    tags_result.append(tag_result)
            except IndexError :
                continue

    for line in tags_result:
        for word in line :
            word_list = word.split('/')
            output=output+word_list[0].encode('utf-8')+" "+word_list[-1]+'\n'
            if word_list[-1] in tags_dict.keys():
                if word_list[0] not in tags_dict[word_list[-1]]:
                    tags_dict[word_list[-1]].append(word_list[0])
        output=output+'\n'
    sorted(tags_dict.items(), key=lambda e: e[0], reverse=True)
    for key in tags_dict:
        if key=='rel' or key=='events' or  key=='O' or key=='time':
            continue
        else:
            output_file.write(key + ":" + " ")
            output_file.writelines(" ".join(tags_dict[key])+'\n')
    output_file.close()
    print ("The current threading {Thread} end at {time}").format(Thread=threading.currentThread(), time=ctime())


def work():
    file_type = '.txt'
    print ("The current threading {Thread}").format(Thread=threading.currentThread())
    file_name_list = get_file_name(html_dataset, file_type)
    #for file in file_name_list:
        #product_file(file)
    pool = ThreadPool(processes=1)
    pool.map(product_file, file_name_list)
    pool.close()
    pool.join()


def runTask(func,week=0,day=0, hour=0, min=0, second=0):
   # Init time
   now = datetime.now()
   strnow = now.strftime('%Y-%m-%d %H:%M:%S')
   print "now:",strnow
   # First next run time
   period = timedelta(days=day, weeks=week,hours=hour, minutes=min, seconds=second)
   next_time = now + period
   strnext_time = next_time.strftime('%Y-%m-%d %H:%M:%S')

   print "next run:",strnext_time
   print "start work: %s" % strnow
   # Call task func
   func()
   end = datetime.now()
   print "task done."
   print end-now

   while True:
       # Get system current time
       iter_now = datetime.now()
       iter_now_time = iter_now.strftime('%Y-%m-%d %H:%M:%S')
       if str(iter_now_time) == str(strnext_time):
           # Get every start work time
           print "start work: %s" % iter_now_time
           # Call task func
           func()
           print "task done."
           # Get next iteration time
           iter_time = iter_now + period
           strnext_time = iter_time.strftime('%Y-%m-%d %H:%M:%S')
           print "next_iter: %s" % strnext_time
           # Continue next iteration
           continue

# runTask(work, min=0.5)

def main():
    if os.path.isdir(result_dataset):
        shutil.rmtree(result_dataset)
    os.mkdir(result_dataset)
    runTask(work, week=100, day=0, hour=0, min=0, second=0)

if __name__ == "__main__":
    main()
