# -*- coding: utf-8 -*-
'''
Created on 2017年4月9日

@author: ZhuJiahui
'''
import os
import time
import numpy as np
from operator import itemgetter
from nlp_utils.word_frequency import count_tf
from global_info.global_nlp import GlobalNLP
from file_utils.file_writer import quick_write_1d_to_text
from file_utils.file_reader import read_to_2d_list_gbk, read_to_1d_list_gbk
from nlp_utils.keyword_extraction import get_cn_global_keyword

def select_feature_words(text_corpus, word_list, write_filename):
    """Score candidate feature words and write the top N to a text file.

    Each candidate word is scored as ``log(term_frequency) * tag_weight *
    factor`` where ``tag_weight`` depends on the word's POS tag and
    ``factor`` is 1.0 for global keywords (0.5 otherwise). The words are
    sorted by score descending and the best N are written out as
    "word score" lines.

    :param text_corpus: 2-D list of tagged tokens, each formatted as
        "word<CN_WORD_INNER_DELIMITER>tag".
    :param word_list: candidate (tagged) words; assumed to be aligned with
        the order of ``count_tf``'s output so scores zip correctly.
    :param write_filename: destination path for the selected feature words.
    """
    combined_wl_tf = count_tf(text_corpus, word_list)

    # Strip POS tags so the keyword extractor sees plain words.
    text_corpus_without_tag = []
    for document in text_corpus:
        text_corpus_without_tag.append(
            [token.split(GlobalNLP.CN_WORD_INNER_DELIMITER)[0] for token in document])

    # A set makes the per-word membership test below O(1) instead of O(k).
    global_keywords = set(get_cn_global_keyword(text_corpus_without_tag, 5))

    N = 5000  # number of feature words to keep

    # Weight assigned to each part-of-speech tag; tags absent from this
    # table score 0 (e.g. punctuation-like or unexpected tags).
    score_dict = {"nr":2.0, "nr1":1.0, "nr2":1.5, "nrt":2.0, "nrf":2.0, "ns":2.0, "nsf":2.0, "nt":2.0, \
                   "nz":3.0, "nl":1.0, "ng":1.0, "n":1.5, "t":0.5, "tg":0.5, "s":0.3, "f":0.3, "j":0.5, \
                   "v":0.7, "vd":0.6, "vn":0.9, "vshi":0.0, "vyou":0.0, "vf":0.3, "vx":0.3, "vi":0.7, \
                   "vl":0.3, "vg":0.5, "a":0.6, "ad":0.3, "an":0.9, "ag":0.5, "al":0.3, "b":0.3, "bl":0.2, \
                    "z":0.9, "zg":0.3, "r":0.3, "rr":0.3, "rz":0.3, "rzt":0.3, "rzs":0.3, "rzv":0.3, "ry":0.2, \
                    "ryt":0.2, "rys":0.2, "ryv":0.2, "rg":0.2, "m":0.2, "mq":0.5, "q":0.6, "qv":0.7, "qt":0.7, \
                    "d":0.4, "p":0.0, "pba":0.0, "pbei":0.0, "c":0.0, "cc":0.0, "u":0.0, "ug":0.0, "e":0.0, \
                    "y":0.0, "o":0.0, "h":0.0, "k":0.0, "x":0.1, "xx":0.0, "xu":0.9, "w":0.0, "l":0.6, "i":0.6, \
                    "g":0.0, "vq":0.0, "nrfg":1.0, "dg":0.0, "mg":0.2, "yg":0.0, "eng":0.1}

    word_score = []
    for word_with_tag, word_frequency in combined_wl_tf:
        word_entity = word_with_tag.split(GlobalNLP.CN_WORD_INNER_DELIMITER)[0]
        word_tag = word_with_tag.split(GlobalNLP.CN_WORD_INNER_DELIMITER)[-1]

        # Global keywords keep full weight; everything else is halved.
        # English tokens ("eng") never receive the keyword bonus.
        if word_entity in global_keywords and word_tag != "eng":
            factor = 1.0
        else:
            factor = 0.5

        tag_weight = score_dict.get(word_tag)
        if tag_weight is None:
            # Unknown POS tag: contribute nothing (same as the original
            # KeyError fallback).
            word_score.append(0.0)
        else:
            word_score.append(np.log(float(word_frequency)) * tag_weight * factor)

    # Sort by score, descending, and keep only the top N entries.
    ww = sorted(zip(word_list, word_score), key=itemgetter(1), reverse=True)
    result_all = [word + " " + str(score) for word, score in ww[:N]]

    quick_write_1d_to_text(write_filename, result_all)

if __name__ == '__main__':
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # time.time() measures wall-clock elapsed time on both Python 2 and 3.
    start = time.time()
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + u'dataset/sogou/train_segment_pos.txt'
    read_filename2 = root_directory + u'dataset/sogou/train_all_word_list.txt'
    write_filename = root_directory + u'dataset/sogou/train_feature_words.txt'

    # GBK-encoded inputs: one document per line, tokens separated by spaces.
    text_corpus = read_to_2d_list_gbk(read_filename1, " ")
    word_list = read_to_1d_list_gbk(read_filename2)

    select_feature_words(text_corpus, word_list, write_filename)

    print('Total time %f seconds' % (time.time() - start))
    