#-*- encoding:utf-8 -*-
import jieba
import datetime
from jieba.analyse import *
from collections import Counter
import pandas as pd 

# jieba.load_userdict('./data/new_dict.txt')  # optional: load a custom user dictionary instead
# Whitelist of non-CJK / mixed terms to keep during filtering: these would otherwise
# be dropped by the CJK-range check (u'\u4e00' <= word <= u'\u9fa5') below.
dict_data = ['ETC', 'EMS', '4S店', 'GDP', 'HPV', 'IPTV', 'P2P', 'etc', '12345APP','12345app', '12345热线', 'ATM', 'atm', '12123APP', '12123app']


def get_stopword_list(path='data/stop_words_ch.txt'):
    """Load the Chinese stopword list, one word per line.

    Args:
        path: stopword file path (UTF-8, one word per line). Defaults to the
            project's bundled list so existing callers are unaffected.

    Returns:
        list[str]: the stopwords, with the trailing newline stripped.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    # rstrip('\n') instead of line[:-1]: the old slice chopped the final
    # character off the last word when the file had no trailing newline.
    with open(path, 'r', encoding='utf-8') as stop_words_file:
        return [line.rstrip('\n') for line in stop_words_file]

def count_words(content):
    """Segment *content* with jieba and return a word -> frequency dict.

    Zero-length tokens are skipped; every other token is tallied.
    """
    tokens = (tok for tok in jieba.cut(content) if len(tok) >= 1)
    # dict() keeps the original plain-dict contract (a bare Counter would
    # return 0 for missing keys instead of raising KeyError).
    return dict(Counter(tokens))


def tf_idf_word_dev(content, num, content2id_map):
    """Extract the top-`num` TF-IDF keywords and their related work-order ids.

    Args:
        content: list of work-order text strings.
        num: number of keywords to extract.
        content2id_map: mapping of each text string to its work-order id.

    Returns:
        list[dict]: one dict per keyword with keys
            "kw"  - the keyword,
            "ids" - de-duplicated ids of work orders whose text contains it,
            "num" - len of "ids".
    """
    str_content = '\n'.join(content)
    cleaned = str_content.replace(" ", "").replace("\n", "").replace("\r", "").replace("&nbsp;", "")
    cutwords = jieba.cut(cleaned, cut_all=False)
    # set: O(1) membership test per token (the original scanned a list).
    stopwords = set(get_stopword_list())

    kept = []
    for word in cutwords:
        # Keep CJK words or whitelisted terms; drop stopwords, 1-char tokens
        # and stray tabs.
        if (u'\u4e00' <= word <= u'\u9fa5') or (word in dict_data):
            if word not in stopwords and len(word) > 1 and word != '\t':
                kept.append(word)
    # join instead of the original quadratic `words += ...` concatenation;
    # keeps the trailing '/' the original produced.
    words = "".join(w + "/" for w in kept)

    # Keyword extraction with jieba's default IDF file.
    keywords = jieba.analyse.extract_tags(words, topK=num, withWeight=False)

    result = []
    for keyword in keywords:
        # Compute the de-duplicated id set once (the original built a list,
        # then called set() on it twice; its trailing `continue` was a no-op).
        ids = {content2id_map[item] for item in content if keyword in item}
        result.append({"kw": keyword, "ids": list(ids), "num": len(ids)})
    return result

def tf_idf_word(content, num):
    """Extract the top-`num` TF-IDF keywords with their raw frequencies.

    Args:
        content: iterable of work-order text strings (joined with '\n').
        num: number of keywords to extract.

    Returns:
        dict: keyword -> occurrence count in the joined text (0 if the
        keyword does not appear as a standalone segment).
    """
    str_content = '\n'.join(content)
    cleaned = str_content.replace(" ", "").replace("\n", "").replace("\r", "").replace("&nbsp;", "")
    cutwords = jieba.cut(cleaned, cut_all=False)
    # set: O(1) membership test per token (the original scanned a list).
    stopwords = set(get_stopword_list())

    kept = []
    for word in cutwords:
        # Keep CJK words or whitelisted terms; drop stopwords, 1-char tokens
        # and stray tabs.
        if (u'\u4e00' <= word <= u'\u9fa5') or (word in dict_data):
            if word not in stopwords and len(word) > 1 and word != '\t':
                kept.append(word)
    words = "".join(w + "/" for w in kept)

    keywords = jieba.analyse.extract_tags(words, topK=num, withWeight=False)

    # Hoisted out of the loop: the original re-segmented and re-counted the
    # whole text once per keyword (O(k*n)). Counting once is equivalent.
    count = count_words(str_content)
    # .get(kw, 0): str_content keeps spaces/newlines, so jieba may segment it
    # differently from the cleaned text — the original count[keyword] could
    # raise KeyError. The original's `else: result = {}` branch also silently
    # discarded all previously collected keywords; both defects are fixed here.
    return {kw: count.get(kw, 0) for kw in keywords}
if __name__ == '__main__':
    print("开始 %s" % datetime.datetime.now())
    result = []
    # sheet_name=[...] makes read_excel return {sheet_name: DataFrame}.
    df_1 = pd.read_excel('/root/worksapce/jingbo/text2vec_zhipai/user_data/9类事项工单清单5.31.xlsx',sheet_name=['乱晾晒','乱设摊','跨门营业','街头散发小广告','装修扰民','加梯噪声','居住证咨询','养老','老年餐'],usecols=[6])
    # df_1 = pd.read_excel('12345_data/网1000件.xlsx',usecols=[1])

    for sheet_name in df_1:
        # ''.join is O(n); the original `sen = sen + text[0]` loop was quadratic.
        sen = ''.join(row[0] for row in df_1[sheet_name].values.tolist())
        # Pass [sen], not sen: tf_idf_word joins its argument with '\n', so a
        # bare string would get a newline inserted between every character,
        # which wrecks the word-frequency segmentation.
        keyword = tf_idf_word([sen], 100)
        # tf_idf_word returns a dict; dicts don't support slicing, so the
        # original `keyword[:5]` raised TypeError. Take the first 5 items.
        result.append({"class": sheet_name, "keyword": list(keyword.items())[:5]})
    pf = pd.DataFrame(result)
    # `encoding=` was removed from DataFrame.to_excel in pandas 2.0 (it was a
    # no-op for xlsx anyway).
    pf.to_excel('keyword_0621.xlsx', index=False)
    # print(tf_idf_word(sen,10))
    print("结束 %s" % datetime.datetime.now())