import pandas as pd
import save
import collections
import jieba
import os
import jieba.analyse


#a=jieba.analyse.extract_tags(sentence, topK = 20, withWeight = False, allowPOS = ())
# sentence:待提取的文本。
#  topK:返回几个 TF/IDF 权重最大的关键词，默认值为20。
#  withWeight:是否一并返回关键词权重值，默认值为False。
#  allowPOS:仅包括指定词性的词，默认值为空，即不进行筛选。如果还想把量词删去，我比较low的想法是靠正则表达式，带数字的或者表示数字的就去掉

# 转换数据类型为int：df[A].astypes(int)
# 查看各列数据类型：df.dtypes

#载入停用词表
#载入停用词表
def stopwordslist(filepath):
    """Load a stop-word list from *filepath* (one word per line, UTF-8).

    Returns a list of stripped words, in file order.
    """
    # Context manager guarantees the file handle is closed; the original
    # opened the file without closing it.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]

#将传入内容中小于两个字的词加入到停用词列表
def limit_stop_words(seg_list):
    """Collect the distinct tokens of *seg_list* that are shorter than
    two characters, for use as stop words.

    Returns them as a list (order follows set iteration, so it is
    unspecified, matching the original behavior).
    """
    return [token for token in set(seg_list) if len(token) < 2]

#从txt导入的词表中删除停用词
def del_stop_words_txt(cantent, path):
    """Filter the word list *cantent*: drop words shorter than two
    characters and words listed in the stop-word file at *path*.

    Returns the surviving words as a list, preserving order.
    """
    # Load stop words into a set: O(1) membership per word instead of
    # scanning the whole stop-word list for every token.
    stopwords = set(stopwordslist(path))
    return [x for x in cantent if len(x) > 1 and x not in stopwords]

#从词表中删除停用词
def del_stop_words(seg_list, stop_list):
    """Return the words of *seg_list* that do not appear in *stop_list*,
    preserving order and duplicates.
    """
    # Hoist the stop words into a set once: O(1) membership per word
    # instead of O(len(stop_list)) per word with a list scan.
    stop_set = set(stop_list)
    return [word for word in seg_list if word not in stop_set]

#统计词频，并返回高频词
def word_counts(object_list, top=100):
    """Count token frequencies in *object_list* and return the *top*
    most common as a list of (token, count) pairs, most frequent first,
    e.g. [('分析', 7), ('数据', 6), ('业务', 5)].
    """
    return collections.Counter(object_list).most_common(top)

#将分词结果写入DataFrame中
def wordcount_to_frame(word_count):
    """Convert a [(word, count), ...] list into a DataFrame with columns
    '分词' (word) and '词频' (frequency), one row per pair.
    """
    # Build the frame in one constructor call. The original pre-allocated an
    # empty frame and filled it via chained indexing (frame['col'][i] = ...),
    # which is deprecated in pandas and may silently assign to a copy
    # (SettingWithCopyWarning).
    return pd.DataFrame(word_count, columns=['分词', '词频'])

#将word_count写入txt文件中
def write_txt_word_count(write_name, word_count, only_short=False):
    """Write a [(word, count), ...] list to the text file *write_name*.

    only_short=False -> one "word:count" line per pair;
    only_short=True  -> only the word on each line.
    """
    # Explicit UTF-8: the original relied on the platform default encoding,
    # which raises on Chinese text where the default is not UTF-8 (e.g.
    # Windows cp1252/gbk). `with` also guarantees the file is closed even
    # if a write fails. The original additionally rebound the `word_count`
    # name it was iterating over, which was confusing although harmless.
    with open(write_name, "w", encoding="utf-8") as file:
        for word, count in word_count:
            if only_short:
                file.write(word + '\n')
            else:
                file.write(word + ':' + str(count) + '\n')

#将frame中某列的内容提取出来组合在一起
def column_combine_str(data, column_name, split):
    """Join the str-typed cells of column *column_name* in *data* with the
    separator *split* and return the combined string.

    Non-string cells (NaN, numbers, ...) are skipped entirely.
    """
    # Keep the exact `type(...) == str` check of the original so that str
    # subclasses are treated identically.
    text_cells = [cell for cell in data[column_name] if type(cell) == str]
    return split.join(text_cells)

#使用结巴分词进行分词，获取出现频率top的词写入DataFrame
def jieba_word_count(content, top=500, stops=None):
    """Segment *content* with jieba (precise mode), drop tokens shorter
    than two characters plus any extra stop words in *stops*, and return
    the *top* most frequent tokens as a DataFrame with columns 分词/词频.
    """
    # `stops=None` sentinel: the original used a mutable default argument
    # (`stops=[]`), which is a classic Python pitfall (shared across calls).
    if stops is None:
        stops = []
    seg_list = jieba.lcut(content, cut_all=False)  # precise-mode segmentation
    stop_list = limit_stop_words(seg_list)  # tokens shorter than 2 chars
    stop_list = stop_list + stops  # caller-supplied extra stop words
    object_list = del_stop_words(seg_list, stop_list)
    word_count = word_counts(object_list, top=top)  # [('分析', 7), ...] pairs
    frame = wordcount_to_frame(word_count)
    return frame

#使用结巴分词进行分词，获取出现频率top的词并写入到txt中
def jieba_word_count_txt(content, write_name, top=500, only_short=False, for_search=True):
    """Segment *content* with jieba, drop tokens shorter than two
    characters, and write the *top* most frequent tokens to the text
    file *write_name* (format controlled by *only_short*).

    for_search=True uses jieba's search-engine mode (finer granularity);
    otherwise precise mode is used.
    """
    if for_search:
        tokens = jieba.lcut_for_search(content)
    else:
        tokens = jieba.lcut(content, cut_all=False)
    short_stops = limit_stop_words(tokens)  # tokens shorter than 2 chars
    kept = del_stop_words(tokens, short_stops)
    top_pairs = word_counts(kept, top=top)
    write_txt_word_count(write_name, top_pairs, only_short=only_short)

#统计frame中指定列的词频并分页储存到excel中
def excel_word_count(data, columns, save_name):
    """For each column name in *columns*, run a jieba word-frequency count
    over that column's content in *data* and save each resulting frame as
    its own sheet of the excel file *save_name*.
    """
    sheets = {}
    for column in columns:
        # Cells may be int/float as well as str: normalise every value to
        # str before joining the whole column into one text blob.
        values = list(data[column].values)
        content = ''.join([str(value) for value in values])
        sheets[column + '词频'] = jieba_word_count(content)
    save.save_excel_sheets(sheets, save_name)

#传入内容，用结巴分词进行分词，删除长度小于2的词，成可以直接用于制作词云的内容，保存到txt文本
def wordclod_content(content, save_name='500高频词_空格连接.txt'):
    """Segment *content* with jieba (search-engine mode), drop tokens
    shorter than two characters, and save the remaining tokens joined by
    single spaces to the text file *save_name* — the format expected by
    word-cloud generators.

    *save_name* defaults to the previously hard-coded filename, so
    existing callers are unaffected.
    """
    seg_list = jieba.lcut_for_search(content)
    stop_list = limit_stop_words(seg_list)  # tokens shorter than 2 chars
    object_list = del_stop_words(seg_list, stop_list)
    content = " ".join(object_list)
    save.save_txt(save_name, content)

#传入内容，用结巴分词进行分词，删除长度小于2的词，成可以直接用于制作词云的内容
def jieba_for_wordcloud(content):
    """Segment *content* with jieba (search-engine mode), remove tokens
    shorter than two characters, and return the rest joined by single
    spaces — ready to feed directly to a word-cloud generator.
    """
    tokens = jieba.lcut_for_search(content)
    short_stops = limit_stop_words(tokens)  # tokens shorter than 2 chars
    kept = del_stop_words(tokens, short_stops)
    return " ".join(kept)

