# 导入相关库
import pandas as pd
import jieba

# Load the comment dataset (UTF-8 CSV). Path is relative to this script.
data = pd.read_csv('../../data/data-完整连城诀数据集.csv', encoding='utf-8')

# Preview the first rows. A bare `data.head()` expression only echoes in a
# notebook/REPL and is a silent no-op in a script, so print it explicitly.
print(data.head())


# Tokenize each comment with jieba, then de-duplicate the tokens per row
# (set() drops repeats but also loses the original token order).
# NOTE(review): this 'cut' column is recomputed below (stopword-filtered,
# without de-dup), so this pass only serves quick exploration.
data['cut'] = data['comment'].apply(lambda x : list(set(jieba.cut(x))))


# Show the de-duplicated tokens of the first comment.
print(data['cut'].loc[0])



# data['cut'] = data['comment'].apply(lambda x : list(jieba.cut(x)))  # alternative: tokenize WITHOUT de-duplication
# data.head()
# print(data['cut'].loc[14])


#----------------------------------------- Apply stopwords -----------------------------------------------

# Load the stopword list (one word per line, no header row).
stopwords = pd.read_csv('../../data/StopwordsCN.txt', encoding='utf8', names=['stopword'], index_col=False)
stopwords.head()
# Build a set for O(1) membership tests — the original list made the
# `not in` check inside the per-token filter below O(n) per token.
stop_list = set(stopwords['stopword'])
# Re-tokenize every comment and drop stopwords.
# NOTE: this overwrites any earlier 'cut' column with the filtered tokens.
data['cut'] = data['comment'].apply(lambda x : [i for i in jieba.cut(x) if i not in stop_list])
data.head()


# Flatten the per-comment token lists into a single flat list of words.
words = [token for tokens in data['cut'] for token in tokens]

#---------------------------------------- Build a [word],[cnt] frame for frequency counting -----------------------------------------------
# One row per token occurrence; cnt=1 so a count-aggregation yields frequencies.
corpus = pd.DataFrame(words, columns=['word'])
corpus['cnt'] = 1

# Group by word and count occurrences, most frequent first.
g = corpus.groupby(['word']).agg({'cnt': 'count'}).sort_values('cnt', ascending=False)

print("--------------------------输出词频-------------------------------------")
# The banner above announced the frequencies but the actual print had been
# commented out — show the top 10 words as intended.
print(g.head(10))

# Persist the full frequency table (word + count, space-separated).
g.to_csv('./jieba_result.csv', index=True, sep=' ')