import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import jieba.posseg as psg

# Load the review table and POS-tag every description with jieba.
my_table = pd.read_csv('y_type.csv', encoding='utf-8')
content = my_table['描述']


# Simple segmentation helper: one (word, POS flag) pair per token.
def worker(sentence):
    """Segment one description into a list of (word, flag) tuples."""
    return [(token.word, token.flag) for token in psg.cut(str(sentence))]


seg_word = content.apply(worker)

# Number of tokens produced for each description.
n_word = seg_word.apply(len)

# Sentence id for every token: the 1-based row index, repeated once per
# token of that row. Flattened in a single comprehension pass instead of
# sum(list_of_lists, []), which is quadratic in the total token count.
index_content = [idx + 1
                 for idx, count in zip(seg_word.index, n_word)
                 for _ in range(count)]

# Flatten the per-row token lists into one sequence of (word, flag) pairs.
seg_word = [pair for pairs in seg_word for pair in pairs]
# Words
word = [pair[0] for pair in seg_word]
# POS tags
nature = [pair[1] for pair in seg_word]
# content_type: the raw description text, repeated once per token of its row.
content_type = [text
                for text, count in zip(content, n_word)
                for _ in range(count)]

# Assemble the long-format frame: one row per token.
result = pd.DataFrame({'index_content': index_content, 'word': word,
                       'nature': nature, 'content_type': content_type})

# 作基础处理
result = result[result['nature'] != 'x']

# 删除停用词
# 加载停用词
stop_path = open('stopwords.txt', 'r', encoding='utf-8')
stop = [x.replace('\n', '') for x in stop_path.readlines()]
# 得到非停用词序列
word = list(set(word) - set(stop))
# 判断表格中的单词列是否在非停用词列中
result = result[result['word'].isin(word)]
# Tokens per sentence after filtering. groupby sorts by key, which matches
# row order here because index_content is non-decreasing down the frame.
n_word = list(result.groupby(by=['index_content'])['index_content'].count())
# 0-based position of each word within its sentence, flattened in a single
# pass (the original sum(list_of_lists, []) is quadratic in token count).
result['index_word'] = [pos for count in n_word for pos in range(count)]
result.reset_index(drop=True, inplace=True)

# Sentence ids of reviews that contain at least one noun ('n'),
# using a vectorized mask instead of a per-row list comprehension.
ind = result[result['nature'] == 'n']['index_content'].unique()
# Keep only the reviews that contain a noun.
result = result[result['index_content'].isin(ind)]
# Reset the index and persist the cleaned token table.
result.reset_index(drop=True, inplace=True)
result.to_csv('result.csv', index=False, encoding='utf-8')
print(result.head())
