import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import jieba.posseg as psg

# word = pd.read_csv("./word.csv")

reviews = pd.read_csv('./data of 电商产品评论数据情感分析/reviews.csv')

# Drop reviews that duplicate both the text and the sentiment label.
reviews = reviews.drop_duplicates(subset=['content', 'content_type'])
content = reviews["content"]
# Strip digits, ASCII letters and brand/product words (京东/美的/电热水器/热水器).
# BUGFIX: the original pattern ended with a trailing '|', an empty alternative
# that matches the empty string at every position; removed it so the regex is
# well-formed. Substitution output is unchanged.
strinfo = re.compile('[0-9a-zA-Z]|京东|美的|电热水器|热水器')
content = content.apply(lambda x: strinfo.sub('', x))
# Tokenize: map each review to a list of (word, POS-flag) pairs via jieba.
worker = lambda s: [(x.word, x.flag) for x in psg.cut(s)]
seg_word = content.apply(worker)
# Load the stop-word list, one word per line.
# BUGFIX: the original opened the file and never closed it; `with` guarantees
# the handle is released. splitlines() drops the trailing '\n' that the
# original removed via str.replace.
with open(r".\data of 电商产品评论数据情感分析\stoplist.txt", 'r',
          encoding='UTF-8') as stop_file:
    stop = stop_file.read().splitlines()
# Walk every tokenized review: drop stop words, keep only nouns (POS flag
# containing 'n'), and record each kept word with its review index, POS tag,
# sentiment label of the parent review, and position within the review.
# PERF: rows are collected in a plain list and converted to a DataFrame once
# at the end — the original `df.loc[len(df)] = row` append is O(n^2) in pandas.
stop_set = set(stop)  # O(1) membership tests instead of scanning a list
rows = []
# index_content / index_word are 1-based, matching the original counters.
for index_content, word_set in enumerate(seg_word, start=1):
    for index_word, (word, nature) in enumerate(word_set, start=1):
        if word not in stop_set and 'n' in nature:
            rows.append([index_content, word, nature,
                         reviews.iloc[index_content - 1]["content_type"],
                         index_word])
word_posneg = pd.DataFrame(rows, columns=['index_content', 'word', 'nature',
                                          'content_type', 'index_word'])

# Load the positive/negative evaluation and emotion word lexicons.
def _load_word_list(path):
    """Read a one-word-per-line lexicon file into a single-column DataFrame.

    sep="/n" is a separator that never occurs in the data, so each whole
    line is parsed as one field (engine='python' allows the multi-char sep).
    """
    return pd.read_csv(path, header=None, sep="/n",
                       encoding='utf-8', engine='python')

pos_comment = _load_word_list("./data of 电商产品评论数据情感分析/正面评价词语（中文）.txt")
neg_comment = _load_word_list("./data of 电商产品评论数据情感分析/负面评价词语（中文）.txt")
pos_emotion = _load_word_list("./data of 电商产品评论数据情感分析/正面情感词语（中文）.txt")
neg_emotion = _load_word_list("./data of 电商产品评论数据情感分析/负面情感词语（中文）.txt")

# Merge evaluation words with emotion words for each polarity.
pos_words = set(pos_comment.iloc[:, 0]) | set(pos_emotion.iloc[:, 0])
neg_words = set(neg_comment.iloc[:, 0]) | set(neg_emotion.iloc[:, 0])

# Words present in both lexicons are ambiguous — remove them from both sides.
ambiguous = pos_words & neg_words
pos_words -= ambiguous
neg_words -= ambiguous

# Weight +1 for positive words, -1 for negative (scalar broadcasts per row).
positive = pd.DataFrame({"word": list(pos_words), "weight": 1})
negative = pd.DataFrame({"word": list(neg_words), "weight": -1})

posneg = pd.concat([positive, negative], ignore_index=True)


# 将分词结果与正负面情感词表合并，定位情感词
data_posneg = posneg.merge(word_posneg, left_on='word', right_on='word',
                           how='right')
# data_posneg = data_posneg.sort_values(by = ['index_content','index_word'])

data_posneg.head()
# 查看原来该句评论问pos，但其中分词后词情感标注未负面的
# data_posneg[(data_posneg["content_type"]=='pos')&(data_posneg["weight"]<1)]
# data_posneg[(data_posneg["content_type"]=='neg')&(data_posneg["weight"]==1)]

