# Pipeline overview: filter the raw data (drop redundant columns, detect
# duplicate reviews, remove invalid ones), split reviews by sentiment,
# train LDA topic models, and finally quantify each review with a score.
import re
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from gensim import corpora, models

TOPIC_NUM = 1  # number of LDA topics to train per sentiment

# WordNet lemmatizer shared by the normalization helpers below.
lmtzr = WordNetLemmatizer()

# Fixed file paths for every pipeline stage; functions reference them by index.
m_files = [r"..\data\microwave.tsv",
           r"..\data\microwave_lda_1rmv_cols.tsv",
           r"..\data\microwave_lda_2dup_revs.tsv",
           r"..\data\microwave_lda_3rmv_invds.tsv",
           r"..\data\microwave_lda_4pos_revs.txt",
           r"..\data\microwave_lda_5neg_revs.txt",
           r"..\data\microwave_lda_6pos_rev_words.txt",  # text already pre-processed
           r"..\data\microwave_lda_7neg_rev_words.txt",
           r"..\data\microwave_lda_8pos_topic.tsv",
           r"..\data\microwave_lda_9neg_topic.tsv",
           r"..\data\microwave_lda_10pos_topic_words.txt",
           r"..\data\microwave_lda_11neg_topic_words.txt",
           r"..\data\microwave_lda_12rev_words.tsv",
           r"..\data\microwave_lda_13rev_score.tsv"]

# English stop-word set; 'not' is kept because it carries sentiment.
stop_words = set(stopwords.words('english'))
stop_words = [word for word in stop_words if word not in ['not']]
# print(stop_words)
# Custom domain-specific stop words (product/domain terms with no sentiment value).
m_stop_words = ['would', 'br', 'microwave', 'use', 'get', 'old', 'new', 'look', 'work', 'could', 'oven',
                'purchase', 'take', 'make', 'buy', 'go', 'come', 'say', 'not', 'bought', 'even', 'ge',
                'also', 'ca', 'dry']
# Penn Treebank POS tags considered meaningful for sentiment analysis
# (modals, interjections, verbs, particles, adverbs, adjectives).
m_tags = ['MD', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'RP', 'RB', 'RBR', 'RBS', 'JJ', 'JJR', 'JJS']
# Regex replacing any run of non-letter / non-apostrophe characters with a space.
pat_letter = re.compile(r'[^a-zA-Z \']+')
# Patterns for expanding common English contractions.
pat_is = re.compile("(it|he|she|that|this|there|here)(\'s)", re.I)
pat_s = re.compile("(?<=[a-zA-Z])\'s")  # possessive 's following a letter
pat_s2 = re.compile("(?<=s)\'s?")
pat_not = re.compile("(?<=[a-zA-Z])n\'t")  # contraction of "not"
pat_would = re.compile("(?<=[a-zA-Z])\'d")  # contraction of "would"
pat_will = re.compile("(?<=[a-zA-Z])\'ll")  # contraction of "will"
pat_am = re.compile("(?<=[I|i])\'m")  # contraction of "am"
pat_are = re.compile("(?<=[a-zA-Z])\'re")  # contraction of "are"
pat_ve = re.compile("(?<=[a-zA-Z])\'ve")  # contraction of "have"


# 剔除冗余的列
def remove_cols():
    """Drop metadata columns not needed downstream; write file[0] -> file[1]."""
    frame = pd.read_csv(m_files[0], sep='\t', encoding='utf-8')
    unwanted = ['marketplace', 'product_category', 'product_parent', 'product_title']
    frame.drop(unwanted, axis=1).to_csv(m_files[1], sep='\t', encoding='utf-8')


# 获取重复的评论
def get_dup_revs():
    """Find review texts that occur more than once and save them.

    Reads file[1], counts identical ``review_body`` values, keeps those with
    a count > 1, and writes a two-column table (review_body, num) to file[2].

    Fix: the original built ``pd.DataFrame(value_counts())`` and indexed it
    as ``dup_df['review_body']``; since pandas 2.0 the counts Series is named
    'count', so that lookup raises KeyError. Using the Series' index/values
    directly is version-independent, and also avoids the transpose dance.
    """
    m_df = pd.read_csv(m_files[1], index_col=0, sep='\t', encoding='utf-8')
    # Series mapping each distinct review text -> number of occurrences.
    counts = m_df['review_body'].value_counts()
    m_review_num = pd.DataFrame({'review_body': counts.index.values.tolist(),
                                 'num': counts.values.tolist()})
    # Keep only the texts that are actually duplicated.
    m_review_num = m_review_num[m_review_num['num'] > 1]
    m_review_num.to_csv(m_files[2], sep='\t', index=False, header=True, encoding='utf-8')
    # print(m_review_num)
    # print(m_review_num)


#  去除无效评论
def remvove_invds(*invd_list):
    """Drop invalid reviews; write file[1] -> file[3].

    *invd_list* holds row numbers within the duplicate-review table (file[2])
    whose review texts should be removed from the full data set.
    """
    # print("remvove_invds", invd_list)
    reviews = pd.read_csv(m_files[1], sep='\t', encoding='utf-8')
    dup_table = pd.read_csv(m_files[2], sep='\t', encoding='utf-8')
    # Texts of the rows flagged as invalid.
    bad_rows = dup_table[dup_table.index.isin(invd_list)]
    bad_texts = bad_rows['review_body'].values.tolist()
    # Keep only reviews whose text is not in the invalid list.
    reviews = reviews[~reviews.review_body.isin(bad_texts)]
    reviews.to_csv(m_files[3], sep='\t', index=False, header=True, encoding='utf-8')


#  抽取1、2,4、5星的评论
def get_pos_neg_revs():
    """Split file[3] into negative (1-2 star) and positive (4-5 star) review files."""
    frame = pd.read_csv(m_files[3], sep='\t', encoding='utf-8')
    negative = frame[frame.star_rating.isin([1, 2])]['review_body']
    positive = frame[frame.star_rating.isin([4, 5])]['review_body']
    negative.to_csv(m_files[5], sep='\t', index=False, header=True, encoding='utf-8')
    positive.to_csv(m_files[4], sep='\t', index=False, header=True, encoding='utf-8')


# 还原常见缩写单词函数
def replace_abbreviations(text):
    new_text = text
    new_text = pat_letter.sub(' ', text).strip().lower()
    new_text = pat_is.sub(r"\1 is", new_text)
    new_text = pat_s.sub("", new_text)
    new_text = pat_s2.sub("", new_text)
    new_text = pat_not.sub(" not", new_text)
    new_text = pat_would.sub(" would", new_text)
    new_text = pat_will.sub(" will", new_text)
    new_text = pat_am.sub(" am", new_text)
    new_text = pat_are.sub(" are", new_text)
    new_text = pat_ve.sub(" have", new_text)
    new_text = new_text.replace('\'', ' ')
    return new_text


# pos和tag有相似的地方，通过tag获得pos,  pos是原型
def get_wordnet_pos(treebank_tag):
    if treebank_tag.startswith('J'):
        return nltk.corpus.wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return nltk.corpus.wordnet.VERB
    elif treebank_tag.startswith('N'):
        return nltk.corpus.wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return nltk.corpus.wordnet.ADV
    else:
        return ''


# 将单词变回原型
def words_normalize(words):
    new_words = []
    for word in words:
        if word:
            tag = nltk.pos_tag(nltk.word_tokenize(word))  # tag is like [('bigger', 'JJR')]
            pos = get_wordnet_pos(tag[0][1])
            if pos:
                # lemmatize()方法将word单词还原成pos词性的形式
                lemmatized_word = lmtzr.lemmatize(word, pos)
                new_words.append(lemmatized_word)
            else:
                new_words.append(word)
    return new_words


# 从文本抽取单词
def extract_words(text, debug=False):
    text = replace_abbreviations(text)
    if debug:
        print('去除非字母符号:', text)
    m_words = nltk.word_tokenize(text)  # 分词
    if debug:
        print('分词:', m_words)
    m_word_tags = nltk.pos_tag(m_words)  # 获取单词词性
    if debug:
        print('获取词性:', m_word_tags)
    m_words = [word for word, tag in m_word_tags if tag in m_tags]  # 过滤词性
    if debug:
        print('过滤词性后:', m_words)
    m_words = words_normalize(m_words)  # 归一化
    if debug:
        print('归一化后:', m_words)
    m_words = [word for word in m_words if word not in stop_words]  # 过滤停词表
    m_words = [word for word in m_words if word not in m_stop_words]  # 过滤自定义停词表
    if debug:
        print('过滤停词表后:', m_words)
    return m_words


# 获取文章主题
def get_topics(input_file):
    fr = open(input_file, 'r', encoding='utf-8')
    words_list = []  # 二维单词列表
    for line in fr.readlines():
        words_list.append(extract_words(line, debug=False))
    # """构建词频矩阵，训练LDA模型"""
    dictionary = corpora.Dictionary(words_list)
    # corpus[0]: [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1),...]
    # corpus是把每条新闻ID化后的结果，每个元素是新闻中的每个词语，在字典中的ID和频率
    corpus = [dictionary.doc2bow(words) for words in words_list]  # text单篇文章
    lda = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=TOPIC_NUM)  # lda训练
    topic_list = lda.print_topics(TOPIC_NUM)
    print(len(topic_list), "个主题的单词分布为：\n")
    for topic in topic_list:
        print(topic)
    return topic_list


# 获取文章主题, 使用预处理后的评论文本(已经进行了归一化，筛选词性，去停词表等操作)
def get_topics2(input_file):
    fr = open(input_file, 'r', encoding='utf-8')
    words_list = []  # 二维单词列表
    for line in fr.readlines():
        m_words = nltk.word_tokenize(line)
        # m_words = [word for word in m_words if word not in m_stop_words]
        words_list.append(m_words)
    # """构建词频矩阵，训练LDA模型"""
    dictionary = corpora.Dictionary(words_list)
    # corpus[0]: [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1),...]
    # corpus是把每条新闻ID化后的结果，每个元素是新闻中的每个词语，在字典中的ID和频率
    corpus = [dictionary.doc2bow(words) for words in words_list]  # text单篇文章
    lda = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=TOPIC_NUM)  # lda训练
    topic_list = lda.print_topics(TOPIC_NUM)
    print(len(topic_list), "个主题的单词分布为：\n")
    for topic in topic_list:
        print(topic)
    return topic_list


#  主题转换为df格式
def get_topics_df(c_topic_list):
    # 取得每个主题的特征词
    key_words = []
    for mi in range(TOPIC_NUM):
        text = replace_abbreviations(c_topic_list[mi][1])
        m_words = nltk.word_tokenize(text)  # 分词
        key_words.append(m_words)
    # 变成 DataFrame 格式
    m_indexs = ['主题' + str(x + 1) for x in range(TOPIC_NUM)]
    m_key_words_df = pd.DataFrame(data=key_words, index=m_indexs)
    return m_key_words_df


#  将消极评论文本和积极评论文本预处理一遍, 单词以空格分开
# 使用到了extract_words函数预处理文本
def write_selected_words():
    for i in range(4, 6):
        fr = open(m_files[i], 'r', encoding='utf-8')
        fw = open(m_files[i + 2], 'w', encoding='utf-8')
        words_list = []  # 二维单词列表
        for line in fr.readlines():
            fw.write(" ".join(extract_words(line)))
            fw.write('\n')


#  将df数据抽取成一列, 并且去重
def get_words_from_df(df):
    m_list = []
    m_columns = df.columns
    for c in m_columns:
        m_list += df[c].values.tolist()
    m_list = list(set(m_list))
    return m_list


#  输入已经过滤一遍的评论单词，生成主题df, 主题单词文本
def get_topic_words():
    # 生成主题
    for i in range(6, 8):
        # topic_list = get_topics(m_files[i])
        topic_list = get_topics2(m_files[i])
        key_words_df = get_topics_df(topic_list)
        key_words_df.to_csv(m_files[i + 2], sep='\t', index=True, header=True, encoding='utf-8')
        # print(key_words_df)
    # 有10个主题生成其单词集合
    for i in range(8, 10):
        df = pd.read_csv(m_files[i], index_col=0, sep='\t', encoding='utf-8')
        # print(df)
        m_list = get_words_from_df(df)
        print(m_list)
        fw = open(m_files[i + 2], 'w', encoding='utf-8')
        fw.write("\n".join(m_list))


# # 将评论预处理再写入 方便与主题词比对
def get_rev_words():
    m_df = pd.read_csv(m_files[1], sep='\t', index_col=0, encoding='utf-8')
    m_reviews = m_df['review_body'].values.tolist()
    m_reviews_words = []
    for rv in m_reviews:
        m_words = extract_words(rv)
        m_reviews_words.append(" ".join(m_words))
    m_df['review_words'] = m_reviews_words
    m_df.to_csv(m_files[12], sep='\t', encoding='utf-8')


#  评论量化
def get_rev_score():
    # 读取pos,neg词汇
    m_words_list = []
    for i in range(10, 12):
        fr = open(m_files[i], 'r', encoding='utf-8')
        m_list = []
        for line in fr.readlines():
            # m_list.append(nltk.word_tokenize(line))
            m_list.append(line.strip())
        m_words_list.append(m_list)
    m_pos_words = m_words_list[0]
    m_neg_words = m_words_list[1]

    m_df = pd.read_csv(m_files[12], sep='\t', index_col=0, encoding='utf-8')
    m_reviews = m_df['review_words'].values.tolist()
    m_reviews_score = []
    m_pos_score = []
    m_neg_score = []
    for rv in m_reviews:
        rv = str(rv)
        # print(rv)
        m_words = []
        if not rv.isspace():
            m_words = nltk.word_tokenize(rv)
        # print(m_words)
        m_score = 0
        m_neg = 0
        m_pos = 0
        for word in m_words:
            if word in m_neg_words:
                m_neg += 1
            elif word in m_pos_words:
                m_pos += 1
        m_pos_score.append(m_pos)
        m_neg_score.append(m_neg)
        m_reviews_score.append(m_pos - m_neg)

    m_df['review_score'] = m_reviews_score
    m_df['rv_pos_score'] = m_pos_score
    m_df['rv_neg_score'] = m_neg_score

    m_df.to_csv(m_files[13], sep='\t', index=0, header=1, encoding='utf-8')
    print('add_lie')


# lda训练，得到主题词
def lda_step1():
    """Pipeline step 1: clean columns and tabulate duplicate reviews."""
    # file[0] -> file[1]: drop redundant columns.
    remove_cols()
    # file[1] -> file[2]: list review texts that occur more than once.
    get_dup_revs()


def lda_step2():
    """Pipeline step 2: drop invalid reviews, then split by sentiment.

    Requires manual inspection of the duplicates found in step 1 to decide
    which rows are invalid.
    """
    bad_rows = [1, 2]  # row numbers of invalid reviews within file[2]
    # file[1] -> file[3], consulting file[2] for the invalid texts.
    remvove_invds(*bad_rows)
    # file[3] -> file[4] (positive), file[5] (negative).
    get_pos_neg_revs()


def lda_step3():
    """Pipeline step 3: pre-process review text and run LDA training."""
    # file[4] -> file[6], file[5] -> file[7]: normalize, POS-filter, de-stopword.
    write_selected_words()
    # file[6] -> file[8] -> file[10]; file[7] -> file[9] -> file[11].
    get_topic_words()


# Run the desired pipeline stage; earlier stages are left commented out
# because each step depends on the previous one's output files.
# lda_step1()
# lda_step2()
lda_step3()
# Sample run output kept below for reference.
""" 分析结果
1 个主题的单词分布为：

(0, '0.022*"great" + 0.019*"well" + 0.015*"small" + 0.014*"good" + 0.013*"easy" + 0.011*"fit" + 0.010*"love" + 0.010*"need" + 0.009*"little" + 0.008*"much"')
1 个主题的单词分布为：

(0, '0.014*"replace" + 0.009*"last" + 0.008*"stop" + 0.008*"start" + 0.008*"back" + 0.008*"well" + 0.007*"never" + 0.007*"call" + 0.007*"turn" + 0.007*"open"')
['well', 'small', 'fit', 'good', 'great', 'easy', 'need', 'much', 'little', 'love']
['replace', 'well', 'turn', 'last', 'never', 'call', 'back', 'stop', 'open', 'start']
"""