import jieba
import snownlp
from snownlp import SnowNLP
from snownlp import sentiment
from collections import Counter
import numpy as np


def toRemarkList(addr):
    """Collect comment entries from a '^&'-delimited dump file.

    An entry at index ``idx`` is kept only when neither it nor its
    neighbours at ``idx-3 .. idx+1`` look like a timestamp (i.e. start
    with '2').  Near the start of the list the negative offsets wrap to
    the tail of the list — presumably intentional for this file layout,
    TODO confirm against the dump format.  Returns the kept entries.
    """
    with open(addr, encoding='utf-8') as fp:
        fields = fp.read().split('^&')
    kept = []
    for idx in range(1, len(fields) - 1):
        window = (fields[idx + 1], fields[idx], fields[idx - 1],
                  fields[idx - 2], fields[idx - 3])
        if not any(part.startswith('2') for part in window):
            kept.append(fields[idx])
    return kept



def keywordextract(remarks):
    """Extract the 24 most common keywords across a list of remarks.

    Each remark contributes up to ``len(remark) // 3`` keywords via
    SnowNLP.  Single-character keywords are then dropped, except the
    deliberately whitelisted "好".  Returns a list of (keyword, count)
    pairs, most common first.

    Fixes vs. the original: the parameter no longer shadows the builtin
    ``list``, and the filter no longer calls ``res.remove(j)`` while
    iterating ``res`` — that pattern skips the element after every
    removal, so some single-char keywords survived by accident.
    """
    keywords = []
    for text in remarks:
        keywords += SnowNLP(text).keywords(len(text) // 3)
    # Build a filtered copy instead of mutating during iteration.
    keywords = [w for w in keywords if len(w) != 1 or w == "好"]
    return Counter(keywords).most_common(24)


def printkeywords(remark):
    """Print the top (keyword, count) pairs for *remark* on one line."""
    pairs = keywordextract(remark)
    for pair in pairs:
        print(pair, end=' ')
    print("")


# Returns the mean and variance of the sentiment scores for a data set
def calculation(remarks):
    """Score each non-empty remark with SnowNLP and summarise.

    SnowNLP sentiments lie in [0, 1]; they are rescaled to [-1, 1]
    before aggregation.  Returns ``[mean, variance]`` of the scores.
    """
    scores = [2 * SnowNLP(text).sentiments - 1
              for text in remarks
              if len(text) != 0]
    return [np.mean(scores), np.var(scores)]


def toArticleList(addr, date_prefix='2020'):
    """Split a '^&'-delimited dump into per-article chunks.

    Entries whose neighbour two positions ahead starts with
    *date_prefix* close the current chunk; entries at or just after a
    *date_prefix* marker (offsets 0, -3, -2) are treated as metadata and
    skipped; everything else is appended to the current chunk.  The last
    two entries of the file always join the final chunk.  Negative
    offsets near the start of the list wrap to the tail — presumably
    intentional for this layout, TODO confirm.

    Generalized vs. the original: the hard-coded year string '2020' is
    now the *date_prefix* parameter (default unchanged).  The unused
    ``count`` local was removed.

    Returns a list of chunks, each a list of entry strings.
    """
    with open(addr, encoding='utf-8') as fp:
        content = fp.read().split('^&')
    res = []
    alist = []
    for i in range(len(content) - 2):
        if content[i + 2].startswith(date_prefix):
            # Close off the current article chunk.
            alist.append(content[i])
            res.append(alist)
            alist = []
        elif (content[i].startswith(date_prefix)
              or content[i - 3].startswith(date_prefix)
              or content[i - 2].startswith(date_prefix)):
            # Timestamp/metadata entry: not part of any chunk.
            continue
        else:
            alist.append(content[i])
    # The final two entries are never visited by the loop above.
    alist.append(content[-2])
    alist.append(content[-1])
    res.append(alist)
    return res


def relation(article: list) -> list:
    """Weight an article's top words by the sentiment of its comments.

    *article* is a chunk produced by ``toArticleList``: the body text at
    index 1 followed by comment entries from index 2 on.  Returns up to
    three ``[word, weight]`` pairs where
    ``weight = count * mean_sentiment / n_tokens``, or ``[]`` when the
    chunk has no comments.

    Fixes vs. the original:
    - no longer mutates the caller's list (the original popped two
      leading items in place, so a second pass over the same data saw a
      truncated chunk);
    - token filtering builds a copy instead of calling ``remove`` (by
      value) while index-walking the list, which was O(n^2);
    - the redundant ``== "\u3000"`` test was dropped — the ideographic
      space has length 1, so the len check already covers it.
    """
    if len(article) <= 2:
        return []
    # Tokenise the body and drop all single-character tokens.
    tokens = [w for w in jieba.lcut(article[1]) if len(w) != 1]
    top = Counter(tokens).most_common(3)
    # Mean sentiment of the comments only (indices 2..end).
    stats = calculation(article[2:])
    mean_sent = stats[0]
    return [[word, freq * mean_sent / len(tokens)] for word, freq in top]


def most_effective(ar: list):
    """Print the most positively and negatively influential words.

    Collects ``[word, weight]`` pairs from every article via
    ``relation``; when a word appears for several articles the last
    weight seen wins (original behaviour, kept).  Prints the top-7 and
    bottom-7 ranked entries pairwise.

    Fixes vs. the original: plain indexing replaces the
    ``__getitem__`` dunder calls, and the print loop is capped at the
    number of available entries instead of raising IndexError when
    fewer than 7 words were collected.
    """
    weights = {}
    for article in ar:
        for word, weight in relation(article):
            weights[word] = weight
    ranked = sorted(weights.items(), key=lambda kv: kv[1], reverse=True)
    for i in range(min(7, len(ranked))):
        print(ranked[i])
        print(ranked[-i - 1])


# Retrain the SnowNLP sentiment model on the local corpora, then run the
# full pipeline over the four dump files.
sentiment.train('neg.txt', 'pos.txt')

_sources = ("1.txt", "2.txt", "3.txt", "4.txt")
remark1, remark2, remark3, remark4 = (toRemarkList(s) for s in _sources)
ar1, ar2, ar3, ar4 = (toArticleList(s) for s in _sources)

# Keyword extraction
for _rem in (remark1, remark2, remark3, remark4):
    printkeywords(_rem)
# Sentiment analysis: mean and variance
for _rem in (remark1, remark2, remark3, remark4):
    print(calculation(_rem))
# Influence analysis
for _ar in (ar1, ar2, ar3, ar4):
    most_effective(_ar)

