import re
import jieba
from collections import Counter
import numpy as np
import torch.nn as nn
import torch
import matplotlib.pyplot as plt


# Paths to the raw review corpora, one review per line.
good_file = './good.txt'  # positive reviews
bad_file = './bad.txt'  # negative reviews

def filter_punc(sentence):
    """Strip whitespace and common ASCII / full-width punctuation from a sentence.

    The pattern is a raw string so that regex escapes such as the whitespace
    class are not misread as (invalid) Python string escapes, which raises
    SyntaxWarning on Python 3.12+.  The regex itself is unchanged.
    """
    sentence = re.sub(r"[\s+\.\!\/_,$%^*(+\"\'“”《》?“]+|[+——！，。？、~@#￥%……&*（）：]+", "", sentence)
    return sentence

# print(jieba.lcut("经过我的亲身体验，这家店信誉是相当地不错。宝贝的质量更像钻石一般。太感谢了！"))

def prepare_data(good_file, bad_file, is_filter=True):
    """Load positive/negative review files, tokenize with jieba, build a vocabulary.

    Args:
        good_file: path to the positive-review text file (one review per line).
        bad_file: path to the negative-review text file (one review per line).
        is_filter: when True, strip punctuation via filter_punc before tokenizing.

    Returns:
        (pos_sentences, neg_sentences, diction): the two sentence lists hold
        jieba token lists; diction maps word -> [unique index, frequency].
    """
    all_words = []      # every token from both files, in reading order
    pos_sentences = []  # tokenized positive reviews
    neg_sentences = []  # tokenized negative reviews

    # Counter initialized up front so an empty file cannot cause a NameError
    # (the original referenced the loop variable after the loop).
    line_count = 0
    with open(good_file, 'r', encoding='utf-8') as fr:
        for line_count, line in enumerate(fr, start=1):
            if is_filter:
                line = filter_punc(line)
            words = jieba.lcut(line)
            if len(words) > 0:  # skip lines that tokenize to nothing
                all_words += words
                pos_sentences.append(words)
    print('{0}包含{1}行，{2}个单词'.format(good_file, line_count, len(all_words)))

    # Remember how many tokens came from the positive file so the negative
    # file's token count can be reported separately.
    pos_word_count = len(all_words)
    line_count = 0
    with open(bad_file, 'r', encoding='utf-8') as fr:
        for line_count, line in enumerate(fr, start=1):
            if is_filter:
                line = filter_punc(line)
            words = jieba.lcut(line)
            if len(words) > 0:
                all_words += words
                neg_sentences.append(words)
    print('{0}包含{1}行，{2}个单词'.format(bad_file, line_count, len(all_words) - pos_word_count))

    # Build the dictionary: word -> [unique index, frequency].
    diction = {}
    for word, freq in Counter(all_words).items():
        diction[word] = [len(diction), freq]
    print('字典大小:{}'.format(len(diction)))
    return pos_sentences, neg_sentences, diction

# Build the corpus and vocabulary from both review files at import time.
pos_sentences,neg_sentences,diction = prepare_data(good_file,bad_file)

def word2index(word, diction):
    """Return the integer index of *word* in *diction*, or -1 if absent."""
    entry = diction.get(word)
    return entry[0] if entry is not None else -1

def index2word(index, diction):
    """Reverse lookup: return the word whose index is *index*, or None."""
    return next((word for word, entry in diction.items() if entry[0] == index), None)


# Bag-of-words model
def sentence2vec(sentence, dictionary):
    """Encode a sentence as a length-normalized bag-of-words vector.

    Args:
        sentence: list of integer word indices (already mapped via word2index).
        dictionary: the vocabulary dict; only its length is used here.

    Returns:
        A float vector of length len(dictionary) holding each word's count
        divided by the sentence length.  An empty sentence yields an all-zero
        vector (the original divided by zero here, producing NaNs and a
        RuntimeWarning).
    """
    vector = np.zeros(len(dictionary))
    if not sentence:  # guard the division below
        return vector
    for idx in sentence:
        vector[idx] += 1
    return 1.0 * vector / len(sentence)

# Map every word of every review to its index and build the BoW dataset.
dataset = []    # one bag-of-words vector per review
labels = []     # 0 = positive, 1 = negative
sentences = []  # original token lists, kept for later inspection

def _encode_sentences(sentence_list, label):
    """Append BoW vectors for each sentence in sentence_list under `label`."""
    for sentence in sentence_list:
        word_indices = [word2index(w, diction) for w in sentence if w in diction]
        dataset.append(sentence2vec(word_indices, diction))
        labels.append(label)
        sentences.append(sentence)

_encode_sentences(pos_sentences, 0)
_encode_sentences(neg_sentences, 1)

# Shuffle dataset, labels and sentences with the SAME permutation.
# BUG FIX: the original assigned the shuffled labels to a misspelled
# variable `lables`, leaving `labels` unshuffled and therefore misaligned
# with the shuffled `dataset` — the model would have trained on wrong labels.
indices = np.random.permutation(len(dataset))
dataset = [dataset[i] for i in indices]
labels = [labels[i] for i in indices]
sentences = [sentences[i] for i in indices]

# Split the dataset into training, validation and test sets; the validation
# and test sets each take one tenth of the data, the rest is for training.
test_size = len(dataset) // 10
train_data = dataset[2 * test_size :]
train_label = labels[2 * test_size :]

valid_data = dataset[: test_size]
valid_label = labels[: test_size]

test_data = dataset[test_size : 2 * test_size]
test_label = labels[test_size : 2 * test_size]

# A simple feed-forward network: Linear -> ReLU -> Linear -> LogSoftmax,
# with 10 hidden units.

# Input dimension equals the vocabulary size (each review is a bag-of-words
# vector); the output is log-probabilities over 2 classes (pos / neg).
model = nn.Sequential(
    nn.Linear(len(diction), 10),
    nn.ReLU(),
    nn.Linear(10, 2),
    nn.LogSoftmax(dim=1),
)

def rightness(predictions, labels):
    """Count how many predictions match the ground truth.

    predictions: (batch_size, num_classes) score matrix from the model.
    labels: tensor of ground-truth class indices.
    Returns (number_correct, batch_size).
    """
    # Argmax over the class dimension gives the predicted class per row.
    predicted_classes = predictions.data.max(dim=1)[1]
    # Compare with the labels (reshaped to match) and count agreements.
    correct = predicted_classes.eq(labels.data.view_as(predicted_classes)).sum()
    return correct, len(labels)


# Negative log-likelihood loss; together with the model's LogSoftmax layer
# this is equivalent to cross-entropy.
cost = torch.nn.NLLLoss()
# Adam optimizer adapts per-parameter learning rates automatically.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
records = []  # [mean train loss, mean valid loss, valid accuracy] snapshots

# Train for 10 epochs.
losses = []  # per-step training-loss history, accumulated across all epochs
for epoch in range(10):
    for i, data in enumerate(zip(train_data, train_label)):
        x, y = data

        # Reshape the input to add a leading batch dimension of size 1.
        x = torch.tensor(x, requires_grad=True, dtype=torch.float).view(1, -1)
        # x shape: (batch_size=1, len_dictionary)
        # Wrap the label into a 1-element long tensor for NLLLoss.
        y = torch.tensor(np.array([y]), dtype=torch.long)
        # y shape: (batch_size=1,)

        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass.
        predict = model(x)
        # Compute the loss.
        loss = cost(predict, y)
        # Record the loss value.
        losses.append(loss.data.numpy())
        # Backpropagate.
        loss.backward()
        # One optimizer step over the parameters.
        optimizer.step()

        # Every 3000 steps, evaluate on the validation set and report progress.
        if i % 3000 == 0:
            val_losses = []
            rights = []
            # Run over the whole validation set.
            for j, val in enumerate(zip(valid_data, valid_label)):
                x, y = val
                x = torch.tensor(x, requires_grad=True, dtype=torch.float).view(1, -1)
                y = torch.tensor(np.array([y]), dtype=torch.long)
                predict = model(x)
                # Accumulate (num_correct, batch_size) pairs via rightness.
                right = rightness(predict, y)
                rights.append(right)
                loss = cost(predict, y)
                val_losses.append(loss.data.numpy())

            # Average accuracy over the validation set.
            right_ratio = 1.0 * np.sum([i[0] for i in rights]) / np.sum([i[1] for i in rights])
            print('第{}轮，训练损失：{:.2f}, 校验损失：{:.2f}, 校验准确率: {:.2f}'.format(epoch, np.mean(losses),
                                                                        np.mean(val_losses), right_ratio))
            records.append([np.mean(losses), np.mean(val_losses), right_ratio])

# Plot the loss/accuracy curves collected in `records`.
a = [i[0] for i in records]  # training loss
b = [i[1] for i in records]  # validation loss
c = [i[2] for i in records]  # validation accuracy
plt.plot(a, label = 'Train Loss')
plt.plot(b, label = 'Valid Loss')
plt.plot(c, label = 'Valid Accuracy')
plt.xlabel('Steps')
plt.ylabel('Loss & Accuracy')
plt.legend()
plt.show()