import re
import jieba
from collections import Counter
import pandas as pd
import numpy as np
import torch
from matplotlib import pyplot
from torch import nn

# Corpus paths (relative to the working directory): one review per line.
good_file = 'pytorch/jizhi/emotion/data/good.txt'  # positive reviews
bad_file = 'pytorch/jizhi/emotion/data/bad.txt'  # negative reviews

def filter_punc_sample(sentence):
    """Strip whitespace and Chinese/English punctuation from *sentence*."""
    # Raw string: the original non-raw literal contained invalid escape
    # sequences (\!, \/) that raise SyntaxWarning on modern Python.
    sentence = re.sub(r"[\s+\.\!\/_,$%^*(+\"\'“”《》?“]+|[+——！，。？、~@#￥%……&*（）：]+", "", sentence)
    return sentence

def deal_punc(sentence):
    """Strip whitespace and punctuation (ASCII and full-width) from *sentence*."""
    # Raw string so the regex escapes are not mangled by string-literal
    # escaping (the original non-raw form triggers SyntaxWarning).
    return re.sub(r'[\s+\.\!\/_,$%^*(+\"\'“”《》?]+|[+——！，。？、~@#￥%……&*（）：)]+', '', sentence)

def prepare_data_sample(good_file, bad_file, is_filter = True):
    """Load and tokenize the review corpora and build a vocabulary.

    Args:
        good_file: path of the positive-review file, one review per line.
        bad_file: path of the negative-review file, one review per line.
        is_filter: when True, strip punctuation from each line first.

    Returns:
        (pos_sentences, neg_sentences, diction): tokenized positive and
        negative reviews (lists of token lists) and a dict mapping
        word -> [word_id, frequency].
    """
    all_words = []      # every token seen across both corpora
    pos_sentences = []  # tokenized positive reviews
    neg_sentences = []  # tokenized negative reviews
    idx = -1  # bug fix: avoid NameError after the loop if the file is empty
    with open(good_file, 'r', encoding='utf-8') as fr:
        for idx, line in enumerate(fr):
            if is_filter:
                # strip punctuation before tokenizing
                line = filter_punc_sample(line)
            # tokenize with jieba
            words = jieba.lcut(line)
            if len(words) > 0:
                all_words += words
                pos_sentences.append(words)
    print('{0} 包含 {1} 行, {2} 个词.'.format(good_file, idx+1, len(all_words)))

    count = len(all_words)
    idx = -1  # bug fix: same empty-file guard for the negative corpus
    with open(bad_file, 'r', encoding='utf-8') as fr:
        for idx, line in enumerate(fr):
            if is_filter:
                line = filter_punc_sample(line)
            words = jieba.lcut(line)
            if len(words) > 0:
                all_words += words
                neg_sentences.append(words)
    print('{0} 包含 {1} 行, {2} 个词.'.format(bad_file, idx+1, len(all_words)-count))

    # Build the vocabulary: diction[word] = [word_id, frequency]
    diction = {}
    cnt = Counter(all_words)
    for word, freq in cnt.items():
        diction[word] = [len(diction), freq]
    print('字典大小：{}'.format(len(diction)))
    return(pos_sentences, neg_sentences, diction)

def prepare_data(good_file, bad_file, is_filter=True):
    """Load and tokenize both corpora and build a vocabulary.

    Args:
        good_file: path of the positive-review file, one review per line.
        bad_file: path of the negative-review file, one review per line.
        is_filter: when True, strip punctuation from each line first.

    Returns:
        (pos_sen, neg_sen, diction): tokenized reviews and a dict mapping
        word -> [word_id, frequency].
    """
    print('prepare data begin')
    all_words = []
    pos_sen, neg_sen = [], []
    for path, sen in zip((good_file, bad_file), (pos_sen, neg_sen)):
        index = -1  # bug fix: avoid NameError after the loop on an empty file
        with open(path, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f):
                if is_filter:
                    line = deal_punc(line)

                words = jieba.lcut(line)
                if len(words) > 0:
                    all_words += words
                    sen.append(words)
            # bug fix: enumerate is zero-based, so the line count is index + 1
            # (consistent with prepare_data_sample's report)
            print(f'{path} include {index + 1} rows, all words:{len(all_words)}')
    print(f'pos_sen len:{len(pos_sen)}, neg_sen len:{len(neg_sen)}')

    # Build the vocabulary: diction[word] = [word_id, frequency]
    diction = {}
    cnt = Counter(all_words)
    for word, freq in cnt.items():
        diction[word] = [len(diction), freq]
    print(f'diction len:{len(diction)}')
    return (pos_sen, neg_sen, diction)

def word2index(word, diction):
    """Return the integer id of *word*, or -1 when it is out of vocabulary."""
    entry = diction.get(word)
    return entry[0] if entry is not None else -1

def index2word(index, diction):
    """Reverse lookup: return the word whose id is *index*, or None."""
    return next((w for w, v in diction.items() if v[0] == index), None)

def word2index_sample(word, diction):
    """Return the integer id of *word*; -1 for out-of-vocabulary words."""
    try:
        return diction[word][0]
    except KeyError:
        return -1

# Map a word id back to the word itself.
def index2word_sample(index, diction):
    """Return the word whose id equals *index*, or None if there is none."""
    for token, (word_id, _freq) in diction.items():
        if word_id == index:
            return token
    return None

def sen2vec(sen, diction):
    """Convert a list of word ids into a normalized bag-of-words vector.

    Args:
        sen: list of integer word ids (each < len(diction)).
        diction: vocabulary dict; only its size is used here.

    Returns:
        np.ndarray of length len(diction) with relative word frequencies.
    """
    vector = np.zeros(len(diction))
    # Robustness fix: an empty sentence previously divided by zero,
    # producing NaNs and a RuntimeWarning; return the zero vector instead.
    if not sen:
        return vector
    for i in sen:
        vector[i] += 1
    return (1.0 * vector / len(sen))

def sentence2vec_sample(sentence, dictionary):
    """Turn a list of word ids into a frequency-normalized bag-of-words vector."""
    vec = np.zeros(len(dictionary))
    # Tally repeated ids once via Counter, then scatter the counts.
    for word_id, occurrences in Counter(sentence).items():
        vec[word_id] += occurrences
    return vec / len(sentence)

def rightness(predictions, labels):
    """Count correct predictions in a batch.

    Args:
        predictions: (batch, num_classes) tensor of per-class scores.
        labels: tensor of ground-truth class indices.

    Returns:
        (rights, total): number of correct predictions (0-d tensor) and
        the batch size.
    """
    # Bug fix: the original had a `count` guard meant to print the
    # prediction shape only once, but `count` was reset to 0 on every
    # call, so the debug print fired on each invocation. The leftover
    # debug code is removed.
    pred = torch.max(predictions.data, 1)[1]  # argmax over classes
    rights = pred.eq(labels.data.view_as(pred)).sum()
    return rights, len(labels)

def rightness_sample(predictions, labels):
    """Count correct predictions.

    *predictions* is a (batch_size, num_classes) score matrix from the
    model; *labels* holds the ground-truth class indices. Returns a tuple
    (num_correct, batch_size).
    """
    # argmax along the class dimension gives the predicted class per row
    predicted_classes = predictions.data.max(dim=1).indices
    # compare against the targets, reshaped to match, and tally the hits
    correct = predicted_classes.eq(labels.data.view_as(predicted_classes)).sum()
    return correct, len(labels)

def plot(records):
    """Plot train loss, validation loss and validation accuracy per step.

    *records* is a sequence of [train_loss, valid_loss, valid_accuracy]
    triples, one per evaluation point.
    """
    print('plot begin')
    # one curve per column of the records table
    for column, tag in enumerate(('train loss', 'verify loss', 'valid accuracy')):
        pyplot.plot([rec[column] for rec in records], label=tag)
    pyplot.xlabel('step')
    pyplot.ylabel('losses & accuracy')
    pyplot.legend()
    pyplot.show()

def plot_sample(records):
    """Draw the loss/accuracy curves recorded during training."""
    # Build each series from its column in the records, keyed by legend label.
    series = {
        'Train Loss': [rec[0] for rec in records],
        'Valid Loss': [rec[1] for rec in records],
        'Valid Accuracy': [rec[2] for rec in records],
    }
    for tag, values in series.items():
        pyplot.plot(values, label=tag)
    pyplot.xlabel('Steps')
    pyplot.ylabel('Loss & Accuracy')
    pyplot.legend()
    pyplot.show()

def main_sample():
    """Sample end-to-end pipeline: load the corpora, build normalized
    bag-of-words vectors, train a 2-layer classifier with Adam, validate
    periodically and plot the learning curves."""
    pos_sentences, neg_sentences, diction = prepare_data_sample(good_file, bad_file, True)
    st = sorted([(v[1], w) for w, v in diction.items()])
    dataset = [] # bag-of-words vectors
    labels = [] # class labels (0 = positive, 1 = negative)
    sentences = [] # original token lists, kept for debugging
    # Vectorize the positive reviews
    for sentence in pos_sentences:
        new_sentence = []
        for l in sentence:
            if l in diction:
                new_sentence.append(word2index(l, diction))
        dataset.append(sentence2vec_sample(new_sentence, diction))
        labels.append(0) # positive reviews are labeled 0
        sentences.append(sentence)

    # Vectorize the negative reviews
    for sentence in neg_sentences:
        new_sentence = []
        for l in sentence:
            if l in diction:
                new_sentence.append(word2index(l, diction))
        dataset.append(sentence2vec_sample(new_sentence, diction))
        labels.append(1) # negative reviews are labeled 1
        sentences.append(sentence)
        
    indices = np.random.permutation(len(dataset))

    # Shuffle dataset, labels and the original sentences with one shared
    # permutation so they stay aligned
    dataset = [dataset[i] for i in indices]
    labels = [labels[i] for i in indices]
    sentences = [sentences[i] for i in indices]

    # Split into train / validation / test sets; validation and test each
    # take one tenth of the whole dataset
    test_size = len(dataset) // 10
    train_data = dataset[2 * test_size :]
    train_label = labels[2 * test_size :]

    valid_data = dataset[: test_size]
    valid_label = labels[: test_size]

    test_data = dataset[test_size : 2 * test_size]
    test_label = labels[test_size : 2 * test_size]
    
    # Two-layer network; LogSoftmax pairs with the NLLLoss criterion below
    model = nn.Sequential(
        nn.Linear(len(diction), 10),
        nn.ReLU(),
        nn.Linear(10, 2),
        nn.LogSoftmax(dim=1),
    )
    
    cost = torch.nn.NLLLoss()
    # Adam adapts the learning rate automatically
    optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
    records = []

    # Training loop. NOTE(review): an earlier comment said 10 epochs but
    # range(3) runs only 3 — confirm which is intended.
    losses = []
    
    print(f'train len:{len(train_data)}, test len:{len(test_data)}')
    print(f'train top2:{train_data[:9]}, train label top2:{train_label[:9]}')
    for epoch in range(3):
        for i, data in enumerate(zip(train_data, train_label)):
            x, y = data
            
            # Reshape the input to add a leading batch dimension of size 1
            x = torch.tensor(x, requires_grad = True, dtype = torch.float).view(1,-1)
            # x shape: (batch_size=1, len(diction))
            # Wrap the label into a 1-element long tensor
            y = torch.tensor(np.array([y]), dtype = torch.long)
            # y shape: (batch_size=1,)
            
            # clear accumulated gradients
            optimizer.zero_grad()
            # forward pass
            predict = model(x)
            # compute the loss
            loss = cost(predict, y)
            # record the loss value
            losses.append(loss.data.numpy())
            # backpropagate
            loss.backward()
            # take one optimization step
            optimizer.step()
            
            # Every 3000 steps, evaluate on the validation set and report
            if i % 3000 == 0:
                val_losses = []
                rights = []
                # run over the whole validation set
                for j, val in enumerate(zip(valid_data, valid_label)):
                    x, y = val
                    x = torch.tensor(x, requires_grad = True, dtype = torch.float).view(1,-1)
                    y = torch.tensor(np.array([y]), dtype = torch.long)
                    predict = model(x)
                    # accumulate (num_correct, batch_size) pairs
                    right = rightness_sample(predict, y)
                    rights.append(right)
                    loss = cost(predict, y)
                    val_losses.append(loss.data.numpy())
                    
                # average accuracy over the whole validation set
                right_ratio = 1.0 * np.sum([i[0] for i in rights]) / np.sum([i[1] for i in rights])
                print('第{}轮，训练损失：{:.2f}, 校验损失：{:.2f}, 校验准确率: {:.2f}'.format(epoch, np.mean(losses),
                                                                            np.mean(val_losses), right_ratio))
                records.append([np.mean(losses), np.mean(val_losses), right_ratio])
    plot_sample(records)
    

def main():
    """Train and evaluate a bag-of-words sentiment classifier.

    Loads the positive/negative corpora, converts every review to a
    normalized bag-of-words vector, trains a 2-layer network with SGD,
    validates after each epoch, and finally reports test-set accuracy.

    NOTE(review): this calls prepare_data_sample() rather than
    prepare_data(); the two differ only in the punctuation regex —
    confirm which is intended.
    """
    print('main func begin')
    pos_sen, neg_sen, diction = prepare_data_sample(good_file, bad_file)
    datasets, labels, sentences = [], [], []
    # Vectorize both corpora: positive reviews get label 0, negative get 1.
    # (Replaces two copy-pasted loops and a dead commented-out variant that
    # mistakenly called word2index_sample(1, diction).)
    for sens, tag in zip((pos_sen, neg_sen), (0, 1)):
        for sentence in sens:
            # keep only in-vocabulary words, mapped to their ids
            new_sentence = [word2index(w, diction) for w in sentence if w in diction]
            datasets.append(sentence2vec_sample(new_sentence, diction))
            labels.append(tag)
            sentences.append(sentence)  # original tokens, for debugging

    # Shuffle datasets, labels and sentences with one shared permutation.
    indices = np.random.permutation(len(datasets))
    datasets = [datasets[i] for i in indices]
    labels = [labels[i] for i in indices]
    sentences = [sentences[i] for i in indices]

    # Split into train / validation / test; the latter two each take 1/10.
    test_size = int(len(datasets) // 10)
    train_data = datasets[2 * test_size :]
    train_label = labels[2 * test_size :]

    valid_data = datasets[: test_size]
    valid_label = labels[: test_size]

    test_data = datasets[test_size : 2 * test_size]
    test_label = labels[test_size : 2 * test_size]

    # Two-layer network; LogSoftmax pairs with the NLLLoss criterion below.
    model = torch.nn.Sequential(
        torch.nn.Linear(len(diction), 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 2),
        torch.nn.LogSoftmax(dim=1),
    )
    cost = torch.nn.NLLLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    records = []
    losses = []
    for epoch in range(5):
        for i, data in enumerate(zip(train_data, train_label)):
            x, y = data
            # add a batch dimension of size 1
            x = torch.tensor(x, dtype=torch.float).view(1, -1)
            y = torch.tensor(np.array([y]), dtype=torch.long)
            optimizer.zero_grad()
            predict = model(x)
            loss = cost(predict, y)
            losses.append(loss.data.numpy())
            loss.backward()
            optimizer.step()

        # Validate once per epoch.
        val_losses = []
        rights = []
        for val in zip(valid_data, valid_label):
            x, y = val
            x = torch.tensor(x, requires_grad=True, dtype=torch.float).view(1, -1)
            y = torch.tensor(np.array([y]), dtype=torch.long)
            predict = model(x)
            # accumulate (num_correct, batch_size) pairs
            right = rightness_sample(predict, y)
            rights.append(right)
            loss = cost(predict, y)
            val_losses.append(loss.data.numpy())
        right_ratio = 1.0 * np.sum([i[0] for i in rights]) / np.sum([i[1] for i in rights])
        print(f'No.{epoch}, train loss:{np.mean(losses):.4f}, verify loss:{np.mean(val_losses):.4f}, verify accuracy:{right_ratio}')
        records.append([np.mean(losses), np.mean(val_losses), right_ratio])
    # plot(records)  # uncomment to draw the learning curves

    # Test-set evaluation.
    test_val = []
    for data, target in zip(test_data, test_label):
        # Bug fix: the input needs a batch dimension (.view(1, -1));
        # a 1-D tensor crashes in LogSoftmax(dim=1) and in rightness().
        data = torch.tensor(data, dtype=torch.float).view(1, -1)
        target = torch.tensor(np.array([target]), dtype=torch.long)
        output = model(data)
        val = rightness(output, target)
        test_val.append(val)
    rights = (sum([tup[0] for tup in test_val]), sum([tup[1] for tup in test_val]))
    right_rate = rights[0].data.numpy() / rights[1]
    # Bug fix: the test accuracy was computed but never reported.
    print(f'test accuracy:{right_rate}')
    
# Script entry point: run the full pipeline. main_sample() is the
# alternative debug pipeline with in-loop validation and plotting.
if __name__ == '__main__':
    main()
    # main_sample()  # for debug