'''
Model built with PyTorch: a CNN (TextCNN) for binary text classification.
'''


import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torch.nn.functional as F
from pymongo import MongoClient
import matplotlib.pylab as pyl


# Run on the GPU when available, otherwise fall back to the CPU
dtype = torch.FloatTensor  # NOTE(review): appears unused below — confirm before removing
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Connect to the local MongoDB instance
client = MongoClient()
db = client["LawData"]

# Containers for the training/test samples and their labels
train_data = []
train_label = []
test_data = []
test_label = []

# Step indices and loss values collected during training (for plotting)
epoch_list = []
loss_list = []

# Load every document from the database into the lists
for record in db.train_jieba.find():
    train_data.append(record["content"])
    train_label.append(record["label"])

for record in db.test_jieba.find():
    test_data.append(record["content"])
    test_label.append(record["label"])

# All texts, train and test combined (used to build the vocabulary)
all_text = train_data + test_data

# TextCNN hyper-parameters
# dimensionality of each word's embedding vector
embedding_size = 100
# maximum number of tokens per sample
sequence_length = 450
# binary classification: only two labels
num_classes = 2
# samples per training batch
batch_size = 5

# Flat list of every token (texts are pre-tokenized, space-separated)
world_list = " ".join(all_text).split()
# Deduplicated vocabulary
vocab = list(set(world_list))
# Map each word to a unique integer id
word2idx = dict(zip(vocab, range(len(vocab))))
# Number of distinct words
vocab_size = len(vocab)


# Data preprocessing
def make_data(dataes, labels, vocab_map=None, max_len=None):
    """Encode tokenized sentences into fixed-length lists of word ids.

    Args:
        dataes: iterable of space-separated token strings.
        labels: iterable of integer class labels.
        vocab_map: word -> id mapping; defaults to the module-level word2idx.
        max_len: target sequence length; defaults to the module-level
            sequence_length. Longer sequences are truncated, shorter ones
            are padded with 0 at the end.

    Returns:
        (inputs, targets): list of id lists (each exactly max_len long),
        and the labels copied into a plain list.
    """
    if vocab_map is None:
        vocab_map = word2idx
    if max_len is None:
        max_len = sequence_length

    inputs = []
    for sen in dataes:
        encoded = [vocab_map[w] for w in sen.split()]
        # Fix: truncate over-long sequences — the original only padded, so a
        # document longer than max_len produced a ragged row that
        # torch.LongTensor could not stack.
        encoded = encoded[:max_len]
        # NOTE(review): pad id 0 is also a real word's id in word2idx — a
        # dedicated <PAD> token would be cleaner; kept for compatibility.
        encoded += [0] * (max_len - len(encoded))
        inputs.append(encoded)

    return inputs, list(labels)


# Encode the training data, then convert the results to LongTensors
train_inputs, train_targets = make_data(train_data, train_label)
input_batch = torch.LongTensor(train_inputs)
target_batch = torch.LongTensor(train_targets)

# Wrap the tensors in a DataLoader that reshuffles every epoch
dataset = Data.TensorDataset(input_batch, target_batch)
loader = Data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)


# Build the model
class TextCNN(nn.Module):
    """TextCNN classifier: Embedding -> Conv2d -> ReLU -> MaxPool -> Linear.

    Args:
        n_vocab: vocabulary size (defaults to the module-level vocab_size).
        embed_dim: embedding dimension (defaults to embedding_size).
        n_classes: number of output classes (defaults to num_classes).
    """

    def __init__(self, n_vocab=None, embed_dim=None, n_classes=None):
        super(TextCNN, self).__init__()
        # Fall back to the module-level hyper-parameters when not given,
        # so existing TextCNN() call sites keep working unchanged.
        n_vocab = vocab_size if n_vocab is None else n_vocab
        embed_dim = embedding_size if embed_dim is None else embed_dim
        n_classes = num_classes if n_classes is None else n_classes

        self.W = nn.Embedding(n_vocab, embed_dim)
        output_channel = 5
        self.conv = nn.Sequential(
            # Kernel spans the full embedding width, i.e. a 1-D convolution
            # over 10-token windows.
            nn.Conv2d(1, output_channel, (10, embed_dim)),
            nn.ReLU(),
            # NOTE(review): pool (300, 1) yields a 1x1 map only for inputs of
            # 309..608 tokens; holds for sequence_length=450 — confirm before
            # changing the sequence length.
            nn.MaxPool2d((300, 1)),
        )
        self.fc = nn.Linear(output_channel, n_classes)

    def forward(self, X):
        """Map a (batch, seq_len) LongTensor of token ids to class logits.

        Returns a (batch, n_classes) tensor.
        """
        batch_size = X.shape[0]
        embedding_X = self.W(X)                 # (batch, seq, embed)
        embedding_X = embedding_X.unsqueeze(1)  # add channel dim for Conv2d
        conved = self.conv(embedding_X)         # (batch, out_ch, 1, 1)
        flatten = conved.view(batch_size, -1)   # (batch, out_ch)
        output = self.fc(flatten)
        return output


# Instantiate the model, the loss function and the optimizer
model = TextCNN().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# Global step counter for the loss curve
n = 1

# Training loop
for epoch in range(5):
    for batch_x, batch_y in loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        pred = model(batch_x)
        loss = criterion(pred, batch_y)
        print('Epoch: ', "%04d" % (epoch + 1), 'loss = ', "{:.6f}".format(loss))

        # Record step index and loss value for plotting later
        epoch_list.append(n)
        n += 1
        # Fix: read the scalar via .item() instead of formatting the tensor
        # into a string and parsing it back.
        loss_list.append(round(loss.item(), 6))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


# Plot the loss curve over training steps (disabled)
'''
pyl.plot(epoch_list, loss_list)
pyl.title("Loss Values Change")
pyl.ylabel("loss")
pyl.savefig("Loss.jpg")
'''

# Evaluate accuracy on the test set
num_x = list(range(len(test_label)))
right = 0
results = []
# Hoisted out of the loop — the original re-assigned model.eval() per item.
model = model.eval()
with torch.no_grad():  # inference only: no need to track gradients
    # Fix: iterate data and labels in lockstep. The original used
    # test_data.index(item), which is O(n^2) and returns the FIRST matching
    # index, i.e. the wrong label whenever two test documents are identical.
    for item, label in zip(test_data, test_label):
        # Encode, truncate to sequence_length, and pad with 0 (the original
        # only padded and would crash on over-long documents).
        encoded = [word2idx[n] for n in item.split()][:sequence_length]
        encoded += [0] * (sequence_length - len(encoded))
        test_batch = torch.LongTensor([encoded]).to(device)

        # argmax over the class logits
        predict = model(test_batch).data.max(1, keepdim=True)[1]
        pred_label = int(predict[0][0])
        results.append(pred_label)

        if pred_label == label:
            right += 1
print(results)
print(right / len(test_label))

# Plot predicted vs. true labels (disabled)
'''
pyl.plot(num_x, results, c='green', linestyle='--', label="predict results")
pyl.plot(num_x, test_label, c='blue', linestyle='-.', label="test label")
pyl.scatter(num_x, results, c='green')
pyl.scatter(num_x, test_label, c='blue')
pyl.legend(loc='best')
pyl.ylabel("Predict/Test Label")
pyl.title("Model Prediction Effect")
pyl.savefig("Model-Prediction-Effect.jpg")
'''

# To classify a single input, change False to True
if False:
    test_text = "本院认为 当事人 有权 处分 自己 的 诉讼 权利 北辰 公司 在 本案 审查 期间 提出 撤回 申诉 的 请求 系其 真实 意思 表示 并 不 违反 法律 规定 本院 予以 准许"
    # Encode, truncate to sequence_length, and pad with 0 (the original only
    # padded and would crash on an over-long input).
    encoded = [word2idx[n] for n in test_text.split()][:sequence_length]
    encoded += [0] * (sequence_length - len(encoded))
    test_batch = torch.LongTensor([encoded]).to(device)
    # Predict
    model = model.eval()
    with torch.no_grad():  # inference only: no gradients needed
        predict = model(test_batch).data.max(1, keepdim=True)[1]
    print(predict[0][0])

