#!/usr/bin/python
# -*- coding: utf-8 -*-

import torch
from torch import nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F


class TextRNN(nn.Module):
    """Recurrent text classifier: 5000-word vocab -> 64-d embedding -> bi-GRU -> 10 classes."""

    def __init__(self):
        super(TextRNN, self).__init__()
        # 5000-word one-hot vocabulary embedded into 64-d dense vectors.
        self.embedding = nn.Embedding(5000, 64)
        # LSTM variant (kept for reference):
        # self.rnn = nn.LSTM(input_size=64, hidden_size=128, bidirectional=True)
        # Bidirectional GRU: final hidden state is 2 directions x 128 units = 256 features.
        self.rnn = nn.GRU(input_size=64, hidden_size=128, num_layers=1, bidirectional=True)
        # Classifier head: 256 features -> probability distribution over 10 classes.
        # dim=1 makes the softmax axis explicit (the implicit-dim form is deprecated
        # and warns at runtime).
        self.f1 = nn.Sequential(nn.Linear(256, 10),
                                nn.Softmax(dim=1))

    def forward(self, x):
        # Embed token ids: batch_size * text_len * embedding_size = 64 * 600 * 64.
        x = self.embedding(x)
        # GRU (batch_first=False) expects (seq_len, batch, features): 600 * 64 * 64.
        x = x.permute(1, 0, 2)
        # h_n: (num_layers * num_directions) * batch_size * hidden_size = 2 * 64 * 128.
        # x, (h_n, c_n) = self.rnn(x)  # LSTM
        x, h_n = self.rnn(x)  # GRU
        # Zero features with p=0.8 against overfitting. `training=self.training` is
        # required: F.dropout defaults to training=True, which would keep dropout
        # active in model.eval() and corrupt inference outputs.
        final_feature_map = F.dropout(h_n, 0.8, training=self.training)
        # Concatenate the two directions' final hidden states:
        # batch_size * (hidden_size * num_directions) = 64 * 256.
        feature_map = torch.cat([final_feature_map[i, :, :] for i in range(final_feature_map.shape[0])], dim=1)
        # batch_size * class_num = 64 * 10.
        final_out = self.f1(feature_map)
        return final_out


class TextCNN(nn.Module):
    """Convolutional text classifier: 5000-word vocab -> 64-d embedding -> Conv1d + global max pool -> 10 classes."""

    def __init__(self):
        super(TextCNN, self).__init__()
        self.embedding = nn.Embedding(5000, 64)
        # Conv over the time axis: a 600-token input yields 596 positions (kernel 5);
        # MaxPool1d(596) then collapses the time axis to one feature per channel.
        self.conv = nn.Sequential(nn.Conv1d(in_channels=64,
                                            out_channels=256,
                                            kernel_size=5),
                                  nn.ReLU(),
                                  nn.MaxPool1d(kernel_size=596))
        # Final classifier producing raw logits (no softmax here).
        self.f1 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.embedding(x)  # batch_size x text_len x embedding_size = 64*600*64
        x = x.permute(0, 2, 1)  # Conv1d wants (batch, channels, length): 64*64*600

        x = self.conv(x)  # Conv1d -> 64*256*596, ReLU unchanged, MaxPool1d -> 64*256*1

        x = x.view(-1, x.size(1))  # flatten to 64*256
        # `training=self.training` is required: F.dropout defaults to training=True,
        # which would keep dropout active in model.eval() and corrupt inference.
        x = F.dropout(x, 0.8, training=self.training)
        x = self.f1(x)  # batch_size * class_num = 64*10
        return x


if __name__ == '__main__':
    # Smoke check: construct the RNN model and print its layer summary.
    model = TextRNN()
    print(model)