#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Gaoxiang


import torch
from torch import nn
from text_cls_config import Config


class fastText(nn.Module):
    """fastText-style bag-of-embeddings classifier.

    Token embeddings are averaged over the sequence dimension, then passed
    through a small MLP head: Linear -> BatchNorm1d -> ReLU -> Linear.
    The config mapping must supply ``hidden_size``, ``embedding_size``,
    ``num_class`` and ``vocab_size``.
    """

    def __init__(self, config):
        super().__init__()
        # Keep hyper-parameters around as attributes for introspection.
        self.hidden_size = config["hidden_size"]
        self.embedding_size = config["embedding_size"]
        self.num_class = config["num_class"]
        self.vocab_size = config["vocab_size"]

        # Token-id -> dense vector lookup table.
        self.embedding = nn.Embedding(
            num_embeddings=self.vocab_size,
            embedding_dim=self.embedding_size,
        )
        # Classification head applied to the pooled sentence vector.
        head = [
            nn.Linear(in_features=self.embedding_size, out_features=self.hidden_size),
            nn.BatchNorm1d(num_features=self.hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=self.hidden_size, out_features=self.num_class),
        ]
        self.fc = nn.Sequential(*head)

    def forward(self, x):
        """Return class logits of shape (batch, num_class) for token ids ``x``."""
        embedded = self.embedding(x)          # (batch, seq_len, embedding_size)
        pooled = embedded.mean(dim=1)         # average over the token axis
        return self.fc(pooled)


class TextCNN_Conv2(nn.Module):
    """TextCNN classifier built on 2-D convolutions.

    The embedded sequence is treated as a single-channel "image" of shape
    (seq_len, embedding_size). One Conv2d per kernel height slides over it,
    each output is max-pooled over time down to one vector per filter, and
    the concatenated features go through dropout and a linear classifier.
    The config mapping must supply ``embedding_size``, ``num_class`` and
    ``vocab_size``.
    """

    def __init__(self, config):
        super().__init__()
        self.embedding_size = config["embedding_size"]
        self.num_class = config["num_class"]
        self.vocab_size = config["vocab_size"]
        # num_embeddings: vocabulary capacity; embedding_dim: vector width.
        self.embedding = nn.Embedding(
            num_embeddings=self.vocab_size,
            embedding_dim=self.embedding_size,
        )

        in_channels = 1
        n_filters = 128          # feature maps produced per kernel height
        kernel_heights = [2, 3, 4]
        # Parallel convolution branches, one per n-gram size. Each kernel
        # spans the full embedding width so it only slides along the sequence.
        self.convs = nn.ModuleList(
            [
                torch.nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=n_filters,
                    kernel_size=(height, self.embedding_size),
                )
                for height in kernel_heights
            ]
        )
        self.dropout = nn.Dropout()  # default p=0.5, matching nn.Dropout()
        # Fuses the concatenated branch outputs into class logits.
        self.fc = nn.Linear(
            in_features=len(kernel_heights) * n_filters,
            out_features=self.num_class,
            bias=True,
        )

    def forward(self, x):
        """Return class logits of shape (batch, num_class) for token ids ``x``."""
        # (batch, seq_len) -> (batch, 1, seq_len, embedding_size): add the
        # channel axis Conv2d expects.
        features = self.embedding(x).unsqueeze(1)

        branch_outputs = []
        for conv in self.convs:
            fmap = conv(features)                    # (batch, filters, seq_len-k+1, 1)
            # Global max over the remaining spatial extent -> (batch, filters, 1, 1).
            fmap = torch.max_pool2d(fmap, kernel_size=fmap.shape[2:])
            branch_outputs.append(fmap.flatten(start_dim=1))  # (batch, filters)

        # (batch, filters * num_kernel_heights), regularised with dropout.
        combined = self.dropout(torch.cat(branch_outputs, dim=1))
        return self.fc(combined)


if __name__ == "__main__":
    # Smoke-test fastText on a tiny toy batch.
    Config["num_class"] = 3
    Config["vocab_size"] = 20

    model = fastText(Config)
    # Inference mode: makes BatchNorm use running statistics instead of
    # batch statistics, so the forward pass is deterministic.
    model.eval()

    content = torch.LongTensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    label = torch.LongTensor([1, 2])  # intended gold classes (not used here)

    # No gradients are needed for a forward-only check; skip autograd
    # bookkeeping instead of reaching for the deprecated `.data` attribute.
    with torch.no_grad():
        output = model(content)

    # NOTE: these are raw logits, not normalised probabilities — apply
    # softmax if true probabilities are needed.
    probability, predictions = torch.max(output, dim=1)
    print(probability, predictions)
