# _*_ coding: utf-8 _*_
# @Time : 2021/8/21 22:21 
# @Author : xupeng
# contact: ipeng_x1029@163.com
# @File : TextCNN.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config:
    """Configuration for the TextCNN model: dataset file paths and
    training / architecture hyper-parameters.

    Args:
        dataset_path: root directory of the dataset; data files are expected
            under ``<dataset_path>/data/``.
        embedding: filename of an ``.npz`` embedding archive (with key
            ``"embeddings"``) under ``<dataset_path>/data/``, or the string
            ``'random'`` to train embeddings from scratch.
    """
    def __init__(self, dataset_path, embedding):
        self.model_name = 'TextCNN'
        self.train_path = dataset_path + '/data/train.txt'
        self.test_path = dataset_path + '/data/test.txt'
        self.dev_path = dataset_path + '/data/dev.txt'
        # Read the class-label list with a context manager so the file handle
        # is closed (the original left it open).
        with open(dataset_path + '/data/class.txt', 'r', encoding='utf-8') as f:
            self.class_list = [word.strip() for word in f.readlines()]
        self.vocab_path = dataset_path + '/data/vocab.pkl'
        self.dataset_pkl = dataset_path + '/data/dataset.pkl'
        self.save_path = dataset_path + '/saved_dict/' + self.model_name + '.pkl'
        # Pre-trained embedding matrix as a float32 tensor, or None when
        # embeddings should be randomly initialized.
        if embedding != 'random':
            self.pretrained_embedding = torch.tensor(
                np.load(dataset_path + '/data/' + embedding)["embeddings"].astype('float32'))
        else:
            self.pretrained_embedding = None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dropout = 0.5
        self.nums_need_cut = 1000
        self.nums_classes = len(self.class_list)
        self.n_vocab = 0  # placeholder; set after the vocabulary is built
        self.num_epochs = 20
        self.batch_size = 128
        # NOTE(review): 'epoches' duplicates 'num_epochs' with a different
        # value — confirm which one the training loop actually reads.
        self.epoches = 10
        self.max_seq_size = 32
        self.learning_rate = 1e-3
        # Embedding dim comes from the pre-trained matrix when available.
        self.embedding_size = self.pretrained_embedding.size(1) if self.pretrained_embedding is not None else 300
        self.filter_size = (2, 3, 4)  # convolution kernel heights (n-gram widths)
        self.num_filters = 256        # channels per kernel height


class Model(nn.Module):
    """TextCNN classifier (Kim 2014 style).

    Embeds token ids, applies parallel 2-D convolutions of different kernel
    heights over the (seq_len, embedding_size) "image", global-max-pools each
    feature map over time, concatenates the pooled features and classifies
    them through three fully-connected layers.
    """

    def __init__(self, config):
        super(Model, self).__init__()
        if config.pretrained_embedding is not None:
            # Fine-tune the pre-trained matrix (freeze=False).
            self.embedding = nn.Embedding.from_pretrained(config.pretrained_embedding, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embedding_size,
                                          padding_idx=config.n_vocab - 1)
        # One conv per kernel height; each kernel spans the full embedding dim,
        # so the width of every feature map collapses to 1.
        self.convs = nn.ModuleList([
            nn.Conv2d(1, config.num_filters, kernel_size=(k, config.embedding_size))
            for k in config.filter_size
        ])
        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(len(config.filter_size) * config.num_filters, 100)
        self.fc2 = nn.Linear(100, 50)
        self.fc3 = nn.Linear(50, config.nums_classes)

    def _conv(self, x, conv):
        """Convolution + global max-pool over time for one kernel height.

        x: (batch, 1, seq_len, embedding_size) -> (batch, num_filters)
        """
        # Bug fix: the original used bare .squeeze(), which also drops the
        # batch dimension when batch_size == 1 and then breaks the
        # torch.cat(dim=1) in forward(). Squeeze only the known size-1 dims.
        x = F.relu(conv(x)).squeeze(3)             # (batch, num_filters, L)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # (batch, num_filters)
        return x

    def forward(self, input):
        """input: a sequence whose first element is a LongTensor of token ids
        with shape (batch, seq_len); returns logits (batch, nums_classes)."""
        x = input[0]
        x = self.embedding(x)          # (batch, seq_len, embedding_size)
        x = x.unsqueeze(1)             # add channel dim for Conv2d
        x = torch.cat([self._conv(x, conv) for conv in self.convs], dim=1)
        out = self.dropout(x)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)            # raw logits; loss fn applies softmax
        return out


