import torch
from torch import nn
import os
import sys

# Make the repository root (three directories up) importable so the absolute
# "shell...." import below resolves even when this file is run as a script.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../'))
sys.path.append(path)
# Star import supplies module-level config used below: at least `lr`,
# `model_path`, `vocab_len` and `embedding_size` are read in this file.
from shell.knowledge_graph.text_classification.model_config import *


class BaseModel(nn.Module):
    """Shared training / persistence logic for the text-classification models.

    Subclasses implement ``forward``; this base provides ``fit`` (training
    loop with periodic evaluation and best-checkpoint saving) and
    ``save_model``. Depends on ``lr`` and ``model_path`` from the
    star-imported ``model_config`` module.
    """

    def __init__(self):
        super().__init__()

    # Train the model.
    def fit(self,
            train_data_loader,
            eval_data,
            epochs):
        """Train on ``train_data_loader``, checkpointing on best eval accuracy.

        Args:
            train_data_loader: iterable of ``(inputs, labels)`` tensor batches.
            eval_data: tuple ``(eval_x, eval_y)`` of numpy arrays; ``eval_x``
                is converted to a tensor, ``eval_y`` stays numpy for the
                accuracy computation.
            epochs: number of passes over ``train_data_loader``.
        """
        eval_x, eval_y = eval_data
        eval_x = torch.from_numpy(eval_x)
        use_cuda = torch.cuda.is_available()
        if use_cuda:
            eval_x = eval_x.cuda()
            self.cuda()  # moves parameters in place (and returns self)

        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        # NOTE(review): subclasses end forward() with a Softmax, but
        # CrossEntropyLoss expects raw logits (it applies log_softmax
        # internally). Training still converges but gradients are damped;
        # consider returning logits from forward() instead — confirm callers.
        loss_func = nn.CrossEntropyLoss()

        best_acc = 0

        for epoch in range(epochs):
            for step, (b_x, b_y) in enumerate(train_data_loader):
                if use_cuda:
                    b_x = b_x.cuda()
                    b_y = b_y.cuda()
                output = self(b_x)
                loss = loss_func(output, b_y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if step % 20 == 0:
                    # Evaluate in inference mode: the original kept the model
                    # in train mode with autograd on, so dropout (used by the
                    # subclasses) corrupted the accuracy estimate and a
                    # throwaway graph was built over the whole eval set.
                    self.eval()
                    with torch.no_grad():
                        test_output = self(eval_x)
                    self.train()
                    pred_y = torch.max(test_output, 1)[1].cpu().data.numpy()
                    accuracy = float((pred_y == eval_y).astype(int).sum()) / float(eval_y.size)
                    if accuracy > best_acc:
                        best_acc = accuracy
                        self.save_model()
                        print('save model, accuracy: %.2f' % accuracy)
                    print('Epoch: ', epoch, '| train loss: %.4f' % loss.cpu().data.numpy(),
                          '| test accuracy: %.2f' % accuracy)

    # Save the model.
    def save_model(self):
        """Persist current weights to ``model_path`` (from model_config)."""
        torch.save(self.state_dict(), model_path)


class BiLSTM(BaseModel):
    """(Bi)LSTM sentence classifier: Embedding -> LSTM -> last-step FC -> softmax."""

    def __init__(self, vocab_len,
                 embedding_size,
                 lstm_hidden_size,
                 lstm_layer_nums,
                 lstm_bidirectional,
                 drop_out,
                 n_classes):
        """Build the network.

        Args:
            vocab_len: vocabulary size for the embedding table.
            embedding_size: embedding dimension.
            lstm_hidden_size: hidden size per LSTM direction.
            lstm_layer_nums: number of stacked LSTM layers.
            lstm_bidirectional: if True, the FC input width doubles.
            drop_out: inter-layer LSTM dropout (only applied when
                lstm_layer_nums > 1; PyTorch warns otherwise).
            n_classes: number of output classes.
        """
        super().__init__()

        self.embedding = nn.Embedding(vocab_len, embedding_size)
        self.lstm = nn.LSTM(input_size=embedding_size,
                            hidden_size=lstm_hidden_size,
                            num_layers=lstm_layer_nums,
                            dropout=drop_out,
                            bidirectional=lstm_bidirectional,
                            batch_first=True)
        if lstm_bidirectional:
            self.full_connect = nn.Linear(lstm_hidden_size * 2, n_classes)
        else:
            self.full_connect = nn.Linear(lstm_hidden_size, n_classes)
        # Explicit dim: nn.Softmax() with an implicit dim is deprecated and
        # warns; for the 2D (batch, classes) input it resolved to dim=1
        # anyway, so this is behavior-identical.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Classify token-id batch ``x`` of shape (batch, seq_len).

        Returns class probabilities of shape (batch, n_classes).
        """
        embedding = self.embedding(x)
        out = self.lstm(embedding)
        # out[0] is (batch, seq, hidden [*2]); take the final timestep.
        # NOTE(review): for the bidirectional case the last timestep holds
        # the backward direction's *first* step — confirm this is intended.
        out = self.full_connect(out[0][:, -1, :])
        out = self.softmax(out)
        return out


class TextCNN(BaseModel):
    """Multi-width CNN text classifier: parallel convs -> max-over-time -> FC.

    Reads ``vocab_len`` and ``embedding_size`` from the star-imported
    model_config module.
    """

    def __init__(self, n_classes):
        """Build the network with three conv widths (3/4/5) x 100 filters."""
        super().__init__()

        self.embedding = nn.Embedding(vocab_len, embedding_size)
        # Each conv spans the full embedding width, so it collapses that axis
        # and yields (batch, 100, seq_len - k + 1) after the squeeze below.
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=100, kernel_size=(3, embedding_size))
        self.cnn2 = nn.Conv2d(in_channels=1, out_channels=100, kernel_size=(4, embedding_size))
        self.cnn3 = nn.Conv2d(in_channels=1, out_channels=100, kernel_size=(5, embedding_size))
        # Max-over-time pooling. The original used fixed MaxPool1d kernels
        # (13/12/11), which silently hard-coded seq_len = 15 (15 - k + 1);
        # for any other length the three branches pooled inconsistent partial
        # windows. AdaptiveMaxPool1d(1) is identical at length 15 and is a
        # true global max for any sequence long enough for the widest kernel.
        self.max_pool = nn.AdaptiveMaxPool1d(1)
        self.full_connect = nn.Linear(300, n_classes)
        self.drop_out = nn.Dropout(0.5)
        # Explicit dim: nn.Softmax() with an implicit dim is deprecated; for
        # the 2D (batch, classes) input it resolved to dim=1 anyway.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Classify token-id batch ``x`` of shape (batch, seq_len).

        Returns class probabilities of shape (batch, n_classes).
        """
        embedding = self.embedding(x)
        embedding = embedding.unsqueeze(1)  # (batch, 1, seq_len, emb) for Conv2d
        cnn1_out = self.cnn1(embedding).squeeze(-1)
        cnn2_out = self.cnn2(embedding).squeeze(-1)
        cnn3_out = self.cnn3(embedding).squeeze(-1)
        out1 = self.max_pool(cnn1_out)
        out2 = self.max_pool(cnn2_out)
        out3 = self.max_pool(cnn3_out)
        # Concatenate the 3 x 100 pooled features -> (batch, 300).
        out = torch.cat([out1, out2, out3], dim=1).squeeze(-1)
        out = self.drop_out(out)
        out = self.full_connect(out)
        out = self.softmax(out)

        return out
