#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   models.py
@Contact :   xxzhang16@fudan.edu.cn

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021/6/9 21:53   zxx      1.0         None
'''

import torch
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):
    """Two-layer bidirectional LSTM feature extractor.

    With ``with_attention=True`` the per-step outputs are pooled into one
    vector per sample via a learned additive attention query; otherwise the
    full per-step output sequence is returned unchanged.
    """

    def __init__(self, embedding_dim, hidden_size, with_attention=False):
        super(Encoder, self).__init__()
        self.feature = nn.LSTM(
            input_size=embedding_dim,
            hidden_size=hidden_size,
            num_layers=2,
            bidirectional=True,
            batch_first=True)
        self.with_attention = with_attention
        if self.with_attention:
            # Single learned query vector scoring each time step.
            self.query = nn.Linear(hidden_size * 2, 1, bias=False)

    def forward(self, x):
        seq_out, _ = self.feature(x)  # (batch, length, 2*hidden)
        if not self.with_attention:
            return seq_out
        # Per-step scores, softmax-normalized over the time axis.
        step_weights = torch.softmax(self.query(torch.tanh(seq_out)), dim=1)  # (b, l, 1)
        # Weighted sum over time: (b, 1, l) @ (b, l, 2h) -> (b, 1, 2h) -> (b, 2h)
        pooled = torch.bmm(step_weights.transpose(1, 2), seq_out)
        return pooled.squeeze(1)


class Decoder(nn.Module):
    """Single-layer unidirectional LSTM decoder that exposes its state.

    Returns the output sequence together with the final hidden and cell
    states so the caller can feed them back in on the next decoding step.
    """

    def __init__(self, in_size, hidden_size):
        super(Decoder, self).__init__()
        self.lstm = nn.LSTM(
            input_size=in_size,
            hidden_size=hidden_size,
            num_layers=1,
            bidirectional=False,
            batch_first=True)

    def forward(self, x, h0, c0):
        output, state = self.lstm(x, (h0, c0))
        hidden, cell = state
        return output, hidden, cell


def compute_attention(out, y):
    """Dot-product attention of query `y` over key/value sequence `out`.

    Handles both batched 3-D tensors (out: (b, l, c), y: (b, q, c)) and
    unbatched 2-D tensors (out: (l, c), y: (q, c)); returns the
    attention-weighted sum of `out` rows with the same rank as `y`.
    """
    if out.dim() > 2:
        logits = torch.matmul(y, out.transpose(1, 2))  # (b, q, l)
        weights = torch.softmax(logits, dim=2)
    else:
        logits = torch.matmul(y, out.T)  # (q, l)
        weights = torch.softmax(logits, dim=1)
    return torch.matmul(weights, out)


class Generator(nn.Module):
    """Seq2seq greedy text generator.

    Encodes the source with a BiLSTM, initializes the decoder from the
    mean-pooled encoder outputs, then greedily decodes token by token,
    feeding back the argmax token embedding concatenated with an attention
    context over the encoder outputs.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_size):
        super(Generator, self).__init__()
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embedding_dim)
        # Encoder without attention pooling: returns per-step outputs (b, l, 2h).
        self.encoder = Encoder(embedding_dim, hidden_size)
        # Decoder input is [token embedding ; encoder context] at every step.
        self.decoder = Decoder(
            embedding_dim + hidden_size * 2,
            hidden_size * 2)

        # NOTE(review): fc projects to embedding_dim and the argmax over that
        # axis is later used as a token id. That is only consistent if
        # embedding_dim == vocab_size — confirm; otherwise this should
        # project to vocab_size.
        self.fc = nn.Linear(hidden_size * 2, embedding_dim)

    def forward(self, content, length):
        """Greedily decode `length` tokens conditioned on `content`.

        content: LongTensor of token ids, shape (batch, src_len).
        length: total number of output steps, including the initial token.
        Returns a (length, batch) LongTensor of token ids (time-major).
        """
        content = self.embedding(content)
        batch_size = content.shape[0]
        out = self.encoder(content)  # (batch, src_len, 2*hidden)
        # Decoder start state: mean-pooled encoder outputs, zero cell state.
        h0 = torch.mean(out, dim=1)
        c0 = torch.zeros_like(h0)
        # Token id 2 is presumably the BOS marker — TODO confirm against vocab.
        y0 = torch.tensor([2] * batch_size, dtype=torch.int32)
        out_lst = [y0.tolist()]
        # First step pairs the BOS embedding with a zero context vector.
        y0 = torch.cat([self.embedding(y0), torch.zeros_like(h0)], dim=1)
        y0 = y0.unsqueeze(1)
        h0 = h0.unsqueeze(0)
        c0 = c0.unsqueeze(0)
        y, h, c = self.decoder(y0, h0, c0)
        for i in range(length - 1):
            # TODO: the decoder input should be switched to teacher forcing
            # during training (translated from the original author note).
            att = compute_attention(out, y)  # (batch, 1, 2*hidden) context
            y = torch.softmax(self.fc(y), dim=2)
            y = torch.argmax(y, dim=2)  # greedy pick, shape (batch, 1)
            out_lst.append(y.squeeze(1).tolist())
            y = self.embedding(y)
            y, h, c = self.decoder(torch.cat([y, att], dim=2), h, c)
        return torch.tensor(out_lst)

    def predict(self, content, length=10):
        """Greedily decode a single example (batch size 1).

        Stops early when token id 3 is produced (presumably EOS — TODO
        confirm); the stop token is kept in the returned list.
        Returns a plain Python list of token ids starting with 2.
        """
        content = self.embedding(content)
        out = self.encoder(content)
        h0 = torch.mean(out, dim=1)
        c0 = torch.zeros_like(h0)

        y0 = torch.tensor([2], dtype=torch.int32)
        out_lst = [y0.item()]
        y0 = torch.cat([self.embedding(y0), torch.zeros_like(h0)], dim=1)
        y0 = y0.unsqueeze(1)
        h0 = h0.unsqueeze(0)
        c0 = c0.unsqueeze(0)
        y, h, c = self.decoder(y0, h0, c0)
        for i in range(length - 1):
            att = compute_attention(out, y)
            y = torch.softmax(self.fc(y), dim=2)
            y = torch.argmax(y, dim=2)
            out_lst.append(y.item())
            # Early exit on the assumed EOS token.
            if y.item() == 3:
                break
            y = self.embedding(y)
            y, h, c = self.decoder(torch.cat([y, att], dim=2), h, c)
        return out_lst


class BasicConv(nn.Module):
    """Conv1d -> BatchNorm -> ReLU -> MaxPool(2) block for sequence features.

    Operates on (batch, length, channels) tensors: the input is transposed
    to (batch, channels, length) for the conv stack, then transposed back.

    Args:
        in_channel: number of input feature channels.
        out_channel: number of output feature channels.
        kernel_size: 1-D convolution kernel width.
        padding: zero-padding added to both ends of the length axis.
        stride: convolution stride along the length axis.
    """

    def __init__(self, in_channel, out_channel, kernel_size, padding, stride):
        super(BasicConv, self).__init__()
        # BUG FIX: nn.Conv1d's positional signature is
        # (in_channels, out_channels, kernel_size, stride, padding); the
        # original passed `padding` into the stride slot and `stride` into
        # the padding slot. All current call sites use padding == stride == 1
        # so they happened to coincide, but any other values were silently
        # swapped. Keyword arguments make the mapping explicit.
        self.conv = nn.Sequential(
            nn.Conv1d(in_channel, out_channel, kernel_size,
                      stride=stride, padding=padding),
            nn.BatchNorm1d(out_channel),
            nn.ReLU(),
            nn.MaxPool1d(2),
        )

    def forward(self, x):
        # (b, l, c) -> (b, c, l) for Conv1d/BatchNorm1d, then back.
        x = x.transpose(1, 2)
        x = self.conv(x)
        x = x.transpose(1, 2)
        return x


class Classifier(nn.Module):
    """Binary classifier over BiLSTM features with optional conv stack and
    optional attention pooling inside the encoder.

    Supports two calling conventions (see forward): a (label, content) pair
    that is cross-attended, or a single content tensor.

    `opt` must provide: embedding_dim, hidden_size, with_conv, with_att,
    and kernel_nums (the last only when with_conv is True).
    """

    def __init__(self, vocab_size, opt):
        super(Classifier, self).__init__()
        self.name = 'Classifier'
        embedding_dim = opt.embedding_dim
        hidden_size = opt.hidden_size
        self.with_conv = opt.with_conv
        self.with_att = opt.with_att

        self.embedding = nn.Embedding(vocab_size, embedding_dim)

        # Shared encoder for both inputs; with attention it pools over time
        # to (batch, 2*hidden), otherwise it keeps (batch, len, 2*hidden).
        self.feature = Encoder(embedding_dim, hidden_size, with_attention=opt.with_att)
        self.feature_drop_out = nn.Dropout(0.3)

        if self.with_conv:
            # Three conv blocks mapping 2h -> kernel_nums -> kernel_nums -> 2h.
            self.conv = nn.Sequential(
                BasicConv(hidden_size * 2, opt.kernel_nums, 3, 1, 1),
                BasicConv(opt.kernel_nums, opt.kernel_nums, 3, 1, 1),
                BasicConv(opt.kernel_nums, hidden_size * 2, 3, 1, 1)
            )

        self.dropout = nn.Dropout(0.7)

        self.fc1 = nn.Sequential(
            nn.Linear(hidden_size * 2, 1024),
            nn.BatchNorm1d(1024),
            # nn.Dropout(0.7),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(1024, 256),
            nn.BatchNorm1d(256),
            # nn.Dropout(0.7),
            nn.ReLU(),
        )
        self.out = nn.Linear(256, 2)

    def forward(self, *input):
        """Classify either (label, content) or (content,) token-id tensors.

        Returns raw 2-class logits, shape (batch, 2).
        NOTE(review): the parameter name `input` shadows the builtin.
        """
        x = None
        if len(input) == 2:
            label, content = input
            label = self.embedding(label)
            # label = self.embedd_drop_out(label)
            label = self.feature(label)
            label = self.feature_drop_out(label)

            content = self.embedding(content)
            # content = self.embedd_drop_out(content)
            content = self.feature(content)
            content = self.feature_drop_out(content)
            # NOTE(review): `.T` assumes 2-D encoder output, i.e. with_att
            # must be True on this path; with with_att False the encoder
            # returns 3-D tensors and `.T` reverses all dims — confirm.
            r = torch.matmul(label.T, content)
            # Cross-normalized similarity in both directions.
            score_l_c = torch.softmax(r, dim=1)
            score_c_l = torch.softmax(r, dim=0)
            #
            score_content = torch.matmul(label, score_c_l)
            score_label = torch.matmul(content, score_l_c)
            # label = label + score_label
            # content = content + score_content
            x = score_content + score_label

        elif len(input) == 1:
            x = self.embedding(input[0])
            x = self.feature(x)

        # cnn
        if self.with_conv:
            x = self.conv(x)
        # Without attention pooling the features are still per-step; collapse
        # the time axis with max-pooling.
        if not self.with_att:
            x, _ = torch.max(x, dim=1)

        x = x.reshape(x.shape[0], -1)
        x = self.dropout(x)
        x = self.fc1(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return self.out(x)


class Classifier2(nn.Module):
    """Title/content pair classifier with cross-attention matching.

    Encodes both inputs with a shared BiLSTM, cross-attends them, builds
    enhanced matching features [a; a'; a-a'; a*a'], re-encodes, then pools
    (mean + max) and classifies. Original author note: "performance is poor".

    `opt` must provide: embedding_dim and hidden_size.
    """

    def __init__(self, vocab_size, opt):
        super(Classifier2, self).__init__()
        self.name = 'Classifier2'
        embedding_dim = opt.embedding_dim
        hidden_size = opt.hidden_size

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.embedd_drop_out = nn.Dropout(0.3)
        self.feature = Encoder(embedding_dim, hidden_size)

        self.dp1 = nn.Dropout(0.7)

        # Composition encoder over 4 concatenated 2h-wide feature groups (8h).
        self.feature2 = Encoder(hidden_size * 8, hidden_size)
        self.dp2 = nn.Dropout(0.7)

        # Classifier head over [mean_t; max_t; mean_c; max_c] = 4 * 2h = 8h.
        self.fc1 = nn.Sequential(
            nn.Linear(hidden_size * 8, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(512, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
        )
        self.out = nn.Linear(64, 2)

    def att(self, x, y):
        """Bidirectional cross-attention between sequences x and y.

        x, y: (batch, len_x, c) and (batch, len_y, c).
        Returns (res_x, res_y): y-rows aggregated for each x position and
        vice versa.
        NOTE(review): softmax over dim=1 normalizes across the *query*
        positions rather than the key positions (dim=2, as in standard
        attention) — confirm this is intended.
        """
        score_x_y = torch.softmax(torch.matmul(x, y.transpose(1, 2)), dim=1)
        score_y_x = torch.softmax(torch.matmul(y, x.transpose(1, 2)), dim=1)

        res_x = torch.matmul(score_x_y, y)
        res_y = torch.matmul(score_y_x, x)

        return res_x, res_y

    def forward(self, title, content):
        """Classify a (title, content) token-id pair; returns (batch, 2) logits."""
        title = self.embedding(title)
        content = self.embedding(content)
        title = self.embedd_drop_out(title)
        content = self.embedd_drop_out(content)
        title = self.dp1(self.feature(title))
        content = self.dp1(self.feature(content))

        title_, content_ = self.att(title, content)

        # Enhanced matching features: original, attended, difference, product.
        m_title = torch.cat([title, title_, title - title_, title * title_], dim=2)
        m_content = torch.cat([content, content_, content - content_, content * content_], dim=2)

        v_title = self.dp2(self.feature2(m_title))
        v_content = self.dp2(self.feature2(m_content))

        # Pool each composed sequence over time with mean and max.
        ave_title, ave_content = torch.mean(v_title, dim=1), torch.mean(v_content, dim=1)
        max_title, _ = torch.max(v_title, dim=1)
        max_content, _ = torch.max(v_content, dim=1)
        v = torch.cat([
            ave_title.reshape(ave_title.shape[0], -1),
            max_title.reshape(max_title.shape[0], -1),
            ave_content.reshape(ave_content.shape[0], -1),
            max_content.reshape(max_content.shape[0], -1)
        ], dim=1)

        v = self.fc1(v)
        v = self.fc2(v)
        v = self.out(v)
        return v


if __name__ == '__main__':
    # Smoke test: Classifier2 should be able to overfit a tiny random batch.
    from types import SimpleNamespace
    from torch import optim

    content = torch.randint(0, 19, (4, 10))
    title = torch.randint(0, 19, (4, 5))
    label = torch.tensor([0, 1, 0, 1], dtype=torch.long)

    # BUG FIX: Classifier2 requires an `opt` argument; the original call
    # omitted it and crashed with a TypeError. Classifier2 only reads
    # embedding_dim and hidden_size from opt.
    opt = SimpleNamespace(embedding_dim=10, hidden_size=8)
    cls = Classifier2(vocab_size=20, opt=opt)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(cls.parameters())
    for step in range(100):
        optimizer.zero_grad()
        out = cls(title, content)
        loss = criterion(out, label)
        loss.backward()
        optimizer.step()
        print(loss.item())
        # Fraction of correct argmax predictions on the training batch.
        acc = torch.eq(torch.argmax(out, dim=1), label).float().mean().item()
        print(acc)
