import numpy as np
import torch, math, random
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.parameter import Parameter
from tensorboardX import SummaryWriter


# Toy sentiment dataset: phrase -> polarity (True = positive, False = negative).
train_data = {
    'good': True,
    'bad': False,
    'happy': True,
    'sad': False,
    'not good': False,
    'not bad': True,
    'not happy': False,
    'not sad': True,
    'very good': True,
    'very bad': False,
    'very happy': True,
    'very sad': False,
    'i am happy': True,
    'this is good': True,
    'i am bad': False,
    'this is bad': False,
    'i am sad': False,
    'this is sad': False,
    'i am not happy': False,
    'this is not good': False,
    'i am not bad': True,
    'this is not sad': True,
    'i am very happy': True,
    'this is very good': True,
    'i am very bad': False,
    'this is very sad': False,
    'this is very happy': True,
    'i am good not bad': True,
    'this is good not bad': True,
    'i am bad not good': False,
    'i am good and happy': True,
    'this is not good and not happy': False,
    'i am not at all good': False,
    'i am not at all bad': True,
    'i am not at all happy': False,
    'this is not at all sad': True,
    'this is not at all happy': False,
    'i am good right now': True,
    'i am bad right now': False,
    'this is bad right now': False,
    'i am sad right now': False,
    'i was good earlier': True,
    'i was happy earlier': True,
    'i was bad earlier': False,
    'i was sad earlier': False,
    'i am very bad right now': False,
    'this is very good right now': True,
    'this is very sad right now': False,
    'this was bad earlier': False,
    'this was very good earlier': True,
    'this was very bad earlier': False,
    'this was very happy earlier': True,
    'this was very sad earlier': False,
    'i was good and not bad earlier': True,
    'i was not good and not happy earlier': False,
    'i am not at all bad or sad right now': True,
    'i am not at all good or happy right now': False,
    'this was not happy and not good earlier': False,
}
test_data = {
    'this is happy': True,
    'i am good': True,
    'this is not happy': False,
    'i am not good': False,
    'this is not bad': True,
    'i am not sad': True,
    'i am very good': True,
    'this is very bad': False,
    'i am very sad': False,
    'this is bad not good': False,
    'this is good and happy': True,
    'i am not good and not happy': False,
    'i am not at all sad': True,
    'this is not at all good': False,
    'this is not at all bad': True,
    'this is good right now': True,
    'this is sad right now': False,
    'this is very bad right now': False,
    'this was good earlier': True,
    'i was not happy and not good earlier': False,
}

# Labels as a column vector of class indices: shape [num_samples, 1].
train_target = torch.LongTensor([int(i) for i in train_data.values()]).unsqueeze(dim=1)
train_len = len(train_target)

# Vocabulary built from the training phrases; sorted so that word ids are
# deterministic across interpreter sessions (plain set order is not).
vocab = sorted(set(w for text in train_data.keys() for w in text.split(' ')))
vocab_size = len(vocab)
# Special tokens occupy ids 0-2; real words start at 3.
# Bug fix: these entries were previously inserted inverted (word_to_idx[0] =
# '[PAD]', i.e. id -> token) while every other entry maps token -> id.
word_to_idx = {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2}
word_to_idx.update({w: i + 3 for i, w in enumerate(vocab)})
vocab_size = vocab_size + 3

# Model hyperparameters.
d_model = d_ff = 50   # embedding width / feed-forward width
d_k = d_v = 54        # attention query-key / value projection dims
embedding_num = vocab_size
embedding_dim = d_model
batch_size = 5
maxlen = 10           # every phrase is right-padded to this length


def gelu(x):
    """Exact GELU activation: x * Phi(x), with Phi the standard normal CDF.

    Uses the erf form; OpenAI GPT's tanh approximation
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    gives slightly different values. See https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf


def tokens_lsit(sentences, word2idx, unk_idx=1):
    """Convert whitespace-split sentences to lists of token ids.

    Args:
        sentences: iterable of strings.
        word2idx: mapping from word to integer id.
        unk_idx: id used for words missing from word2idx (default 1, the
            [UNK] slot). Previously a missing word raised KeyError.

    Returns:
        list of lists of ints, one inner list per sentence.
    """
    token_list = []
    for sentence in sentences:
        token_list.append([word2idx.get(w, unk_idx) for w in sentence.split()])
    return token_list


class MyDataSet(Dataset):
    """Minimal map-style dataset pairing token-id rows with their labels."""

    def __init__(self, input_ids, targets):
        self.input_ids = input_ids
        self.targets = targets

    def __len__(self):
        """Number of samples."""
        return len(self.input_ids)

    def __getitem__(self, index):
        """Return the (input_ids, target) pair at *index*."""
        sample = self.input_ids[index]
        label = self.targets[index]
        return sample, label


def make_data(batch_size, token_list, maxlen):
    """Right-pad each token-id list with 0 ([PAD]) to *maxlen* and stack.

    Args:
        batch_size: unused; kept for interface compatibility (batching is
            delegated to DataLoader by the caller).
        token_list: list of lists of token ids, each of length <= maxlen.
        maxlen: target length for every row.

    Returns:
        LongTensor of shape [len(token_list), maxlen].

    Unlike the previous version, the caller's lists are no longer mutated
    in place.
    """
    padded = [ids + [0] * (maxlen - len(ids)) for ids in token_list]
    return torch.tensor(padded)


def get_attn_pad_mask(seq_q, seq_k):
    """Build a boolean mask flagging [PAD] key positions for attention.

    Positions where seq_k equals 0 (the [PAD] id) are True, so attention
    scores at those keys can be filled with a large negative value and
    effectively excluded from the softmax.

    Args:
        seq_q: [batch_size, seq_len] query token ids (shape source only).
        seq_k: [batch_size, seq_len] key token ids.

    Returns:
        Bool tensor of shape [batch_size, seq_len, seq_len].
    """
    batch_size, seq_len = seq_q.size()
    # [batch_size, 1, seq_len]: True wherever the key token is padding.
    is_pad = seq_k.eq(0).unsqueeze(1)
    # Broadcast the same key mask over every query row.
    return is_pad.expand(batch_size, seq_len, seq_len)


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V.

    The scale is now derived from the query's last dimension instead of the
    module-level global d_k, so the module works for any head size
    (behavior is unchanged for this file, where Q's last dim == d_k).
    """

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        """Args:
            Q, K: [batch_size, seq_len, d_k]
            V: [batch_size, seq_len, d_v]
            attn_mask: bool [batch_size, seq_len, seq_len]; True = ignore key.

        Returns:
            context: [batch_size, seq_len, d_v]
        """
        d_key = Q.size(-1)
        scores = torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(d_key)
        # Masked positions get a large negative score so softmax ~ 0 there.
        scores = scores.masked_fill(attn_mask, -1e9)
        attn = F.softmax(scores, dim=-1)
        context = torch.matmul(attn, V)
        return context


class SelfAttention(nn.Module):
    """Single-head self-attention with a trainable output projection.

    Bug fix: the output projection nn.Linear(d_v, d_model) was previously
    constructed inside forward(), so its weights were re-randomized on every
    call and never registered with the module (hence never trained). It is
    now created once in __init__ as a registered submodule. The no-op
    attn_mask.repeat(1, 1, 1) and the unused `residual` were also removed.
    """

    def __init__(self):
        super(SelfAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k)  # query projection
        self.W_K = nn.Linear(d_model, d_k)  # key projection
        self.W_V = nn.Linear(d_model, d_v)  # value projection
        self.fc = nn.Linear(d_v, d_model)   # output projection (trainable)
        self.d_model = d_model
        self.d_k = d_k
        self.d_v = d_v

    def forward(self, Q, K, V, attn_mask):
        """Args:
            Q, K, V: [batch_size, seq_len, d_model]
            attn_mask: bool [batch_size, seq_len, seq_len]; True = ignore key.

        Returns:
            [batch_size, seq_len, d_model]
        """
        batch_size = Q.size(0)

        q_s = self.W_Q(Q).view(batch_size, -1, self.d_k)
        k_s = self.W_K(K).view(batch_size, -1, self.d_k)
        v_s = self.W_V(V).view(batch_size, -1, self.d_v)

        context = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)
        # NOTE(review): no residual/LayerNorm, matching the original
        # (the residual path was already commented out there).
        return self.fc(context)


class GeneralAttention(nn.Module):
    """Additive attention scorer producing per-position pooling weights.

    Each position's d_model vector is projected, squashed with tanh, scored
    against a learned vector `a`, and the scores are softmax-normalized over
    the sequence dimension.
    """

    def __init__(self):
        super(GeneralAttention, self).__init__()
        self.linear = nn.Linear(d_model, maxlen, bias=True)
        self.a = Parameter(torch.FloatTensor(maxlen, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init of `a` in [-stdv, stdv].

        NOTE(review): a.size(1) == 1, so stdv is always 1.0; a fan-in style
        init would use a.size(0) — confirm whether size(1) is intentional.
        """
        stdv = 1. / math.sqrt(self.a.size(1))
        self.a.data.uniform_(-stdv, stdv)

    def forward(self, inputs):
        """inputs: [batch, seq_len, d_model] -> weights: [batch, seq_len, 1]."""
        hidden = torch.tanh(self.linear(inputs))
        scores = torch.matmul(hidden, self.a)
        weights = F.softmax(scores, dim=1)
        return weights


class Classifier(nn.Module):
    """Sentence classifier: embedding -> self-attention -> FFN -> attention
    pooling -> linear head.

    forward() returns raw logits of shape [batch_size, 2]; pair it with
    nn.CrossEntropyLoss, which applies log-softmax internally.

    Bug fixes vs the original:
    - removed the F.softmax on the output (softmax before CrossEntropyLoss
      double-applies the normalization and flattens gradients);
    - `.squeeze()` -> `.squeeze(-1)` so a batch of size 1 keeps its batch
      dimension (the old code needed a manual unsqueeze for batch=1);
    - removed a debug print from the forward pass.
    """

    def __init__(self):
        super(Classifier, self).__init__()
        self.embedding = nn.Embedding(embedding_num, embedding_dim)
        self.attn = SelfAttention()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.ga = GeneralAttention()
        self.fc3 = nn.Linear(d_model, 2)

    def forward(self, input_ids):
        # [batch_size, seq_len] -> [batch_size, seq_len, d_model]
        output = self.embedding(input_ids)
        # True at [PAD] key positions: [batch_size, seq_len, seq_len]
        self_attn_mask = get_attn_pad_mask(input_ids, input_ids)
        output = self.attn(output, output, output, self_attn_mask)
        # Position-wise feed-forward with GELU.
        output = self.fc2(gelu(self.fc1(output)))
        # Pooling weights over positions: [batch_size, seq_len, 1]
        weights = self.ga(output)
        # Weighted sum over positions -> [batch_size, d_model]
        pooled = torch.matmul(output.transpose(1, 2), weights).squeeze(-1)
        # Raw logits; CrossEntropyLoss handles the softmax.
        return self.fc3(pooled)


if __name__ == '__main__':
    # Tokenize the training phrases, pad to maxlen, and wrap in a DataLoader
    # (no shuffling, so epochs see the samples in dictionary order).
    tokens_list = tokens_lsit(train_data.keys(), word_to_idx)
    batch = make_data(batch_size, tokens_list, maxlen)
    dataloader = DataLoader(MyDataSet(batch, train_target), batch_size, False)

    model = Classifier()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(1000):
        n_correct, total_loss = 0, 0.0
        for input_ids, target in dataloader:
            predict = model(input_ids)
            loss = criterion(predict, target.squeeze())
            # .item() detaches the scalar; accumulating the tensor itself
            # would keep every batch's autograd graph alive all epoch.
            total_loss += loss.item()
            n_correct += int((predict.max(dim=1).indices == target.squeeze()).sum())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if (epoch + 1) % 10 == 0:
            acc = n_correct / train_len
            print('Epoch:', '%04d' % (epoch + 1),
                  'train loss = {:.3f}'.format(total_loss),
                  'train accuracy = {:.3f}'.format(acc))
