#!/usr/bin/env python
# coding: utf-8

import torch
from utils import adict
import dgl
from dgl.nn.pytorch.conv import GraphConv
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
import time
from pytorch_transformers.modeling_bert import BertLayer
import os


class WordsEmbedding(nn.Module):
    """Token embedding: 300-d word vectors projected to ``out_size`` features.

    If ``emb_file`` exists, pretrained word-vector weights are loaded from it
    (expected under the checkpoint key ``'embedding'``); otherwise the random
    initialization is kept.
    """

    def __init__(self, vocab_size, out_size=768, max_sent_len=512, emb_file="./model/emb_layer/W2V_CN.pkl"):
        """
        Args:
            vocab_size: number of rows in the word-embedding table.
            out_size: output feature size (default 768, BERT's hidden size).
            max_sent_len: capacity of the positional-embedding table.
            emb_file: optional checkpoint path with pretrained weights.
        """
        super(WordsEmbedding, self).__init__()
        self.w2v = nn.Embedding(vocab_size, 300)
        # NOTE(review): self.pos is created but never used by forward(), so no
        # positional information is added to the output — confirm intent.
        self.pos = nn.Embedding(max_sent_len, 300)
        self.linear = nn.Linear(300, out_size)
        if os.path.exists(emb_file):
            # map_location='cpu' so a checkpoint saved from a GPU run also
            # loads on CPU-only machines; load_state_dict then copies the
            # weights onto the module's own device regardless.
            checkpoint = torch.load(emb_file, map_location='cpu')
            self.w2v.load_state_dict(checkpoint['embedding'])

    def forward(self, token_ids):
        """Look up ``token_ids`` and project to ``out_size`` features."""
        return self.linear(self.w2v(token_ids))

class GCN_my(nn.Module):
    """Hand-rolled single GCN layer: linear-project source features, sum them
    over incoming edges, then ReLU.

    Note: ``h_feats`` is accepted but unused — kept only so the signature
    matches the other GCN variants in this file.
    """

    def __init__(self, in_feats, h_feats, num_classes):
        super(GCN_my, self).__init__()
        self.linear_func = nn.Linear(in_feats, num_classes, bias=False)
        # Only move to the GPU when one is actually present; the original
        # unconditional .cuda() crashed on CPU-only machines.
        if torch.cuda.is_available():
            self.linear_func = self.linear_func.cuda()
        # self.linear_func2 = nn.Linear(h_feats, num_classes, bias=True)

    def masssage_func(self, edges):  # (sic) misspelling kept: public name
        """Message: project each source node's 'h' feature."""
        out1 = self.linear_func(edges.src['h'])
        return {'h': out1}

    def reduce_func(self, nodes):
        """Reduce: sum incoming messages, apply ReLU, store under 'z'."""
        res = torch.sum(nodes.mailbox['h'], dim=1)
        out_avti1 = F.relu(res)
        return {'z': out_avti1}

    def forward(self, g, i_f):
        """One message-passing round over graph ``g`` with node feats ``i_f``."""
        g.ndata['h'] = i_f
        g.update_all(self.masssage_func, self.reduce_func)
        return g.ndata.pop('z')


class GCNMessage(nn.Module):
    """Identity message function: forward each source node's 'h' feature."""

    def forward(self, edges):
        src_feats = edges.src['h']
        return {'h': src_feats}

class GCNReduce(nn.Module):
    """Reduce function: sum mailbox messages, then Linear + ReLU."""

    def __init__(self, in_feats, out_feats):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats)
        self.activation = nn.ReLU()

    def forward(self, nodes):
        # Aggregate all incoming messages for each node, then transform.
        summed = nodes.mailbox['h'].sum(dim=1)
        return {'h': self.activation(self.linear(summed))}
class GCN_simple(nn.Module):
    """One GCN layer assembled from explicit message/reduce sub-modules."""

    def __init__(self, in_feats, out_feats):
        super(GCN_simple, self).__init__()
        self.msg_func = GCNMessage()
        self.reduce_func = GCNReduce(in_feats, out_feats)

    def forward(self, g, inputs):
        """Write ``inputs`` as node data 'h', propagate once, return result."""
        g.ndata['h'] = inputs
        g.update_all(message_func=self.msg_func, reduce_func=self.reduce_func)
        h_out = g.ndata.pop('h')
        return h_out


class GCN(nn.Module):
    """Two-layer GCN built from DGL's GraphConv, with a ReLU in between."""

    def __init__(self, in_feats, h_feats, num_classes):
        super(GCN, self).__init__()
        self.gcn_layer1 = GraphConv(in_feats, h_feats)
        self.gcn_layer2 = GraphConv(h_feats, num_classes)

    def forward(self, graph, inputs):
        """Return per-node logits of shape (num_nodes, num_classes).

        The original forced .cuda() on inputs and on every layer output,
        which crashed on CPU-only machines and was redundant on GPU; instead
        run on whatever device the layer parameters live on (the caller still
        controls placement by moving the module, e.g. model.cuda()).
        """
        device = next(self.parameters()).device
        h = self.gcn_layer1(graph, inputs.to(device))
        h = nn.functional.relu(h)
        h = self.gcn_layer2(graph, h)
        return h

class GAT(nn.Module):
    """Single-head graph attention layer: project node features, score each
    edge from the concatenated (src, dst) projections, softmax the scores
    over each node's incoming edges, and return the weighted sum.
    """

    def __init__(self, in_feats, out_feats):
        super(GAT, self).__init__()
        self.linear_func = nn.Linear(in_feats, out_feats, bias=False)
        self.attention_func = nn.Linear(2 * out_feats, 1, bias=False)
        # Only move to the GPU when one exists; the original unconditional
        # .cuda() crashed on CPU-only machines.
        if torch.cuda.is_available():
            self.linear_func = self.linear_func.cuda()
            self.attention_func = self.attention_func.cuda()

    def edge_attention(self, edges):
        """Unnormalized attention logit per edge from concat(src_z, dst_z)."""
        concat_z = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        src_e = self.attention_func(concat_z)
        src_e = F.leaky_relu(src_e)
        return {'e': src_e}

    def message_func(self, edges):
        """Ship the projected source feature and the edge logit downstream."""
        return {'z': edges.src['z'], 'e': edges.data['e']}

    def reduce_func(self, nodes):
        """Softmax logits over each node's incoming edges; weighted-sum 'z'."""
        a = F.softmax(nodes.mailbox['e'], dim=1)
        h = torch.sum(a * nodes.mailbox['z'], dim=1)
        return {'h': h}

    def forward(self, graph, h):
        """Run one attention round on ``graph`` with node features ``h``."""
        z = self.linear_func(h)
        graph.ndata['z'] = z
        graph.apply_edges(self.edge_attention)
        graph.update_all(self.message_func, self.reduce_func)
        return graph.ndata.pop('h')

 class GraphTransformer(nn.Module):
     def __init__(self):
        super(GraphTransformer, self).__init__()
        conf = adict({
              "attention_probs_dropout_prob": 0.1,
              "directionality": "bidi",
              "finetuning_task": None,
              "hidden_act": "gelu",
              "hidden_dropout_prob": 0.1,
              "hidden_size": 768,
              "initializer_range": 0.02,
              "intermediate_size": 3072,
              "layer_norm_eps": 1e-12,
              "max_position_embeddings": 512,
              "num_attention_heads": 12,
              "num_hidden_layers": 12,
              "num_labels": 2,
              "output_attentions": False,
              "output_hidden_states": False,
              "pooler_fc_size": 768,
              "pooler_num_attention_heads": 12,
              "pooler_num_fc_layers": 3,
              "pooler_size_per_head": 128,
              "pooler_type": "first_token_transform",
              "pruned_heads": {},
              "torchscript": False,
              "type_vocab_size": 2,
            })
        self.emblayer = WordsEmbedding(vocab_size=19280)
        self.bertlayer = BertLayer(conf)

     def forward(self, g):
        g.apply_nodes(self.update_nodes)

     def update_nodes(self, nodes):
        ids_tensor = torch.tensor(nodes.data['words'])
        sent_tensor = nodes.data['feat']
        w_tensor = self.emblayer(ids_tensor)
        # torch.cat()


class GraphModel(object):
    """Training/evaluation wrapper: two stacked GAT layers + linear classifier.

    NOTE(review): depends on attributes populated elsewhere before use —
    ``self.g`` (DGL graph with 'feat'/'label' node data), ``self.train_num``
    and ``self.valid_num``. The ``__main__`` block calls ``graph_construtor()``,
    which is not defined in this file; presumably it sets those up — confirm
    against the full project.
    """

    def __init__(self):
        self.gnn_model_1 = GAT(in_feats=768, out_feats=256)
        self.gnn_model_2 = GAT(in_feats=256, out_feats=64)
        # Use the GPU only when one exists; the unconditional .cuda() crashed
        # on CPU-only machines.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.cls_model = nn.Linear(64, 3).to(self.device)
        self.loss_func = nn.CrossEntropyLoss()

    def forward(self):
        """Run both GAT layers over self.g, then per-node class logits."""
        gnn_out_1 = self.gnn_model_1(self.g, self.g.ndata['feat'])
        gnn_out_2 = self.gnn_model_2(self.g, gnn_out_1)
        res = self.cls_model(gnn_out_2)
        return res

    def train_and_valid(self, max_epoch=20):
        """Full-batch training with a per-epoch validation pass.

        Nodes [0, train_num) are training, [train_num, train_num+valid_num)
        validation. Writes the per-epoch history to a result file at the end.
        """
        tr_loss, tr_acc, va_loss, va_acc = [], [], [], []
        optimizer = torch.optim.Adam([{'params': self.gnn_model_1.parameters(), 'lr': 0.05},
                                      {'params': self.gnn_model_2.parameters(), 'lr': 0.05},
                                      {'params': self.cls_model.parameters(), 'lr': 0.02}])
        best_valid_loss = 10.0
        best_epoch = 100

        for epoch in range(max_epoch):

            logits = self.forward()
            loss = self.loss_func(logits[:self.train_num].to(self.device),
                                  self.g.ndata['label'][:self.train_num].to(self.device))
            acc = accuracy_score(self.g.ndata['label'][:self.train_num].cpu(),
                                 logits[:self.train_num].detach().cpu().argmax(dim=1))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Store plain floats: appending the loss *tensor* kept each
            # epoch's autograd graph alive, and write_result_file's
            # '{:<8.5}' format only accepts numbers, not tensors.
            tr_loss.append(loss.item())
            tr_acc.append(acc)
            print('Epoch %d | Loss: %.4f | Acc: %.4f ' % (epoch, loss.item(), acc))

            valid_slice = slice(self.train_num, self.train_num + self.valid_num)
            # Validation is evaluation only — no gradients needed.
            with torch.no_grad():
                loss_valid = self.loss_func(logits[valid_slice].to(self.device),
                                            self.g.ndata['label'][valid_slice].to(self.device))
            acc_valid = accuracy_score(self.g.ndata['label'][valid_slice].cpu(),
                                       logits[valid_slice].detach().cpu().argmax(dim=1))
            # Track the best epoch as a float; the original mixed a float
            # sentinel (10.0) with tensors and called .item() on it at the
            # end, which raised AttributeError when no epoch beat 10.0.
            if loss_valid.item() < best_valid_loss:
                best_valid_loss = loss_valid.item()
                best_epoch = epoch
            va_loss.append(loss_valid.item())
            va_acc.append(acc_valid)
            print('                            valid loss: ', loss_valid.item())
            # typo fix: was 'vaid acc'
            print('                            valid acc: ', acc_valid)
        print('\n best valid Epoch %d | Loss: %.4f' % (best_epoch, best_valid_loss))
        self.write_result_file(tr_loss, tr_acc, va_loss, va_acc)

    def infer(self):
        """Print predicted class ids for the test slice of nodes."""
        test_logits = self.forward()
        # Bug fix: the original printed raw logits while the message said
        # "labels"; take the argmax over classes to get the predictions.
        test_labels = test_logits[self.train_num + self.valid_num:].detach().cpu().argmax(dim=1)
        print('test labels are:', test_labels)

    def valid(self):
        # Placeholder — validation currently happens inside train_and_valid.
        pass

    def for_tiaoshi(self):
        """Debug helper ("tiaoshi" = debugging): dump the validation set's
        true/predicted labels plus accuracy to a timestamped file."""
        logits = self.forward()
        fname = './rumor_result/valid_pred_{}.txt'.format(time.strftime('%H%M%S'))
        valid_slice = slice(self.train_num, self.train_num + self.valid_num)
        acc_valid = accuracy_score(self.g.ndata['label'][valid_slice].cpu(),
                                   logits[valid_slice].detach().cpu().argmax(dim=1))
        # Context manager so the file is closed even if a write fails.
        with open(fname, 'w+') as fn:
            for a, b in zip(self.g.ndata['label'][valid_slice],
                            logits[valid_slice].argmax(dim=1)):
                line = 'true: {:50}| pred:{:50}'.format(str(a.data), str(b.data))
                fn.write(line + '\n')
            fn.write('acc is: {}\n'.format(acc_valid))

    def write_result_file(self, trlo, trac, valo, vaac):
        """Write aligned train/valid loss & accuracy rows to a timestamped file."""
        file_name = './rumor_result/gat_{}.txt'.format(time.strftime('%m%d%H%M%S'))
        with open(file_name, 'w') as f:
            for t1, t2, v1, v2 in zip(trlo, trac, valo, vaac):
                f.write('train loss acc: {:<8.5} {:<8.5} | valid loss acc:{:<8.5} {:<8.5}\n'.format(t1, t2, v1, v2))

if __name__ == '__main__':
    model = GraphModel()
    # NOTE(review): graph_construtor() is not defined anywhere in this file;
    # presumably it builds model.g / train_num / valid_num — confirm it exists
    # in the full project, otherwise this line raises AttributeError.
    model.graph_construtor()
    model.train_and_valid()