#!/usr/bin/env python
# coding: utf-8

import torch
from pytorch_transformers import BertModel, BertTokenizer
from dataloader import ContentSet
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import os
import dgl
import pickle
from scipy import sparse


def pad_sentences(sent_list, padding_value, max_sent_len=256):
    """Right-pad a batch of token-id sequences to a shared length.

    The shared length is the longest sequence in the batch, capped at
    ``max_sent_len``; longer sequences are truncated to fit.

    Returns ``(ids, mask)``: an int64 tensor of token ids padded with
    ``padding_value``, and a float tensor holding 1.0 over real tokens
    and 0.0 over padding positions.
    """
    width = min(max(len(seq) for seq in sent_list), max_sent_len)
    rows = len(sent_list)
    ids = torch.empty([rows, width], dtype=torch.int64).fill_(padding_value)
    mask = torch.zeros([rows, width])
    for row, seq in enumerate(sent_list):
        keep = min(len(seq), width)
        ids[row, :keep] = torch.tensor(seq[:keep])
        mask[row, :keep] = 1.0
    return ids, mask

def encode_sent_list(tt, sent_list, cls_token=False):
    """Tokenize each sentence with tokenizer ``tt`` after stripping leading
    and trailing tab characters; ``cls_token`` toggles special tokens."""
    def _encode(raw):
        return tt.encode(raw.strip("\t"), add_special_tokens=cls_token)
    return list(map(_encode, sent_list))



class BertSim(object):
    """Retrieve sentences semantically close to a target (test) set.

    Each sentence is embedded as the mean of its BERT last-layer token
    vectors; candidates whose best cosine similarity against any test
    embedding reaches a threshold are kept.
    """

    def __init__(self, target_file="./data/test_dataset.csv", bert_dir="../bert_cn/"):
        # target_file: CSV of the target sentences that define the query set.
        # bert_dir: directory holding the pretrained BERT weights and vocab.
        # The model is placed on GPU; all encoding below assumes CUDA.
        self.target_file = target_file
        self.bert = BertModel.from_pretrained(bert_dir).cuda()
        self.tokenizer = BertTokenizer.from_pretrained(bert_dir)

    def Sents2Vecs(self, batch):
        """Run BERT over batch[2] (an iterable of raw sentence strings).

        Returns the (last_hidden_state, pooled_output) pair produced by
        pytorch_transformers' BertModel.
        """
        input_ids = encode_sent_list(self.tokenizer, batch[2])
        ipt_tensor, mask_tensor = pad_sentences(input_ids, 0.0)
        hiddens, outs = self.bert(ipt_tensor.cuda(), attention_mask=mask_tensor.cuda())
        return hiddens, outs

    def _mean_pool(self, hiddens, sent_lens):
        # Mean of the first sent_lens[i] token vectors of sentence i.
        return torch.stack(
            [hiddens[i][:sent_lens[i], :].mean(dim=0) for i in range(len(sent_lens))])

    def MeanVecSim(self, CosSim=0.9, retrive_file="./data/train_1_3.csv",
                   cached_semant="./data/test_semant.pkl",
                   retrive_semant="./data/retrive_semant.pkl",
                   new_file="./data/Sim0_9.csv"):
        """Keep every row of retrive_file whose best cosine similarity to
        any test sentence is >= CosSim.

        Side effects: caches test embeddings at cached_semant, pickles the
        retained candidate embeddings to retrive_semant, and writes the
        filtered CSV rows to new_file.
        """
        # ---- test-set embeddings (computed once, then cached on disk) ----
        if os.path.exists(cached_semant):
            with open(cached_semant, "rb") as fr:
                test_semant = pickle.load(fr)
        else:
            test_set = ContentSet(self.target_file, label_type=-1)
            test_loader = DataLoader(test_set, batch_size=100)
            test_vecs = []
            for batch in test_loader:
                # 254: leaves headroom under the 256-token pad cap,
                # presumably for special tokens — TODO confirm.
                sent_lens = [min(len(list(sent)), 254) for sent in batch[2]]
                with torch.no_grad():
                    hiddens, _ = self.Sents2Vecs(batch)
                    test_vecs.append(self._mean_pool(hiddens, sent_lens))
            with torch.no_grad():
                test_semant = torch.cat(test_vecs, dim=0)
            with open(cached_semant, "wb") as fw:
                pickle.dump(test_semant, fw, protocol=pickle.HIGHEST_PROTOCOL)
        norm_te = test_semant.norm(p=2, dim=1).unsqueeze(1)

        # ---- score retrieval candidates batch by batch ----
        train_set = ContentSet(retrive_file, label_type=-1)
        train_loader = DataLoader(train_set, batch_size=100)
        ids = []
        train_vecs = []
        for batch in train_loader:
            sent_lens = [min(len(list(sent)), 254) for sent in batch[2]]
            with torch.no_grad():
                hiddens, _ = self.Sents2Vecs(batch)
                vecs = self._mean_pool(hiddens, sent_lens)
                dot = torch.matmul(test_semant, vecs.transpose(0, 1))
                norm_vecs = vecs.norm(p=2, dim=1).unsqueeze(0)
                sco = dot / torch.matmul(norm_te, norm_vecs)  # cosine matrix
                best = sco.max(dim=0)[0]     # best score per candidate
                order = best.sort(dim=0)[1]  # indices, ascending by score
                cnt = int(best.ge(CosSim).int().sum())
                # BUG FIX: lst[-0:] returns the WHOLE list, so when cnt == 0
                # every candidate in the batch used to be kept instead of none.
                if cnt > 0:
                    keep = order.tolist()[-cnt:]
                    train_vecs.append(vecs[keep])
                    ids.extend([batch[0][i] for i in keep])
        # With the cnt == 0 fix, an entirely empty result is now possible.
        if train_vecs:
            retained = torch.cat(train_vecs, dim=0)
        else:
            retained = test_semant.new_empty((0, test_semant.size(1)))
        with open(retrive_semant, "wb") as fw:
            pickle.dump(retained, fw, protocol=pickle.HIGHEST_PROTOCOL)
        df = pd.DataFrame(np.array(ids), columns=["id"])
        new_df = pd.merge(df, pd.read_csv(retrive_file), on=['id'])
        new_df.to_csv(new_file, index=False)

class GraphModel(object):
    """Semi-supervised graph over cached sentence embeddings.

    Nodes are sentence vectors concatenated in the fixed order
    train, dev, test; edges connect pairs whose cosine similarity exceeds
    a threshold.  Test nodes get label -1 (unlabeled).  The forward /
    train / infer / valid methods are unimplemented stubs.
    """

    def __init__(self, train_file="./data/train_1_3_sim093.csv",
                 dev_file="./data/dev_1_3_sim093.csv",
                 test_semant="./data/test_semant.pkl",
                 dev_semant="./data/dev_semant_1_3.pkl",
                 train_semant="./data/train_semant_1_3.pkl"):
        # *_semant: pickled torch tensors of sentence embeddings (produced
        # by BertSim.MeanVecSim); *_file: CSVs with matching label columns.
        # NOTE(review): incremental DGLGraph() + add_nodes/add_edges is the
        # old dgl API — confirm the installed dgl version still supports it.
        self.g = dgl.DGLGraph()
        self.dev_file = dev_file
        self.train_file = train_file
        self.test_semant = test_semant
        self.dev_semant = dev_semant
        self.train_semant = train_semant


    def graph_construtor(self, edge_sim=0.90):
        # (sic: "construtor" — misspelled but public; callers use this name.)
        """Populate self.g with node features, similarity edges, and labels.

        edge_sim: cosine-similarity threshold above which two sentence
        nodes are connected.
        """
        def ObtainScoMtx():
            # Load the three cached embedding tensors and concatenate them
            # in the order train, dev, test — the label assignment at the
            # bottom of graph_construtor depends on this ordering.
            with open(self.train_semant, "rb") as fr:
                tr = pickle.load(fr)
            with open(self.dev_semant, "rb") as fr:
                dev = pickle.load(fr)
            with open(self.test_semant, "rb") as fr:
                te = pickle.load(fr)
            vecs = torch.cat([tr, dev, te], dim=0)
            self.g.add_nodes(vecs.size(0))
            self.g.ndata['feat'] = vecs
            # Full pairwise cosine-similarity matrix: dot products divided
            # by the outer product of the row norms.
            dot = torch.matmul(vecs, vecs.transpose(0, 1))
            norm_vecs = vecs.norm(p=2, dim=1).unsqueeze(0)
            norm_mtx = torch.matmul(norm_vecs.transpose(0, 1), norm_vecs)
            sco = dot / norm_mtx
            return sco
        def obtain_labels(csv_file):
            # Collapse the three one-hot columns to a class index:
            # 0 = ncw, 1 = fake, 2 = real.
            df = pd.read_csv(csv_file)
            return  torch.tensor(df[['ncw_label', 'fake_label', 'real_label']].values.argmax(axis=1))

        # Threshold the similarity matrix, drop self-loops by subtracting
        # the identity, and read the surviving (i, j) pairs via sparse COO.
        sco = ObtainScoMtx().ge(edge_sim).int().cpu()
        sr = sparse.coo_matrix((sco - torch.eye(sco.size(0))).numpy())
        src = sr.row
        dst = sr.col
        # NOTE(review): sco is symmetric, so adding both directions here
        # duplicates every edge — confirm whether that is intended.
        self.g.add_edges(src, dst)
        self.g.add_edges(dst, src)
        size_0 = self.g.ndata['feat'].size(0)
        # Labels for the train+dev prefix; the remaining (test) nodes are
        # marked unlabeled with -1.
        l = torch.cat([obtain_labels(self.train_file), obtain_labels(self.dev_file)])
        size_re = size_0 - l.size(0)
        self.g.ndata['label'] = torch.cat([l, torch.ones(size_re, dtype=torch.int64)*-1], dim=0)

    def forward(self):
        # Placeholder: model forward pass not implemented.
        pass

    def train(self):
        # Placeholder: training loop not implemented.
        pass

    def infer(self):
        # Placeholder: inference not implemented.
        pass

    def valid(self):
        # Placeholder: validation not implemented.
        pass

if __name__ == '__main__':
    # Stage 1: retrieve train/dev sentences whose best cosine similarity to
    # the test set is >= 0.93; writes the filtered CSVs and caches the
    # embeddings used later for graph construction.  Requires a GPU and the
    # pretrained BERT files under ./bert_cn/.
    model = BertSim(bert_dir="./bert_cn/")
    model.MeanVecSim(CosSim=0.93, retrive_file="./data/train_1_3.csv", new_file="./data/train_1_3_sim093.csv", retrive_semant="./data/train_semant_1_3.pkl")
    model.MeanVecSim(CosSim=0.93, retrive_file="./data/dev_1_3.csv", new_file="./data/dev_1_3_sim093.csv", retrive_semant="./data/dev_semant_1_3.pkl")
    # Stage 2 (run separately once stage 1's caches exist): build the
    # similarity graph from the cached embeddings.
    # model = GraphModel()
    # model.graph_construtor(edge_sim=0.98)