from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
import torch, torch.nn as nn
import random, numpy as np
from SentModel.Sent2Vec import TFIDFBasedVec, TFIDFBasedVecV2
from PropModel.GraphPropagation import BiGCN, BiGCNV2
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec, BiGCNRumorDetecV2
import dgl


def Generator1(pseudo_target:TwitterSet, labeled_source:TwitterSet,
                    labeled_target:TwitterSet, batchSize):
    """Yield mixed batches of labeled-source, pseudo-labeled-target and
    labeled-target items for self-training.

    Every batch holds batchSize//2 labeled-source items, followed by
    (batchSize//2 - bs_LT) pseudo-target items and bs_LT (at most 5)
    labeled-target items.  Alongside each collated batch, a list of global
    indices is yielded that maps every item into a concatenated index space
    laid out as [pseudo_target | labeled_source | labeled_target].
    """
    print("Generator1")
    # Lazily create the full index list for datasets not yet filtered.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    if not hasattr(labeled_target, 'valid_indexs'):
        labeled_target.valid_indexs = list(range(len(labeled_target)))
    # FIX: LT_base previously added len(labeled_target), which makes
    # labeled-target global indices collide with labeled-source ones; the
    # [PT | LS | LT] layout (cf. Generator2's LS_base) needs the
    # labeled-source size as the second summand.
    PT_base, LS_base = 0, len(pseudo_target)
    LT_base = len(pseudo_target) + len(labeled_source)
    # Shuffle once per epoch; duplicate so slices never run past the end.
    idxsLS = random.sample(labeled_source.valid_indexs, len(labeled_source.valid_indexs)) * 2
    idxsPT = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    bs_LT = min(5, len(labeled_target.valid_indexs))
    for i in range(0, len(pseudo_target.valid_indexs), batchSize//2):
        # i could be larger than len(labeled_source), so wrap via modulus.
        start_LS, start_PT = i % len(labeled_source.valid_indexs), i
        end_LS, end_PT = start_LS + batchSize//2, (start_PT + batchSize//2) - bs_LT
        items1 = [labeled_source[jj] for jj in idxsLS[start_LS:end_LS]]
        items2 = [pseudo_target[jj] for jj in idxsPT[start_PT:end_PT]]
        # FIX: sample *indices* from valid_indexs; sampling the dataset object
        # itself returned raw item tuples, which then crashed when used as
        # subscripts on labeled_target and as ints in the index math below.
        idxs_LT = random.sample(labeled_target.valid_indexs, bs_LT)
        items3 = [labeled_target[jj] for jj in idxs_LT]
        yield labeled_source.collate_raw_batch(items1 + items2 + items3), \
          idxsPT[start_PT:end_PT] + [idx+LS_base for idx in idxsLS[start_LS:end_LS]] + [idx+LT_base for idx in idxs_LT]

def Generator2(pseudo_target:TwitterSet, labeled_source:TwitterSet, batchSize):
    """Yield mixed batches of labeled-source and pseudo-labeled-target items.

    Each batch holds batchSize//2 source items followed by batchSize//2
    target items; the companion index list maps every item into a global
    index space laid out as [pseudo_target | labeled_source].
    """
    print("Generator2")
    # Lazily create the full index list for datasets not yet filtered.
    for ds in (pseudo_target, labeled_source):
        if not hasattr(ds, 'valid_indexs'):
            ds.valid_indexs = list(range(len(ds)))
    LS_base = len(pseudo_target)
    n_src = len(labeled_source.valid_indexs)
    n_tgt = len(pseudo_target.valid_indexs)
    # Shuffle once per epoch; duplicate so slices never run past the end.
    perm_src = random.sample(labeled_source.valid_indexs, n_src) * 2
    perm_tgt = random.sample(pseudo_target.valid_indexs, n_tgt) * 2
    half = batchSize // 2
    for offset in range(0, n_tgt, half):
        # offset may exceed the source length, so wrap via modulus.
        src_lo = offset % n_src
        src_idxs = perm_src[src_lo:src_lo + half]
        tgt_idxs = perm_tgt[offset:offset + half]
        batch_items = [labeled_source[j] for j in src_idxs] \
                      + [pseudo_target[j] for j in tgt_idxs]
        yield labeled_source.collate_raw_batch(batch_items), \
              tgt_idxs + [j + LS_base for j in src_idxs]

def Generator3(pseudo_target:TwitterSet, batchSize):
    """Yield batches drawn solely from the pseudo-labeled target set, paired
    with the dataset indices of the items in each batch."""
    print("Generator3")
    # Lazily create the full index list if the dataset was never filtered.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    n = len(pseudo_target.valid_indexs)
    # Shuffle once per epoch; duplicate so slices never run past the end.
    perm = random.sample(pseudo_target.valid_indexs, n) * 2
    for lo in range(0, n, batchSize):
        chunk = perm[lo:lo + batchSize]
        collated = pseudo_target.collate_raw_batch(
            [pseudo_target[j] for j in chunk])
        yield collated, chunk

def DataIter(unlabeled_target:TwitterSet, labeled_source:TwitterSet=None,
                labeled_target:TwitterSet=None, batch_size=32):
    """Dispatch to the batch generator matching the supervision available:
    target-only, source+target, or source+target+labeled-target."""
    if labeled_target is None:
        if labeled_source is None:
            return Generator3(unlabeled_target, batch_size)
        return Generator2(unlabeled_target, labeled_source, batch_size)
    assert len(labeled_target) > 0
    return Generator1(unlabeled_target, labeled_source, labeled_target, batch_size)

class MSTDataset(BiGCNTwitterSet):
    """BiGCN twitter dataset whose labels can be overwritten in place
    (used for pseudo-labeling during multi-step self-training)."""

    def __init__(self, batchsize=20):
        super(MSTDataset, self).__init__(batchsize)

    def _ensure_label_array(self):
        # Labels may be loaded as a plain list; promote once to a float32
        # numpy array so fancy indexing/assignment works.
        if isinstance(self.data_y, list):
            self.data_y = np.array(self.data_y, dtype=np.float32)

    @property
    def label(self):
        """Labels as a float32 numpy array."""
        self._ensure_label_array()
        return self.data_y

    def setLabel(self, label, idxs):
        """Overwrite the labels at positions `idxs` with `label`."""
        self._ensure_label_array()
        self.data_y[idxs] = label

    def labelTensor(self, device=None):
        """Labels as a float32 torch tensor, optionally on `device`."""
        return torch.tensor(self.data_y, dtype=torch.float32, device=device)

    def collate_raw_batch(self, batch):
        """Collate (seq, TD_graph, BU_graph, label, topic) items into a batch
        with dense adjacency matrices moved to the available device."""
        seqs, TD_graphs, BU_graphs, labels, topic_labels = (
            [item[k] for item in batch] for k in range(5))
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device('cpu')
        num_nodes = [g.num_nodes() for g in TD_graphs]
        A_TD = dgl.batch(TD_graphs).adjacency_matrix().to_dense().to(device)
        A_BU = dgl.batch(BU_graphs).adjacency_matrix().to_dense().to(device)
        return seqs, num_nodes, A_TD, A_BU, \
                    torch.tensor(labels), torch.tensor(topic_labels)

def obtain_MST_set(fs_prefix, od_prefix, nd_prefix, lt_cnt=0):
    """Load the three cached MST splits and optionally carve `lt_cnt` items
    off the new-domain set as a labeled-target subset.

    Returns (fs_set, od_set, unlabeled_nd, labeled_nd_or_None).
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        ds = MSTDataset()
        ds.load_data_fast(data_prefix=prefix)
        loaded.append(ds)
    fs_set, od_set, nd_set = loaded
    if lt_cnt <= 0:
        return fs_set, od_set, nd_set, None
    labeled_nd, unlabeled_nd = nd_set.split([float(lt_cnt) / len(nd_set), 1.0])
    return fs_set, od_set, unlabeled_nd, labeled_nd

def MST_Loader(domain_id, valid_cnt, lt_cnt=0):
    """Leave-one-event-out loader over the PHEME events: the event at
    `domain_id` is the target domain, the other four form the source.

    Returns (valid_target, source, unlabeled_target, labeled_target); the
    first/last entries are None when valid_cnt/lt_cnt are zero.
    """
    events = ['../../../pheme-rnr-dataset/charliehebdo',
              '../../../pheme-rnr-dataset/ferguson',
              '../../../pheme-rnr-dataset/germanwings-crash',
              '../../../pheme-rnr-dataset/ottawashooting',
              '../../../pheme-rnr-dataset/sydneysiege']
    source = MSTDataset()
    source.load_event_list([e for j, e in enumerate(events) if j != domain_id],
                           cached_pkl_file="../../data/pheme.pkl")
    target = MSTDataset()
    target.load_event_list([events[domain_id]], cached_pkl_file="../../data/pheme.pkl")
    total = float(len(target))
    if lt_cnt > 0:
        labeled_tgt, valid_tgt, unlabeled_tgt = target.split(
            [lt_cnt / total, (lt_cnt + valid_cnt) / total, 1.0])
        return valid_tgt, source, unlabeled_tgt, labeled_tgt
    if valid_cnt == 0:
        return None, source, target, None
    valid_tgt, unlabeled_tgt = target.split([valid_cnt / total, 1.0])
    return valid_tgt, source, unlabeled_tgt, None

class CRST_Dataset(BiGCNTwitterSet):
    """Dataset for CRST; collation keeps propagation graphs as DGL objects
    instead of densifying their adjacency matrices."""

    def __init__(self, batchsize=20):
        super(CRST_Dataset, self).__init__(batchsize)

    def collate_raw_batch(self, batch):
        """Collate (seq, TD_graph, BU_graph, label, topic) items; labels and
        topic labels become tensors, graphs stay as lists."""
        seqs, td_graphs, bu_graphs, labels, topics = (
            [item[k] for item in batch] for k in range(5))
        return seqs, td_graphs, bu_graphs, torch.tensor(labels), torch.tensor(topics)

def obtain_CRST_set(fs_prefix, od_prefix, nd_prefix, lt_cnt=0):
    """Load the three cached CRST splits and optionally carve `lt_cnt` items
    off the new-domain set as a labeled-target subset.

    Returns (fs_set, od_set, unlabeled_nd, labeled_nd_or_None).
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        ds = CRST_Dataset()
        ds.load_data_fast(data_prefix=prefix)
        loaded.append(ds)
    fs_set, od_set, nd_set = loaded
    if lt_cnt <= 0:
        return fs_set, od_set, nd_set, None
    labeled_nd, unlabeled_nd = nd_set.split([float(lt_cnt) / len(nd_set), 1.0])
    return fs_set, od_set, unlabeled_nd, labeled_nd

def CRST_Loader(domain_id, valid_cnt, lt_cnt=0):
    """Leave-one-event-out CRST loader: the event at `domain_id` is the
    target domain, the remaining four PHEME events form the source.

    Returns (valid_target, source, unlabeled_target, labeled_target); the
    first/last entries are None when valid_cnt/lt_cnt are zero.
    """
    events = ['../../../pheme-rnr-dataset/charliehebdo',
              '../../../pheme-rnr-dataset/ferguson',
              '../../../pheme-rnr-dataset/germanwings-crash',
              '../../../pheme-rnr-dataset/ottawashooting',
              '../../../pheme-rnr-dataset/sydneysiege']
    source = CRST_Dataset()
    source.load_event_list([e for j, e in enumerate(events) if j != domain_id],
                           cached_pkl_file="../../data/pheme.pkl")
    target = CRST_Dataset()
    target.load_event_list([events[domain_id]], cached_pkl_file="../../data/pheme.pkl")
    total = float(len(target))
    if lt_cnt > 0:
        labeled_tgt, valid_tgt, unlabeled_tgt = target.split(
            [lt_cnt / total, (lt_cnt + valid_cnt) / total, 1.0])
        return valid_tgt, source, unlabeled_tgt, labeled_tgt
    if valid_cnt == 0:
        return None, source, target, None
    valid_tgt, unlabeled_tgt = target.split([valid_cnt / total, 1.0])
    return valid_tgt, source, unlabeled_tgt, None

def obtain_model(tfidf_vec, load_BiGCN1=False, load_BiGCN2=False, domainID=-1) -> BiGCNRumorDetec:
    """Build a TFIDF + BiGCN rumor detector, optionally restoring one (or
    both, BiGCN2 last) of the per-domain pretrained checkpoints.

    domainID indexes the checkpoint lists and is required when loading.
    """
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                    ]
    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                    ]
    if load_BiGCN1 or load_BiGCN2:
        assert domainID != -1

    sent_encoder = TFIDFBasedVec(tfidf_vec, 20, embedding_size=300,
                                 w2v_dir="../../saved/glove_en/",
                                 emb_update=True, grad_preserve=True)
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    detector = BiGCNRumorDetec(sent_encoder, propagator, classifier,
                               batch_size=20, grad_accum_cnt=1)
    if load_BiGCN1:
        detector.load_model(BiGCN1_Paths[domainID])
    if load_BiGCN2:
        detector.load_model(BiGCN2_Paths[domainID])
    return detector

def obtain_modelV2(tfidf_vec, load_BiGCN1=False, load_BiGCN2=False, domainID=-1) -> BiGCNRumorDetecV2:
    """Build a V2 TFIDF + BiGCN rumor detector, optionally restoring one (or
    both, BiGCN2 last) of the per-domain pretrained checkpoints.

    domainID indexes the checkpoint lists and is required when loading.
    """
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                    ]
    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                    ]
    if load_BiGCN1 or load_BiGCN2:
        assert domainID != -1

    sent_encoder = TFIDFBasedVecV2(tfidf_vec, 20, embedding_size=300,
                                   w2v_dir="../../saved/glove_en/",
                                   emb_update=True, grad_preserve=True)
    propagator = BiGCNV2(300, 256)
    classifier = nn.Linear(1024, 2)
    detector = BiGCNRumorDetecV2(sent_encoder, propagator, classifier,
                                 batch_size=20, grad_accum_cnt=1)
    if load_BiGCN1:
        detector.load_model(BiGCN1_Paths[domainID])
    if load_BiGCN2:
        detector.load_model(BiGCN2_Paths[domainID])
    return detector

def obtain_adver_model(tfidf_vec, load_BiGCN1=False, load_BiGCN2=False, domainID=-1):
    """Build a TFIDF + BiGCN detector plus a 5-way domain discriminator (on
    CUDA) for adversarial training; optionally restore pretrained weights.

    Returns (detector, discriminator).  domainID indexes the checkpoint
    lists and is required when loading.
    """
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                    ]
    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                    ]
    if load_BiGCN1 or load_BiGCN2:
        assert domainID != -1
    sent_encoder = TFIDFBasedVec(tfidf_vec, 20, embedding_size=300,
                                 w2v_dir="../../saved/glove_en/",
                                 emb_update=True, grad_preserve=True)
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    # 5 outputs: one logit per PHEME event/domain.
    discriminator = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, 5))
    detector = BiGCNRumorDetec(sent_encoder, propagator, classifier,
                               batch_size=20, grad_accum_cnt=1)
    if load_BiGCN1:
        detector.load_model(BiGCN1_Paths[domainID])
    if load_BiGCN2:
        detector.load_model(BiGCN2_Paths[domainID])
    return detector, discriminator.cuda()


def obtain_adver_modelV2(tfidf_vec, load_BiGCN1=False, load_BiGCN2=False, domainID=-1):
    """Build a V2 TFIDF + BiGCN detector plus a 5-way domain discriminator
    (on CUDA) for adversarial training; optionally restore pretrained weights.

    Returns (detector, discriminator).  domainID indexes the checkpoint
    lists and is required when loading.
    """
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                    ]
    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                    "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                    "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                    "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                    "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                    ]
    if load_BiGCN1 or load_BiGCN2:
        assert domainID != -1
    sent_encoder = TFIDFBasedVecV2(tfidf_vec, 20, embedding_size=300,
                                   w2v_dir="../../saved/glove_en/",
                                   emb_update=True, grad_preserve=True)
    propagator = BiGCNV2(300, 256)
    classifier = nn.Linear(1024, 2)
    # 5 outputs: one logit per PHEME event/domain.
    discriminator = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, 5))
    detector = BiGCNRumorDetecV2(sent_encoder, propagator, classifier,
                                 batch_size=20, grad_accum_cnt=1)
    if load_BiGCN1:
        detector.load_model(BiGCN1_Paths[domainID])
    if load_BiGCN2:
        detector.load_model(BiGCN2_Paths[domainID])
    return detector, discriminator.cuda()

def obtain_BiGCN_set(fs_prefix, od_prefix, nd_prefix, lt_cnt=0):
    """Load the three cached BiGCN splits and optionally carve `lt_cnt`
    items off the new-domain set as a labeled-target subset.

    Returns (fs_set, od_set, unlabeled_nd, labeled_nd_or_None).
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        ds = BiGCNTwitterSet()
        ds.load_data_fast(data_prefix=prefix)
        loaded.append(ds)
    fs_set, od_set, nd_set = loaded
    if lt_cnt <= 0:
        return fs_set, od_set, nd_set, None
    labeled_nd, unlabeled_nd = nd_set.split([float(lt_cnt) / len(nd_set), 1.0])
    return fs_set, od_set, unlabeled_nd, labeled_nd

def BiGCNSet_Loader(domain_id, valid_cnt, lt_cnt=0):
    """Leave-one-event-out BiGCN loader: the event at `domain_id` is the
    target domain, the remaining four PHEME events form the source.

    Returns (valid_target, source, unlabeled_target, labeled_target); the
    first/last entries are None when valid_cnt/lt_cnt are zero.
    """
    events = ['../../../pheme-rnr-dataset/charliehebdo',
              '../../../pheme-rnr-dataset/ferguson',
              '../../../pheme-rnr-dataset/germanwings-crash',
              '../../../pheme-rnr-dataset/ottawashooting',
              '../../../pheme-rnr-dataset/sydneysiege']
    source = BiGCNTwitterSet()
    source.load_event_list([e for j, e in enumerate(events) if j != domain_id],
                           cached_pkl_file="../../data/pheme.pkl")
    target = BiGCNTwitterSet()
    target.load_event_list([events[domain_id]], cached_pkl_file="../../data/pheme.pkl")
    total = float(len(target))
    if lt_cnt > 0:
        labeled_tgt, valid_tgt, unlabeled_tgt = target.split(
            [lt_cnt / total, (lt_cnt + valid_cnt) / total, 1.0])
        return valid_tgt, source, unlabeled_tgt, labeled_tgt
    if valid_cnt == 0:
        return None, source, target, None
    valid_tgt, unlabeled_tgt = target.split([valid_cnt / total, 1.0])
    return valid_tgt, source, unlabeled_tgt, None