import torch
from torch.utils.data import DataLoader
import torch.nn as nn
from .BaseRumorFramework import RumorDetection
from Dataloader.dataloader_utils import Merge_data
import fitlog

def WeakLabeling(model: "RumorDetection", data, batch_size=20):
    """Pseudo-label ``data`` in place with ``model``'s predictions.

    Runs the model over the whole dataset without gradient tracking,
    thresholds the predicted class probabilities at 0.5 to produce hard
    0/1 labels, writes them into ``data.data_y``, and returns the
    per-example prediction entropy (lower entropy = more confident).

    NOTE(review): the model is not switched to ``eval()`` here, so
    dropout/batch-norm stay in training mode — confirm that is intended.

    :param model: detector whose ``forward(batch)`` returns per-class
        probabilities (assumed in [0, 1] — TODO confirm with caller)
    :param data: dataset exposing ``collate_raw_batch`` and a ``data_y``
        label attribute that this function overwrites
    :param batch_size: inference batch size
    :return: 1-D tensor of entropies, one per example in ``data``
    """
    data_loader = DataLoader(data,
                             batch_size=batch_size,
                             shuffle=False,
                             collate_fn=data.collate_raw_batch)
    preds = []
    with torch.no_grad():
        for batch in data_loader:
            preds.append(model.forward(batch))
        # Truncate in case padding/collation produced extra rows.
        pred_tensor = torch.cat(preds, dim=0)[:len(data)]
    # Release cached GPU memory once after inference, not per batch.
    torch.cuda.empty_cache()
    # Hard labels: probability > 0.5 -> 1 (idiomatic `>` instead of __gt__).
    data.data_y = (pred_tensor > 0.5).long().tolist()
    # Shannon entropy -sum(p*log p); |log p| == -log p for p in (0, 1].
    # clamp_min avoids 0 * log(0) = NaN when a probability is exactly 0.
    entropy = (pred_tensor.clamp_min(1e-12).log().abs() * pred_tensor).sum(dim=1)
    return entropy


class SelfTraining(nn.Module):
    """Co-training wrapper over two rumor-detection models.

    In each iteration, model1 pseudo-labels the unlabeled pool and model2
    is retrained on pseudo-labels + gold labels; then the roles swap.
    The ``convey_fn_*`` callables convert a raw dataset into the input
    format the corresponding model expects.
    """

    def __init__(self, model1: "RumorDetection", model2: "RumorDetection",
                 unlabel_data, label_data, dev_set, te_set,
                 convey_fn_1, convey_fn_2):
        """Store the two models, the datasets, and their converters.

        :param model1: first detector (labels data for model2)
        :param model2: second detector (labels data for model1)
        :param unlabel_data: pool to be pseudo-labeled each iteration
        :param label_data: gold-labeled training set
        :param dev_set: validation set
        :param te_set: test set
        :param convey_fn_1: converts a dataset to model1's input format
        :param convey_fn_2: converts a dataset to model2's input format
        """
        super(SelfTraining, self).__init__()
        self.model1 = model1
        self.model2 = model2
        self.unlabel_data = unlabel_data
        self.label_data = label_data
        self.dev = dev_set
        self.te = te_set
        self.convey_fn_1 = convey_fn_1
        self.convey_fn_2 = convey_fn_2

    def _valid_on(self, model, dataset):
        """Evaluate ``model`` on ``dataset`` (all metrics), no shuffling."""
        loader = DataLoader(dataset,
                            batch_size=model.batch_size,
                            shuffle=False,
                            collate_fn=dataset.collate_raw_batch)
        return model.valid(loader, all_metrics=True)

    def _co_train_step(self, labeler, learner, convey_label, convey_learn,
                       it, max_iter, learner_name, log_dir, log_suffix,
                       model_file, RenameModel):
        """One half-iteration: ``labeler`` weak-labels the unlabeled pool,
        then ``learner`` is trained on pseudo-labels merged with gold labels.

        :param labeler: model that produces the pseudo labels
        :param learner: model that is (re)trained this half-iteration
        :param convey_label: converter matching the labeler's input format
        :param convey_learn: converter matching the learner's input format
        :param it: current iteration index (0-based)
        :param max_iter: total number of iterations (for logging only)
        :param learner_name: human-readable name used in progress prints
        """
        unlabel_data = convey_label(self.unlabel_data)
        WeakLabeling(labeler, unlabel_data)  # writes pseudo labels into data_y
        train_data = Merge_data(convey_learn(unlabel_data),
                                convey_learn(self.label_data))
        dev_data, te_data = convey_learn(self.dev), convey_learn(self.te)
        if it == 0:
            # Baseline test performance before any co-training.
            print("Init Performance of %s:" % learner_name,
                  self._valid_on(learner, te_data))
        learner.train_iters(train_data, dev_data, te_data,
                            valid_every=100, max_epochs=2, lr_discount=1.0,
                            best_valid_acc=0.0, best_test_acc=0.0,
                            best_valid_test_acc=0.0,
                            log_dir=log_dir, log_suffix=log_suffix,
                            model_file=model_file, RenameModel=RenameModel)
        print("%3d | %3d Performance of %s:" % (it, max_iter, learner_name),
              self._valid_on(learner, dev_data))

    def iterateTraining(self, max_iter=5, log_dir="../logs/", log_suffix_1="_RumorDetection",
                        log_suffix_2="_RumorDetection", model1_file="", model2_file="", RenameModel=True):
        """Run ``max_iter`` rounds of alternating co-training.

        Each round: model1 labels -> model2 trains, then model2 labels ->
        model1 trains. Dev-set metrics are printed after every half-round.
        (Fixes an earlier copy-paste bug where model1's results were
        printed under the label "model2".)
        """
        for it in range(max_iter):
            # First half: model1 pseudo-labels, model2 learns.
            self._co_train_step(self.model1, self.model2,
                                self.convey_fn_1, self.convey_fn_2,
                                it, max_iter, "model2",
                                log_dir, log_suffix_2, model2_file, RenameModel)
            # Second half: model2 pseudo-labels, model1 learns.
            self._co_train_step(self.model2, self.model1,
                                self.convey_fn_2, self.convey_fn_1,
                                it, max_iter, "model1",
                                log_dir, log_suffix_1, model1_file, RenameModel)