import sys
sys.path.append("../../")
import fitlog
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from metrics import *
from model import *
import pickle
from tqdm import trange, tqdm
from InstanceReweighting import InstanceReweighting, update_current_devices, to_var
import copy

def pred_Logits(model:VanillaBert, data, idxs=None, batch_size=20):
    """Run the model over `data` in mini-batches and return the stacked logits.

    `idxs` selects which examples to score (defaults to the whole dataset);
    inference runs under `torch.no_grad()` to avoid building autograd graphs.
    Returns a single tensor of shape (len(idxs), num_classes).
    """
    if idxs is None:
        idxs = list(range(len(data)))
    batch_outputs = []
    with torch.no_grad():
        for start in trange(0, len(idxs), batch_size):
            # Python slicing clamps at the end, so no explicit min() is needed.
            chunk = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in chunk])
            batch_outputs.append(model.predict(raw_batch))
    return torch.cat(batch_outputs)

def prediction(model:VanillaBert, data, idxs=None, batch_size=20):
    """Return (predicted class index, its logit) per selected example."""
    logits = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_classes = logits.sort(dim=1)
    # After an ascending sort, the last column per row is the max logit and
    # its class index, i.e. the model's prediction.
    return sorted_classes[:, -1], sorted_vals[:, -1]

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, (precision, recall, f1, support)) for the predictions.

    `y_pred` is a torch tensor (possibly on GPU); `y_true` is any
    sklearn-compatible label sequence.  FIX: the original called
    `y_pred.cpu()` twice, paying the device-to-host copy once per metric
    call — the transfer is now done once and reused.
    """
    y_pred_cpu = y_pred.cpu()
    return accuracy_score(y_true, y_pred_cpu), \
                precision_recall_fscore_support(y_true, y_pred_cpu)

def Perf(model:VanillaBert, data, label, idxs=None, batch_size=20):
    """Evaluate the model on (a subset of) `data`.

    Returns the (accuracy, precision/recall/f1/support) pair produced by
    `acc_P_R_F1`, restricting gold labels to `idxs` when a subset is given.
    """
    predicted, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def BERTEvaluater(data_set:NLIDataset, label):
    """Build an evaluation closure bound to a fixed dataset and gold labels.

    The returned callable takes a model and yields
    (accuracy, precision, recall, f1), where the P/R/F1 values are those of
    class index 1.
    """
    def evaluater(model:VanillaBert):
        accuracy, prf = Perf(model, data_set, label)
        # prf = (precision[], recall[], f1[], support[]) per class;
        # index [*][1] picks out class 1.
        return accuracy, prf[0][1], prf[1][1], prf[2][1]
    return evaluater

class OnlineTrainer(InstanceReweighting):
    """Meta-learned instance-reweighting trainer for a BERT classifier.

    At every step the per-example weights of a weakly-labelled batch are
    derived from the gradient of the few-shot (clean) loss with respect to
    those weights (computed on a throwaway model copy), then the real model
    is updated with the resulting weighted loss.
    """

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=[], weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=20,
                 batch_size=5):
        # NOTE(review): exp_idxs=[] is a mutable default argument; safe only
        # if neither this class nor InstanceReweighting mutates it — confirm
        # in the superclass.
        super(OnlineTrainer, self).__init__(model, weak_set, few_shot_set, weak_set_label, exp_idxs,
                                            weak_set_weights, convey_fn, lr4model, scale_lr4model,
                                            coeff4expandset, max_few_shot_size, batch_size)
        # Effective base learning rate for the model optimizer.
        lr = lr4model*scale_lr4model
        # Layer-wise learning-rate decay: parameters in "layer.<k>" get
        # lr * 0.8^(12-k) (deeper layers keep a larger lr), embeddings get
        # the smallest rate (0.8^13), everything else the base lr.
        # Assumes a 12-layer encoder (bert-base) — TODO confirm for other
        # depths.
        optimizer_grouped_parameters = [
            {'params': p,
             'lr': lr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                 else (lr * pow(0.8, 13) if "embedding" in n else lr ) \
             # layer-wise fine-tuning
             } for n, p in model.named_parameters()
        ]
        self.model_optim = torch.optim.Adam(optimizer_grouped_parameters)

    def weightsGrad(self, step, batch, tmp_model, few_shot_data=None, few_shot_data_list=None):
        """Gradient of the few-shot loss w.r.t. the batch's example weights.

        Exactly one of `few_shot_data` / `few_shot_data_list` must be
        provided; a list is processed chunk-by-chunk (to bound GPU memory)
        and the chunk gradients are averaged.  Gradients from
        `self.expand_data_list`, when present, are added on top.
        """
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            # Weights start at zero so the gradient is taken at the
            # unweighted operating point.
            weights = torch.zeros(self.batch_size, device=tmp_model.device)
            grad_weights = self.ComputeGrads4Weights(step, batch, weights, tmp_model, few_shot_data)
        else:
            print("-------> few shot data list ------>")
            grad_weights_list = []
            for i, few_data in enumerate(few_shot_data_list):
                weights = torch.zeros(self.batch_size, device=tmp_model.device)
                grad_weights_list.append(
                    self.ComputeGrads4Weights(step + i, batch, weights, tmp_model, few_data)
                )
                # Free the graph of each chunk before the next one.
                torch.cuda.empty_cache()
            # Average the per-chunk gradients into one estimate.
            grad_weights = torch.stack(grad_weights_list).mean(dim=0)
        if self.expand_data_list is not None:
            print("-------> expand data list ------>")
            grad_weights_list = []
            for i, few_data in enumerate(self.expand_data_list):
                weights = torch.zeros(self.batch_size, device=tmp_model.device)
                grad_weights_list.append(
                    self.ComputeGrads4Weights(step + i, batch, weights, tmp_model, few_data)
                )
                torch.cuda.empty_cache()
            e_grad_weights = torch.stack(grad_weights_list).mean(dim=0)
            # Expansion-set gradient is simply added to the few-shot one.
            grad_weights = grad_weights + e_grad_weights
        torch.cuda.empty_cache()
        return grad_weights

    def Training(self, max_epoch=100,
                 evaluater=None,
                 model_file="./tmp.pkl"):
        """Run the online reweighted training loop.

        After each epoch, `evaluater` (if given) scores the model and the
        best-accuracy checkpoint is saved to `model_file`.
        """
        best_valid_acc = 0.0
        # tmp_model is a scratch copy used only for the meta-gradient; with
        # >1 GPU it lives on cuda:1 so the real model keeps cuda:0.
        if torch.cuda.device_count()== 1 :
            tmp_model = copy.deepcopy(self.model)
        else:
            tmp_model = copy.deepcopy(self.model)
            tmp_model.set_device(torch.device("cuda:1"))
        # Pre-collate the few-shot data once: a single batch if it fits in
        # max_few_shot_size, otherwise a list of chunked batches.
        if len(self.few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[j] for j in range(i,
                                                     min(i + self.max_few_shot_size,
                                                         len(self.few_shot_set)))])
                for i in range(0, len(self.few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
            )
            few_shot_data_list = None
        for epoch in range(max_epoch):
            # Doubling the shuffled index list keeps the tail slices at full
            # batch_size (the loop only walks the first len(weak_set) steps).
            shuffled_indices = random.sample(list(range(len(self.weak_set))),
                                             len(self.weak_set)) * 2
            for step in range(0, len(self.weak_set), self.batch_size):
                # Refresh the scratch model so the meta-gradient is taken at
                # the current parameters.
                tmp_model.load_state_dict(self.model.state_dict())
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.SampleBatch(indices)
                grad_weights = self.weightsGrad(step, batch, tmp_model, few_shot_data, few_shot_data_list)
                print("grad:", grad_weights.norm())
                print("grad_weights:", grad_weights)
                # Keep only weights whose increase would *decrease* the
                # few-shot loss (negative gradient), clipped at zero...
                w_tilde = torch.clamp(-grad_weights, min=0)
                print("w_tilde:", w_tilde)
                # ...then normalize them to sum to 1 (unless all zero).
                norm_c = torch.sum(w_tilde)
                w = w_tilde.data / norm_c.data if norm_c != 0 else w_tilde
                w = w.data.to(self.device)
                # Weighted loss on the real model; weights are detached so
                # only model parameters receive gradient.
                loss = self.ModelLoss(batch, self.model)
                cost = torch.sum(loss * w)
                self.model.zero_grad()
                self.model_optim.zero_grad()
                cost.backward()
                self.model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d [%3d, %3d]) ####, loss = %6.8f' % (
                    step, len(self.weak_set), epoch, max_epoch, loss.data.mean()
                ))
            if evaluater is not None:
                val_acc, prec, rec, f1 = evaluater(self.model)
                print(
                    '##### %6d | %6d, val_acc/val_prec/val_rec/val_f1 = %6.8f/%6.7f/%6.7f/%6.7f, best_valid_acc = %6.7f' % (
                        epoch, max_epoch,
                        val_acc, prec, rec, f1,
                        best_valid_acc
                    )
                )
                # Checkpoint on validation-accuracy improvement only.
                if val_acc > best_valid_acc:
                    best_valid_acc = val_acc
                    self.model.save_model(model_file)

if __name__ == "__main__":
    import shutil

    # Load the pickled experiment arguments produced by the outer driver.
    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)
    # BUG FIX: the original used str(__file__).rstrip(".py"), but str.rstrip
    # strips a trailing *character set* ('.', 'p', 'y'), not a suffix —
    # e.g. "copy.py" would become "co".  splitext drops exactly the extension.
    args.model_dir = os.path.splitext(str(__file__))[0]

    # Seed every RNG source for reproducibility.
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # See if CUDA is available.
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # Model configuration (the original repeated this block verbatim twice;
    # the duplicate has been removed).
    batch_size = args.batch_size
    lr = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)

    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    # Prefer the local checkpoint path when one is configured, otherwise
    # fall back to the hub model name.
    if args.full_bert:
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                        BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                        BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                        DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                        DistilBertTokenizer.from_pretrained(args.distillBertPath)

    # Recreate the fitlog directory from scratch.  shutil.rmtree/os.makedirs
    # replace the original os.system("rm -rf ..."/"mkdir ...") shell calls,
    # which were unquoted and shell-injection-prone.
    log_dir = args.model_dir
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
    os.makedirs(log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
            "epochs": n_epochs,
            "learning_rate": lr,
            "warmup": args.warmup_steps,
            "weight_decay": weight_decay,
            "batch_size": batch_size,
            "train_split_percentage": args.train_pct,
            "bert_model": bert_model,
            "seed": seed,
            "pretrained_model": args.pretrained_model,
            "tags": ",".join(args.tags)
        }, name=args.run_name)

    # NLI is 3-way; override the binary head configured above.
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model (again preferring the local checkpoint path).
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
                        bert_model, config=bert_config).to(device) if args.bertPath is None \
                else BertForSequenceClassification.from_pretrained(
                        args.bertPath, config=bert_config).to(device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
                    bert_model, config=bert_config).to(device) if args.distillBertPath is None \
                else DistilBertForSequenceClassification.from_pretrained(
                        args.distillBertPath, config=bert_config).to(device)

    model = VanillaBert(bert).to(device)
    model1_path = "../../saved/model_None.pth"

    # SNLI as the weak (source) set, one MultiNLI domain as the target:
    # its matched-dev slice for evaluation and a 20-example few-shot set.
    SNLI_set = NLIDataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer)
    test_set = NLIDataset("../../../multinli_1.0/multinli_1.0_dev_matched.jsonl", tokenizer=tokenizer)
    domain_id = 5
    new_domain_name = list(NLI_domain_map.keys())[domain_id-1]
    test_set.domainSelect(domain_id)
    new_domain_label = test_set.labelTensor()
    few_shot_set = NLIDataset(f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl",
                              tokenizer=tokenizer, max_data_size=20)
    trainer = OnlineTrainer(model, SNLI_set, few_shot_set, SNLI_set.labelTensor(), lr4model=5e-3,
                            scale_lr4model=1e-2, batch_size=64)
    # Renamed from `eval`, which shadowed the builtin.
    evaluator = BERTEvaluater(test_set, test_set.labelTensor())
    trainer.Training(max_epoch=10, evaluater=evaluator)