import sys
sys.path.append("../../")
import os
import shutil
import fitlog
from torch.optim import Adam
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer, BertConfig
from modeling_bert import AugBertForSequenceClassification
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from model import VanillaBert
from metrics import *
from prettytable import PrettyTable
import pickle
import torch
from tqdm import trange, tqdm
from InstanceReweighting import MetaEvaluator, InstanceReweighting, update_current_devices, to_var

def Dataset2Vecs(dataset, model: VanillaBert, batch_size=None):
    """Encode every example of *dataset* into a sentence vector with *model*.

    Args:
        dataset: object exposing ``__len__``, ``__getitem__`` and
            ``collate_raw_batch`` (same protocol as NLIDataset).
        model: wrapper exposing ``Sents2Vecs(input_ids, attention_mask,
            token_ids)``.
        batch_size: mini-batch size.  Defaults to the module-level
            ``batch_size`` global, which the original version read
            implicitly; passing it explicitly is now possible.

    Returns:
        torch.Tensor: the per-example vectors concatenated along dim 0.
    """
    if batch_size is None:
        # Preserve the historical behaviour of reading the module-level global.
        batch_size = globals()["batch_size"]
    vecs = []
    with torch.no_grad():  # pure inference — no autograd bookkeeping
        for start in trange(0, len(dataset), batch_size):
            stop = min(len(dataset), start + batch_size)
            batch = dataset.collate_raw_batch([dataset[idx]
                                               for idx in range(start, stop)])
            input_ids, masks, seg_ids = batch[0], batch[1], batch[2]
            vecs.append(model.Sents2Vecs(input_ids,
                                         attention_mask=masks,
                                         token_ids=seg_ids))
    return torch.cat(vecs)

def pred_Logits(model: VanillaBert, data, idxs=None, batch_size=20):
    """Run *model* over *data* (optionally restricted to *idxs*) and return
    the prediction logits for all selected examples as one tensor.

    Args:
        model: wrapper exposing ``predict(batch)``.
        data: dataset exposing ``__len__``, ``__getitem__`` and
            ``collate_raw_batch``.
        idxs: indices to score; ``None`` means the whole dataset.
        batch_size: number of examples scored per forward pass.

    Returns:
        torch.Tensor: logits concatenated along dim 0, in *idxs* order.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():  # inference only
        for start in trange(0, len(idxs), batch_size):
            selected = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in selected])
            chunks.append(model.predict(raw_batch))
    return torch.cat(chunks)

def prediction(model: VanillaBert, data, idxs=None, batch_size=20):
    """Return the predicted class index and its score for each example.

    Args:
        model, data, idxs, batch_size: forwarded to ``pred_Logits``.

    Returns:
        (labels, scores): two 1-D tensors — the argmax class per row and the
        corresponding maximal logit/score.

    The original sorted every row and read the last column, which is just the
    row maximum; ``max(dim=1)`` computes it directly in one pass.  (On exact
    ties ``max`` returns the first maximal index, whereas the last element of
    a sort is implementation-dependent.)
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    scores, labels = pred_tensor.max(dim=1)
    return labels, scores

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, per-class (precision, recall, F1, support)).

    *y_pred* is moved to the CPU once before being handed to sklearn.
    """
    y_pred_host = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_pred_host)
    per_class = precision_recall_fscore_support(y_true, y_pred_host, average=None)
    return accuracy, per_class

def Perf(model: VanillaBert, data, label, idxs=None, batch_size=20):
    """Score *model* on *data* against ground-truth *label*.

    When *idxs* is given, only that subset of examples (and labels) is used.
    Returns the ``acc_P_R_F1`` tuple.
    """
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

def WeakLabeling(model: VanillaBert, data: NLIDataset, pseaudo_idxs=None, batch_size=20):
    """Pseudo-label every example of *data* not already in *pseaudo_idxs*.

    Writes the model's argmax class, its confidence and the per-example
    entropy back into *data* via ``setLabel`` / ``setConfidence`` /
    ``setEntrophy`` (the dataset API spells it "entrophy").

    Args:
        model: wrapper used by ``pred_Logits``.
        data: dataset to annotate in place.
        pseaudo_idxs: indices to skip (already pseudo-labelled).  Fix: the
            original used a mutable default argument (``pseaudo_idxs=[]``).
        batch_size: forwarded to ``pred_Logits``.

    Returns:
        (entropy, predicted labels, confidences) — CPU tensors over the
        full dataset length for entropy, and over the relabelled rows for
        the other two.
    """
    if pseaudo_idxs is None:
        pseaudo_idxs = []
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    confs, preds = pred_tensor.sort(dim=1)
    weak_label = preds[:, -1].cpu().numpy()
    data.setLabel(weak_label, c_idxs)
    entrophy = torch.zeros([len(data)], device=model.device)
    # H = -sum p*log(p); |log p| == -log p when p in (0, 1]
    # NOTE(review): assumes model.predict outputs probabilities — confirm.
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    data.setConfidence(confs[:, -1].cpu(), c_idxs)
    data.setEntrophy(entrophy.cpu(), c_idxs)
    return entrophy.cpu(), preds[:, -1].cpu(), confs[:, -1].cpu()

def expandPseaudoSet(model: VanillaBert, unlabeled: NLIDataset, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Pick up to *max_cnt* examples to add to the pseudo-labelled set.

    An example qualifies when the model's current prediction agrees with the
    stored weak label AND both confidences exceed *threshold*.  When more
    than *max_cnt* qualify, the ones with the highest harmonic mean of the
    two confidences are kept.

    Args:
        model: wrapper used by ``prediction``.
        unlabeled: dataset carrying previous weak labels and confidences.
        skip_idxs: indices to exclude from consideration (``None`` = none).
        threshold: minimum confidence required from both sources.
        max_cnt: cap on the number of returned indices.

    Returns:
        list[int]: the selected dataset indices.

    Fix: replaced explicit dunder calls (``__eq__`` / ``__gt__``) with the
    ``==`` and ``>`` operators, and simplified ``(a - b).abs() == 0`` to a
    direct equality test.
    """
    if skip_idxs is None:
        c_idxs = list(range(len(unlabeled)))
    else:
        c_idxs = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_1, conf_1 = prediction(model, unlabeled, c_idxs)
    pred_2 = unlabeled.labelTensor()[c_idxs].to(pred_1.device)
    conf_2 = unlabeled.confidence[c_idxs].to(conf_1.device)
    pred_eq = pred_1 == pred_2
    valid_conf_1 = (conf_1 > threshold) & pred_eq
    valid_conf_2 = (conf_2 > threshold) & valid_conf_1
    expand_idxs = torch.tensor(c_idxs, device=valid_conf_2.device)[valid_conf_2]
    if len(expand_idxs) > max_cnt:
        # Harmonic mean of the two confidences, used as the ranking score.
        conf_f1 = 2 * conf_2 * conf_1 / (conf_2 + conf_1)
        sort_idxs = conf_f1[valid_conf_2].argsort()[-max_cnt:]
        expand_idxs = expand_idxs[sort_idxs].tolist()
    else:
        expand_idxs = expand_idxs.tolist()
    return expand_idxs

def BERTEvaluater(data_set: NLIDataset, label):
    """Build an evaluation closure over a fixed dataset and label tensor.

    Returns:
        A function ``evaluater(model)`` yielding
        (accuracy, micro-F1, per-class (precision, recall, F1, support)).
    """
    def evaluater(model: VanillaBert):
        predicted, _ = prediction(model, data_set, batch_size=20)
        predicted = predicted.cpu()
        accuracy = accuracy_score(label, predicted)
        micro_f1 = f1_score(label, predicted, average="micro")
        per_class = precision_recall_fscore_support(label, predicted, average=None)
        return accuracy, micro_f1, per_class
    return evaluater

class MetaSelfTrainer(MetaEvaluator):
    """Self-trainer that uses meta-evaluation (instance reweighting) to pick
    which weakly-labelled examples to fine-tune the BERT model on.

    Fixes relative to the original:
      * ``exp_idxs`` / ``pseaudo_idxs`` no longer use shared mutable default
        lists.
      * Tensor comparisons use ``>`` instead of explicit ``__gt__`` calls.
      * ``ModelTrain`` averages the epoch loss over the batch size actually
        used in its loop (previously it divided by ``self.batch_size``, which
        can differ from the hard-coded local ``batch_size = 32``).
    """

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=100,
                 batch_size=5):
        # Avoid a mutable default argument; each call gets a fresh list.
        if exp_idxs is None:
            exp_idxs = []
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                              weak_set_label, exp_idxs, weak_set_weights,
                                              convey_fn, lr4model, scale_lr4model, coeff4expandset,
                                              max_few_shot_size, batch_size)
        self.expand_batch = []

    def LogSelectionInfo(self, e_arr, valid_idxs=None):
        """Print selection diagnostics: set sizes, mean entropy, and
        accuracy/precision/recall/F1 on the full weak set vs the selected
        (positive-weight) subset."""
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        pos_indices = valid_idxs if valid_idxs is not None else indices[self.weak_set_weights > 0.0]
        labels, preds = self.weak_set_label[indices], self.weak_set.labelTensor()[indices]
        print(len(indices))
        print(len(pos_indices))
        print(e_arr.mean(), e_arr[pos_indices].mean())
        print(accuracy_score(labels, preds), accuracy_score(labels[pos_indices], preds[pos_indices]))
        print(precision_score(labels, preds, average=None), precision_score(labels[pos_indices], preds[pos_indices], average=None))
        print(recall_score(labels, preds, average=None), recall_score(labels[pos_indices], preds[pos_indices], average=None))
        print(f1_score(labels, preds, average=None), f1_score(labels[pos_indices], preds[pos_indices], average=None))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def ModelTrain(self, max_epoch, valid_indices):
        """Fine-tune the wrapped BERT on the weak-set rows in *valid_indices*
        for up to *max_epoch* epochs, with mixup augmentation enabled and
        layer-wise learning-rate decay; stops early when the mean epoch loss
        drops below 0.1."""
        labels, preds = self.weak_set_label[valid_indices], self.weak_set.labelTensor()[valid_indices]
        print("trainSet perf:", acc_P_R_F1(labels, preds))
        self.model.bert.bert.embeddings.aug_type = 'mix'
        # Layer-wise fine-tuning: deeper layers get larger learning rates.
        # NOTE(review): reads the module-level global ``lr``.
        optimizer_grouped_parameters = [
            {'params': p,
             'lr': lr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                 else (lr * pow(0.8, 13) if "embedding" in n else lr ) \
             } for n, p in self.model.named_parameters()
        ]
        model_optim = torch.optim.Adam(optimizer_grouped_parameters)
        batch_size = 32
        for epoch in range(max_epoch):
            sum_loss = 0.
            for idx in range(0, len(valid_indices), batch_size):
                batch = self.weak_set.collate_raw_batch(
                    [self.weak_set[jj] for jj in valid_indices[idx:idx + batch_size]]
                )
                cost, acc = self.model.lossAndAcc(batch)
                self.model.zero_grad()
                model_optim.zero_grad()
                cost.backward()
                self.model.bert.bert.PreserveGrad()
                model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) ####, loss = %6.8f, acc = %6.8f' % (
                    idx, len(valid_indices), cost.data, acc
                ))
                sum_loss += cost.data
            # Bug fix: average over the number of batches actually run in the
            # loop above (local batch_size), not over self.batch_size.
            mean_loss = (sum_loss * 1.0) / ((len(valid_indices) // batch_size) + 1)
            print("mean loss:", mean_loss)
            if mean_loss < 0.1:  # early stop once the training loss is low enough
                break
        self.model.bert.bert.embeddings.aug_type = 'none'

    def Training(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
                 lr4weights=0.1, meta_lr4model=1e-1,
                 meta_scale_lr4model=5e-3):
        """Meta-evaluate the weak set, log the selection, then fine-tune the
        model on a class-balanced subset of positively-weighted examples.

        Returns:
            (exp_idxs, selected_idxs): the expansion indices from Evaluate and
            the indices whose meta-learned weight ended up positive.
        """
        saved_lrs = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        # lr4weights: 0.1 on ferguson, 0.05 on sydney
        exp_idxs = self.Evaluate(max_epochs=1, max_meta_steps=max_meta_steps, lr4weights=lr4weights)
        self.lr4model, self.scale_lr4model = saved_lrs
        self.LogSelectionInfo(entrophys)
        valid_indices = self.BalancedSelections() # select a balanced set based on the weak_set_weights
        self.batch_size = batch_size
        self.ModelTrain(max_epoch, valid_indices)
        return exp_idxs, torch.arange(self.weak_set_size)[self.weak_set_weights > 0.0].tolist()

    def BalancedTraining(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
        lr4weights=0.1, meta_lr4model=1e-1, meta_scale_lr4model=5e-3, pseaudo_idxs=None):
        """Like ``Training`` but selects via ``HalfOut`` and trains on the
        union of the selected, pseudo-labelled and expansion indices.

        Returns:
            (exp_idxs, valid_idxs) from ``HalfOut``; returns early (without
            training) when no valid indices were selected.
        """
        if pseaudo_idxs is None:
            pseaudo_idxs = []
        saved_lrs = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        # lr4weights: 0.1 on ferguson, 0.05 on sydney
        exp_idxs, valid_idxs = self.HalfOut(max_epochs=1, max_meta_steps=max_meta_steps,
                                            lr4weights=lr4weights, pseaudo_idxs=pseaudo_idxs)
        self.lr4model, self.scale_lr4model = saved_lrs
        if len(valid_idxs) == 0:
            print("===> no valid idxs")
            return exp_idxs, valid_idxs
        self.LogSelectionInfo(entrophys, valid_idxs=valid_idxs)
        self.batch_size = batch_size
        train_idxs = valid_idxs + pseaudo_idxs + exp_idxs
        self.ModelTrain(max_epoch, train_idxs)
        return exp_idxs, valid_idxs


def test_model(model, test_set, test_label, test_suffix, step=0):
    """Evaluate *model* on *test_set* and report the scores to fitlog.

    Logs both a "best metric" entry (prefixed ``Original_``) and a stepped
    metric entry under *test_suffix*.
    """
    result = Perf(model, test_set, test_label)
    acc_v, (p_v, r_v, f1_v, _) = result
    print(f"Original Performance of {test_suffix}:", result)
    # NOTE(review): indexes class 1, i.e. only that class's P/R/F1 is logged.
    scores = {"valid_acc": acc_v, "valid_prec": p_v[1],
              "valid_recall": r_v[1], "valid_f1": f1_v[1]}
    fitlog.add_best_metric({f"Original_{test_suffix}": scores})
    fitlog.add_metric({f"{test_suffix}": dict(scores)}, step=step)

def DataSelection(bertmodel, tr_model_path, anno_model_path, weak_set, weak_set_label, p_idxs, e_idxs):
    """Weak-label *weak_set* with the annotation model, then report labelling
    accuracy and mean entropy on the full set, on a fixed pre-computed
    selection (``valid_idxs``), and on selection + pseudo indices.

    Args:
        bertmodel: VanillaBert wrapper to load the checkpoint into.
        tr_model_path: path of the transfer model checkpoint (currently only
            used by the commented-out similarity-based selection below).
        anno_model_path: checkpoint used for weak labelling.
        weak_set: dataset to be weak-labelled in place.
        weak_set_label: ground-truth labels used only for reporting.
        p_idxs: pseudo-labelled indices merged into the "tr" columns.
        e_idxs: expansion indices (unused in the active code path).
    """
    # NOTE(review): torch.load on a pickle file — only load trusted checkpoints.
    state_dicts = torch.load(anno_model_path)
    bertmodel.load_state_dict(state_dicts)
    entrophy, preds, logits = WeakLabeling(bertmodel, weak_set, pseaudo_idxs=[])
    table = PrettyTable(["few_shot_cnt", "accuracy_init", "accuracy_slc", "accuracy_tr",
                         "entrophy_init", "entrophy_slc", "entrophy_tr"])
    # The block below is the original dynamic selection (cosine similarity to
    # SNLI examples followed by meta-reweighting); it is disabled in favour of
    # the hard-coded valid_idxs list that a previous run produced.
    # state_dicts = torch.load(tr_model_path)
    # bertmodel.load_state_dict(state_dicts)
    #
    # target_vecs = Dataset2Vecs(weak_set, bertmodel)
    # source_vecs = Dataset2Vecs(SNLI_set, bertmodel)
    # cosine = torch.matmul(target_vecs, source_vecs.transpose(0, 1))/torch.matmul(target_vecs.norm(dim=1).unsqueeze(1),
    #                                                                              source_vecs.norm(dim=1).unsqueeze(0))
    # sort_idxs = cosine.argsort(dim=1)[:, -100:].reshape(-1).tolist()
    # source_idxs = list(set(sort_idxs))
    # idxs_count = [sort_idxs.count(idx) for idx in source_idxs]
    # selected_idxs = np.argsort(idxs_count)[-100:]
    # few_shot_data_list = [weak_set.collate_raw_batch(
    #                             [SNLI_set[j] for j in range(i, min(i+20, len(selected_idxs)))]
    #                         ) for i in range(0, len(selected_idxs), 20)]
    # trainer = MetaSelfTrainer(bertmodel, weak_set, None,
    #                             weak_set_label, exp_idxs=e_idxs, convey_fn=None, lr4model=5e-2,
    #                                scale_lr4model=4e-2, max_few_shot_size=20, batch_size=20)
    # max_meta_steps = 10
    # lr4weights, meta_lr4model, meta_scale_lr4model = 0.1, 1e-2, 5e-3
    # trainer.lr4model, trainer.scale_lr4model = meta_lr4model, meta_scale_lr4model
    # valid_idxs = trainer.PopOut(max_epochs=1, max_meta_steps=max_meta_steps,
    #                                     lr4weights=lr4weights, pseaudo_idxs=pseaudo_idxs,
    #                                       pop_ratio=0.33, few_shot_data=None,
    #                                         few_shot_data_list=few_shot_data_list) # 0.1 on ferguson, 0.05 on sydney
    # Hard-coded selection produced by an earlier run of the procedure above.
    # NOTE(review): indices 1735 and 275 appear twice (start and end of the
    # list) — presumably unintended duplicates; confirm before relying on it.
    valid_idxs = [1735, 999, 241, 129, 275, 1014, 650, 4, 1477, 209, 798, 1561, 477, 443, 1486, 1386, 1108, 1132, 380,
                  1288, 852, 1482, 388, 805, 1206, 70, 1039, 1203, 1511, 1720, 1588, 1439, 1041, 1066, 1214, 345, 758,
                  464, 25, 60, 11, 1493, 1049, 1516, 1122, 828, 985, 708, 872, 746, 1167, 846, 1254, 1102, 938, 1635,
                  57, 3, 575, 378, 34, 1848, 1547, 144, 931, 1457, 342, 1344, 1805, 558, 72, 428, 328, 461, 703, 529,
                  451, 457, 1072, 1382, 1291, 1036, 1156, 635, 1790, 1591, 1510, 17, 202, 1209, 1832, 774, 1211, 1042,
                  576, 879, 606, 321, 411, 37, 82, 1090, 133, 1407, 776, 1673, 1618, 1464, 1631, 656, 1236, 1533, 546,
                  301, 149, 1168, 167, 234, 1694, 1201, 1506, 1455, 567, 81, 644, 1585, 1819, 331, 236, 1756, 55, 128,
                  440, 1447, 1231, 1221, 407, 1416, 960, 1851, 183, 1417, 503, 41, 188, 1185, 1252, 450, 1010, 822, 1701,
                  157, 1546, 1791, 601, 1053, 1117, 1232, 1671, 1752, 506, 1101, 323, 1398, 517, 99, 1462, 821, 1450,
                  38, 1478, 1669, 15, 844, 1147, 478, 1402, 1775, 449, 1217, 174, 338, 761, 417, 1552, 361, 1054, 83,
                  1073, 513, 493, 1266, 171, 1569, 911, 43, 1743, 568, 1543, 775, 1145, 1781, 860, 419, 637, 784, 27, 1,
                  1104, 1348, 1173, 1844, 1286, 1796, 1329, 773, 1077, 837, 231, 1769, 1787, 1009, 1628, 1603, 842, 1038,
                  336, 1634, 40, 531, 956, 1518,140, 564, 1248, 608, 1392, 339, 500, 1352, 1368, 1197, 824, 69, 1501,
                  820, 329, 1706, 1385, 23, 770, 1567, 741, 978, 987, 709, 1568, 1158, 1668, 350, 768, 1810, 1278, 1693,
                  1623, 47, 839, 868, 1656, 935, 812, 163, 294, 1040, 1502, 1773, 578, 1666, 555, 1823, 1444, 652,1724,
                  194, 537, 1092, 1460, 61, 274, 251, 358, 71, 273, 1152, 893, 1171, 320, 1817, 1473,1006, 1793, 1731,
                  1517, 456, 1849, 309, 1307, 453, 1557, 1375, 514, 900, 435, 393, 298, 572, 346, 668, 598, 1302, 1779,
                  1310, 307, 1249, 1181, 1162, 1189, 1760, 952, 1830, 124, 1137, 587, 1063, 589, 592, 996, 412, 1222,
                  841, 151, 1316, 1703, 109, 225, 376, 973, 1515, 523, 584, 463, 1553, 557, 262, 1418, 1370, 815, 524,
                  469, 954, 6, 1421, 418, 1812, 1255, 1218, 143, 1854, 819, 1828, 288, 949, 790, 997, 986, 582, 867,
                  1530, 1809, 259, 1059, 1226, 26, 105, 1065, 697, 1816, 561, 1426, 355, 223, 1060, 1021, 520, 426, 1437,
                  1776, 858, 1395, 1050, 924, 1027, 809, 618, 75, 802, 1807, 764, 847, 297, 1637, 1508, 1499, 230, 918,
                  1766, 318, 1069, 476, 357, 168, 1428, 676, 1798, 968, 1051, 1679, 2, 474, 1799, 1290, 112, 1785, 162,
                  1412, 1494, 866, 73, 1664, 180, 1400, 966, 1354, 1644, 483, 243, 1728, 1273, 1788, 692, 1223, 622,
                  1626, 729, 1716, 446, 535, 1638, 1376, 67, 186, 296, 1399, 664, 1295, 1597, 691, 1551, 1389, 141,
                  633, 1055, 1274, 1076, 1263, 687, 269, 673, 198, 1685, 1794, 1227, 1538, 1746, 1321, 1797, 651, 748,
                  977, 642, 1640, 1579, 907, 1420, 169, 1589, 85, 390, 1357, 486, 645, 32, 1621, 1471, 1308, 1031, 1714,
                  1853, 484, 1125, 1608, 1649, 1180, 268, 982, 1413, 1451, 807, 663, 1048, 942, 116, 1178, 979, 1366,
                  1143, 543, 1134, 16, 95, 135, 1554, 1605, 626, 889, 1397, 1019, 215, 1686, 766, 562, 203, 300, 789,
                  1078, 1559, 175, 1566, 1619, 217, 1735, 275]
    # Compare labelling quality on: the whole set, the selection, and the
    # selection merged with the pseudo-labelled indices.
    table.add_row([
        "BERT-Based",
        accuracy_score(weak_set_label, weak_set.labelTensor()),
        accuracy_score(weak_set_label[valid_idxs], weak_set.labelTensor()[valid_idxs]),
        accuracy_score(weak_set_label[valid_idxs+p_idxs], weak_set.labelTensor()[valid_idxs+p_idxs]),
        entrophy.mean(),
        entrophy[valid_idxs].mean(),
        entrophy[valid_idxs+p_idxs].mean()
    ])
    # print(f"FS = {FS} completed!")
    print(table)


# ---- Script entry: load args, seed RNGs, build model/tokenizer, run selection.
# NOTE(review): unpickling args.pkl — only load a trusted file.
with open("../../args.pkl", 'rb') as fr:
    args = pickle.load(fr)
# args.model_dir = str(__file__).rstrip(".py")
args.model_dir = "./tmp/"

# Set all the seeds for reproducibility.
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# See if CUDA is available.
device = torch.device("cpu")
if args.n_gpu > 0 and torch.cuda.is_available():
    print("Training on GPU")
    device = torch.device("cuda:0")

# Model configuration.  (Fix: the original assigned batch_size/lr/
# weight_decay/n_epochs/full_bert twice in a row; deduplicated here.)
batch_size = args.batch_size
lr = args.lr
weight_decay = args.weight_decay
n_epochs = args.n_epochs
args.full_bert = True
args.bertPath = "../../../bert_en/"
print("====>", args.full_bert)
bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
if args.full_bert:
    bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                    BertConfig.from_pretrained(args.bertPath, num_labels=2)
    tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                    BertTokenizer.from_pretrained(args.bertPath)
else:
    bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                    DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
    tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                    DistilBertTokenizer.from_pretrained(args.distillBertPath)

# Recreate the log directory from scratch.  (Fix: portable stdlib calls
# instead of the original os.system("mkdir ...") / os.system("rm -rf ...").)
log_dir = args.model_dir
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)
fitlog.set_log_dir(log_dir)
fitlog.add_hyper({
        "epochs": n_epochs,
        "learning_rate": lr,
        "warmup": args.warmup_steps,
        "weight_decay": weight_decay,
        "batch_size": batch_size,
        "train_split_percentage": args.train_pct,
        "bert_model": bert_model,
        "seed": seed,
        "pretrained_model": args.pretrained_model,
        "tags": ",".join(args.tags)
    }, name=args.run_name)

# NLI is 3-way; overrides the num_labels=2 the config was loaded with.
bert_config.num_labels = 3
bert_config.hidden_act = "relu"

# Create the model.
bert = AugBertForSequenceClassification.from_pretrained(
                bert_model, config=bert_config).to(device) if args.bertPath is None \
        else AugBertForSequenceClassification.from_pretrained(
                args.bertPath, config=bert_config).to(device)
bert.bert.embeddings.aug_type = 'none'
model = VanillaBert(bert).to(device)

model1_path = "../../saved/modelSNLI_1.pth"
model2_path = "../../saved/modelSNLI_2.pth"

# Pick the target MultiNLI domain and load source (SNLI) / target datasets.
domain_id = 2
NLI_domain_list = list(NLI_domain_map.keys())
SNLI_set = NLIDataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer)
new_domain_name = NLI_domain_list[domain_id-1]
test_set = NLIDataset(f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl", tokenizer=tokenizer)
new_domain_label = test_set.labelTensor()
train_iter = 0
pseaudo_idxs = []
DataSelection(model, model1_path, model2_path,
              test_set, new_domain_label, pseaudo_idxs, [])