from datasets import load_dataset, Dataset
from transformers import Trainer, AutoModelForSequenceClassification, AdamW, get_linear_schedule_with_warmup


# Model fusion: build one Trainer per fine-tuned checkpoint so their
# predictions can later be ensembled.
def model_fusion():
    """Load the test set and create a Trainer for each saved checkpoint.

    Returns:
        list[Trainer]: one Trainer wrapping each checkpoint's model, in
        checkpoint order.
    """
    # 1. Load the evaluation data.
    # NOTE(review): load_dataset("json") needs a data_files= argument to
    # point at an actual JSON file — confirm against the calling code.
    test_dataset = load_dataset("json")
    # 2. Load each fine-tuned model from its own checkpoint directory.
    # Bug fix: the original string lacked the f-prefix ("best_ckpt_{i}"),
    # producing four identical literal paths, and then passed the whole
    # list to from_pretrained(), which expects a single path.
    check_points = [f"best_ckpt_{i}" for i in range(4)]
    models = [AutoModelForSequenceClassification.from_pretrained(cp)
              for cp in check_points]
    # 3. Wrap every loaded model in a Trainer.
    # Bug fix: the original iterated an empty `models` list (it was never
    # populated), so no Trainer was ever created; the results were also
    # discarded. Build and return them instead.
    trainers = [Trainer(model=m) for m in models]
    return trainers


# Set up hierarchical (layer-wise) learning rates: the BERT backbone gets
# args.bert_learning_rate while the task-specific head gets
# args.learning_rate; bias and LayerNorm weights are exempt from weight decay.
def set_hierarchical_lr(args, model):
    """Build an AdamW optimizer with four per-group lr/decay settings plus
    a linear warmup/decay scheduler.

    Args:
        args: config object exposing learning_rate, bert_learning_rate,
            weight_decay, warmup_steps and max_steps.
        model: model whose named parameters are partitioned by backbone
            membership ('bert' in the name) and decay eligibility.

    Returns:
        tuple: (optimizer, scheduler).
    """
    no_decay = ['bias', 'LayerNorm.weight']  # layers exempt from weight decay
    # Snapshot the parameters once instead of re-walking the model four times.
    named_params = list(model.named_parameters())

    def _select(in_bert, decayed):
        # Pick parameters by backbone membership and decay eligibility:
        # a parameter is "decayed" iff its name matches none of no_decay.
        return [p for n, p in named_params
                if ('bert' in n) == in_bert
                and any(nd in n for nd in no_decay) != decayed]

    optimizer_grouped_parameters = [
        # Non-BERT (head) parameters, with weight decay
        {'params': _select(False, True),
         'lr': args.learning_rate, 'weight_decay': args.weight_decay},
        # Non-BERT (head) parameters, no weight decay
        {'params': _select(False, False),
         'lr': args.learning_rate, 'weight_decay': 0.0},
        # BERT parameters, with weight decay
        {'params': _select(True, True),
         'lr': args.bert_learning_rate, 'weight_decay': args.weight_decay},
        # BERT parameters, no weight decay
        {'params': _select(True, False),
         'lr': args.bert_learning_rate, 'weight_decay': 0.0},
    ]
    # Group-level 'lr' entries override this default lr per group.
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Linear warmup for warmup_steps, then linear decay to 0 at max_steps.
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=args.warmup_steps,
                                                num_training_steps=args.max_steps)
    return optimizer, scheduler
