from transformers import PretrainedConfig
import logging
import datasets
from datasets import load_dataset, load_metric
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import transformers
import torch
import io
import random
import numpy as np
import time
import math
import datetime
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import (
    AutoModel,
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    default_data_collator,
    set_seed,
    get_constant_schedule_with_warmup,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback,
)
from datasets import Dataset  # note: shadows torch.utils.data.Dataset imported above
import sys


class GanBertConfig(PretrainedConfig):
    """Configuration for the GAN-BERT model, holding the hyperparameters of the
    generator, the discriminator, and the training loop."""

    model_type = "ganbert"

    def __init__(
        self,
        out_dropout_rate=0.4,            # dropout applied to the discriminator's output layer
        num_hidden_layers_g=2,           # number of hidden layers in the generator
        num_hidden_layers_d=1,           # number of hidden layers in the discriminator
        pos_class_weight=10,             # loss weight for the positive class
        batch_size=64,
        noise_size=100,                  # dimensionality of the generator's input noise vector
        num_train_examples=77450,
        epochs=10,
        epsilon=1e-08,                   # optimizer epsilon (numerical stability term)
        learning_rate_discriminator=1e-05,
        learning_rate_generator=1e-05,
        warmup_proportion=0.1,           # fraction of training steps used for learning-rate warmup
        model_number=-2,
        device="cuda",
        **kwargs,
    ):
        self.out_dropout_rate = out_dropout_rate
        self.num_hidden_layers_g = num_hidden_layers_g
        self.num_hidden_layers_d = num_hidden_layers_d
        self.pos_class_weight = pos_class_weight
        self.model_number = model_number
        self.learning_rate_discriminator = learning_rate_discriminator
        self.learning_rate_generator = learning_rate_generator
        self.warmup_proportion = warmup_proportion
        self.epsilon = epsilon
        self.num_train_examples = num_train_examples
        self.epochs = epochs
        self.batch_size = batch_size
        self.noise_size = noise_size
        # Use the requested device, falling back to CPU when CUDA is unavailable.
        self.device = device if torch.cuda.is_available() else "cpu"
        super().__init__(**kwargs)
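

# Minimal usage sketch (not part of the original module): instantiate the config
# with custom hyperparameters and round-trip it through the standard
# PretrainedConfig serialization helpers. The directory name "./ganbert-config"
# is an arbitrary example path.
if __name__ == "__main__":
    config = GanBertConfig(
        num_hidden_layers_g=3,
        batch_size=32,
        learning_rate_generator=5e-05,
    )
    print(config.to_json_string())              # inspect the serialized hyperparameters
    config.save_pretrained("./ganbert-config")  # writes config.json to the directory
    reloaded = GanBertConfig.from_pretrained("./ganbert-config")
    assert reloaded.num_hidden_layers_g == 3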