import multiprocessing
import pandas as pd
import warnings
import torch
import tokenizers
import transformers

# Pandas display settings: show long/wide DataFrames in full during inspection.
for _option, _value in (
    ("display.max_rows", 500),
    ("display.max_columns", 500),
    ("display.width", 1000),
):
    pd.set_option(_option, _value)

# Prefer the GPU when torch can see one; otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print(f"Current device is: {device}")

# Silence all warnings (deliberate: keeps training logs readable).
warnings.filterwarnings("ignore")


class Config:
    """Training-time hyperparameters and runtime switches."""

    APEX = True  # enable automatic mixed precision (FP16) to speed up training and cut VRAM

    BATCH_SCHEDULER = True  # step the LR scheduler per batch rather than per epoch
    BATCH_SIZE_TRAIN = 32   # samples per training batch
    BATCH_SIZE_VALID = 16   # samples per validation batch

    BETAS = (0.9, 0.98)  # Adam (beta1, beta2) momentum coefficients
    EPS = 1e-6           # optimizer epsilon term for numerical stability

    DEBUG = False  # quick-run mode; the module bottom shrinks TRAIN_FOLDS to [0] when True

    # The pretrained encoder backbone gets a lower LR (2e-5) than the decoder/head (5e-5).
    ENCODER_LR = 2e-5
    DECODER_LR = 5e-5

    EPOCHS = 3

    FOLDS = 5
    # Fix: TRAIN_FOLDS was previously only ever assigned in the DEBUG branch at the
    # bottom of this module, so normal (non-debug) runs had no Config.TRAIN_FOLDS at
    # all. Define the full-fold default here; DEBUG mode still overrides it to [0].
    TRAIN_FOLDS = list(range(FOLDS))
    DROPOUT_RATE = 0.3  # shared dropout rate

    GRADIENT_ACCUMULATION_STEPS = 2  # accumulate gradients to simulate a larger batch size
    GRADIENT_CHECKPOINTING = True    # trade ~20% speed for lower VRAM usage
    MAX_GRAD_NORM = 3                # gradient-clipping threshold (guards against exploding gradients)

    MAX_LEN = 671  # max token length per input (tuned from tokenized-length statistics)

    MODEL = "./model/microsoft/deberta-v3-base/models--microsoft--deberta-v3-base/snapshots/8ccc9b6f36199bec6961081d44eb72fb3f7353f3"

    NUM_WORKERS = multiprocessing.cpu_count()  # DataLoader worker processes (one per CPU core)

    PRINT_FREQ = 20      # log every N training steps
    SEED = 42            # random seed for reproducible splits/initialization
    TRAIN = True         # False would mean inference-only
    WEIGHT_DECAY = 1e-4  # L2 regularization strength (mild penalty on weights)

class Dconfig:
    """Inference / evaluation configuration."""

    BATCH_SIZE_TEST = 16
    DEBUG = False
    GRADIENT_CHECKPOINTING = True
    MAX_LEN = 1024
    MODEL = "microsoft/deberta-v3-base"
    NUM_WORKERS = 0
    PRINT_FREQ = 20
    SEED = 42
    DROPOUT_RATE = 0

    # Evaluation mode: 1 = single best fold, 2 = full-data model, 3 = fold ensemble.
    choose = 3
    if choose == 1:
        is_ensemble = False
        is_fold = True
    elif choose == 2:
        is_ensemble = False
        is_fold = False
    elif choose == 3:
        is_ensemble = True
        is_fold = False
    else:
        # Fix: any other value previously left is_ensemble / is_fold undefined,
        # surfacing later as an AttributeError far from the cause. Fail fast here.
        raise ValueError(f"Dconfig.choose must be 1, 2 or 3, got {choose!r}")


class Paths:
    """File-system locations for data, checkpoints and outputs."""

    OUTPUT_DIR = "./output"
    SAVE_DIR = "./input/save"
    TRAIN_DATA = "./input/save/train.csv"
    VALID_DATA = "./input/save/test.csv"

    # Test-set selector: 1 = HC3, 2 = MAGE, 3 = AB (Ateeqq AI-vs-human).
    TEST_NUM = 3
    if TEST_NUM == 1:
        TEST_DATA = "./input/save/s_f_Hello-SimpleAI-HC3.csv"
    elif TEST_NUM == 2:
        TEST_DATA = "./input/save/s_f_yaful-MAGE-test.csv"
    elif TEST_NUM == 3:
        TEST_DATA = "./input/save/s_f_Ateeqq-AI-and-Human-Generated-Text.csv"
    else:
        # Fix: any other value previously left TEST_DATA undefined, surfacing
        # later as an AttributeError far from the cause. Fail fast here.
        raise ValueError(f"Paths.TEST_NUM must be 1, 2 or 3, got {TEST_NUM!r}")

    BEST_Ensemble_THRESHOLD = 0.5  # decision threshold applied to ensembled probabilities

    BEST_FOLD = 0  # index of the best-performing fold
    BEST_FOLD_MODEL_PATH = f"./output/fold_{BEST_FOLD}_best.pth"
    # Fix: was an f-string with no placeholders; value is unchanged.
    BEST_Full_MODEL_PATH = "./output/the_full.pth"
    CONFIG_PATH = "./input/save/config.pth"



# DEBUG shortcut: restrict training to fold 0 for fast iteration.
# NOTE(review): in this file TRAIN_FOLDS appears to be assigned only here, so with
# DEBUG=False any reader of Config.TRAIN_FOLDS would hit AttributeError — confirm
# against the training script.
if Config.DEBUG:
    Config.TRAIN_FOLDS = [0] # use only fold 0