import torch
import datetime
import json
from transformers import BertModel, BertTokenizer, BertConfig

class Config:
    """Project-wide configuration: data paths, label mappings, pretrained BERT
    assets, and training hyperparameters.

    Note: constructing this object hits the filesystem — it reads the category
    JSON and eagerly loads the BERT checkpoint from ``root_path``.
    """

    def __init__(self):
        self.root_path = 'D:/LearnPython/crime-type-discrimination/'

        # Dataset locations (precomputed embedding files).
        self.train_data_path = self.root_path + 'data/train_embeddings.json'
        self.test_data_path = self.root_path + 'data/test_embeddings.json'
        # Legacy attribute aliases kept so older call sites avoid AttributeError.
        self.train_datapath = self.train_data_path
        self.test_datapath = self.test_data_path
        self.dev_datapath = self.test_data_path  # no dev split yet; temporarily points at test so downstream code keeps working

        # Category definition file.
        self.class_path = self.root_path + "data/class.json"

        # Build the label space: the JSON maps accusation -> category; keep the
        # sorted unique categories so class ids are deterministic across runs.
        with open(self.class_path, 'r', encoding='utf-8') as f:
            accusation_to_category = json.load(f)  # {accusation: category}
        self.class_list = sorted(set(accusation_to_category.values()))
        # Bidirectional label <-> id maps.
        self.label2id = {name: idx for idx, name in enumerate(self.class_list)}
        self.id2label = dict(enumerate(self.class_list))

        # Compute device selection.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Pretrained BERT assets, loaded from a local checkpoint directory.
        self.bert_path = self.root_path + "bert-base-chinese"  # path to the pretrained BERT checkpoint
        self.bert_model = BertModel.from_pretrained(self.bert_path)      # pretrained encoder
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)   # matching tokenizer
        self.bert_config = BertConfig.from_pretrained(self.bert_path)    # model configuration

        # Training hyperparameters.
        self.num_classes = len(self.class_list)  # number of target categories
        self.num_epochs = 2    # epoch count
        self.batch_size = 64   # mini-batch size
        # NOTE(review): 1024 exceeds bert-base's 512 position embeddings; this is
        # fine if the length only applies to the precomputed embedding files, but
        # would break if raw token ids of this length are fed into the model —
        # confirm against the data pipeline.
        self.pad_size = 1024   # per-sample sequence length (pad short, truncate long)
        self.learning_rate = 5e-5  # optimizer learning rate
