from torch.utils.data import DataLoader

from dataset.dataset import BERTDataset
from dataset.vocab import WordVocab
from model.bert import BERT  # assumes these modules sit in the same directory tree as this script
from trainer.pretrain import BERTTrainer


def train(train_dataset, test_dataset, vocab_path, output_path,
          hidden, layers, attn_heads, seq_len, batch_size, epochs, num_workers,
          with_cuda, log_freq, corpus_lines, cuda_devices, on_memory,
          lr, adam_weight_decay, adam_beta1, adam_beta2):
    """
    Run BERT pretraining end to end: load the vocab and corpora, build the
    model and trainer, then train/save (and optionally evaluate) per epoch.

    All arguments are passed positionally by the caller. `test_dataset` may be
    None, in which case no test loader is built and evaluation is skipped.
    """

    print("Loading Vocab", vocab_path)
    vocab = WordVocab.load_vocab(vocab_path)
    print("Vocab Size: ", len(vocab))

    print("Loading Train Dataset", train_dataset)
    train_data = BERTDataset(train_dataset, vocab, seq_len=seq_len,
                             corpus_lines=corpus_lines, on_memory=on_memory)

    print("Loading Test Dataset", test_dataset)
    if test_dataset is not None:
        test_data = BERTDataset(test_dataset, vocab, seq_len=seq_len, on_memory=on_memory)
    else:
        test_data = None

    print("Creating Dataloader")
    train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
    if test_data is not None:
        test_loader = DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
    else:
        test_loader = None

    print("Building BERT model")
    bert = BERT(len(vocab), hidden=hidden, n_layers=layers, attn_heads=attn_heads)

    print("Creating BERT Trainer")
    trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_loader, test_dataloader=test_loader,
                          lr=lr, betas=(adam_beta1, adam_beta2), weight_decay=adam_weight_decay,
                          with_cuda=with_cuda, cuda_devices=cuda_devices, log_freq=log_freq)

    print("Training Start")
    for epoch in range(epochs):
        # Train, checkpoint, then (if a test set was given) evaluate each epoch.
        trainer.train(epoch)
        trainer.save(epoch, output_path)

        if test_loader is not None:
            trainer.test(epoch)


def main():
    """Configure all hyperparameters in-code and launch BERT pretraining."""
    # --- Data and output locations (edit these for your environment) ---
    train_path = "/Users/jiangfeng/PycharmProjects/Net/BERT/bert_pytorch/dataset/wikitext-2-train.txt"
    test_path = "/Users/jiangfeng/PycharmProjects/Net/BERT/bert_pytorch/dataset/wikitext-2-test.txt"  # set to None if there is no test set
    vocab_file = "/Users/jiangfeng/PycharmProjects/Net/BERT/bert_pytorch/dataset/vocab.txt"
    model_output = "bert.model"

    # --- Model architecture ---
    hidden_size = 256
    num_layers = 8
    num_attention_heads = 8
    max_seq_len = 30

    # --- Training schedule ---
    batch_size = 64
    num_epochs = 10
    num_workers = 5
    use_cuda = True
    log_frequency = 10
    corpus_lines = None  # None if the corpus line count is unknown
    cuda_devices = None  # None to let the trainer pick CUDA devices
    on_memory = True

    # --- Optimizer (Adam) ---
    learning_rate = 1e-3
    adam_weight_decay = 0.01
    adam_beta1 = 0.9
    adam_beta2 = 0.999

    train(train_path, test_path, vocab_file, model_output,
          hidden=hidden_size, layers=num_layers, attn_heads=num_attention_heads,
          seq_len=max_seq_len, batch_size=batch_size, epochs=num_epochs,
          num_workers=num_workers, with_cuda=use_cuda, log_freq=log_frequency,
          corpus_lines=corpus_lines, cuda_devices=cuda_devices, on_memory=on_memory,
          lr=learning_rate, adam_weight_decay=adam_weight_decay,
          adam_beta1=adam_beta1, adam_beta2=adam_beta2)


# Script entry point: run pretraining only when executed directly, not on import.
if __name__ == '__main__':
    main()
