# -*- encoding:utf-8 -*-
"""
  This script provides a K-BERT example for classification.
"""
import os
import sys
import torch
import json
import random
import argparse
import collections
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import * 
from uer.model_builder import build_model
from uer.utils.optimizers import  BertAdam
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
from brain import KnowledgeGraph
from multiprocessing import Process, Pool
import numpy as np
import json
import numpy as np
from tqdm import tqdm
import time
import jieba
from sklearn.metrics import classification_report, confusion_matrix
import signal
import sys
import threading
import keyboard


# Global flags toggled by the keyboard listener set up in main().
PAUSE_SIGNAL = False  # set True when the user presses space; checked after every training batch
SAVE_CHECKPOINT = False  # NOTE(review): never read or written anywhere below — appears unused

class BertClassifier(nn.Module):
    """BERT-based sequence classifier with an optional visible matrix.

    Wraps a prebuilt UER model's embedding and encoder, adds a two-layer
    classification head, and computes an NLL loss over log-softmax logits.
    The visible matrix `vm` (a float sentiment-score matrix injected from the
    knowledge graph) is forwarded to the encoder unless disabled by flags.
    """

    def __init__(self, args, model):
        """
        Args:
            args: namespace providing labels_num, pooling, hidden_size,
                  no_vm and no_kg.
            model: prebuilt backbone exposing `embedding` and `encoder`.
        """
        super(BertClassifier, self).__init__()
        self.embedding = model.embedding
        self.encoder = model.encoder
        self.labels_num = args.labels_num
        self.pooling = args.pooling
        self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
        self.output_layer_2 = nn.Linear(args.hidden_size, args.labels_num)
        self.softmax = nn.LogSoftmax(dim=-1)
        self.criterion = nn.NLLLoss()
        # self.criterion = nn.CrossEntropyLoss()
        # Either flag (--no_vm or --no_kg) disables the visible matrix.
        self.use_vm = not (args.no_vm or args.no_kg)
        print("[BertClassifier] use visible_matrix: {}".format(self.use_vm))

    def forward(self, src, label, mask, pos=None, vm=None):
        """Compute loss and logits for a batch.

        Args:
            src: [batch_size x seq_length] token ids.
            label: [batch_size] gold labels.
            mask: [batch_size x seq_length] attention mask (1 = real token).
            pos: [batch_size x seq_length] position ids, optional.
            vm: [batch_size x seq_length x seq_length] float sentiment-score
                matrix, optional.

        Returns:
            (loss, logits) where logits is [batch_size x labels_num].
        """
        assert src.shape == mask.shape, f"src和mask尺寸不一致: {src.shape} vs {mask.shape}"
        if vm is not None:
            assert vm.shape[-1] == vm.shape[-2] == src.shape[-1], f"vm尺寸与输入不匹配: {vm.shape} vs {src.shape}"

        # BUG FIX: the visible-matrix handling previously ran AFTER the
        # encoder call, so --no_vm/--no_kg never actually disabled the matrix
        # and the float cast was dead code. Apply it before the encoder
        # consumes vm.
        if not self.use_vm:
            vm = None
        elif vm is not None:
            # Ensure vm is float-typed for the encoder.
            vm = vm.float()

        # Embedding.
        emb = self.embedding(src, mask, pos)
        # Encoder (vm, when present, biases/masks attention).
        output = self.encoder(emb, mask, vm)

        # Pool the encoder output to a single vector per sequence.
        if self.pooling == "mean":
            output = torch.mean(output, dim=1)
        elif self.pooling == "max":
            output = torch.max(output, dim=1)[0]
        elif self.pooling == "last":
            output = output[:, -1, :]
        else:
            output = output[:, 0, :]

        output = torch.tanh(self.output_layer_1(output))
        logits = self.output_layer_2(output)
        loss = self.criterion(self.softmax(logits.view(-1, self.labels_num)), label.view(-1))
        return loss, logits


def add_knowledge_worker(params):
    """Worker that converts raw TSV lines into model-ready samples.

    Args:
        params: tuple (p_id, sentences, columns, kg, vocab, args) where
            p_id is the worker index (for progress logging), sentences is a
            list of raw TSV lines, columns maps column names to indices,
            kg is the knowledge graph (may expose `kg_dict` for the JSON
            emotion KG), vocab maps tokens to ids, and args carries
            tokenizer/seq_length/no_kg settings.

    Returns:
        list of (token_ids, label, mask, pos, vm) tuples, one per valid line.
        For the "word" tokenizer vm is a float32 seq_length x seq_length
        sentiment-score matrix; otherwise it is the KG's boolean visible
        matrix.
    """
    p_id, sentences, columns, kg, vocab, args = params

    dataset = []
    for line_id, line in enumerate(sentences):
        if line_id % 10000 == 0:
            print("Progress of process {}: {}/{}".format(p_id, line_id, len(sentences)))
            sys.stdout.flush()
        line = line.strip().split('\t')
        try:
            if len(line) >= 2:  # process all well-formed rows
                label = int(line[columns["label"]])
                # Prepend CLS to the text_a column of the TSV row.
                text = CLS_TOKEN + line[columns["text_a"]]

                if args.tokenizer == "word":
                    # jieba word-level segmentation (mix of words and chars).
                    words = jieba.lcut(text)

                    # Truncate (reserving one slot for SEP) then pad to seq_length.
                    words = words[:args.seq_length - 1] + [SEP_TOKEN]
                    words += [PAD_TOKEN] * (args.seq_length - len(words))

                    # Map segmented words to token ids.
                    tokens = [vocab.get(w) for w in words]
                    pos = list(range(len(words)))

                    # Build the visible matrix; works with or without a KG.
                    vm = np.zeros((args.seq_length, args.seq_length), dtype=np.float32)
                    if not args.no_kg and kg and hasattr(kg, 'kg_dict'):
                        # KG available: write scaled sentiment scores on a
                        # 3-wide band around each emotional word.
                        for i in range(min(len(words), args.seq_length)):
                            if words[i] in kg.kg_dict:
                                score = kg.kg_dict[words[i]].get("sentiment_score", 0) * 1000
                                for j in range(max(0, i - 1), min(args.seq_length, i + 2)):
                                    vm[i, j] = score
                    else:
                        # No KG: optionally inject a random attention focus
                        # (30% of rows get one random banded score).
                        if random.random() < 0.3:
                            i = random.randint(0, args.seq_length - 1)
                            score = random.uniform(-500, 500)
                            for j in range(max(0, i - 1), min(args.seq_length, i + 2)):
                                vm[i, j] = score

                    # mask zeroes out padding positions so the model ignores them.
                    mask = [1 if t != vocab.get(PAD_TOKEN) else 0 for t in tokens]
                    dataset.append((tokens, label, mask, pos, vm))

                else:
                    # Original character-level path: let the KG build the
                    # tokens/positions/visible matrix directly.
                    tokens, pos, vm, _ = kg.add_knowledge_with_vm([text], add_pad=True, max_length=args.seq_length)
                    tokens = tokens[0]
                    pos = pos[0]
                    vm = vm[0].astype("bool")

                    token_ids = [vocab.get(t) for t in tokens]
                    # Here tokens are still strings, so compare against the
                    # PAD token itself rather than its vocab id.
                    mask = [1 if t != PAD_TOKEN else 0 for t in tokens]
                    dataset.append((token_ids, label, mask, pos, vm))

                # Sanity check: every sample must be exactly seq_length long.
                assert len(tokens) == args.seq_length, f"长度不一致: {len(tokens)} != {args.seq_length}"

        except Exception as e:
            print(f"Error processing line {line_id}: {str(e)}")
            print("Problematic line:", line)

    return dataset

# Checkpoint helper: persists everything needed to resume an interrupted run.
def save_checkpoint(epoch, model, optimizer, args, best_result):
    """Write a resumable training checkpoint next to the output model file.

    Captures model/optimizer state, both RNG streams and the CLI arguments
    so a later run can continue from `epoch` with identical randomness.
    """
    ckpt_dir = os.path.dirname(args.output_model_path)
    state = {}
    state['epoch'] = epoch
    state['model_state_dict'] = model.state_dict()
    state['optimizer_state_dict'] = optimizer.state_dict()
    state['best_result'] = best_result
    # Preserve both RNG states so shuffling/dropout resume deterministically.
    state['random_state'] = random.getstate()
    state['torch_random_state'] = torch.random.get_rng_state()
    state['args'] = vars(args)
    torch.save(state, os.path.join(ckpt_dir, 'checkpoint.pth'))
    print(f"\nCheckpoint saved at epoch {epoch}")


def main():
    """Train, checkpoint and evaluate a K-BERT sentiment classifier.

    Pipeline: parse CLI arguments, count labels from the training TSV,
    build the UER BERT backbone, wrap it in BertClassifier, optionally load
    an emotional knowledge graph, then train with BertAdam while saving
    per-epoch models and a resumable checkpoint, evaluating on dev/test.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Path options.

    parser.add_argument("--pretrained_model_path", default=None, type=str,
                        help="Path of the pretrained model.")
    # parser.add_argument("--pretrained_model_path", default="weibo_model.bin", type=str,
    #                     help="Path of the pretrained model.")
    parser.add_argument("--output_model_path", default="./models/classifier_model.bin", type=str,
                        help="Path of the output model.")
    parser.add_argument("--vocab_path", default="./models/google_vocab.txt", type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--train_path", type=str, required=True,
                        help="Path of the trainset.")
    parser.add_argument("--dev_path", type=str, required=True,
                        help="Path of the devset.") 
    parser.add_argument("--test_path", type=str, required=True,
                        help="Path of the testset.")
    parser.add_argument("--config_path", default="./models/google_config.json", type=str,
                        help="Path of the config file.")

    # Model options.
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=256,
                        help="Sequence length.")
    parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
                                                   "cnn", "gatedcnn", "attn", \
                                                   "rcnn", "crnn", "gpt", "bilstm"], \
                                                   default="bert", help="Encoder type.")
    parser.add_argument("--bidirectional", action="store_true", help="Specific to recurrent model.")
    parser.add_argument("--pooling", choices=["mean", "max", "first", "last"], default="first",
                        help="Pooling type.")

    # Subword options.
    parser.add_argument("--subword_type", choices=["none", "char"], default="none",
                        help="Subword feature type.")
    parser.add_argument("--sub_vocab_path", type=str, default="models/sub_vocab.txt",
                        help="Path of the subword vocabulary file.")
    parser.add_argument("--subencoder", choices=["avg", "lstm", "gru", "cnn"], default="avg",
                        help="Subencoder type.")
    parser.add_argument("--sub_layers_num", type=int, default=2, help="The number of subencoder layers.")

    # Tokenizer options.
    parser.add_argument("--tokenizer", choices=["bert", "char", "word", "space"], default="bert",
                        help="Specify the tokenizer." 
                             "Original Google BERT uses bert tokenizer on Chinese corpus."
                             "Char tokenizer segments sentences into characters."
                             "Word tokenizer supports online word segmentation based on jieba segmentor."
                             "Space tokenizer segments sentences into words according to space."
                             )
    parser.add_argument("--tokenizer_path", type=str, default="./custom_words.txt",
                        help="Path to custom dictionary for word tokenizer")

    # Optimizer options.
    parser.add_argument("--learning_rate", type=float, default=2e-5,
                        help="Learning rate.")
    parser.add_argument("--warmup", type=float, default=0.1, # range 0-1; set to 0 to disable warmup
                        help="Warm up value.")

    # Training options.
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="Dropout.")
    parser.add_argument("--epochs_num", type=int, default=5,
                        help="Number of epochs.")
    parser.add_argument("--report_steps", type=int, default=1000,
                        help="Specific steps to print prompt.")
    parser.add_argument("--seed", type=int, default=7,
                        help="Random seed.")

    # Evaluation options.
    parser.add_argument("--mean_reciprocal_rank", action="store_true", help="Evaluation metrics for DBQA dataset.")

    # Knowledge-graph options.
    parser.add_argument("--kg_name", required=False, help="KG name or path")
    parser.add_argument("--workers_num", type=int, default=1, help="number of process for loading dataset")
    parser.add_argument("--no_vm", action="store_true", help="Disable the visible_matrix")
    parser.add_argument("--no_kg", action="store_true", help="完全禁用知识图谱")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    set_seed(args.seed)

    # Initialize the jieba tokenizer (placed right after set_seed(args.seed)).
    if args.tokenizer == "word":
        import jieba
        if os.path.exists(args.tokenizer_path):
            jieba.load_userdict(args.tokenizer_path)
        else:
            print(f"Warning: {args.tokenizer_path} not found, using default jieba dictionary")

    # Count the number of labels.
    labels_set = set()
    columns = {}
    with open(args.train_path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            try:
                line = line.strip().split("\t")
                if line_id == 0:
                    for i, column_name in enumerate(line):
                        columns[column_name] = i
                    continue
                label = int(line[columns["label"]])
                labels_set.add(label)
            except:
                pass
    args.labels_num = len(labels_set) 

    # Load vocabulary.
    vocab = Vocab()
    vocab.load(args.vocab_path)
    args.vocab = vocab

    # Build bert model.
    # A pseudo target is added.
    args.target = "bert"
    model = build_model(args)

    # Load or initialize parameters.
    if args.pretrained_model_path is not None:
        # Initialize with pretrained model.
        print("Loading pretrained model...")
        model.load_state_dict(torch.load(args.pretrained_model_path), strict=False)  
    else:
        # Initialize with normal distribution.
        for n, p in list(model.named_parameters()):
            if 'gamma' not in n and 'beta' not in n:
                p.data.normal_(0, 0.02)
    
    # Build classification model.
    model = BertClassifier(args, model)

    # For simplicity, we use DataParallel wrapper to use multiple GPUs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    model = model.to(device)
    
    # Dataset loader.
    def batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms):
        """Yield full batches of device-resident tensors.

        NOTE(review): only complete batches are yielded, yet evaluate()
        divides by len(dataset) — samples in the dropped trailing batch are
        effectively counted as incorrect, slightly understating accuracy.
        """
        instances_num = input_ids.size()[0]
        vms_tensor = torch.stack([torch.from_numpy(vm) for vm in vms]).float()

        # Only process complete batches.
        for i in range(instances_num // batch_size):
            yield (
                input_ids[i * batch_size: (i + 1) * batch_size, :].to(device),
                label_ids[i * batch_size: (i + 1) * batch_size].to(device),
                mask_ids[i * batch_size: (i + 1) * batch_size, :].to(device),
                pos_ids[i * batch_size: (i + 1) * batch_size, :].to(device),
                vms_tensor[i * batch_size: (i + 1) * batch_size].to(device)
            )
        # The final incomplete batch is intentionally not processed.

    # KG loading logic (supports --no_kg, JSON emotion KGs, and spo files).
    if args.no_kg:
        spo_files = []
        kg = KnowledgeGraph(spo_files=spo_files, predicate=True)
        print("Running WITHOUT knowledge graph (by --no_kg flag)")
    elif not args.kg_name or args.kg_name == 'none':
        spo_files = []
        kg = KnowledgeGraph(spo_files=spo_files, predicate=True)
        print("Running WITHOUT knowledge graph (kg_name not provided)")
    elif args.kg_name.endswith('.json'):
        try:
            if os.path.exists(args.kg_name):
                with open(args.kg_name, 'r', encoding='utf-8') as f:
                    kg_dict = json.load(f).get("words", {})
                kg = KnowledgeGraph(spo_files=[], predicate=True)
                kg.kg_dict = kg_dict  # attach the emotion-word dict manually
                print(f"Loaded JSON KG with {len(kg_dict)} emotional words")
            else:
                raise FileNotFoundError(f"KG file {args.kg_name} not found")
        except Exception as e:
            print(f"Failed to load KG: {str(e)}")
            args.no_kg = True  # automatically fall back to no-KG mode
            spo_files = []
            kg = KnowledgeGraph(spo_files=spo_files, predicate=True)
    else:
        try:
            kg = KnowledgeGraph(spo_files=[args.kg_name], predicate=True)
        except TypeError:  # handles the case where args.kg_name is None
            kg = None
            args.no_kg = True
            kg = KnowledgeGraph(spo_files=[], predicate=True)



    def read_dataset(path, workers_num=1):
        """Read a TSV dataset (header skipped) and build KG-enriched samples.

        Delegates to add_knowledge_worker, using a multiprocessing Pool when
        workers_num > 1; each worker processes its slice of the sentences.
        """
        print("Loading sentences from {}".format(path))
        sentences = []
        with open(path, mode='r', encoding="utf-8") as f:
            for line_id, line in enumerate(f):
                if line_id == 0:
                    continue
                sentences.append(line)
        sentence_num = len(sentences)

        print("There are {} sentence in total. We use {} processes to inject knowledge into sentences.".format(sentence_num, workers_num))
        if workers_num > 1:
            params = []
            sentence_per_block = int(sentence_num / workers_num) + 1
            for i in range(workers_num):
                params.append((i, sentences[i*sentence_per_block: (i+1)*sentence_per_block], columns, kg, vocab, args))
            pool = Pool(workers_num)
            res = pool.map(add_knowledge_worker, params)
            pool.close()
            pool.join()
            dataset = [sample for block in res for sample in block]
        else:
            params = (0, sentences, columns, kg, vocab, args)
            dataset = add_knowledge_worker(params)

        return dataset

    def evaluate(args, is_test, metrics='Acc'):
        """Evaluate on the dev set (is_test=False) or test set (is_test=True).

        Prints a sklearn classification report and confusion matrix and
        returns accuracy computed as correct / len(dataset).
        """
        if is_test:
            dataset = read_dataset(args.test_path, workers_num=args.workers_num)
        else:
            dataset = read_dataset(args.dev_path, workers_num=args.workers_num)

        input_ids = torch.LongTensor([sample[0] for sample in dataset])
        label_ids = torch.LongTensor([sample[1] for sample in dataset])
        mask_ids = torch.LongTensor([sample[2] for sample in dataset])
        pos_ids = torch.LongTensor([example[3] for example in dataset])
        vms = [example[4] for example in dataset]

        batch_size = args.batch_size
        instances_num = input_ids.size()[0]

        if is_test:
            print("The number of evaluation instances: ", instances_num)

        correct = 0
        predictions = []
        true_labels = []

        model.eval()
        total_batches = instances_num // batch_size + (1 if instances_num % batch_size != 0 else 0)

        with torch.no_grad():
            # Create a tqdm progress bar.
            progress_bar = tqdm(total=total_batches, desc='Evaluating', unit='batch')

            for i, (input_ids_batch, label_ids_batch, mask_ids_batch, pos_ids_batch, vms_batch) in enumerate(
                    batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms)):
                input_ids_batch = input_ids_batch.to(device)
                label_ids_batch = label_ids_batch.to(device)
                mask_ids_batch = mask_ids_batch.to(device)
                pos_ids_batch, vms_batch = pos_ids_batch.to(device), vms_batch.to(device)

                loss, logits = model(input_ids_batch, label_ids_batch, mask_ids_batch, pos_ids_batch, vms_batch)
                pred = torch.argmax(logits, dim=1)

                predictions.extend(pred.cpu().numpy())
                true_labels.extend(label_ids_batch.cpu().numpy())
                correct += torch.sum(pred == label_ids_batch).item()

                # Update the progress bar.
                progress_bar.update(1)
                progress_bar.set_postfix({
                    'acc': f'{correct / ((i + 1) * batch_size):.2%}',
                    'loss': f'{loss.item():.4f}'
                })

            # Close the progress bar.
            progress_bar.close()

        # Compute and print a detailed evaluation report.
        report = classification_report(true_labels, predictions,
                                       target_names=[f'Class {i}' for i in range(args.labels_num)])
        conf_matrix = confusion_matrix(true_labels, predictions)
        print("\nClassification Report:\n", report)
        print("Confusion Matrix:\n", conf_matrix)

        final_acc = correct / len(dataset)
        print("Acc. (Correct/Total): {:.4f} ({}/{})".format(final_acc, correct, len(dataset)))
        return final_acc

    # Training phase.
    print(f"Model device: {next(model.parameters()).device}")
    # sample_batch = next(batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms))
    # print(f"Batch devices - inputs: {sample_batch[0].device}, vms: {sample_batch[4].device}")

    print("Start training.")
    trainset = read_dataset(args.train_path, workers_num=args.workers_num)

    print("训练集标签分布:", collections.Counter([sample[1] for sample in trainset]))
    if args.test_path:  # if a test-set path was provided
        testset = read_dataset(args.test_path, workers_num=args.workers_num)
        print("测试集标签分布:", collections.Counter([sample[1] for sample in testset]))

    print("Shuffling dataset")
    random.shuffle(trainset)
    instances_num = len(trainset)
    batch_size = args.batch_size

    # print("Trans data to tensor.")
    input_ids = torch.LongTensor([example[0] for example in trainset])
    label_ids = torch.LongTensor([example[1] for example in trainset])
    mask_ids = torch.LongTensor([example[2] for example in trainset])
    pos_ids = torch.LongTensor([example[3] for example in trainset])
    vms = [example[4] for example in trainset]
    vms_tensor = torch.stack([torch.from_numpy(vm) for vm in vms]).float()

    train_steps = int(instances_num * args.epochs_num / batch_size) + 1

    # print("Batch size: ", batch_size)
    # print("The number of training instances:", instances_num)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
    ]
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup, t_total=train_steps)


    total_loss = 0.
    result = 0.0
    best_result = 0.0

    # -------------- Checkpoint-related code --------------
    # Load a checkpoint if one exists.
    start_epoch = 1
    best_result = 0.0
    checkpoint_path = './models/checkpoint.pth'
    if os.path.exists(checkpoint_path):
        # Load into CPU memory first.
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        print(f"Loaded checkpoint from {checkpoint_path}")

        # Load piece by piece and immediately release data no longer needed.
        # NOTE(review): when DataParallel wraps the model, its state-dict keys
        # gain a 'module.' prefix — confirm resuming works on multi-GPU setups.
        def load_and_clean(component, key, transform=None):
            """Pop `key` from the checkpoint, load it into `component`, free memory."""
            value = checkpoint.pop(key)
            if transform:
                value = transform(value)
            component.load_state_dict(value)
            del value
            torch.cuda.empty_cache()  # release GPU memory immediately

        # Load model parameters.
        # print("Loading model state...")
        load_and_clean(model, 'model_state_dict')

        # Load optimizer state.
        # print("Loading optimizer state...")
        load_and_clean(optimizer, 'optimizer_state_dict')

        # Load the remaining training state.
        # print("Loading training states...")
        start_epoch = checkpoint.pop('epoch') + 1
        best_result = checkpoint.pop('best_result')
        random.setstate(checkpoint.pop('random_state'))
        torch.random.set_rng_state(checkpoint.pop('torch_random_state'))

        # Handle the args key (not removed, kept for reference only).
        loaded_args = checkpoint.pop('args', None)
        if loaded_args:
            print(f"Original args: {loaded_args}")

        # Make sure every entry has been consumed (except args).
        remaining_keys = set(checkpoint.keys()) - {'args'}
        assert len(remaining_keys) == 0, f"Unused checkpoint keys: {remaining_keys}"
        del checkpoint
        torch.cuda.empty_cache()

        print(f"Resuming training from epoch {start_epoch}, best result: {best_result:.4f}")

    # Replaces the old signal_handler / listener thread with a keyboard hook.
    # NOTE(review): the `keyboard` package may require root privileges on
    # Linux — confirm it works in the target environment.
    def setup_keyboard_listener():
        """Register a hook that sets PAUSE_SIGNAL when space is pressed."""
        def on_space_press(event):
            global PAUSE_SIGNAL
            if event.name == 'space':
                PAUSE_SIGNAL = True

        keyboard.on_press(on_space_press)

        # Keyboard listening is initialized before training starts.

    setup_keyboard_listener()
    print("\n已启用空格键暂停功能，按下空格键即可保存状态并暂停训练")

    # Training loop.
    for epoch in range(start_epoch, args.epochs_num + 1):
        model.train()
        total_batches = instances_num // batch_size + (1 if instances_num % batch_size != 0 else 0)

        # Iterate over batches with a progress bar.
        with tqdm(total=total_batches, desc=f'Epoch {epoch}', unit='batch') as pbar:
            j = 0
            for i, (input_ids_batch, label_ids_batch, mask_ids_batch, pos_ids_batch, vms_batch) in enumerate(
                    batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms)):

                j += 1
                model.zero_grad()
                # vms_batch = torch.FloatTensor(vms_batch)

                input_ids_batch = input_ids_batch.to(device)
                label_ids_batch = label_ids_batch.to(device)
                mask_ids_batch = mask_ids_batch.to(device)
                pos_ids_batch = pos_ids_batch.to(device)
                vms_batch = vms_batch.to(device)

                loss, _ = model(input_ids_batch, label_ids_batch, mask_ids_batch, pos=pos_ids_batch, vm=vms_batch)
                if torch.cuda.device_count() > 1:
                    loss = torch.mean(loss)
                total_loss += loss.item()

                # Update progress-bar info.
                # NOTE(review): total_loss is reset every report_steps below
                # but divided by (i + 1) here, so 'avg_loss' reads low after
                # the first report — confirm this is intended.
                pbar.set_postfix({
                    'loss': f'{loss.item():.4f}',
                    'avg_loss': f'{total_loss / (i + 1):.4f}'
                })
                pbar.update(1)

                if (i + 1) % args.report_steps == 0:
                    print("\nEpoch {} - Step {}: Avg Loss {:.4f}".format(
                        epoch, i + 1, total_loss / args.report_steps))
                    sys.stdout.flush()
                    total_loss = 0.

                loss.backward()
                optimizer.step()

                # After each batch, check the pause signal.
                if PAUSE_SIGNAL:
                    # epoch-1 so the interrupted epoch is re-run after resume.
                    save_checkpoint(epoch-1, model, optimizer, args, best_result)
                    print("训练已暂停，模型状态已保存。")
                    sys.exit(0)

        # Save the model at the end of every epoch.
        save_model(model, args.output_model_path.replace(".bin", f"_epoch{epoch}.bin"))
        print(f"Model saved for epoch {epoch}")

        print("\nStart evaluation on dev dataset.")
        result = evaluate(args, False)
        if result > best_result:
            best_result = result
            save_model(model, args.output_model_path)
        else:
            # No dev improvement: skip the test-set evaluation this epoch.
            continue

        print("\nStart evaluation on test dataset.")
        evaluate(args, True)

    # Evaluation phase.
    print("Final evaluation on the test dataset.")

    if torch.cuda.device_count() > 1:
        model.module.load_state_dict(torch.load(args.output_model_path))
    else:
        model.load_state_dict(torch.load(args.output_model_path))
    evaluate(args, True)

    # Remove the checkpoint once training has finished successfully.
    if os.path.exists(checkpoint_path):
        os.remove(checkpoint_path)


# Script entry point.
if __name__ == "__main__":
    main()
