import importlib
import os.path
import random
import sys

import torch

from bart import bart
from lstm import lstm
from model.ket2a.ket2a_processor import KET2AProcessor
from model.ket2a.model import KET2A
from nllb_moe import nllb_moe
from t5 import t5
from switch_transformer import switch_transformer

from main import train as ket2a_train

# Shared training configuration for the seq2seq baselines
# (bart / nllb_moe / t5 / switch_transformer). Mutated per run by auto_test.
config = dict(
    seed=0,
    datasets_path=r"A:\projects\doing\KEA2T-final\datasets\datasets4\seq2seq",
    pretrained=False,
    lang="en",
    max_len=20,
    n_epochs=300,
    learning_rate=0.0002,
)

# Configuration for the LSTM baseline. Seed is overwritten per run by auto_test.
lstm_config = dict(
    seed=0,
    datasets=r"A:\projects\doing\KEA2T-final\datasets\datasets4",
    batch_size=16,
    seq_max_len=10,
    in_dim=32,
    hid_dim=32,
    learning_rate=0.001,
    dropout=0.1,
    num_layers=1,
    num_epochs=500,
)

# KET2A model configuration. Keys tagged "# *" are required; the rest are
# optional. Some nested keys only apply to one variant (noted per line).
ket2a_config = {
    "seed": 0,
    "log": "log",
    "datasets": r"A:\projects\doing\KEA2T-final\datasets\datasets4",
    "entity_dim": 128,  # * required
    "batch_size": 16,  # * required
    "pre_train": False,  # * required
    "lang": "en",  # * dataset language: "cn" (Chinese) or "en" (English)
    "gcn_type": "gat",  # * graph encoder variant: "gat" or "none"
    "epochs": 200,  # * required
    "seq2seq_type": "transformer",  # * decoder variant: "gru2gru" or "transformer"
    "gcn_args": {  # * settings for the graph encoder
        "device": "cpu",
        "learning_rate": 0.001,
        "heads": [2, ],  # used only when gcn_type == "gat"
        "hidden_dim": 128,  # used only when gcn_type == "gat"
    },
    "seq2seq_args": {  # * settings for the seq2seq component
        "device": "cpu",
        "learning_rate": 0.001,

        "max_length": 10,  # gru2gru variant
        "encoder_hidden_dim": 128,  # gru2gru variant
        "decoder_hidden_dim": 128,  # gru2gru variant

        "num_encoder_layers": 1,  # transformer variant
        "num_decoder_layers": 1,  # transformer variant
        "num_heads": 2,  # transformer variant
        "ffn_hid_dim": 128,  # transformer variant
    },
}

# Build the KET2A data processor once at import time so every seeded run
# below shares the same processor instance and dataloaders.
processor = KET2AProcessor(
    ket2a_config["datasets"],
    ket2a_config["seq2seq_args"]["max_length"],
    ket2a_config["batch_size"])
# Train/test splits exposed by the processor's seq2seq dataset
# (presumably iterable dataloaders — see KET2AProcessor for details).
train_dataloader = processor.seq2seq_dataset.train
test_dataloader = processor.seq2seq_dataset.test

# Fixed RNG seeds so repeated benchmark runs are reproducible and comparable.
seeds = [754335025, 692060235, 582885236, 294106359]


def auto_test():
    """Run the benchmark suite once per fixed seed.

    For each seed in ``seeds``: propagate the seed into every model config,
    re-seed ``random``, re-split the dataset via the project's
    ``data_split.split`` helper, then train the enabled models.
    Commented-out calls are alternative models kept for manual toggling.
    """
    datasets_path = config["datasets_path"]
    # Put the dataset folder on sys.path once (the original appended it on
    # every iteration, duplicating the entry) so `data_split` is importable.
    if datasets_path not in sys.path:
        sys.path.append(datasets_path)

    for seed in seeds:
        # seed = random.randint(0, 1000000000)
        config["seed"] = seed
        lstm_config["seed"] = seed
        ket2a_config["seed"] = seed

        random.seed(seed)
        # NOTE(review): torch is imported but its RNG is never seeded here;
        # confirm whether torch.manual_seed(seed) is intended as well.

        # Dynamically import the dataset-splitting helper and re-split the
        # dataset so this run's split depends on the current seed.
        data_split = importlib.import_module('data_split')
        data_split.split(datasets_path)

        # lstm.train(lstm_config)

        # ket2a_config["gcn_type"] = "gat"
        # ket2a = KET2A(processor, **ket2a_config)
        # ket2a_train(ket2a, train_dataloader, test_dataloader, ket2a_config["epochs"], config=ket2a_config)
        # ket2a_config["gcn_type"] = "none"
        # ket2a = KET2A(processor, **ket2a_config)
        # ket2a_train(ket2a, train_dataloader, test_dataloader, ket2a_config["epochs"], config=ket2a_config)

        config["learning_rate"] = 0.0002
        bart.train(**config)
        nllb_moe.train(**config)
        # config["learning_rate"] = 0.00005
        # t5.train(**config)
        # switch_transformer.train(**config)


# Guard the entry point so importing this module does not trigger training.
if __name__ == "__main__":
    auto_test()
