from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, Callback, LossMonitor, SummaryCollector, \
    TimeMonitor
from utils import *
import argparse
import mindspore.dataset as ds
from models import *
import mindspore as ms
import mindspore.nn as nn
from mindspore import Model, context
from mindspore import load_checkpoint, load_param_into_net
import mindspore.dataset.transforms.c_transforms as C
from mindspore import dtype as mstype
from mindspore.nn.metrics import Accuracy
from mindspore.nn import warmup_lr
def _str2bool(value):
    """Convert a command-line string to a real boolean.

    argparse's ``type=bool`` is a trap: ``bool('False')`` is True because any
    non-empty string is truthy, so the original ``--word False`` could never
    disable word-level mode. This converter accepts the usual spellings and
    raises a proper argparse error otherwise.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes'):
        return True
    if lowered in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, default='DPCNN',
                    help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')
parser.add_argument('--embedding', default='random', type=str, help='random or pre_trained')
# BUG FIX: was `type=bool`, which parsed '--word False' as True.
parser.add_argument('--word', default=True, type=_str2bool, help='True for word, False for char')
args = parser.parse_args()

# context.set_context(
#     mode=context.GRAPH_MODE,
#     save_graphs=False,
#     device_target='CPU')

if __name__ == '__main__':
    dataset = '.'  # filename prefix / dataset root directory
    embedding = 'test.npz'
    if args.embedding == 'random':
        embedding = 'random'
    # NOTE(review): `embedding` is never read below in this file — confirm
    # whether Config/build_dataset once consumed it before removing.
    model_name = args.model  # TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer
    config = Config(dataset)
    np.random.seed(1)  # fixed seed so data shuffling/initialisation is reproducible
    start_time = time.time()
    print("Loading data......")

    # ---- load data -------------------------------------------------------
    vocab, train_data, dev_data, test_data = build_dataset(config, True)
    # train pipeline: python generator -> MindSpore dataset -> cast labels to
    # int32 (sparse SoftmaxCrossEntropyWithLogits requires integer labels)
    train_iter = DatasetMSIterater(train_data, config)
    train_data = ds.GeneratorDataset(train_iter, ['data', 'label'], num_parallel_workers=1, shuffle=True)
    train_dataset = train_data.map(C.TypeCast(mstype.int32), input_columns='label', num_parallel_workers=1)
    # dev pipeline, same label cast
    dev_iter = DatasetMSIterater(dev_data, config)
    dev_data = ds.GeneratorDataset(dev_iter, ['data', 'label'], num_parallel_workers=1, shuffle=True)
    dev_dataset = dev_data.map(C.TypeCast(mstype.int32), input_columns='label', num_parallel_workers=1)

    # ---- init net --------------------------------------------------------
    config.n_vocab = len(vocab)
    # NOTE(review): hard-coded to DPCNN, so --model / model_name is silently
    # ignored; the checkpoint loaded below is DPCNN-specific, so generalizing
    # this requires a matching per-model checkpoint path — TODO confirm intent.
    net = DPCNN(config)

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    optim = nn.Adam(params=net.trainable_params(), learning_rate=0.000005,
                    beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=5e-3)

    # resume training from a previous checkpoint
    state_dict = ms.load_checkpoint("DPCNN_1-3_1050.ckpt")
    ms.load_param_into_net(net, state_dict)
    model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics={"Accuracy": Accuracy()})

    # ---- callbacks -------------------------------------------------------
    summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_freq=1)
    config_ck = CheckpointConfig(save_checkpoint_steps=10, keep_checkpoint_max=1000)
    ckpt_cb = ModelCheckpoint(prefix='DPCNN', directory='models', config=config_ck)
    epoch_per_eval = {"state": [], "epoch": [], "acc": []}
    eval_cb = EvalCallBack(model, dev_dataset, 1, epoch_per_eval, 'eval')
    eval_train_cb = EvalCallBack(model, train_dataset, 1, epoch_per_eval, 'train')
    time_cb = TimeMonitor()

    # ---- train -----------------------------------------------------------
    print("============== Starting Training ==============")
    # BUG FIX: train on `train_dataset` (labels cast to int32). The original
    # passed the unmapped `train_data`, which discarded the TypeCast map built
    # above and fed uncast labels to the sparse loss.
    model.train(epoch=30, train_dataset=train_dataset, dataset_sink_mode=False,
                callbacks=[ckpt_cb, LossMonitor(10), eval_cb, time_cb, eval_train_cb])
    print("============== Training Success ==============")



