import torch
import numpy as np
import argparse
import logging

import os
import sys
import json
import math
import random
import transformers
import torch.utils.data as Data
import horovod.torch as hvd
import yaml
import codecs
import time

from transformers import GPT2LMHeadModel, GPT2Config
from lstm_crf import LSTM_CRF
from optparse import OptionParser
from tqdm import tqdm

# Force synchronous CUDA kernel launches so errors surface at the faulting op (debug aid).
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
rand_int = random.randint(1,100)  # random suffix for the log filename (currently commented out below)
np.set_printoptions(threshold=np.inf)  # print full arrays when logging/debugging
# Append-mode file logging with timestamp/module/line prefix.
logging.basicConfig(
            filename='/work/xuyongdang/code/transformer/trained_model/decode.biaodian.', # + str(rand_int),
            filemode='a+',
            level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")

get_time_ms = lambda: int(time.time() * 1000)

def torch_save_epoch(path, model, optimizer, epoch):
    """Save model + optimizer state and the epoch number to a checkpoint file.

    Unwraps DataParallel/DistributedDataParallel-style wrappers (``model.module``)
    so the checkpoint can later be loaded into a bare model.

    :param str path: file path to be saved
    :param torch.nn.Module model: torch model (possibly wrapped in ``.module``)
    :param optimizer: optimizer whose state is checkpointed alongside the model
    :param int epoch: epoch number stored under the ``'epoch'`` key
    """
    # De-duplicated: pick the underlying module once instead of two save branches.
    target = model.module if hasattr(model, 'module') else model
    torch.save({
        'epoch': epoch,
        'model_state_dict': target.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()
        }, path)

# read big txt file
class ReadDataset(Data.Dataset):
    """Line-based dataset that pre-groups sentence indices into equal-size batches.

    Sentences are (optionally) sorted by token length so each batch holds
    similarly sized sequences; a batch whose length spread exceeds 5 tokens is
    discarded, as is any trailing partial batch — every kept batch has exactly
    ``batch_size`` items, which is IMPORTANT for the Horovod sampler.

    :param str file_path: whitespace-tokenized text file, one sentence per line
    :param int batch_size: sentences per batch; should be divisible by the horovod rank count
    :param bool ascend_order: if True keep file order; otherwise sort by length
    """
    def __init__(self, file_path=None, batch_size=1, ascend_order=False):
        data_lines = []
        with open(file_path, "r") as f:
            for ln in f:
                data_lines.append(ln[:-1].strip().split())
        self.dataset = data_lines
        self.sentences = len(data_lines)
        self.list_len = [len(xx) for xx in data_lines]
        self.batch_ind = []
        if batch_size > 1:
            if ascend_order == False:
                # sort by length in ascending order
                indices = sorted(range(self.sentences), key=lambda i: self.list_len[i])
            else:
                indices = range(self.sentences)
            bs = 0
            discard_batch_count = 0
            while bs < self.sentences:
                be = bs + batch_size
                # only complete batches are kept (same size, IMPORTANT for HOROVOD sampler)
                if be <= self.sentences:
                    # discard this batch if length diff is more than 5 tokens
                    if self.list_len[indices[be-1]] - self.list_len[indices[bs]] > 5:
                        discard_batch_count += 1
                        # BUGFIX: log the discarded batch bounds BEFORE advancing bs;
                        # the old code set bs = be first, so it logged the wrong line
                        # and raised IndexError when be == self.sentences.
                        logging.info(data_lines[indices[be-1]])
                        logging.info(data_lines[indices[bs]])
                        bs = be
                        continue
                    else:
                        self.batch_ind.append(indices[bs:be])
                bs = be
            self.sentences = len(self.batch_ind) * batch_size
        else:
            self.batch_ind = [[i] for i in range(self.sentences)]

    def __len__(self):
        return self.sentences

    def __getitem__(self, index):
        # Preprocess one line: the tokens are integer label ids.
        ln = self.dataset[index]
        data = np.array([int(label) for label in ln], dtype=np.int32)
        return data

    def get_batch_num(self):
        return len(self.batch_ind)

class HorovodSampler(Data.Sampler):
    """Sampler that restricts data loading to a subset of the batch_ind.

    Arguments:
        batch_ind: Dataset sorted indices used for sampling (list of equal-size batches).
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): If true (default), sampler will shuffle the batches
            (items inside a batch keep their order).
    """
    def __init__(self, batch_ind, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            raise RuntimeError("Requires distributed package to be available")
        if rank is None:
            raise RuntimeError("Requires distributed package to be available")
        self.batch_ind = batch_ind[:]
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # All batches have the same size, so total item count is batches * batch_size.
        lens = len(batch_ind) * len(batch_ind[0])
        self.num_samples = int(math.ceil(lens * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            # shuffle whole batches; neighbouring batches hold similar lengths
            batch_ind_len = len(self.batch_ind)
            idx_tmp = torch.randperm(batch_ind_len, generator=g).tolist()
            indices = []
            for xx in idx_tmp:
                indices += self.batch_ind[xx]
        else:
            # BUGFIX (was FIXME): flatten the batches into sample indices in order;
            # the old code returned batch positions, not the dataset indices.
            indices = [i for batch in self.batch_ind for i in batch]
        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample: every num_replicas-th index starting at this rank
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Called once per epoch so every worker reshuffles identically.
        self.epoch = epoch

def init_punct_model(configs):
    """Build the LSTM-CRF punctuation model from a parsed config dict.

    :param dict configs: parsed YAML config with a ``model_params`` section
    :return: a freshly constructed ``LSTM_CRF`` instance
    """
    params = configs['model_params']
    # All constructor arguments come straight from the config's model_params.
    punct_model = LSTM_CRF(
        rnn_input_dim=params['rnn_input_dim'],
        rnn_type=params['rnn_type'],
        rnn_units=params['rnn_hidden_units'],
        num_rnn_layers=params['rnn_layers'],
        bi_flag=params['bi_flag'],
        dropout_rate=params['dropout_rate'],
        average_batch=params['average_batch'],
        use_crf=params['use_crf'],
        target_size=params['punct_size'],
        use_cuda=params['use_cuda'])
    return punct_model

# Chinese sentence biaodian symbol prediction
class BIAODIAN(torch.nn.Module):
    def __init__(self, lm_model, frozen_lm=True, batch_first=True):
        super(BIAODIAN, self).__init__()
        config_file  = '/work/xuyongdang/code/transformer/punct_stream/config/punct.yml' 
        #config_file  = '/work/xuyongdang/code/transformer/trained_model/embed240_head8_layer12_biaodian/punct.yml' 
        config_punc  = yaml.load(codecs.open(config_file, encoding='utf-8'))
        self.past_kv = None
        self.lm_pretrained = lm_model
        self.blstm_crf     = init_punct_model(config_punc)
        if frozen_lm:
            for param in self.lm_pretrained.parameters():
                param.requires_grad = False

        # initialize parameters from uniform distribution, LSTM has init by stdv = 1/sqrt(out_units)
        # keep in mind, all params have been initialized when create it

    def forward(self, x, states=None, sentlen=None):
        # Forward propagate
        if states is not None:
            outputs = self.lm_pretrained.forward(input_ids=x, past=self.past_kv)
        else:# the first word
            outputs = self.lm_pretrained.forward(input_ids=x)
        self.past_kv = outputs[1]
        top_hidden = outputs[2][-1] # [144, 4, 768]
        # Decode hidden states of all time steps
        y_out, states = self.blstm_crf.forward(top_hidden, states, sentlen)
        logp = y_out.log_softmax(dim=-1) 
 
        return y_out, logp, states
    
    def crf_loss(self, emission, mask, t):
        return self.blstm_crf.loss(emission, mask, t)

    def predict(self, emission, mask):
        return self.blstm_crf.loss_function.decode(emission, mask)

def concat_examples(batch, device=-1, padding=None):
    """Custom concat_examples for pytorch.

    Pads a batch of variable-length (input, target) pairs into two matrices.

    :param batch: iterable of (input_ids, target_ids) pairs
    :param int device: CUDA device index; < 0 keeps tensors on CPU
    :param Tuple[int,int] padding: (input_pad, target_pad); defaults to (0, 0)
    :return: (inputs, targets)
    :rtype (torch.Tensor, torch.Tensor)
    """
    if padding is None:
        # BUGFIX: the old default (None) crashed below on padding[0].
        padding = (0, 0)
    input_x  = []
    target_y = []
    for xx in batch:
        input_x.append(torch.LongTensor(xx[0]))
        target_y.append(torch.LongTensor(xx[1]))
    x = torch.nn.utils.rnn.pad_sequence(input_x, batch_first=True, padding_value=padding[0])
    t = torch.nn.utils.rnn.pad_sequence(target_y, batch_first=True, padding_value=padding[1])
    if device >= 0:
        x = x.cuda(device)
        t = t.cuda(device)
    return x, t

def concat_examples_new(batch, device=None, padding=None):
    """Pad a batch of (input, target) pairs and also return the original lengths.

    :param batch: iterable of (input_ids, target_ids) pairs
    :param device: CUDA device index; None or < 0 keeps tensors on CPU
    :param Tuple[int,int] padding: (input_pad, target_pad); defaults to (0, 0)
    :return: (inputs, targets, unpadded input lengths)
    :rtype (torch.Tensor, torch.Tensor, list)
    """
    if padding is None:
        padding = (0, 0)
    input_x  = []
    target_y = []
    last_idx = []
    for xx in batch:
        last_idx.append(len(xx[0]))
        input_x.append(torch.LongTensor(xx[0]))
        target_y.append(torch.LongTensor(xx[1]))
    x = torch.nn.utils.rnn.pad_sequence(input_x, batch_first=True, padding_value=padding[0])
    t = torch.nn.utils.rnn.pad_sequence(target_y, batch_first=True, padding_value=padding[1])
    # BUGFIX: the old `device >= 0` raised TypeError for the default device=None.
    if device is not None and device >= 0:
        x = x.cuda(device)
        t = t.cuda(device)
    return x, t, last_idx

# initialize parameters from uniform distribution and orthogonal
def get_params_num(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(param.numel() for param in model.parameters())

def lm_topk_acc(pad_outputs, pad_targets, ignore_label=-100, beam=20):
    """Compute top-k accuracy: the fraction of non-ignored targets whose id
    appears among the *beam* highest-scoring predictions.

    :param torch.Tensor pad_outputs: prediction tensors (B*Lmax, D)
    :param torch.Tensor pad_targets: target tensors (B, Lmax)
    :param int ignore_label: target id to exclude from the score
    :param int beam: number of top candidates considered a hit
    :return: accuracy value (0.0 - 1.0)
    :rtype: float
    """
    valid = pad_targets != ignore_label
    scores = pad_outputs.view(pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1))
    _, pred_topk = torch.topk(scores, beam, dim=2)
    gold = pad_targets.masked_select(valid)
    hits = 0
    # A position counts as correct if ANY of the top-k candidates matches.
    for k in range(beam):
        hits += torch.sum(pred_topk[:, :, k].masked_select(valid) == gold)
    total = torch.sum(valid)
    return float(hits) / float(total)

def lm_argmax_acc(pad_outputs, pad_targets, ignore_label=-100):
    """Compute argmax (top-1) accuracy over non-ignored target positions.

    :param torch.Tensor pad_outputs: prediction tensors (B*Lmax, D)
    :param torch.Tensor pad_targets: target tensors (B, Lmax)
    :param int ignore_label: target id to exclude from the score
    :return: accuracy value (0.0 - 1.0)
    :rtype: float
    """
    n_batch, n_len = pad_targets.size(0), pad_targets.size(1)
    predicted = pad_outputs.view(n_batch, n_len, pad_outputs.size(1)).argmax(2)
    valid = pad_targets != ignore_label
    correct = torch.sum(predicted.masked_select(valid) == pad_targets.masked_select(valid))
    total = torch.sum(valid)
    return float(correct) / float(total)

def logging_biaodian(batch_x, batch_t, y_pred, char_list):
    """Log each sentence's characters and its predicted punctuation labels.

    :param batch_x: tensor (batch_size, max_len) of character ids (0 = padding)
    :param batch_t: target tensor (currently unused; kept for interface symmetry)
    :param y_pred: list (batch_size) of predicted punctuation id sequences
    :param char_list: id -> character lookup table
    """
    # Punctuation label alphabet, including start ('b') and end ('e') labels.
    biaodian_list  = [' b', ' ,', '。', ' ?', ' !', ' :', ' _', ' s', ' e'] # add start and end label

    for row, char_ids in enumerate(batch_x):
        # Skip id 0 (padding/blank) when reconstructing the sentence text.
        sentence = ''.join(char_list[cid] for cid in char_ids if cid > 0)
        logging.info(sentence)
        puncts = ''.join(biaodian_list[pid] for pid in y_pred[row] if pid >= 0)
        logging.info(puncts)

def metric_average(val, name):
    """All-reduce (average) a scalar tensor across Horovod workers.

    :param torch.Tensor val: scalar metric value on this worker
    :param str name: unique name identifying this reduction
    :return: the averaged value as a Python float
    """
    reduced = hvd.allreduce(val, name=name)
    return reduced.item()

def torch_load_epoch(path, model, optimizer=None, epoch=None):
    """Load torch model (and optionally optimizer) states from a checkpoint.

    :param str path: model file or snapshot file to be loaded
    :param torch.nn.Module model: torch model (possibly wrapped in ``.module``)
    :param optimizer: if given, its state is restored from the checkpoint
    :param epoch: sentinel — when not None, the checkpoint's stored epoch is
        read and returned; when None, None is returned without reading it
    :return: the checkpoint epoch, or None when *epoch* was None
    """
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
    target = model.module if hasattr(model, 'module') else model
    target.load_state_dict(checkpoint['model_state_dict'])

    if epoch is not None:
        epoch = checkpoint['epoch']
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    logging.info(epoch)

    # Drop the (potentially large) checkpoint dict before returning.
    del checkpoint
    return epoch

def train(args, lm_model):
    """Train the BIAODIAN punctuation model (frozen GPT-2 + BLSTM-CRF) with Horovod.

    NOTE(review): lm_evaluate() below executes `logging.info(errr)` where `errr`
    is undefined — evaluation currently raises NameError right after logging
    decode timings. This looks like a deliberate debug stop; confirm before
    running full training.

    :param args: parsed command-line options (see main())
    :param lm_model: pretrained GPT-2 language model instance
    """
    vocab_size  = 6819 #0:<blank>, end:<eos>
    eos         = vocab_size - 1
    ignore_id   = -100 # used to pad, why error in openmpi version
    char_id_max = 6817  # ids above this value are punctuation labels, not characters
    epoch       = 1

    # remove type check
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # https://github.com/pytorch/pytorch/issues/6351

    model = BIAODIAN(lm_model, frozen_lm=True, batch_first=True)
    if args.resume:
        loadfile = args.resume
        re_epoch = torch_load_epoch(loadfile, model, epoch=epoch)
        logging.info('###Resume from path: ' + loadfile)
        logging.info('###Resume epoch: ' + str(re_epoch))
        epoch    = re_epoch + 1
    # define the optimizer (only over trainable params, i.e. the CRF head):
    optimizer = transformers.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr, correct_bias=True)
    #scheduler = transformers.WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps,t_total=total_steps)
    # Initialize Horovod
    hvd.init()
    if args.ngpu > 0:
        # Pin GPU to be used to process local rank (one GPU per process)
        gpu_id = hvd.local_rank()
        torch.cuda.set_device(gpu_id)
    # NOTE(review): gpu_id is only bound when args.ngpu > 0 but is used
    # unconditionally in concat_examples_new calls below — confirm ngpu >= 1.
    torch.set_num_threads(1) # Horovod: limit # of CPU threads to be used per worker.
    # Horovod: use DistributedSampler to partition data among workers. Manually specify
    allreduce_batch_size = int(args.batch_size / hvd.size())

    #class_weight = torch.cuda.FloatTensor([0.25] + [0.75 for ind in range(biaodian_vocab-1)])
    #loss_func = torch.nn.CrossEntropyLoss(ignore_index=ignore_id, weight=class_weight)
    # NOTE(review): loss_func is defined but the training loop uses model.crf_loss instead.
    loss_func = torch.nn.CrossEntropyLoss(ignore_index=ignore_id)
    model.train()
    model.cuda()

    # Horovod: (optional) compression algorithm.
    compression = hvd.Compression.none # hvd.Compression.fp16
    # Add Horovod Distributed Optimizer
    horovod_optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=compression)

    # Broadcast parameters from rank 0 to all other processes.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(horovod_optimizer, root_rank=0)
    
    def conv_collate_biaodian(batch_x):
        """Collate: split each raw id sequence into (characters, aligned punctuation targets).

        Ids <= char_id_max are characters; larger ids are punctuation labels
        which (presumably) follow the character they attach to — each label is
        mapped back onto the index of that preceding character. TODO confirm.
        """
        batch_data = []
        for xx in batch_x: # xx type already is numpy.ndarray
            x_array    = xx.astype(np.int32) # convert dtype
            mask       = x_array > char_id_max
            input_char = x_array[~mask]
            biaodian   = x_array[mask] - char_id_max
            # raw position minus (punct tokens seen so far + 1) = index of the
            # preceding character within input_char
            biaodian_ind = np.array([val-(ind+1) for ind, val in enumerate(np.nonzero(mask)[0])], dtype=np.int32)
            target_biaodian = np.zeros(len(input_char), dtype=np.int32)
            target_biaodian[biaodian_ind] = biaodian

            batch_data.append((input_char, target_biaodian))
        return batch_data

    g = torch.Generator()
    kwargs = {'num_workers': 0, 'collate_fn': conv_collate_biaodian, 'pin_memory': False, 'drop_last': True} if args.ngpu > 0 else {}
    best_val_loss = 1.0e10
    best_acc   = 0.0
    back_count = 0   # number of times training fell back to the best checkpoint

    if hvd.rank() == 0:
        if not torch.cuda.is_available(): # check cuda and cudnn availability
            logging.warning('cuda is not available')
        logging.info('Model params: ' + str(get_params_num(model)))

    def lm_evaluate(epoch):
        """Decode the dev set word-by-word, log per-step timings and track best loss/acc.

        NOTE(review): `logging.info(errr)` below raises NameError (errr is
        undefined) right after the timing stats are logged — apparently a
        deliberate stop for benchmarking decode speed. The cur_val_loss /
        avg_acc bookkeeping after it is unreachable and its computation is
        commented out above.
        """
        #args, best_val_loss, best_acc, back_count, hvd, model, horovod_optimizer = inparam
        nonlocal best_val_loss
        nonlocal best_acc
        nonlocal back_count
 
        loss_sum  = 0
        acc_sum   = 0
        time_sum  = 0
        stats_sum = []
        model.eval()

        val_dataset = ReadDataset(args.dev_token_file, args.batch_size, False)
        val_sampler = HorovodSampler(val_dataset.batch_ind, num_replicas=hvd.size(), rank=hvd.rank(), shuffle=False)
        val_loader  = torch.utils.data.DataLoader(val_dataset, batch_size=allreduce_batch_size, sampler=val_sampler, **kwargs)
        with torch.no_grad():
            #val_sampler.set_epoch(epoch)
            for ind, batch_inputs in enumerate(val_loader):
                # Concatenate the token IDs to matrices and send them to the device
                x, t, last_index = concat_examples_new(batch_inputs, gpu_id, padding=(0, ignore_id))
                batch_size, sent_len = x.shape

                #mask = t >= 0
                mask = None
                #seq_len = torch.sum(mask, dim=1).view(batch_size, 1).cuda()
                start_t = get_time_ms()
                rnn_states = None
                step_time = []
                # Incremental decoding: feed one character per step, reusing
                # the LM's cached key/values via the model's past_kv state.
                for ii in range(sent_len):
                    step_t = get_time_ms()
                    y_out, logp, rnn_states = model.forward(x[:, ii].view(batch_size, 1), states=rnn_states)#
                    step_time.append(get_time_ms() - step_t)
                    y_pred = model.predict(logp, mask)

                cost_t = get_time_ms() - start_t
                time_sum += cost_t / sent_len
                stats_sum.append([cost_t, sent_len])
                #if ind % 100 == 0 and hvd.rank() == 0:
                #    logging.info('total: '+ str(len(val_loader)) + '#############:  ' + str(ind))
                #    y_pred = model.predict(logp, mask)
                logging_biaodian(x, t, y_pred, args.char_list)
                logging.info(step_time)

                #loss_sum += model.crf_loss(logp, t, mask)
                #acc_batch = lm_topk_acc(y_out, t, ignore_label=ignore_id, beam=2)
                #acc_batch = lm_argmax_acc(logp.view(batch_size*sent_len, -1), t, ignore_label=ignore_id)
                #acc_sum  += metric_average(torch.tensor(acc_batch), 'avg_acc')
            #cur_val_loss = metric_average(loss_sum / len(val_loader), 'avg_val_loss')
            #avg_acc = acc_sum / len(val_loader)
            logging.info(time_sum / len(val_loader))
            logging.info(stats_sum)
            # NOTE(review): 'errr' is undefined — raises NameError here (debug stop).
            logging.info(errr)
        savedir = args.output_dir + '/' + args.model_dir_tag
        if hvd.rank() == 0:
            logging.info(str(epoch) + '_epoch loss: ' + str(cur_val_loss))
            logging.info(str(epoch) + '_epoch  acc: ' + str(avg_acc))
            # NOTE(review): second undefined 'errr' — also unreachable today.
            logging.info(errr)
            savefile = savedir + '/gpt2.model.' + str(epoch)
            if cur_val_loss < best_val_loss:
                # Point the 'best' symlink at this epoch's checkpoint.
                src  = savefile
                dest = os.path.join(savedir, '%s' % 'gpt2.model.best')
                if os.path.lexists(dest):
                    os.remove(dest)
                os.symlink(src, dest)
                logging.info('best model is ' + src)
        # retraining epoch: loss regressed, fall back to the best checkpoint
        if cur_val_loss >= best_val_loss:
            back_count += 1
            loadfile = savedir + '/gpt2.model.best'
            re_epoch = torch_load_epoch(loadfile, model, horovod_optimizer, epoch)
            logging.info('###Retraining from epoch: ' + str(re_epoch))
            epoch = re_epoch + 1
            #optimizer_eps_decay()
        else:
            best_acc = avg_acc
            best_val_loss = cur_val_loss
        model.train()

    lm_evaluate(epoch)
    while epoch < args.epochs:
        # shuffle the order of the training files, deterministically per epoch
        g.manual_seed(epoch)
        file_idx = torch.randperm(args.num_pieces, generator=g).tolist()
        logging.info(file_idx)
        cur_step = 0
        file_count = 0
        for idx, val in enumerate(file_idx):
            # (old variant) read all lines in the file and split into batches
#            with open(args.train_token_path + '/tokenizer_train_{}'.format(val), 'r') as f:
#                text = f.read().strip().split()
#            tokens = [int(token) for token in text]
#            batch_len = args.maxlen * args.batch_size
#            batch_num = math.floor(len(tokens) / batch_len) # discard left
#            train_loader= []
#            for i in range(0, batch_num):
#                train_loader.append(tokens[i*batch_len:(i+1)*batch_len])
#            random.shuffle(train_loader)
            # read the file line by line and split into batches
            if args.num_pieces == 1:
                train_label   = args.train_token_path
            else:
                train_label   = args.train_token_path + '/tokenizer_train_{}'.format(val)
            train_dataset = ReadDataset(train_label, args.batch_size, False)
            train_sampler = HorovodSampler(train_dataset.batch_ind, num_replicas=hvd.size(), rank=hvd.rank())
            train_sampler.set_epoch(epoch)
            train_loader  = torch.utils.data.DataLoader(train_dataset, batch_size=allreduce_batch_size, sampler=train_sampler, **kwargs)
            
            file_count += 1
            #batch_num = train_dataset.get_batch_num()
            #logging.info('file_idx: ' + str(idx) + ' batch_num: ' + str(batch_num))
            # start training
            for batch_inputs in train_loader:
                # Concatenate the token IDs to matrices and send them to the device
                x, t, last_index = concat_examples_new(batch_inputs, gpu_id, padding=(0, ignore_id))
                batch_size, sent_len = x.shape
                horovod_optimizer.zero_grad()

                ## (loss), lm_logits, presents, (all hidden_states), (attentions)
                mask = t >= 0
                y_out, logp, _ = model.forward(x, states=None, sentlen=last_index)
                loss  = model.crf_loss(logp, t, mask)

                # loss backward; synchronize gradients before clipping, then
                # step inside skip_synchronize to avoid a second allreduce
                loss.backward()
                horovod_optimizer.synchronize()
                if args.max_grad_norm is not None:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                with horovod_optimizer.skip_synchronize():
                    horovod_optimizer.step()
                
                avg_loss = hvd.allreduce_(loss)
                cur_step  += 1
                if hvd.rank() == 0 and cur_step % args.log_step == 0:
                    LOSS_PRINT = str.format('cur_step {}, at text ({:.0f}%) of epoch {}, loss {:.6f}, len {}'.format(
                                       cur_step, 100.0 * file_count/args.num_pieces, epoch, avg_loss.item(), x.size(1)))
                    logging.info(LOSS_PRINT)
               #logging.info(errr)

        if hvd.rank() == 0:
            logging.info('finished model training for epoch ' + str(epoch))
            savedir  = args.output_dir + '/' + args.model_dir_tag
            savefile = savedir + '/gpt2.model.' + str(epoch)
            if not os.path.exists(savedir):
                os.mkdir(savedir)
            torch_save_epoch(savefile, model, optimizer, epoch)

        # evaluate the model
        lm_evaluate(epoch)
        epoch += 1

def main(args):
    """Parse command-line options, build the pretrained GPT-2 LM and start training.

    :param list args: argument list to parse (e.g. ``sys.argv[1:]``)
    """
    # command-line options
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_config', default='/work/xuyongdang/code/transformer/trained_model/gpt2-config.json', type=str, 
                        help='配置文件路径')
    parser.add_argument('--num_pieces', default=50 , type=int,
                        help='训练语料文件个数')
    parser.add_argument('--ngpu', default=3 , type=int,
                        help='使用的GPU数量')
    parser.add_argument('--pretrained_lm', default='', type=str,
                        help='预训练的语言模型文件')
    parser.add_argument('--resume', default='', type=str,
                        help='恢复训练的标点符号模型文件')
    parser.add_argument('--train_token_path', default='/data/xuyongdang/lm_corpus_clean/tokenfile/train_max81_biaodian', type=str,
                        help='训练语料的token文件目录')
    parser.add_argument('--dev_token_file',   default='/data/xuyongdang/lm_corpus_clean/tokenfile/dev_test/tokenizer_dev_50_max24', type=str,
                        help='验证集语料的token文件')
    parser.add_argument('--epochs', default=10 ,type= int,
                        help='epoch大小')
    parser.add_argument('--lr', default=0.001, type=float,
                        help='学习率')
    parser.add_argument('--batch_size', default=96, type=int,
                        help='训练的batch size')
    parser.add_argument('--maxlen', default=81, type=int,
                        help='读取数据的最大长度')
    parser.add_argument('--max_grad_norm', default=0.8, type=float,
                        help='max grand norm.')
    parser.add_argument('--warmup_steps', default=2000, type=int,
                        help='warm up步数')
    parser.add_argument('--log_step', default=400, type=int,
                        help='训练过程loss等指标输出步长')
    parser.add_argument('--output_dir', default='/work/xuyongdang/code/transformer/trained_model', type=str,
                        help='结果输出路径')
    parser.add_argument('--model_dir_tag', default='default', type=str,
                        help='结果输出路径')
    parser.add_argument('--dict_path', default='/work/xuyongdang/code/transformer/punct_stream/config/train_units_alpha.txt', type=str,
                        help='Dictionary')

    # BUGFIX: parse the argv list handed to main() instead of implicitly
    # re-reading sys.argv — identical behavior when invoked from __main__
    # with sys.argv[1:], but main() is now callable/testable with explicit args.
    args = parser.parse_args(args)
    logging.info(args)

    config = GPT2Config.from_json_file(args.model_config)
    logging.info("Model config %s", str(config))
    model = GPT2LMHeadModel(config)
    if args.pretrained_lm:
        loadfile = args.pretrained_lm
        re_epoch = torch_load_epoch(loadfile, model)
        logging.info('###load pretrained lm from path: ' + loadfile)

    # load dictionary for debug log
    if args.dict_path:
        with open(args.dict_path, 'rb') as f:
            dictionary = f.readlines()
        # first whitespace-separated token of each line is the character
        char_list = [entry.decode('utf-8').split(' ')[0]
                     for entry in dictionary]
        # id 0 is the blank/padding symbol; the last id is <eos>
        char_list.insert(0, '<blank>')
        char_list.append('<eos>')
        args.char_list = char_list

    train(args, model)

# Script entry point: forward the command-line args (minus program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
