#!/usr/bin/env python
"""
    Main training workflow
"""
from __future__ import division

import argparse
import glob
import os
import time

import sentencepiece

from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
import torch
import random

# Architecture hyper-parameters that must be restored from a saved checkpoint's
# `opt` before the model is rebuilt for validation/testing (see validate/_test1/train).
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'local_layers', 'inter_layers','structured']




def wait_and_validate(args, device_id):
    """Checkpoint selection / watch loop.

    With ``args.test_all``: validate every checkpoint under ``args.model_path``,
    keep the 5 with the lowest validation cross-entropy, and run the test split
    on each of them.  Otherwise: poll ``args.model_path`` forever, validating
    and testing each new checkpoint as it appears.
    """
    timestep = 0
    if (args.test_all):
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
        cp_files.sort(key=os.path.getmtime)
        xent_lst = []
        for i, cp in enumerate(cp_files):
            step = int(cp.split('.')[-2].split('_')[-1])
            xent = validate(args, device_id, cp, step)
            xent_lst.append((xent, cp))
            # Early stop once the best checkpoint seen is more than 10 behind.
            max_step = xent_lst.index(min(xent_lst))
            if (i - max_step > 10):
                break
        xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
        logger.info('PPL %s' % str(xent_lst))
        for xent, cp in xent_lst:
            step = int(cp.split('.')[-2].split('_')[-1])
            # BUG FIX: this loop was dead — the call was commented out and
            # referenced a nonexistent `test1`; run the test split on each
            # of the best checkpoints via the actual helper.
            _test1(args, device_id, cp, step)
    else:
        # BUG FIX: this `else` was indented one level too deep, binding it to
        # the `for` loop above (for-else), so the infinite watch loop below
        # also ran in test_all mode.  It belongs to `if (args.test_all):`.
        while (True):
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                # A zero-byte file is still being written; wait and retry.
                if (not os.path.getsize(cp) > 0):
                    time.sleep(60)
                    continue
                if (time_of_cp > timestep):
                    timestep = time_of_cp
                    step = int(cp.split('.')[-2].split('_')[-1])
                    validate(args, device_id, cp, step)
                    _test1(args, device_id, cp, step)

            # Re-check: if an even newer checkpoint appeared while we were
            # evaluating, process it immediately; otherwise sleep when the
            # directory is still empty.
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (time_of_cp > timestep):
                    continue
            else:
                time.sleep(300)


def validate(args, device_id, pt, step):
    """Run one validation pass for a checkpoint and return its cross-entropy.

    The checkpoint path is ``pt`` when non-empty, otherwise ``args.test_from``.
    Architecture flags stored in the checkpoint override the current ``args``.
    """
    device = "cuda" if args.visible_gpu != '-1' else "cpu"
    test_from = pt if pt != '' else args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)

    # Restore the architecture hyper-parameters the checkpoint was trained with.
    saved_opt = vars(checkpoint['opt'])
    for flag in model_flags:
        if flag in saved_opt:
            setattr(args, flag, saved_opt[flag])
    print(args)

    # The sentencepiece model supplies the vocabulary size and padding id.
    spm = sentencepiece.SentencePieceProcessor()
    spm.Load(args.vocab_path)
    pad_id = spm.PieceToId('<PAD>')

    model = Summarizer(args, pad_id, len(spm), device, checkpoint)
    model.eval()

    # Iterator over the validation split (no shuffling).
    valid_iter = data_loader.Dataloader(
        args, load_dataset(args, 'valid', shuffle=False), {'PAD': pad_id},
        args.batch_size, device, shuffle=False, is_test=False)

    trainer = build_trainer(args, device_id, model, None)
    stats = trainer.validate(valid_iter)
    trainer._report_step(0, step, valid_stats=stats)
    return stats.xent()

def _test1(args, device_id, pt, step):
    """Evaluate one checkpoint on the 'test' split.

    Checkpoint resolution order: ``pt`` if non-empty, else ``args.test_from``,
    else the most recently written ``model_step_*.pt`` under ``args.model_path``.
    """
    # BUG FIX: the original unconditionally replaced `pt` with the newest
    # checkpoint in args.model_path, silently ignoring the checkpoint the
    # caller asked for (main passes args.test_from here), and crashed with
    # IndexError when the directory held no checkpoints.
    if pt != '':
        test_from = pt
    elif args.test_from != '':
        test_from = args.test_from
    else:
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')),
                          key=os.path.getmtime)
        if not cp_files:
            raise FileNotFoundError(
                'no model_step_*.pt checkpoints found in %s' % args.model_path)
        test_from = cp_files[-1]

    device = "cpu" if args.visible_gpu == '-1' else "cuda"
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)

    # Restore the architecture hyper-parameters stored in the checkpoint.
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)

    spm = sentencepiece.SentencePieceProcessor()
    spm.Load(args.vocab_path)
    word_padding_idx = spm.PieceToId('<PAD>')
    vocab_size = len(spm)
    model = Summarizer(args, word_padding_idx, vocab_size, device, checkpoint)
    model.eval()

    # Iterator over the test split (is_test=True keeps source text for ROUGE).
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                       {'PAD': word_padding_idx},
                                       args.batch_size, device,
                                       shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, model, None)
    trainer.test(test_iter, step)




def train(args, device_id):
    """Train the summarizer, optionally resuming from ``args.train_from``."""
    init_logger(args.log_file)

    # BUG FIX: `device` was a free variable resolved from the module-level name
    # defined only under `if __name__ == '__main__'`, so train() raised
    # NameError when this module was imported.  Derive it locally, the same
    # way validate()/_test1() do.
    device = "cpu" if args.visible_gpu == '-1' else "cuda"

    # Resume from a checkpoint and restore the architecture flags that the
    # checkpoint was trained with.
    if args.train_from != '':
        logger.info('Loading checkpoint from %s' % args.train_from)
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
    else:
        checkpoint = None

    # Seed every RNG source so reruns are reproducible; deterministic cudnn
    # makes repeated forward passes on identical input bit-stable.
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True

    # The sentencepiece model supplies the vocabulary size and padding id.
    spm = sentencepiece.SentencePieceProcessor()
    spm.Load(args.vocab_path)
    word_padding_idx = spm.PieceToId('<PAD>')
    vocab_size = len(spm)

    def train_iter_fct():
        # A fresh data iterator for each pass over the training split.
        return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True),
                                      {'PAD': word_padding_idx}, args.batch_size, device,
                                      shuffle=True, is_test=False)

    model = Summarizer(args, word_padding_idx, vocab_size, device, checkpoint)
    optim = model_builder.build_optim(args, model, checkpoint)
    logger.info(model)
    # build_trainer wraps the training loop plus validation/test helpers.
    trainer = build_trainer(args, device_id, model, optim)
    trainer.train(train_iter_fct, args.train_steps)



class Argu():
    """Hard-coded run configuration; stands in for an argparse namespace."""

    def __init__(self):
        # Run mode: 'train' or 'test'.
        self.mode = 'train'
        #self.mode = 'test'

        # Paths.
        self.onmt_path = '../data/cnndm_data/cnndm'
        self.data_path = '../data/'
        self.raw_path = '../line_data'
        self.vocab_path = '../data/spm.cnndm.model'
        self.model_path = '../models/'
        self.result_path = '../results/cnndm'
        self.temp_dir = '../temp'

        # Batching and document filtering.
        self.batch_size = 10000  # default 10000; may be demanding on small machines
        self.min_nsents = 3
        self.max_nsents = 100
        self.min_src_ntokens = 5
        self.max_src_ntokens = 200

        # Model architecture (these names appear in model_flags, so they are
        # overwritten from the checkpoint when resuming or testing).
        self.structured = False  # False -> TransformerInterEncoder, True -> StructuredEncoder
        self.hidden_size = 128
        self.ff_size = 512
        self.heads = 8
        self.emb_size = 128
        self.local_layers = 5
        self.inter_layers = 2
        self.dropout = 0.2

        # Optimizer settings.
        self.optim = 'adam'
        self.lr = 0.15
        self.beta1 = 0.9
        self.beta2 = 0.999
        self.decay_method = ''
        self.warmup_steps = 8000
        self.max_grad_norm = 0

        # Training-loop bookkeeping.
        self.save_checkpoint_steps = 5
        self.accum_count = 1
        self.report_every = 10
        self.train_steps = 100

        # Hardware, logging, misc.
        self.visible_gpu = '0'  # '-1' selects CPU
        self.gpu_ranks = [0]
        self.log_file = '../logs/cnndm.log'
        self.dataset = ''
        self.seed = 666

        # Validation / testing options.
        self.test_all = False
        self.train_from = ''
        self.test_from = ''
        self.report_rouge = True



if __name__ == '__main__':

    # Build the hard-coded configuration and pin the visible GPU before any
    # CUDA context is created.
    args=Argu()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpu

    init_logger(args.log_file)
    # visible_gpu == '-1' selects CPU; device_id -1 mirrors that for the trainer.
    device = "cpu" if args.visible_gpu == '-1' else "cuda"
    device_id = 0 if device == "cuda" else -1

    if (args.mode == 'train'):
        train(args, device_id)
    # elif (args.mode == 'validate'):
    #     wait_and_validate(args, device_id)
    elif (args.mode == 'test'):
         _test1(args, device_id, args.test_from, 100)