# -*- coding: utf-8 -*-
"""
@project: bert-text-classification

@author: bruce zhang

@desc:
"""
import torch
from torch.utils.data import TensorDataset
from datetime import timedelta
import time
import os
import datetime

class InputFeatures:
    """Container for one tokenized, padded training example.

    Attributes:
        input_id: list of token ids, padded with 0 to the corpus max_length.
        input_mask: list of 1s for real tokens and 0s for padding.
        label_id: integer class label.
    """
    def __init__(self, input_id, input_mask, label_id):
        self.input_id = input_id
        self.input_mask = input_mask
        self.label_id = label_id

    def __repr__(self):
        # debuggability aid; does not affect existing callers
        return 'InputFeatures(input_id={!r}, input_mask={!r}, label_id={!r})'.format(
            self.input_id, self.input_mask, self.label_id)

def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into a token -> index dict.

    :param vocab_file: path to a UTF-8 text file, one token per line
    :return: dict mapping each newline-stripped token to its 0-based line index
    """
    vocab = {}
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        # Iterating the file replaces the manual readline/while-True loop;
        # both stop at EOF and record the stripped token for each line.
        for index, token in enumerate(reader):
            vocab[token.strip()] = index
    return vocab

def read_corpus(path, max_length, label_dic, vocab):
    """Read a '|||'-delimited corpus file into a list of InputFeatures.

    Each line must be ``text|||label``. The text is split into single
    characters, wrapped in [CLS]/[SEP], mapped through *vocab* (unknown
    characters fall back to [UNK]) and zero-padded to *max_length*.

    :param path: UTF-8 text file, one ``text|||label`` example per line
    :param max_length: fixed output sequence length (including [CLS]/[SEP])
    :param label_dic: mapping from label string to integer id
    :param vocab: mapping from token string to integer id; must contain
                  '[CLS]', '[SEP]' and '[UNK]'
    :return: list of InputFeatures
    """
    # context manager closes the file even if a line fails to parse
    with open(path, 'r', encoding='utf-8') as file:
        content = file.readlines()
    result = []
    unk_id = vocab['[UNK]']
    for line in content:
        text, label = line.strip().split('|||')
        tokens = list(text)
        label_id = label_dic[label]
        # reserve two positions for the [CLS]/[SEP] markers
        if len(tokens) > max_length - 2:
            tokens = tokens[:max_length - 2]
        tokens_f = ['[CLS]'] + tokens + ['[SEP]']
        input_ids = [vocab.get(tok, unk_id) for tok in tokens_f]
        input_mask = [1] * len(input_ids)
        # pad with id 0 / mask 0 up to the fixed length
        pad = max_length - len(input_ids)
        input_ids.extend([0] * pad)
        input_mask.extend([0] * pad)
        assert len(input_ids) == max_length
        assert len(input_mask) == max_length
        result.append(InputFeatures(input_id=input_ids, input_mask=input_mask,
                                    label_id=label_id))
    return result

def build_dataset(data):
    """Pack a list of InputFeatures into a TensorDataset.

    :param data: iterable of objects exposing ``input_id``, ``input_mask``
                 and ``label_id`` attributes
    :return: TensorDataset yielding (input_ids, input_mask, label) triples
    """
    id_rows = [feature.input_id for feature in data]
    mask_rows = [feature.input_mask for feature in data]
    label_values = [feature.label_id for feature in data]
    return TensorDataset(torch.LongTensor(id_rows),
                         torch.LongTensor(mask_rows),
                         torch.LongTensor(label_values))



def save_model(model, epoch, path='result/', **kwargs):
    """Save *model*'s state_dict under *path* and record the file name in a
    ``checkpoint`` marker file so ``load_model`` can find the latest weights.

    :param model: torch module whose state_dict is saved
    :param epoch: current epoch number, embedded in the generated file name
    :param path: target directory (created if missing)
    :param kwargs: optional ``name`` — explicit file name to save under;
                   when omitted a timestamped name is generated
    :return: None
    """
    if not os.path.exists(path):
        # makedirs handles nested directories; mkdir would fail on them
        os.makedirs(path)
    name = kwargs.get('name', None)
    if name is None:
        cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H时%M分%S秒')
        name = cur_time + '--epoch=%d' % epoch
    # BUG FIX: the original only saved when no explicit name was given —
    # passing name=... silently did nothing. Now both branches save.
    full_name = os.path.join(path, name)
    torch.save(model.state_dict(), full_name)
    print('Saved model at epoch {} successfully'.format(epoch))
    # record the latest file name so load_model() can resolve it
    with open('{}/checkpoint'.format(path), 'w') as file:
        file.write(name)
        print('Write to checkpoint')


def load_model(model, path='result/', **kwargs):
    """Load weights into *model* from *path*.

    Resolves the file name from the ``name`` kwarg when given, otherwise
    from the ``checkpoint`` marker file inside *path*. Weights are mapped
    to CPU so a GPU-trained checkpoint loads on any machine.

    :param model: torch module to receive the state_dict
    :param path: directory holding the weight files and the checkpoint file
    :param kwargs: optional ``name`` — explicit weight file name
    :return: the same *model*, with weights loaded
    """
    requested = kwargs.get('name', None)
    if requested is None:
        with open('{}/checkpoint'.format(path)) as file:
            requested = file.read().strip()
    name = os.path.join(path, requested)
    model.load_state_dict(torch.load(name, map_location=lambda storage, loc: storage))
    print('load model {} successfully'.format(name))
    return model

def get_time_diff(start_time):
    """Return the wall-clock time elapsed since *start_time*.

    :param start_time: a time.time() timestamp taken earlier
    :return: datetime.timedelta rounded to whole seconds
    """
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))

