import torch
from tqdm import tqdm
import time, random
from datetime import timedelta

PAD, CLS = '[PAD]', '[CLS]'  # padding token; CLS is BERT's sequence-summary token

def build_dataset(config, args):
    """Load, tokenize and pad the dataset files referenced by `config`.

    Returns:
        infer mode:    ('', infer_encodings)
        training mode: ('', train_encodings, dev_encodings, test_encodings)
        where each encodings object holds input_ids / token_type_ids /
        attention_mask / labels tensors.
    """
    def load_dataset(path, pad_size=32):
        """Read one text file and batch-encode it.

        Each non-blank line is raw text (inference mode) or
        ``text<TAB>label`` / ``text<TAB>onehot...`` (training mode).
        """
        contents, labels = [], []
        with open(path, 'r', encoding='UTF-8') as f1:
            all_lines = f1.readlines()
        if not args.infer_mode:
            # Deterministic shuffle so repeated runs see the same order.
            random.seed(args.seed)
            random.shuffle(all_lines)

        for line in tqdm(all_lines):
            lin = line.strip()
            if not lin:
                continue  # skip blank lines
            if args.infer_mode:
                contents.append(lin)
                labels.append(-1)  # placeholder label for inference
            else:
                res = lin.split('\t')
                contents.append(res[0])
                if args.multi_labels:
                    # Labels must already be discretized, fixed-length one-hot vectors.
                    labels.append([int(i) for i in res[1:]])
                else:
                    labels.append(int(res[1]))
        # BUG FIX: the `pad_size` parameter was silently ignored in favour of
        # `args.pad_size`; honour the value the caller passes (config.pad_size).
        # Produces input_ids / token_type_ids / attention_mask / labels.
        encode_res = config.tokenizer.batch_encode_plus(
            contents, padding=True,
            max_length=pad_size, pad_to_multiple_of=pad_size,
            truncation="longest_first", return_tensors="pt")
        encode_res.update({"labels": torch.LongTensor(labels)})
        return encode_res

    if args.infer_mode:
        infer = load_dataset(config.infer_path, config.pad_size)
        return '', infer
    else:
        train = load_dataset(config.train_path, config.pad_size)
        dev = load_dataset(config.dev_path, config.pad_size)
        test = load_dataset(config.test_path, config.pad_size)
        return '', train, dev, test


class DatasetIterater(object):
    """Mini-batch iterator over a dict of equally-sized tensors.

    Slices every tensor in `encode_batches` (input_ids, attention_mask,
    labels, ...) into consecutive batches of `batch_size` and moves each
    batch to `device`. The index resets after StopIteration, so the same
    object can be iterated once per epoch.
    """

    def __init__(self, encode_batches, batch_size, device):
        self.batch_size = batch_size
        self.encode_batches = encode_batches
        self.total_len = len(encode_batches["input_ids"])
        self.n_batches = self.total_len // batch_size
        # BUG FIX: the original tested `total_len // batch_size != 0`, which
        # is True whenever total_len >= batch_size — an evenly-divisible
        # dataset therefore produced a trailing *empty* batch and __len__
        # over-counted. The remainder test must use modulo. With this fix a
        # dataset smaller than batch_size is served as one partial batch, so
        # the old `n_batches = 1` clamp is no longer needed.
        self.residue = self.total_len % batch_size != 0
        self.index = 0
        self.device = device

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final partial batch: everything past the last full batch.
            batch = {k: v[self.index * self.batch_size: self.total_len].to(self.device)
                     for k, v in self.encode_batches.items()}
            self.index += 1
            return batch
        elif self.index >= self.n_batches:
            self.index = 0  # reset so the iterator is reusable next epoch
            raise StopIteration
        else:
            batch = {k: v[self.index * self.batch_size: (self.index + 1) * self.batch_size].to(self.device)
                     for k, v in self.encode_batches.items()}
            self.index += 1
            return batch

    def __iter__(self):
        return self

    def __len__(self):
        # Batches per pass, counting the trailing partial batch if any.
        return self.n_batches + 1 if self.residue else self.n_batches


def build_iterator(dataset, config):
    """Wrap encoded dataset tensors in a DatasetIterater using the batch
    size and device configured on `config`."""
    # The original bound the result to a local named `iter`, shadowing the
    # builtin; return directly instead.
    return DatasetIterater(dataset, config.batch_size, config.device)


def get_time_dif(start_time):
    """Return the wall-clock time elapsed since `start_time` (a time.time()
    stamp), rounded to whole seconds as a timedelta."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))


def freeze_to_layer_by_name(model, layer_name='all'):
    """Freeze parameters from index 0 up to (but not including) the first
    parameter whose name contains `layer_name`; later parameters stay
    trainable.

    Args:
        model: torch.nn.Module whose parameters are toggled in place.
        layer_name: substring marking the first trainable parameter;
            'all' freezes every parameter; None is a no-op.
    """
    if layer_name is None:  # identity test, not `== None`
        return
    # BUG FIX: the original looked the name up in model.state_dict(), which
    # also contains buffers (e.g. BatchNorm running stats), so that index can
    # drift out of step with model.parameters(). Use named_parameters() so the
    # lookup and the freeze loop walk the exact same sequence.
    param_names = [name for name, _ in model.named_parameters()]
    if layer_name == 'all':
        index_start = len(param_names)
    else:
        index_start = -1
        for index, key in enumerate(param_names):
            if layer_name in key:
                index_start = index
                break

    if index_start < 0:
        print(f"Don't find layer name: {layer_name}")
        print(f"must in : \n{model.state_dict().keys()}")
        return

    no_grad_nums = 0
    grad_nums = 0
    for index, param in enumerate(model.parameters()):
        if index >= index_start:
            param.requires_grad = True
            grad_nums += 1
        else:
            param.requires_grad = False
            # BUG FIX: count frozen params here; the old `index_start + 1`
            # was off by one versus what the loop actually froze.
            no_grad_nums += 1
    print(f"freeze layers num: {no_grad_nums}, active layers num: {grad_nums}.")