import torch

from constants import DOMAIN_SLOT_TAG, VALUE_SLOT_TAG


def shift_tokens_left(input_ids, pad_token_id):
    """Shift a batch of token ids one position to the left.

    Used to build decoder labels from decoder inputs: position ``i`` of the
    output holds ``input_ids[:, i + 1]``, and the vacated last column is
    filled with ``pad_token_id``.

    Args:
        input_ids: LongTensor of shape ``(batch, seq_len)``.
        pad_token_id: id written into the last column; must not be ``None``.

    Returns:
        A new tensor of the same shape/dtype as ``input_ids``.

    Raises:
        ValueError: if ``pad_token_id`` is None.
    """
    # Validate BEFORE use. The original asserted only after pad_token_id had
    # already been assigned into the tensor (and `assert` vanishes under -O).
    if pad_token_id is None:
        raise ValueError("pad_token_id must not be None")

    shifted_token_ids = torch.zeros_like(input_ids)
    shifted_token_ids[:, :-1] = input_ids[:, 1:].clone()
    shifted_token_ids[:, -1] = pad_token_id

    return shifted_token_ids


def multilabel_to_onehot(intents, labels_map):
    """Encode a collection of intent labels as a multi-hot float vector.

    Args:
        intents: iterable of intent label strings.
        labels_map: dict mapping a label to its index in the output vector.

    Returns:
        A list of floats of length ``len(labels_map)`` with ``1.`` at the
        index of every recognized intent and ``0.`` elsewhere. Labels not
        present in ``labels_map`` are silently ignored.
    """
    encoding = [0.] * len(labels_map)

    for label in intents:
        if label not in labels_map:
            continue
        encoding[labels_map[label]] = 1.

    return encoding


def load_model_and_parallel(args, model, ckpt_path=None):
    """Place *model* on the device(s) named by ``args.gpu_ids`` and
    optionally restore a checkpoint.

    Args:
        args: object with a ``gpu_ids`` attribute — a comma-separated string
            of GPU indices, ``'-1'`` meaning CPU.
        model: a ``torch.nn.Module`` to move/wrap.
        ckpt_path: optional path to a state-dict file; when given (and
            non-empty), it is loaded strictly before any GPU placement.

    Returns:
        ``(model, device)`` where ``device`` is the first requested device.
        With multiple GPU ids the model is wrapped in ``DataParallel``.
    """
    gpu_ids = args.gpu_ids.split(',')

    # The first listed id decides the primary device ('-1' selects CPU).
    if gpu_ids[0] == '-1':
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:' + gpu_ids[0])
    model.to(device)

    # Restore weights on CPU first; strict load surfaces any key mismatch.
    if ckpt_path:
        state_dict = torch.load(ckpt_path, map_location=torch.device('cpu'))
        model.load_state_dict(state_dict, strict=True)

    if len(gpu_ids) > 1:
        print(f'Use multi gpus in: {gpu_ids}')
        device_ids = [int(g) for g in gpu_ids]
        model = torch.nn.DataParallel(model.cuda(), device_ids=device_ids)
    else:
        print(f'Use single gpu in: {gpu_ids}')

    return model, device


def extract_triplet(raw_tokens, tokenizer):
    """Parse decoded token strings into lists of 'domain-value' strings.

    Each entry of *raw_tokens* is a space-separated token string. Special
    tokenizer tokens (pad/cls/sep/unk) are stripped out first. Tokens after
    a DOMAIN_SLOT_TAG are concatenated directly; tokens after a
    VALUE_SLOT_TAG are appended with a '-' separator, yielding strings like
    ``'domainslot-value'``. A new DOMAIN_SLOT_TAG flushes the triplet built
    so far.

    Args:
        raw_tokens: list of decoded strings, one per example.
        tokenizer: object exposing ``pad_token``, ``cls_token``,
            ``sep_token`` and ``unk_token`` attributes.

    Returns:
        A list (same length as ``raw_tokens``) of lists of triplet strings.
    """
    specials = (tokenizer.pad_token, tokenizer.cls_token,
                tokenizer.sep_token, tokenizer.unk_token)
    results = []

    for sequence in raw_tokens:
        # Remove the tokenizer's special markers before splitting.
        for marker in specials:
            sequence = sequence.replace(marker, '')

        triplets = []
        mode, current = '', ''

        for token in sequence.strip().split(' '):
            if token == DOMAIN_SLOT_TAG:
                # A new domain begins: emit whatever was accumulated.
                mode = 'domain_begin'
                if current:
                    triplets.append(current)
                    current = ''
            elif token == VALUE_SLOT_TAG:
                mode = 'value_begin'
            elif mode == 'domain_begin':
                current += token
            elif mode == 'value_begin':
                current += '-' + token

        if current:
            triplets.append(current)
        results.append(triplets)

    return results