# -*- coding:utf-8 -*-
# editor: zzh

import re

import numpy as np
import torch
from sklearn import preprocessing
from transformers import BertTokenizer

from configs import *

# Shared module-level tokenizer; `pretrain_path` comes from the star-import
# of `configs` above — presumably a local path or HF model name (verify there).
tokenizer = BertTokenizer.from_pretrained(pretrain_path)


def tokenize(first_text, second_text=None, max_len=None):
    """Convert one text (or a sentence pair) into BERT input ids and segment ids.

    Builds the standard BERT layout ``[CLS] A [SEP]`` or
    ``[CLS] A [SEP] B [SEP]``.  The previous version omitted the closing
    ``[SEP]`` tokens (and the lone ``[SEP]`` for single texts), which does
    not match the pretrained BERT input format.

    :param first_text: first (or only) input string
    :param second_text: optional second string for sentence-pair tasks
    :param max_len: if given, truncate or zero-pad both id lists to this length
    :return: (token_ids, segment_ids, tokens) — ``tokens`` is the token string
             list *before* any truncation/padding, as in the original code
    """
    first_tokens = tokenizer.tokenize(first_text)
    # Segment A: [CLS] + first text + [SEP], all with segment id 0.
    tokenized_text = ['[CLS]'] + first_tokens + ['[SEP]']
    segment_ids = [0] * len(tokenized_text)
    if second_text:
        # Segment B: second text + closing [SEP], all with segment id 1.
        second_tokens = tokenizer.tokenize(second_text)
        tokenized_text = tokenized_text + second_tokens + ['[SEP]']
        segment_ids = segment_ids + [1] * (len(second_tokens) + 1)

    tokenized_ids = tokenizer.convert_tokens_to_ids(tokenized_text)

    if max_len:
        if len(tokenized_ids) > max_len:
            # Simple tail truncation (may drop the final [SEP]), as before.
            tokenized_ids = tokenized_ids[:max_len]
            segment_ids = segment_ids[:max_len]
        else:
            # 0 is both BERT's [PAD] token id and the padding segment id.
            pad_ids = [0] * (max_len - len(tokenized_ids))
            tokenized_ids = tokenized_ids + pad_ids
            segment_ids = segment_ids + pad_ids

    return tokenized_ids, segment_ids, tokenized_text


def ids2tokens(ids):
    """Map a sequence of vocabulary ids back to their token strings."""
    tokens = tokenizer.convert_ids_to_tokens(ids)
    return tokens


def lables2onehot(data, num_class):
    """Encode string labels as sorted-order integer class indices.

    NOTE(review): despite the name, this returns label *indices*, not a
    one-hot matrix — the one-hot conversion was dead commented-out code and
    has been removed.  Behavior is unchanged for existing callers:
    ``np.unique(..., return_inverse=True)`` computes exactly what
    ``sklearn.preprocessing.LabelEncoder.fit_transform`` produced (classes
    sorted, each label mapped to its position).

    :param data: iterable of labels (e.g. strings)
    :param num_class: unused; kept for interface compatibility
    :return: 1-D ``np.ndarray`` of int indices, one per input label
    """
    _, index = np.unique(np.asarray(data), return_inverse=True)
    return index

if __name__ == '__main__':
    # Quick smoke check: decode a few hard-coded vocabulary ids.
    sample_ids = [101, 34, 23, 43, 23, 23]
    print(ids2tokens(sample_ids))


