
import os
import pickle
import re
import time
from datetime import timedelta

import numpy as np
import torch
from sklearn.preprocessing import MultiLabelBinarizer as MLB
from tqdm import tqdm

def build_class_map(config):
    """Build bidirectional mappings between class names and indices.

    Falsy (e.g. empty-string) entries in ``config.class_list`` are skipped,
    but their enumerate position is still consumed, so index gaps from the
    original list are preserved.

    Args:
        config: object exposing a ``class_list`` sequence of class names.

    Returns:
        tuple: ``(class_to_idx, idx_to_class)`` dicts, inverses of each other.
    """
    class_to_idx = {}
    for position, label in enumerate(config.class_list):
        if label:
            class_to_idx[label] = position
    idx_to_class = {position: label for label, position in class_to_idx.items()}
    return class_to_idx, idx_to_class

def load_MLB(config):
    """Load the MultiLabelBinarizer used to encode label sets.

    The binarizer cannot be rebuilt from ``config.class_list`` because MLB
    orders its classes by order of appearance in the fitted data, so we
    either unpickle a previously fitted instance, or rebuild and fit one
    from a dedicated class-list file (one class name per line).

    Args:
        config: object with an ``MLB_path`` attribute. A path ending in
            ``pkl`` is unpickled directly; any other file is read as a
            plain-text class list.

    Returns:
        A fitted ``MultiLabelBinarizer``.
    """
    assert os.path.exists(config.MLB_path)
    if config.MLB_path.endswith("pkl"):
        # SECURITY: pickle.load executes arbitrary code during
        # deserialization -- only load MLB_path files from trusted sources.
        with open(config.MLB_path, 'rb') as f1:
            mlb = pickle.load(f1)
        return mlb
    with open(config.MLB_path, 'r') as f1:
        mlb_class = [line.strip() for line in f1 if line.strip()]
    # Bug fix: the original assigned `mlb.classes = mlb_class` but never
    # fitted the binarizer, so transform() would raise NotFittedError.
    # Passing classes to the constructor and fitting once makes the
    # returned instance immediately usable, with the class order fixed
    # to the file order.
    mlb = MLB(classes=mlb_class)
    mlb.fit([mlb_class])
    return mlb

def get_time_dif(start_time):
    """Return the wall-clock time elapsed since *start_time*.

    Args:
        start_time: a timestamp previously obtained from ``time.time()``.

    Returns:
        datetime.timedelta: elapsed time, rounded to whole seconds.
    """
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))

def load_stop_words(stop_word_path):
    """Read a stop-word file (one word per line, UTF-8).

    Blank lines are dropped and surrounding whitespace is stripped from
    each entry.

    Args:
        stop_word_path: path to the stop-word list file.

    Returns:
        list[str]: the stop words, in file order.
    """
    words = []
    with open(stop_word_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            word = raw_line.strip()
            if word:
                words.append(word)
    return words

def clean_text(text):
    """Strip ASCII alphanumerics, whitespace and CN/EN punctuation from *text*.

    Also removes the literal substring "题目" ("question/title"). Matched
    spans are deleted; everything else (e.g. Chinese characters) is kept.

    Args:
        text: the raw string to clean.

    Returns:
        str: the cleaned string.
    """
    # Bug fix: the original called re.sub(..., line) against the undefined
    # name `line` instead of the `text` parameter, raising NameError on
    # every call. The pattern itself is preserved byte-for-byte.
    return re.sub(
            "[a-zA-Z0-9]|[\s+\-\|\!\/\[\]\{\}_,.$%^*(+\"\')]+|[:：+——()?【】《》“”！，。？、~@#￥%……&*（）]+|题目", '', text)

def load_pretrain_word_vectors(pretrain_dir='pre_train_embedding/sgns.sogou.char', head_line=False):
    """Load pretrained word vectors into a {token: vector} dict.

    Default file: Sogou character embeddings; the same plain-text format is
    used by the Baidu Encyclopedia Word + Character 300d vectors. Each line
    is ``token v1 v2 ... vd`` separated by single spaces.

    When ``head_line`` is False (default) the first line is skipped -- such
    files usually start with a "<vocab_size> <dim>" header.
    NOTE(review): with head_line=True the first line is parsed as data;
    confirm the flag indeed means "first line is a real vector".

    Args:
        pretrain_dir: path to the embedding text file.
        head_line: keep the first line as data instead of skipping it.

    Returns:
        dict[str, list[float]]: token -> embedding vector.
    """
    assert os.path.exists(pretrain_dir)
    embeddings_map = {}
    # Stream the file instead of readlines(): embedding files are often
    # hundreds of MB, and there is no need to hold every raw line in memory.
    with open(pretrain_dir, "r", encoding='UTF-8') as f1:
        if not head_line:
            next(f1, None)  # drop the header line; no-op on an empty file
        for line in tqdm(f1, desc="Loading pretrain word vectors"):
            parts = line.strip().split(" ")
            embeddings_map[parts[0]] = [float(x) for x in parts[1:]]
    return embeddings_map


