# -*- coding: utf-8 -*-

import os
import time
import torch
import numpy as np
from datetime import timedelta
from collections import OrderedDict
from gensim.models import KeyedVectors
from typing import Dict, Any


PAD, UNK = "<PAD>", "<UNK>"


def load_word_vec(pretrained_path: str, cached_wv_path: str=None, cache_vocab_path: str=None) -> torch.FloatTensor:
    """Load pretrained word vectors, preferring a cached ``.npy`` matrix.

    Row layout of the returned matrix: row 0 is PAD (all zeros), row 1 is UNK
    (mean of all pretrained vectors), and pretrained words occupy rows 2..N+1
    in the model's iteration order.

    :param pretrained_path: text-format word2vec file; required when no usable
        cache exists.
    :param cached_wv_path: optional ``.npy`` cache of the embedding matrix —
        read if the file exists, written after building otherwise.
    :param cache_vocab_path: optional path to dump the vocabulary, one word per
        line in row order (readable later via ``load_vocabs_from_file``).
    :return: a ``(vocab_size + 2, vector_size)`` FloatTensor of embeddings.
    """
    if cached_wv_path is not None and os.path.exists(cached_wv_path):
        print("load word vectors from cache: %s " % cached_wv_path, end="")
        vectors = np.load(cached_wv_path)
        print(' done. ')
    else:
        assert pretrained_path is not None, "Please supply pretrained-path or cached-wv-path."
        print("initialize load word2vec... ", end="")
        model = KeyedVectors.load_word2vec_format(pretrained_path, binary=False)
        vocab_list = [word for word in model.vocab.keys()]
        word2id = {PAD: 0, UNK: 1}
        vectors = np.zeros((len(vocab_list) + 2, model.vector_size))
        unk = np.zeros(model.vector_size)
        for i, word in enumerate(vocab_list):
            word2id[word] = i + 2
            vectors[i + 2] = model.wv[word]
            unk += vectors[i + 2]
        print(" done. ")
        # UNK is the centroid of all pretrained vectors; guard the division so
        # an empty vocabulary does not raise ZeroDivisionError.
        if vocab_list:
            vectors[1] = unk / len(vocab_list)
        if cached_wv_path:
            # Save for reuse in subsequent training runs.
            # NOTE(review): np.save appends ".npy" when the path lacks that
            # suffix, in which case the cache is never found on the next call —
            # callers should pass a path ending in ".npy".
            print("save word2vec to cache ... ", end='')
            np.save(cached_wv_path, vectors)
            print(' done. ')
        if cache_vocab_path:
            # Explicit utf-8 so a non-ASCII vocabulary survives regardless of
            # the platform's default encoding.
            with open(cache_vocab_path, 'w', encoding='utf-8') as f:
                for word in word2id.keys():
                    f.write(word + "\n")

    weights = torch.FloatTensor(vectors)
    return weights


def load_vocabs_from_wv(pretrained_path: str, cache_vocab_path: str=None) -> Dict[str, int]:
    """Build a word->index vocabulary from a pretrained word2vec file.

    Indices follow the row layout used by ``load_word_vec``: PAD is 0, UNK is
    1, and pretrained words are numbered from 2 in model iteration order.
    When ``cache_vocab_path`` exists it is read instead (via
    ``load_vocabs_from_file``); otherwise the freshly built vocabulary is
    optionally written there, one word per line.

    :param pretrained_path: text-format word2vec file; required when no usable
        cache exists.
    :param cache_vocab_path: optional vocabulary cache path (read or written).
    :return: mapping from word to row index.
    """
    if cache_vocab_path is not None and os.path.exists(cache_vocab_path):
        print("load word vocabs from cache: %s " % cache_vocab_path, end="")
        vocabs = load_vocabs_from_file(cache_vocab_path)
        print(' done. ')
    else:
        assert pretrained_path is not None, "Please supply pretrained-path or cached-wv-path."
        print("initialize load word2vec... ", end="")
        model = KeyedVectors.load_word2vec_format(pretrained_path, binary=False)
        vocab_list = [word for word in model.vocab.keys()]
        vocabs = {PAD: 0, UNK: 1}
        for i, word in enumerate(vocab_list):
            vocabs[word] = i + 2
        print(" done. ")
        if cache_vocab_path:
            # Explicit utf-8 so a non-ASCII vocabulary survives regardless of
            # the platform's default encoding.
            with open(cache_vocab_path, 'w', encoding='utf-8') as f:
                for word in vocabs.keys():
                    f.write(word + "\n")
    return vocabs


def load_vocabs_from_file(vocab_path: str) -> Dict[str, int]:
    """Read a vocabulary file (one word per line) into an ordered word->index map.

    Blank lines are skipped and do not consume an index, so indices are
    consecutive over the non-empty lines.

    :param vocab_path: path to a utf-8 text file, one word per line.
    :return: OrderedDict mapping word -> 0-based line index (blanks excluded).
    """
    vocabs = OrderedDict()
    # Explicit utf-8: vocab files written by the loaders above may contain
    # non-ASCII words and must round-trip on any platform default encoding.
    with open(vocab_path, 'r', encoding='utf-8') as f:
        for line in f:
            word = line.strip()
            # Iterating a file never yields None, so only emptiness matters.
            if not word:
                continue
            vocabs[word] = len(vocabs)
    return vocabs


def get_time_dif(start_time):
    """Return the wall-clock time elapsed since *start_time*, rounded to whole seconds."""
    elapsed_seconds = int(round(time.time() - start_time))
    return timedelta(seconds=elapsed_seconds)

