"""
Document process tools.

"""
import jieba
import numpy as np
from collections import Counter


def doc2mat(doc, w2v):
    """
    Transform a document into a matrix where each row is the word2vec
    vector of the corresponding token in the document.

    Tokens are produced by ``jieba.cut``. When a token is missing from
    the word2vec model, it is decomposed into single characters and each
    character's vector is used instead (a KeyError propagates if a
    character is also unknown, instead of the original bare ``except``
    that silently caught every exception type).

    Args:
    - doc: :string: a document after preprocessing.
    - w2v: pretrained word2vec model; supports ``w2v[token]`` lookup and
      raises KeyError for unknown tokens (dict and gensim KeyedVectors
      both do).

    Returns:
    - mat: :numpy.ndarray: matrix of word vectors (one row per counted
      token/character), or None when the document yields no tokens.
    """
    rows = []
    for word in jieba.cut(doc):
        try:
            rows.append(w2v[word])
        except KeyError:
            # Out-of-vocabulary token: fall back to per-character vectors.
            rows.extend(w2v[w] for w in word)
    if not rows:
        # Preserve the original contract: empty document -> None.
        return None
    # Stack once at the end: O(n) instead of the original O(n^2)
    # repeated np.vstack inside the loop.
    return np.vstack(rows)


def dataset2mat(dataset, w2v, pad_size=-1):
    """
    Transform a dataset of (document, label) rows into (matrix, label)
    rows using word2vec.

    Args:
    - dataset: :numpy.ndarray: (n, 2) raw dataset whose first column is
      a document string and whose second column is a label convertible
      with ``int``.
    - w2v: pretrained word2vec model, passed through to ``doc2mat``.
    - pad_size: :int: if >= 0, truncate each document matrix to at most
      ``pad_size`` rows; -1 (default) keeps the full matrix.

    Returns:
    - Dataset: :list: (n, 2) processed dataset whose first item is the
      document matrix and whose second item is the int label.
    """
    Dataset = []
    for i in range(len(dataset)):
        mat = doc2mat(dataset[i, 0], w2v)
        # BUG FIX: the original sliced the *label* before converting it
        # (``int(dataset[i, 1][:pad_size])``) whenever the matrix was
        # truncated; padding only applies to the matrix, never the label.
        label = int(dataset[i, 1])
        if pad_size != -1 and len(mat) >= pad_size:
            mat = mat[:pad_size]
        Dataset.append([mat, label])
    return Dataset


def dataset2idf(dataset, words):
    """
    Compute the inverse document frequency (idf) of every vocabulary
    word over the dataset.

    Args:
    - dataset: :list: (n, 2) raw dataset; ``dataset[i][0]`` is a
      document string (a lyric).
    - words: :dict: vocabulary; only its keys are used.

    Returns:
    - idf: :dict: idf of each vocabulary word, computed as
      ``log10(n / (df[word] + 1))`` — the +1 smooths words with zero
      document frequency.
    """
    n = len(dataset)

    # Use jieba to get the distinct in-vocabulary tokens of each lyric.
    print('Spliting each lyric......')
    doc_words = []
    for i in range(n):
        seen = set()  # set membership is O(1) vs the original O(n) list scan
        for word in jieba.cut(dataset[i][0]):
            if word in words:
                seen.add(word)
            else:
                # OOV token: fall back to single characters.  Only keep
                # characters that are themselves in the vocabulary — the
                # original kept every character and then crashed with a
                # KeyError when incrementing df for an unknown one.
                seen.update(w for w in word if w in words)
        doc_words.append(seen)

    # df: number of documents containing each word.
    print('Computing the df......')
    df = dict.fromkeys(words, 0)
    for seen in doc_words:
        for word in seen:
            df[word] += 1

    # idf with +1 smoothing in the denominator.
    print('Computing the idf......')
    idf = {word: np.log10(n / (df[word] + 1)) for word in words}
    return idf


def doc2tf(word, doc, vocab):
    """
    Compute the normalized term frequency of ``word`` in ``doc``.

    Args:
    - word: :string: the word to count.
    - doc: :string: the document.
    - vocab: :dict: the total vocabulary; tokens not in it are counted
      character by character.

    Returns:
    - tf: :float: occurrences of ``word`` divided by the total number of
      counted tokens; 0.0 when the document yields no tokens (the
      original raised ZeroDivisionError).  Note the original docstring
      claimed :int:, but the function has always returned a ratio.
    """
    tf = 0
    length = 0
    for token in jieba.cut(doc):
        if token in vocab:
            length += 1
            if token == word:
                tf += 1
        else:
            # BUG FIX: iterate the characters of the OOV *token*; the
            # original iterated the characters of the query ``word``,
            # so OOV tokens were counted with the wrong length and
            # matches against them were impossible unless the query
            # itself contained the matching character.
            for w in token:
                length += 1
                if w == word:
                    tf += 1
    if length == 0:
        return 0.0
    return tf / length






