#!/usr/bin/env python3

"""AI composing by HMM

Word2VecHMM is essentially a pipeline.
pipeline: Word2Vec -> HMM
"""

import pathlib
import joblib

import numpy as np
from hmmlearn import hmm
from scipy.spatial import distance
from gensim.models import word2vec, Word2Vec

from sklearn.preprocessing import FunctionTransformer, LabelEncoder
from sklearn.base import BaseEstimator


# Script-wide configuration: model cache filenames and the corpus location.
config = {
    'wv-model-name': 'wv.model',    # path where the trained word2vec model is cached
    'hmm-model-name': 'hmm.model',  # path where the trained HMM is cached (joblib)
    'encoding': 'label',            # NOTE(review): not read anywhere in this file — confirm it is still used
    'corpus-path': '~/Folders/mycorpus/love',
}

# set your own corpus path
CORPUS_PATH = pathlib.Path(config['corpus-path']).expanduser()

# Cached-model paths, used to decide whether to retrain or reload.
WV_PATH = pathlib.Path(config['wv-model-name'])
HMM_PATH = pathlib.Path(config['hmm-model-name'])

def read(path, level='p'):
    """Read text from a file or a directory of text files.

    Arguments:
        path -- str or pathlib.Path; a text file or a directory
        level -- 'p' for a flat list of paragraphs,
                 'd' for whole-document text (nested per directory)

    Returns:
        level 'p': list of str (paragraphs longer than two characters);
        level 'd': one string per file (nested lists for directories);
        None for an unrecognized level.
    """
    path = pathlib.Path(path) if isinstance(path, str) else path
    if path.is_dir():
        if level == 'd':
            # one entry per child, mirroring the directory structure
            return [read(child, level=level) for child in path.iterdir()]
        if level == 'p':
            paragraphs = []
            # only plain-text documents contribute paragraphs
            for child in path.iterdir():
                if child.suffix in {'.txt', '.md'}:
                    paragraphs.extend(read(child, level=level))
            return paragraphs
    else:
        # keep only the text after the last '---' (drops any front matter)
        body = path.read_text().rpartition('---')[-1]
        if level == 'd':
            return body
        if level == 'p':
            # paragraphs are blank-line separated; very short ones are noise
            return [p for p in body.split('\n\n') if len(p) > 2]


def get_corpus(path=CORPUS_PATH):
    """Build a gensim-style corpus from Chinese text files.

    A corpus is a 2-order list of strings/words, like [['words']].
    Only for Chinese: segmentation is done with jieba's POS tagger.

    Arguments:
        path -- file or directory passed to `read`

    Returns:
        list of token lists, one per paragraph
    """
    import jieba
    import jieba.posseg as pseg
    import logging
    jieba.setLogLevel(logging.INFO)

    def tokenize(sentence):
        # Map each (word, POS-flag) pair to the token used downstream.
        tokens = []
        for wf in pseg.cut(sentence):
            if wf.flag == 'o':
                tokens.append(f'{wf.word}~')
            elif wf.flag == 'y':
                tokens.append(f'{wf.word}\n')
            elif wf.flag in {'x', 'un'}:
                # non-word / unknown characters become a separator token
                tokens.append('/')
            elif wf.word in {';', '；', ':', '：'}:
                tokens.append('，')
            else:
                tokens.append(wf.word)
        return tokens

    return [tokenize(s) for s in read(path)]

def _o(model):
    # I hope that it never be called
    n_featrues = len(model.wv[model.wv.index_to_key[0]])
    return np.random.random(n_featrues)


class Word2VecTransformer(FunctionTransformer):
    """FunctionTransformer mapping words <-> word2vec embeddings.

    Forward: list of words -> stacked 2-D array of vectors (unknown words
    get a random vector via `_o`). Inverse: each vector is mapped to the
    nearest vocabulary word by Euclidean distance.
    """

    def __init__(self, wv_model, *args, **kwargs):
        """
        Arguments:
            wv_model --- word2vec model of gensim
        """
        vectors = wv_model.wv

        def words_to_matrix(words):
            rows = [
                vectors[w] if w in vectors.key_to_index else _o(wv_model)
                for w in words
            ]
            return np.vstack(rows)

        def nearest_word(x):
            # brute-force nearest-neighbour search over the whole vocabulary
            dists = [distance.euclidean(x, vectors[w]) for w in vectors.key_to_index]
            return vectors.index_to_key[np.argmin(dists)]

        super().__init__(
            *args,
            func=words_to_matrix,
            inverse_func=lambda xs: [nearest_word(x) for x in xs],
            check_inverse=False,
            **kwargs,
        )

def make_wv(corpus, vector_size=50, rebuild=False):
    """Load a cached word2vec model, or train and cache a new one.

    Arguments:
        corpus -- 2-order list of words, as produced by `get_corpus`
        vector_size -- embedding dimensionality for a newly trained model
        rebuild -- when True, retrain even if a cached model exists

    Returns:
        a trained gensim Word2Vec model
    """
    if not rebuild and WV_PATH.exists():
        return Word2Vec.load(config['wv-model-name'])
    # Word2Vec(corpus, ...) already builds the vocabulary and trains.
    # The previous extra build_vocab() call afterwards would re-initialize
    # the vocabulary and reset the freshly trained weights, so it is gone.
    wv_model = word2vec.Word2Vec(corpus, vector_size=vector_size, workers=4)
    wv_model.save(config['wv-model-name'])
    return wv_model

class BaseTextHMM(BaseEstimator):
    """Wrapper pairing an hmmlearn model with a word encoder.

    Subclasses override `preprocess`/`encode`/`decode` when a different
    encoding scheme is needed.
    """

    def __init__(self, model, encoder=None):
        """
        Arguments:
            model -- an hmmlearn HMM instance
            encoder -- object exposing fit/transform/inverse_transform
        """
        self._model = model
        self._encoder = encoder

    @property
    def encoder(self):
        # read-only access to the word encoder
        return self._encoder

    @property
    def model(self):
        # read-only access to the wrapped hmmlearn model
        return self._model

    def fit(self, corpus):
        """Encode the corpus and fit the underlying HMM on it."""
        self.preprocess(corpus)
        sequences = self.encode(corpus)
        # hmmlearn takes one stacked array plus per-document lengths
        lengths = [len(seq) for seq in sequences]
        return self.model.fit(np.vstack(sequences), lengths)

    def sample(self, *args, **kwargs):
        """Delegate sampling to the wrapped hmmlearn model."""
        return self.model.sample(*args, **kwargs)

    def generate(self, *args, **kwargs):
        """Sample observations from the HMM and yield decoded words."""
        observations, _states = self.sample(*args, **kwargs)
        yield from self.decode(observations)

    def preprocess(self, corpus):
        """Fit the encoder on the flattened corpus vocabulary."""
        self.encoder.fit(np.hstack(corpus))

    def encode(self, corpus):
        """Encode the corpus into number sequences for hmmlearn.

        Arguments:
            corpus {2-order list of words} -- the corpus read from strings or text files

        Returns:
            list of encoded sequences, one per document
        """
        return [self.encoder.transform(doc) for doc in corpus]

    def decode(self, Y):
        """Inverse of `encode`.

        Arguments:
            Y {2d array} -- output of hmmlearn

        Returns:
            list of words
        """
        return self.encoder.inverse_transform(Y)



class TextHMM(BaseTextHMM):
    """Discrete HMM over words (intended for hmmlearn's MultinomialHMM).

    Uses a LabelEncoder as the default encoder; encoded sequences are
    column vectors of label ids, as hmmlearn expects.
    """

    def preprocess(self, corpus):
        # lazily create the default encoder on first use
        if self.encoder is None:
            self._encoder = LabelEncoder()
        super().preprocess(corpus)

    def encode(self, corpus):
        # each document becomes an (n_words, 1) column of label ids
        return [self.encoder.transform(words)[:, None] for words in corpus]

    def decode(self, Y):
        # flatten the column vector(s) before inverting the labels
        return self.encoder.inverse_transform(np.ravel(Y))


class Word2VecHMM(BaseTextHMM):
    """Continuous HMM whose observations are word2vec embeddings."""

    def preprocess(self, corpus):
        # lazily train a word2vec model on the corpus if no encoder was given
        if self.encoder is None:
            self._encoder = Word2VecTransformer(make_wv(corpus))
        super().preprocess(corpus)


# --- script entry: build corpus, train/load models, stream generated text ---

print('get corpus')
corpus = get_corpus(path=CORPUS_PATH)

print('initialize encoder')
wv_model = make_wv(corpus, vector_size=50, rebuild=False)
_encoder = Word2VecTransformer(wv_model)

print('make hmm model')

rebuild = False   # force retraining the HMM from scratch
retrain = True    # continue training a cached HMM

if not rebuild and HMM_PATH.exists():
    _model = joblib.load(config['hmm-model-name'])
    # Always wrap the loaded model; previously `model` stayed undefined
    # when retrain was False, crashing with NameError at generation time.
    model = Word2VecHMM(model=_model, encoder=_encoder)
    if retrain:
        _model.n_iter = 100  # fewer iterations for a warm start
        model.fit(corpus)
else:
    _model = hmm.GaussianHMM(n_components=20, n_iter=1000, tol=0.0001, init_params="", covariance_type='diag')
    model = Word2VecHMM(model=_model, encoder=_encoder)
    model.fit(corpus)
    joblib.dump(model.model, config['hmm-model-name'])

print('generating sentences:')

import time, random

n_words_per_line = 20
k = 0
for word in model.generate(n_samples=1000):
    print(word, end='')
    # small random pause so the output reads like live typing
    time.sleep(random.random() / 5)
    k += 1
    if k == n_words_per_line:
        k = 0
        print(' ❥')
