import unittest
import re
import numpy as np
import nltk
import spacy
from matplotlib import pyplot as plot

from sklearn.datasets import load_files

import mglearn
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, ENGLISH_STOP_WORDS, TfidfVectorizer
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import LatentDirichletAllocation


class TestBase(unittest.TestCase):
    """Shared base class for the text-feature test cases.

    Eager caching of the aclImdb movie-review corpus is kept below as
    disabled reference code; the individual tests load the data themselves.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Disabled reference code (original corpus preload), kept for reference:
        #   self.movie_path = r"G:\projects\machine_learning_data\data\movie_review\aclImdb"
        #   movie_train = load_files(path.join(self.movie_path, "train"))
        #   movie_test = load_files(path.join(self.movie_path, "test"))
        #   movie_review = movie_train.data, movie_train.target, movie_test.data, movie_test.target  # xtr, xte, ytr, yte
        #   movie_xtr = [doc.replace(b"<br />", b"") for doc in movie_review[0]]
        #   movie_xte = [doc.replace(b"<br />", b"") for doc in movie_review[1]]
        #   movie_review[0], movie_review[1] = movie_xtr, movie_xte
        #   self.movie_review = movie_review


class TestText(TestBase):
    """Exercises for bag-of-words, tf-idf, n-gram and topic-model text features.

    Requires the aclImdb movie-review corpus on disk (see
    ``get_movie_data_test``) plus scikit-learn, mglearn, spaCy (with the
    ``en_core_web_sm`` model) and nltk.
    """

    def test_workds_bag(self):
        # NOTE(review): method name has a typo ("workds"); kept unchanged so
        # unittest discovery still finds the same test names.
        bards_words = ['the fool doth think he is wise,', 'but the wise man knows himself to be a fool']
        vect = CountVectorizer().fit(bards_words)
        print(f'vocabulary type: {type(vect.vocabulary_)}, vocabulary: {vect.vocabulary_}')
        bag_words_trans = vect.transform(bards_words)
        print(f'words bag transform type:{type(bag_words_trans)},\n'
              f'words bag transfrom shape:{bag_words_trans.shape},\n'
              f'words bag transform repr: {repr(bag_words_trans)}\n'
              f'words bag transform:\n {bag_words_trans.toarray()}')

    def get_movie_data_test(self, train_path="train_path", test_path="test_path"):
        """Load the IMDB train/test splits and strip HTML line breaks.

        Args:
            train_path / test_path: directories for ``load_files``; the
                defaults are the original placeholder strings, so pass the
                real aclImdb ``train``/``test`` paths to load actual data.

        Returns:
            (x_train, x_test, y_train, y_test) where the texts are ``bytes``
            with ``<br />`` tags removed.
        """
        movie_train, movie_test = load_files(train_path), load_files(test_path)
        xtr, xte, ytr, yte = movie_train.data, movie_train.target, movie_test.data, movie_test.target
        movie_xtr = [doc.replace(b"<br />", b"") for doc in xtr]
        movie_xte = [doc.replace(b"<br />", b"") for doc in xte]
        return movie_xtr, movie_xte, ytr, yte

    def test_movies_bag(self):
        """Plain bag-of-words + logistic regression on the movie reviews."""
        # BUG FIX: the original read ``self.movie_review``, which is only set
        # by commented-out code in TestBase and therefore always raised
        # AttributeError; load through the shared helper instead.
        text_tr, text_te, ytr, yte = self.get_movie_data_test()
        print(f'xtr len: {len(text_tr)}, xte len: {len(text_te)}, ytr len: {len(ytr)}')
        return  # the model-selection part below is expensive; skipped by default
        vect = CountVectorizer().fit(text_tr)
        xtr = vect.transform(text_tr)
        # BUG FIX: get_feature_names() was removed in scikit-learn 1.2.
        print(f'vect transform features :{vect.get_feature_names_out()}')  # shape: (25000, n) feature num is n, sort by alphabet

        # cross validation
        print(f'mean logistic regression cross score: {cross_val_score(LogisticRegression(), xtr, ytr, cv=5)}')

        # grid search
        params_grid = {'C': [0.001, 0.01, 0.1, 1, 10]}
        grid = GridSearchCV(LogisticRegression(), params_grid, cv=5).fit(xtr, ytr)
        print(f'grid best score: {grid.best_score_:.3f}')
        print(f'grid best params: {grid.best_params_}')  # C:0.1
        # BUG FIX: the test texts must be vectorized before scoring; the
        # original passed the raw strings to a LogisticRegression grid.
        print(f'grid test score: {grid.score(vect.transform(text_te), yte)}')

    def test_movies_bag_5_appear(self):
        """Bag of words keeping only tokens appearing in >= 5 documents."""
        xtr, xte, ytr, yte = self.get_movie_data_test()
        vect = CountVectorizer(min_df=5).fit(xtr)
        xtr_trans = vect.transform(xtr)
        # BUG FIX: the grid search must be fit on the vectorized matrix; the
        # original fit on the raw byte strings and left xtr_trans unused.
        grid = GridSearchCV(LogisticRegression(), {'C': [0.001, 0.01, 0.1, 1, 10]}, cv=5).fit(xtr_trans, ytr)
        # BUG FIX: likewise transform the test texts before scoring.
        print(f'logistic regression best score: {grid.score(vect.transform(xte), yte)}')

    def test_predict_stop_words(self):
        """Same as above, but dropping English stop words first."""
        xtr, xte, ytr, yte = self.get_movie_data_test()
        vect = CountVectorizer(min_df=5, stop_words='english').fit(xtr)  # y is ignored by vectorizers
        xtr_trans = vect.transform(xtr)
        # BUG FIX: fit on the transformed matrix, not the raw byte strings.
        grid = GridSearchCV(LogisticRegression(), {'C': [0.001, 0.01, 0.1, 1, 10]}, cv=5).fit(xtr_trans, ytr)
        print(f'grid best score: {grid.best_score_}')

    def test_tfidf_show_features(self):
        """Tf-idf in a pipeline; inspect lowest/highest scoring features."""
        xtr, xte, ytr, yte = self.get_movie_data_test()
        pipe = make_pipeline(TfidfVectorizer(min_df=5), LogisticRegression())
        grid = GridSearchCV(pipe, {'logisticregression__C': [0.001, 0.01, 0.1, 1, 10]}, cv=5).fit(xtr, ytr)
        print(f'best cross score: {grid.best_score_}')

        # show tfidf words
        vectorizer = grid.best_estimator_.named_steps["tfidfvectorizer"]
        xtr_trans = vectorizer.transform(xtr)
        max_val = xtr_trans.max(axis=0).toarray().ravel()  # per-feature max tf-idf over all docs
        sorted_by_tfidf = max_val.argsort()  # indices, ascending by max tf-idf
        # BUG FIX: get_feature_names() was removed in scikit-learn 1.2.
        feature_name = np.array(vectorizer.get_feature_names_out())
        # BUG FIX: the original printed alphabetical slices and never used
        # sorted_by_tfidf; index through the argsort order instead.
        print(f'tfidf score lowest 20 ea: {feature_name[sorted_by_tfidf[:20]]}')
        print(f'tfidf score highest 20 ea: {feature_name[sorted_by_tfidf[-20:]]}')
        # show idf score: lowest idf == words appearing in the most documents
        sorted_by_idf = np.argsort(vectorizer.idf_)
        print(f' idf score lowest 20 ea: {feature_name[sorted_by_idf[:20]]}')
        # show bar chart with highest tf-idf score features
        mglearn.tools.visualize_coefficients(grid.best_estimator_.named_steps["logisticregression"].coef_,
                                             feature_names=feature_name, n_top_features=40)

    def test_word_bag_ngram(self):
        """Compare unigram, bigram and 1-3-gram vocabularies on a toy corpus."""
        bards_words = ['the fool doth think he is wise,', 'but the wise man knows himself to be a fool']
        vector = CountVectorizer(ngram_range=(1, 1)).fit(bards_words)
        print(f'length of feature: {len(vector.vocabulary_)},\nvector feature names: {vector.get_feature_names_out()}')
        # show 2 dimension words
        vector = CountVectorizer(ngram_range=(2, 2)).fit(bards_words)
        print(f'feature len: {len(vector.vocabulary_)},\n2 di vector feature names: {vector.get_feature_names_out()}')
        vector = CountVectorizer(ngram_range=(1, 3)).fit(bards_words)
        print(f'feature len: {len(vector.vocabulary_)},\n vector feature names: {vector.get_feature_names_out()}')

    def test_ngram_movie(self):
        """Grid-search C and ngram_range jointly; visualize as a heat map."""
        xtr, xte, ytr, yte = self.get_movie_data_test()
        pipe = make_pipeline(TfidfVectorizer(min_df=5), LogisticRegression())
        param_grid = {'logisticregression__C': [0.001, 0.01, 0.1, 1, 10, 100],
                      'tfidfvectorizer__ngram_range': [(1, 1), (1, 2), (1, 3)]}
        grid = GridSearchCV(pipe, param_grid, cv=5).fit(xtr, ytr)
        print(f'cross best score: {grid.best_score_}')
        print(f'grid best params: {grid.best_params_}')
        # cv_results_ varies ngram_range fastest (params sorted by name), so
        # reshape to (n_C, n_ngram) then transpose -> rows=ngram, cols=C.
        scores = grid.cv_results_['mean_test_score'].reshape(-1, 3).T
        heatmap = mglearn.tools.heatmap(scores, xlabel='C', ylabel='ngram_range', cmap='viridis', fmt='%.3f',
                                        xticklabels=param_grid['logisticregression__C'],
                                        yticklabels=param_grid['tfidfvectorizer__ngram_range'])
        plot.colorbar(heatmap)
        plot.show()
        # show 1-3 di features
        # BUG FIX: step name typo ('efidfvectorizer') and broken attribute
        # access ('get_feature_names.out') in the original.
        vect = grid.best_estimator_.named_steps['tfidfvectorizer']
        feature_names = np.array(vect.get_feature_names_out())
        coef = grid.best_estimator_.named_steps['logisticregression'].coef_
        mglearn.tools.visualize_coefficients(coef, feature_names, n_top_features=40)

    def test_word_stem(self):
        """Contrast spaCy lemmatization with Porter stemming on one sentence."""
        en_nlp = spacy.load('en_core_web_sm')
        stemmer = nltk.stem.PorterStemmer()

        def compare_normalization(doc):
            # Print the lemma of each token, then its Porter stem.
            doc_spacy = en_nlp(doc)
            print(f'show word split result: {[token.lemma_ for token in doc_spacy]}')
            print(f'show word stem found result: {[stemmer.stem(token.norm_.lower()) for token in doc_spacy]}')

        test_text = "our meeting today was worse than yesterday, I'm scared of meeting the clients tomorrow"
        compare_normalization(test_text)

    def test_lemmatization(self):
        """Lemmatize the movie corpus with spaCy using a regex pre-tokenizer."""
        # BUG FIX: Tokenizer.tokens_from_list and the entity=/parse= call
        # keywords were removed in spaCy v2/v3. Build a Doc directly from the
        # regex tokens and run the pipeline (which includes the lemmatizer)
        # on it instead of monkey-patching en_nlp.tokenizer.
        from spacy.tokens import Doc

        regexp = re.compile(r'(?u)\b\w\w+\b')
        en_nlp = spacy.load('en_core_web_sm')

        def custom_tokenizer(doc):
            # CountVectorizer hands us an already-decoded, preprocessed string.
            doc_spacy = en_nlp(Doc(en_nlp.vocab, words=regexp.findall(doc)))
            return [token.lemma_ for token in doc_spacy]

        lemma_vect = CountVectorizer(tokenizer=custom_tokenizer, min_df=5)
        xtr, xte, ytr, yte = self.get_movie_data_test()
        xtr_lemma = lemma_vect.fit_transform(xtr)
        print(f'words lemmatization shape: {xtr_lemma.shape}')

    def test_topic_modeling(self):
        """LDA topic modelling on the movie reviews (10 and 100 topics)."""
        xtr, xte, ytr, yte = self.get_movie_data_test()
        # drop words appearing in >15% of documents, keep 10k most frequent
        vect = CountVectorizer(max_features=10000, max_df=.15)
        # BUG FIX: fit_transform needs an iterable of documents; the original
        # passed the single string 'test train', which is iterated character
        # by character (and max_df would then empty the vocabulary anyway).
        x = vect.fit_transform(xtr)
        # BUG FIX: n_topics was renamed n_components in scikit-learn 0.19
        # and removed in 0.21.
        lda = LatentDirichletAllocation(n_components=10, learning_method='batch', max_iter=25, random_state=0)
        doc_topics = lda.fit_transform(x)  # (n_docs, 10) document-topic weights, not used further
        print(f'lda component shape: {lda.components_.shape}')  # (10, 10000)
        sorting = np.argsort(lda.components_, axis=1)[:, ::-1]  # descending word weight per topic
        feature_names = np.array(vect.get_feature_names_out())
        mglearn.tools.print_topics(topics=range(10), feature_names=feature_names, sorting=sorting,
                                   topics_per_chunk=5, n_words=10)
        lda100 = LatentDirichletAllocation(n_components=100, learning_method='batch', max_iter=25, random_state=0)
        topics_100 = lda100.fit_transform(x)
        # randomly select several topics
        topics_sample = np.array([11, 21, 31, 41, 51])
        sorting_100 = np.argsort(lda100.components_, axis=1)[:, ::-1]
        feature_names_100 = np.array(vect.get_feature_names_out())
        # BUG FIX: keyword typo topic_per_chunk -> topics_per_chunk.
        mglearn.tools.print_topics(topics=topics_sample, feature_names=feature_names_100,
                                   sorting=sorting_100, topics_per_chunk=7, n_words=20)