import unittest
from typing import List

import gensim
from gensim.models.doc2vec import TaggedDocument
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

from log import LOGGER


class EmbeddingGenerator:
    """Interface for producing document embeddings.

    Subclasses override both hooks; the base implementations are no-op
    stubs that implicitly return None.
    """

    def gen_embedding(self, docs: List[TaggedDocument]):
        """Return one embedding vector per tagged document. Stub: returns None."""

    def infer_embedding(self, words_of_doc: list):
        """Infer an embedding for an unseen tokenized document. Stub: returns None."""


class EmbeddingGeneratorByDoc2Vec(EmbeddingGenerator):
    """Embedding generator backed by a gensim Doc2Vec model trained on `docs`."""

    def __init__(self, docs):
        # Train the model eagerly so trained vectors are ready for lookup.
        self.model = self.gen_doc2vec_model(docs)

    @staticmethod
    def gen_doc2vec_model(norm_corpus):
        """Train and return a Doc2Vec model over a TaggedDocument corpus.

        dm=1 selects the distributed-memory (PV-DM) algorithm; min_count=1
        keeps every word, which suits the tiny corpora used in the tests below.
        """
        model = gensim.models.Doc2Vec(
            norm_corpus, vector_size=100, dm=1, window=5, min_count=1, epochs=20
        )
        # Lazy %-style logging args: formatted only if INFO is enabled.
        LOGGER.info("total %d words", model.corpus_total_words)

        return model

    def gen_embedding(self, docs: List[TaggedDocument]):
        """Look up the trained vector of each document by its first tag."""
        return [self.model.dv[doc.tags[0]] for doc in docs]

    def infer_embedding(self, words_of_doc: list):
        """Infer a vector for an unseen document (list of tokens).

        Was previously an unimplemented `pass` stub; delegates to
        Doc2Vec.infer_vector, the standard API for out-of-corpus documents.
        """
        return self.model.infer_vector(words_of_doc)


METRICS = ("accuracy", "precision", "recall", "f1")


def classify_eval(corpus: list, embedding_generator: EmbeddingGenerator):
    """Run 2-fold cross-validation of several classifiers on doc embeddings.

    Features come from `embedding_generator.gen_embedding(corpus)`; the binary
    label is derived from each document's second tag ("Y" -> 1, else 0).
    Prints per-fold scores and the mean for every metric in METRICS.
    """
    candidates = [
        ("Near Neighbors", KNeighborsClassifier(2, p=2)),
        ("Linear SVM", SVC(kernel="linear", C=0.01)),
        ("RBF SVM", SVC(C=0.01)),
        ("Decision Tree", DecisionTreeClassifier(max_depth=5)),
        ("Random Forest", RandomForestClassifier(max_depth=5, n_estimators=120)),
        ("Logistic Regression", LogisticRegression(penalty="l2")),
        ("AdaBoost", AdaBoostClassifier()),
        ("Neural Network", MLPClassifier()),
    ]

    features = embedding_generator.gen_embedding(corpus)
    # cross_validate with these scorers expects numeric binary labels (0/1).
    labels = [1 if doc.tags[1] == "Y" else 0 for doc in corpus]
    for clf_name, clf in candidates:
        cv_res = cross_validate(clf, features, labels, scoring=METRICS, cv=2)
        for metric in METRICS:
            fold_scores = cv_res["test_%s" % metric]
            per_fold = ",".join("%0.2f" % score for score in fold_scores)
            print(
                "%s:[%s], %s mean score:%0.2f"
                % (clf_name, per_fold, metric, fold_scores.mean())
            )


class TextClassifierTest(unittest.TestCase):
    """Smoke tests for Doc2Vec training, inference and downstream classification."""

    def corpus4doc2vec(self):
        """Build a tiny TaggedDocument corpus with alternating Y/N labels."""
        sentences = ["i am chinese", "i am in Hubei", "she is beautiful", "he is handsome"]
        # Each doc2vec corpus record is a TaggedDocument; the second tag is a
        # user-assigned class label (even index -> "Y", odd index -> "N").
        tagged = []
        for idx, sentence in enumerate(sentences):
            label = "Y" if idx % 2 == 0 else "N"
            tagged.append(TaggedDocument(sentence.split(" "), [idx, label]))
        return tagged

    def test_doc2vec(self):
        """Train on the toy corpus and sanity-check the trained vector store."""
        corpus = self.corpus4doc2vec()
        embedding_gen = EmbeddingGeneratorByDoc2Vec(corpus)
        # One vector per integer tag, plus one each for the "Y" and "N" tags.
        self.assertEqual(len(embedding_gen.model.dv), len(corpus) + 2)

        vec = embedding_gen.model.infer_vector(["i", "chinese"])
        sims = embedding_gen.model.dv.most_similar(vec, topn=3)
        print(sims)

        classify_eval(corpus, embedding_gen)


if __name__ == "__main__":
    # Run the unittest suite when executed as a script.
    unittest.main()
