import time

import numpy as np
import torch
from scipy.sparse import hstack, csr_matrix
from sklearn.decomposition import TruncatedSVD
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.svm import SVC

import configs


class TraditionalDs:
    """Adapter turning a tokenized torch dataloader into classic sklearn
    features: TF-IDF over the decoded text concatenated with per-sample stats.
    """

    def __init__(self, dataloader, tk, batch_size=64, vectorizer=None):
        """
        Args:
            dataloader: iterable of batches shaped
                (input_ids, attention_mask, stats, one_hot_labels).
            tk: tokenizer exposing ``batch_decode``.
            batch_size: kept for API compatibility; batching is driven by the
                dataloader itself, so this value is never read here.
            vectorizer: optional already-fitted TfidfVectorizer. Pass the one
                fitted on the training split when extracting test features so
                both splits share a single vocabulary / feature space.
        """
        self.dataloader = dataloader
        self.tokenizer = tk
        self.batch_size = batch_size
        self.vectorizer = vectorizer

    def get_features(self):
        """Decode all batches and build the combined feature matrix.

        Returns:
            (features, labels): sparse matrix of shape
            [n_samples, n_tfidf + n_stats] and a 1-D array of class indices.
        """
        texts = []
        stats_list = []
        labels_list = []

        # no_grad: tensors are only read here, no autograd bookkeeping needed
        with torch.no_grad():
            for batch in self.dataloader:
                input_ids, _attention_mask, stats, labels = batch[0], batch[1], batch[2], batch[3]
                texts.extend(
                    self.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
                )

                # Labels arrive one-hot; argmax recovers the class index.
                labels_list.extend(np.argmax(labels.cpu().numpy(), axis=1))
                stats_list.extend(stats.cpu().numpy())

        if self.vectorizer is None:
            # Original behavior: fit a fresh vectorizer on this split.
            # NOTE(review): fitting separately per split yields incompatible
            # feature spaces between train and test — pass the train-fitted
            # vectorizer via __init__ when extracting test features instead.
            self.vectorizer = TfidfVectorizer(
                max_features=configs.tfidf_maxFt,
                ngram_range=(1, 2),
                sublinear_tf=True,
                analyzer="char_wb",
                min_df=5
            )
            print("TF-IDF ft. generating")
            tfidf_features = self.vectorizer.fit_transform(texts)
        else:
            # Reuse the supplied (already fitted) vectorizer: transform only.
            print("TF-IDF ft. generating (reusing fitted vectorizer)")
            tfidf_features = self.vectorizer.transform(texts)

        stats_features = np.array(stats_list)
        combined_features = hstack([tfidf_features, csr_matrix(stats_features)])

        return combined_features, np.array(labels_list)


def SVDprocess(features, n_components=None, svd=None):
    """Reduce feature dimensionality with truncated SVD when needed.

    Args:
        features: 2-D (sparse or dense) feature matrix.
        n_components: target dimensionality; defaults to configs.n_ft_afterSVD.
        svd: optional already-fitted TruncatedSVD. Supply the instance fitted
            on the training split when reducing the test split so both land in
            the same projected space (the default — fitting per split —
            produces incompatible projections between splits).

    Returns:
        The reduced dense array, or ``features`` unchanged when it already has
        no more than ``n_components`` columns.
    """
    if n_components is None:
        n_components = configs.n_ft_afterSVD

    # Nothing to reduce: already at or below the target width.
    if features.shape[1] <= n_components:
        return features

    # Reuse a pre-fitted projection when one is supplied.
    if svd is not None:
        return svd.transform(features)

    print("svd process")
    svd = TruncatedSVD(n_components=n_components, random_state=configs.c_random_seed)
    reduced_features = svd.fit_transform(features)
    print(f"new ft. num={reduced_features.shape[1]}")
    return reduced_features


def train_evaluate(model, name, X_train, y_train, X_test, y_test):
    """Fit ``model`` on the train split and score it on the test split.

    Args:
        model: sklearn-style estimator exposing fit/predict.
        name: display name used only in log output.
        X_train, y_train: training features / integer class labels.
        X_test, y_test: test features / integer class labels.

    Returns:
        dict with accuracy/precision/recall/f1 plus fit and predict wall time.
    """
    # Plain print: the original f-string had no placeholder.
    print("Now training. name=", name)

    start_fit = time.time()
    model.fit(X_train, y_train)
    fit_time = time.time() - start_fit

    print("Now evaluating. name=", name)
    # predict
    start_pred = time.time()
    y_pred = model.predict(X_test)
    pred_time = time.time() - start_pred

    # Binary problems keep sklearn's default "binary" averaging; with more
    # classes that default raises ValueError, so fall back to weighted
    # averaging (labels here come from argmax over one-hot vectors, so a
    # multiclass setup is possible).
    average = "binary" if len(set(y_test)) <= 2 else "weighted"

    # Scoring
    metrics = {
        "accuracy": accuracy_score(y_test, y_pred),
        "precision": precision_score(y_test, y_pred, average=average),
        "recall": recall_score(y_test, y_pred, average=average),
        "f1": f1_score(y_test, y_pred, average=average),
        "fit_time": fit_time,
        "pred_time": pred_time
    }

    print(f"{name} Result:")
    print(f'ACC: {metrics["accuracy"]}, PRES: {metrics["precision"]}')
    print(f'REC: {metrics["recall"]}, F1: {metrics["f1"]}')

    return metrics


def compare3models(train_dataset, test_dataset, tk):
    """Train and compare SVM / LDA / RandomForest on TF-IDF + stats features.

    Args:
        train_dataset: dataloader yielding the training split batches.
        test_dataset: dataloader yielding the test split batches.
        tk: tokenizer used by TraditionalDs to decode input_ids back to text.
    """
    # get tradition dataset
    train_trds = TraditionalDs(train_dataset, tk)
    test_trds = TraditionalDs(test_dataset, tk)

    # process data
    # NOTE(review): each get_features() call fits its own TfidfVectorizer, so
    # the train and test matrices use different vocabularies / feature spaces.
    # The test split should instead be transformed with the vectorizer fitted
    # on the train split — as written, test-set metrics are questionable.
    X_train, y_train = train_trds.get_features()
    X_test, y_test = test_trds.get_features()

    # SVD
    # NOTE(review): same issue — SVD is fitted independently per split; fit on
    # X_train only, then apply that fitted projection to X_test.
    X_train_red = SVDprocess(X_train)
    X_test_red = SVDprocess(X_test)

    # Three classic baselines, seeded where the estimator is stochastic.
    models = {
        "SVM": SVC(
            kernel="linear",
            probability=True,
            random_state=configs.c_random_seed
        ),
        "LDA": LinearDiscriminantAnalysis(),
        "RandomForest": RandomForestClassifier(
            n_estimators=150,
            max_depth=25,
            class_weight="balanced",
            n_jobs=-1,
            random_state=configs.c_random_seed
        )
    }

    results = {}

    # Fit and score every model on the shared (reduced) feature matrices.
    for name, model in models.items():
        results[name] = train_evaluate(
            model, name,
            X_train_red, y_train,
            X_test_red, y_test
        )

    print("\nResults:")

    for name, metrics in results.items():
        print("Model:", name)
        print("res:", metrics)

if __name__ == "__main__":
    from data import tokenizer, load, loadTestDs, getLoader

    # Build dataloaders for both splits, then run the three-model comparison.
    batch_size = 64
    compare3models(
        getLoader(load(), batch_size),
        getLoader(loadTestDs(), batch_size),
        tokenizer,
    )
