# from misc.classifier.word_dict import word_dict
# from scipy.sparse import csr_matrix
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.feature_extraction.text import TfidfTransformer
import pickle
import sys
import numpy as np
import operator


def get_top_class(items, top_n):
    """Return the labels of the *top_n* highest-scoring (label, score) pairs.

    Pairs are ranked by score in descending order; only the labels are
    returned, best score first. Fewer than *top_n* pairs yields a shorter list.
    """
    ranked = sorted(items, key=lambda pair: pair[1], reverse=True)
    return [label for label, _score in ranked[:top_n]]


if __name__ == '__main__':
    # Usage: <script> <data_file> <model_out_file> <C>
    #   data_file      : pickle containing (X, target) — feature matrix + labels
    #   model_out_file : path where the fitted LogisticRegression is pickled
    #   C              : inverse regularization strength (float)
    data_file = sys.argv[1]
    lg_file = sys.argv[2]
    C = float(sys.argv[3])

    # Context manager so the data file handle is closed (original leaked it).
    # NOTE: pickle.load on untrusted input is unsafe — assumes data_file is
    # produced by a trusted companion script.
    with open(data_file, 'rb') as fd:
        X, target = pickle.load(fd)

    train_x, test_x, train_y, test_y = train_test_split(
        X, target, test_size=0.10, random_state=30, stratify=target)

    # solver='liblinear' is required for penalty='l1': the modern sklearn
    # default solver (lbfgs, since 0.22) does not support L1 regularization.
    # liblinear was also the historical default, so this pins the original
    # intended behavior.
    best_logreg = LogisticRegression(penalty='l1', max_iter=20, C=C,
                                     multi_class='ovr', solver='liblinear',
                                     n_jobs=3)

    best_logreg.fit(train_x, train_y)
    with open(lg_file, 'wb+') as fd:
        pickle.dump(best_logreg, fd)

    # Predict once per split and reuse (original re-ran predict on test_x).
    test_pred = best_logreg.predict(test_x)
    train_pred = best_logreg.predict(train_x)

    # Predicted vs. true label for every held-out example.
    for k, v in zip(test_pred, test_y):
        print("%s %s" % (k, v))

    print(X.shape)
    print(best_logreg)

    # Top-1 error count and rate on the test split.
    err_cnt = sum(np.array(test_pred) != np.array(test_y))
    print(err_cnt)
    print(err_cnt / len(test_y))

    # Top-1 error count and rate on the training split.
    err_cnt = sum(np.array(train_pred) != np.array(train_y))
    print(err_cnt)
    print(err_cnt / len(train_y))

    # One vectorized predict_proba call per split instead of a model call
    # per row (original called predict_proba inside the loops below).
    test_probs = best_logreg.predict_proba(test_x)
    train_probs = best_logreg.predict_proba(train_x)

    # Class probabilities for the first test example.
    print(list(zip(best_logreg.classes_, test_probs[0])))

    # Top-3 error: count examples whose true label is not among the three
    # most probable classes (test split).
    err_cnt = 0
    for probs, y in zip(test_probs, test_y):
        labels = get_top_class(zip(best_logreg.classes_, probs), 3)
        if y not in labels:
            err_cnt += 1

    print(err_cnt)
    print(err_cnt / len(test_y))

    # Same top-3 error on the training split.
    err_cnt = 0
    for probs, y in zip(train_probs, train_y):
        labels = get_top_class(zip(best_logreg.classes_, probs), 3)
        if y not in labels:
            err_cnt += 1

    print(err_cnt)
    print(err_cnt / len(train_y))

