# -*- coding: utf-8 -*-
# train.py
# Created by Hardy on 19th, Feb

# from misc.classifier.word_dict import word_dict
# from scipy.sparse import csr_matrix
# from sklearn.feature_extraction import DictVectorizer
from __future__ import division
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.feature_extraction.text import TfidfTransformer
import pickle
import sys
import numpy as np


if __name__ == '__main__':
    # Usage: python train.py <data_file> <model_file>
    #   data_file  : pickle containing a (feature_matrix, labels) pair
    #   model_file : path where the best fitted estimator is pickled
    data_file = sys.argv[1]
    lg_file = sys.argv[2]

    # Load features and targets; use a context manager so the handle is
    # closed promptly instead of leaking until GC.
    with open(data_file, 'rb') as fd:
        X, target = pickle.load(fd)

    # Fixed random_state keeps the train/test split reproducible.
    train_x, test_x, train_y, test_y = train_test_split(
        X, target, test_size=0.20, random_state=30)

    # solver='liblinear' is stated explicitly: it was the historical
    # default, but scikit-learn >= 0.22 defaults to 'lbfgs', which does
    # NOT support penalty='l1' and would raise at fit time.
    logreg = LogisticRegression(penalty='l1', max_iter=10,
                                multi_class='ovr', solver='liblinear')

    # Grid-search over C with stratified 10-fold CV; n_jobs=3 fits
    # folds in parallel.
    params = {"C": [10]}
    cv = StratifiedKFold(n_splits=10, shuffle=True)
    logreg_cv = GridSearchCV(logreg, params, cv=cv, n_jobs=3, verbose=True)

    logreg_cv.fit(train_x, train_y)

    # best_estimator_ is refit on the full training split by GridSearchCV.
    best_logreg = logreg_cv.best_estimator_
    print(logreg_cv.cv_results_)

    # Persist the winning model ('wb' suffices — we only write).
    with open(lg_file, 'wb') as fd:
        pickle.dump(best_logreg, fd)

    # Spot-check: predicted vs. true label for the first 100 test rows.
    for k, v in zip(best_logreg.predict(test_x[0:100]), test_y[0:100]):
        print("%s %s" % (k, v))

    print(best_logreg)

    # Misclassification count and rate on the held-out test split.
    err_cnt = sum(np.array(best_logreg.predict(test_x)) != np.array(test_y))
    print(err_cnt)
    print(err_cnt/len(test_y))
