from __future__ import print_function
from __future__ import division

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn2pmml.decoration import ContinuousDomain
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml.feature_extraction.text import Splitter
from sklearn2pmml import sklearn2pmml
from sklearn_pandas import DataFrameMapper
from sklearn.model_selection import GridSearchCV, StratifiedKFold
import sys


if __name__ == '__main__':
    # Train a TF-IDF + L1 logistic-regression text classifier and pickle the
    # best grid-searched pipeline.  Input files are local, hard-coded paths.
    cat_file = '/Users/hardy/data/cat_count_all.txt'   # "<count> <category>" per line
    data_file = '/Users/hardy/data/train20_all.tsv'    # "<label>\t<text>" per line
    model_file = '/Users/hardy/data/lr_pipeline.pkl'   # fix: was open('', ...) — guaranteed FileNotFoundError
    cat_count = 30        # minimum sample count for a category to be kept
    max_lines = 100000    # cap on rows read from data_file

    C_OPS = [10]          # inverse-regularization strengths to grid-search

    param_grid = [
        {
            'classify__C': C_OPS
        }
    ]

    cv = StratifiedKFold(n_splits=10, shuffle=True)

    target = []
    X = []

    # Build category -> sample-count map; skip malformed lines, report bad counts.
    cat_dict = dict()
    with open(cat_file) as fd:
        for line in fd:
            parts = line.strip().split(' ')
            if len(parts) == 2:
                try:
                    # only int() can raise here, so catch narrowly
                    cat_dict[parts[1]] = int(parts[0])
                except ValueError as e:
                    print(str(e))

    print('cat_count is : ', sum(1 for v in cat_dict.values() if v >= cat_count))

    # Read up to max_lines rows; keep labels frequent enough (and not the
    # excluded '#18' class); drop purely-numeric and overlong tokens.
    with open(data_file) as fd:
        for n, line in enumerate(fd, start=1):
            if n > max_lines:
                break
            parts = line.strip().split('\t')
            if len(parts) != 2:
                continue
            t, x = parts
            # .get(t, 0) avoids the original double lookup; 0 never passes the threshold
            if cat_dict.get(t, 0) >= cat_count and t != '#18':
                target.append(t.strip())
                # strip each token once (the original stripped three times per token)
                tokens = [k.strip() for k in x.strip().split(' ')]
                X.append(' '.join(k for k in tokens
                                  if not k.isdigit() and 0 < len(k) < 15))

    data = pd.DataFrame({'data': X})
    data['target'] = target
    print(set(target))

    # TF-IDF over the pre-tokenized text; Splitter keeps the tokenizer
    # expressible in the PMML export.
    mapper = DataFrameMapper([
        ('data', TfidfVectorizer(min_df=2, norm=None, analyzer='word', tokenizer=Splitter())),
    ])
    # solver='liblinear' is required for penalty='l1' on scikit-learn >= 0.22
    # (the lbfgs default raises ValueError for l1); liblinear was the old default.
    pipeline = PMMLPipeline([
        ('mapper', mapper),
        ('classify', LogisticRegression(penalty='l1', solver='liblinear',
                                        multi_class='ovr', max_iter=10))
    ])

    grid = GridSearchCV(pipeline, cv=cv, n_jobs=3, param_grid=param_grid)

    # The mapper selects only the 'data' column, so passing the full frame as X is safe.
    grid.fit(data, data['target'])
    pipeline = grid.best_estimator_

    import pickle
    with open(model_file, 'wb+') as fd:
        pickle.dump(pipeline, fd)

    # Optional PMML export of the fitted pipeline:
    # sklearn2pmml(pipeline, '/Users/hardy/lr.pmml', with_repr=True, debug=True)