# Parts of this file are inspired by:
# http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html

import time
import numpy as np
import pylab as pl

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
from sklearn.externals import joblib

import stockdata
import stocknews

# --- Experiment configuration ---
# When True, pickle every fitted classifier to data/classifiers/ at the end.
STORE_TRAINED_MODEL = False
# Cap on how many articles are used for both training and test sets.
NUMBER_OF_ARTICLES = 30000
# Relative price changes within +/- this band around 1.0 are labelled 'neutral'.
NEUTRAL_THRESHOLD = 0.01
# Vocabulary size limit for the TF-IDF vectorizer.
MAX_FEATURES = 200
# Horizon passed to StockData.get_stock_change (unit defined by that API --
# presumably days; confirm against stockdata).
TIMESPAN = 4
# Output verbosity toggles for the benchmark() routine.
opts_print_top10 = True
opts_print_report = True
opts_print_cm = False
opts_fig_cm = False
# Price-history window requested from StockData.
START_DATE = "2011-10-15"
END_DATE = "2013-11-30"

# Prepare data
# ------------
print "Preparing data..", time.clock()

def label_prices_change(price_change):
    """Map a relative price change to a sentiment class label.

    A change above 1 + NEUTRAL_THRESHOLD is 'positive', below
    1 - NEUTRAL_THRESHOLD is 'negative', anything in between 'neutral'.
    """
    upper_bound = 1 + NEUTRAL_THRESHOLD
    lower_bound = 1 - NEUTRAL_THRESHOLD
    if price_change > upper_bound:
        return 'positive'
    if price_change < lower_bound:
        return 'negative'
    return 'neutral'

# Preparing

print "Getting data..", time.clock()

# Two disjoint article databases: an earlier period for training and a
# later one for testing (out-of-time evaluation).
train_stock_articles = stocknews.StockNews(db_file_name='data/_stocknews_db_639-stocks_2011-10-15-2013-10-15.shelve')
test_stock_articles = stocknews.StockNews(db_file_name='data/_stocknews_db_639-stocks_2013-10-15-2013-11-31.shelve')
# Number of distinct stocks among the first NUMBER_OF_ARTICLES training articles.
no_stocks = len(set([stock for stock in train_stock_articles.iterate('stock_short_name')][:NUMBER_OF_ARTICLES]))
# Price history for all stocks; used below to derive each article's label.
stock_prices = stockdata.StockData(train_stock_articles.get_short_names(), START_DATE, END_DATE)

# Prepare training set

print "Preparing data sets..", time.clock()

# Bag-of-words TF-IDF features, vocabulary capped at MAX_FEATURES terms.
vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, stop_words='english')

# Fit the vocabulary on the training articles and vectorize them.
X_train = vectorizer.fit_transform([article for article in train_stock_articles.iterate('content')][:NUMBER_OF_ARTICLES])
print("Training: n_samples: %d, n_features: %d" % X_train.shape)

# Label each training article by the stock's relative price change over
# TIMESPAN after the article's timestamp.
y_train = []
for counter, (short_name, datetime) in enumerate(train_stock_articles.iterate(('stock_short_name','datetime'))):
    if counter == NUMBER_OF_ARTICLES:
        break
    price_change = stock_prices.get_stock_change(short_name, datetime, TIMESPAN)
    category = label_prices_change(price_change)
    y_train.append(category)

# Prepare test set

# Transform (do NOT re-fit) the test articles with the vocabulary learned
# from the training set.
X_test = vectorizer.transform([article for article in test_stock_articles.iterate('content')][:NUMBER_OF_ARTICLES])
print("TEST: n_samples: %d, n_features: %d" % X_test.shape)

# Label each test article the same way as the training articles.
y_test = []
for counter, (short_name, datetime) in enumerate(test_stock_articles.iterate(('stock_short_name','datetime'))):
    if counter == NUMBER_OF_ARTICLES:
        break
    # BUG FIX: this call hard-coded the horizon as 4 while the training
    # loop uses TIMESPAN; if TIMESPAN is ever changed, train and test
    # labels would silently use different horizons.
    price_change = stock_prices.get_stock_change(short_name, datetime, TIMESPAN)
    category = label_prices_change(price_change)
    y_test.append(category)

# Train models
# ------------

print "Training models..", time.clock()

# Display order for per-class output in benchmark(); note this is NOT the
# sorted order sklearn uses internally for clf.classes_ -- verify when
# mapping per-class arrays to these names.
categories = ['positive', 'neutral', 'negative']
# Vocabulary terms, indexed consistently with the TF-IDF feature columns.
feature_names = np.asarray(vectorizer.get_feature_names())

# Method for benchmarking classifiers
def benchmark(clf, name):
    """Fit `clf` on the module-level training set and evaluate on the test set.

    Prints timing, scores, the most predictive words (for linear models),
    a classification report and a confusion matrix depending on the
    opts_* flags.

    Parameters:
        clf: an sklearn estimator with fit/predict.
        name: display name used in log output and figure titles.

    Returns:
        The F1 score on the test set -- the value the caller plots under
        the "F1-score" label.
    """
    print('_' * 80)
    print(name)
    print("(%s)" % clf)

    t0 = time.time()
    clf.fit(X_train, y_train)
    train_time = time.time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time.time()
    pred = clf.predict(X_test)
    test_time = time.time() - t0
    print("test time:  %0.3fs" % test_time)

    # BUG FIX: the original overwrote `score` with the precision right
    # before returning, so the bar chart labelled "F1-score" actually
    # showed precision. Compute both, return the F1 score.
    f1 = metrics.f1_score(y_test, pred, labels=categories)
    print("f1-score:   %0.3f" % f1)
    precision = metrics.precision_score(y_test, pred, labels=categories)
    print("precision:  %0.3f" % precision)

    # Most predictive words (only linear models expose coef_).
    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))

        if opts_print_top10 and feature_names is not None:
            print("10 most predictive words for each class:")
            # BUG FIX: coef_ rows follow clf.classes_ (sorted label order),
            # not the `categories` list, so iterating `categories` printed
            # the top words under the wrong class names.
            for i, category in enumerate(clf.classes_):
                top10 = np.argsort(clf.coef_[i])[-10:]
                print("%s: %s"
                      % (category, " ".join(feature_names[top10])))
        print()

    # Per-class precision/recall/F1 report.
    if opts_print_report:
        print("classification report:")
        # BUG FIX: target_names must be paired with labels in the same
        # order; without labels=, report rows use sorted label order and
        # were mislabeled by `categories`.
        print(metrics.classification_report(y_test, pred,
                                            labels=categories,
                                            target_names=categories))

    # Confusion matrix (rows = true labels in sorted order).
    cm = metrics.confusion_matrix(y_test, pred)
    if opts_print_cm:
        print("confusion matrix:")
        print(cm)
    if opts_fig_cm:
        pl.matshow(cm, cmap='Greys')
        pl.title(name)
        pl.colorbar()
        pl.ylabel('True label')
        pl.xlabel('Predicted label')
        pl.show()

    print()
    return f1

# Instantiate classifiers
# (clf, display_name) pairs; DummyClassifier serves as the chance baseline.
# NOTE(review): parameter names (n_iter, loss='l2') and
# sklearn.externals.joblib date this to an old scikit-learn release --
# keep the pinned version or these will raise on modern sklearn.
# NOTE(review): "Stocastic" is a typo in the display label (should be
# "Stochastic"); left untouched here since it is a runtime string.
classifiers = [(DummyClassifier(), "Random classifier"),
               (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
               (Perceptron(n_iter=50), "Perceptron"),
               #(KNeighborsClassifier(n_neighbors=10), "kNN"),
               (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
               (LinearSVC(loss='l2', penalty='l2', dual=False, tol=1e-3),
                "Linear Support Vector Classifier (L2 penalty)"),
               (SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"),
                "Stocastic Gradient Descent (Elastic-Net penalty)"),
               (BernoulliNB(alpha=.01), "Naive Bayes")]

# Run benchmarking
def shorten(name):
    """Return `name` truncated to 20 characters plus '...' when longer."""
    return name if len(name) <= 20 else name[:20] + "..."

# Fit and score every classifier; shorten names for the plot's y labels.
results = [benchmark(clf, name) for clf, name in classifiers]
clf_names = [shorten(name) for _, name in classifiers]

# Plot
# Horizontal bar chart: one bar per classifier, bar length = score.
indices = np.arange(len(results))

pl.figure()
pl.title("F1-score")
pl.barh(indices, results, label="score")
pl.yticks(())  # suppress numeric ticks; names are drawn as text below
pl.legend(loc='best')
pl.subplots_adjust(left=0.4)
pl.subplots_adjust(top=.95)
pl.subplots_adjust(bottom=.05)

# Draw each classifier name in the left margin next to its bar.
for i, c in zip(indices, clf_names):
    pl.text(-.3, i, c)

pl.show()

# Persist the fitted classifiers (benchmark() fitted these same objects
# in place) when the flag is set.
if STORE_TRAINED_MODEL:
    for clf, name in classifiers:
        joblib.dump(clf, 'data/classifiers/%s.pkl' % name)