import nltk
import sqlite3
import numpy
from scipy import sparse
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import NearestCentroid
from sklearn.feature_selection import SelectKBest, chi2
from datetime import datetime
__author__ = 'panagiotis'
from models import ColumnSelector
from models import DocVectorizer, SentimentTransformer, MultiDocVectorizer
from models import SpellingTransformer, RemoveContractions, NegationTransformer


from sklearn.metrics import classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.metrics import mean_absolute_error as mae


# Load review data from the thesis SQLite database, keeping only the three
# fields this script uses: (review_title, review_text, review_rating).
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

# The `reviews` table has 19 columns; unpack them all by name so the
# selection of title/text/rating stays self-documenting.
data = [(review_title, review_text, review_rating)
        for (review_id, hotel_id, hotel_star, hotel_rating,
             average_clean_score, average_service_score, average_location_score,
             average_room_score, average_sleep_score, average_value_score,
             review_title, review_text, review_rating,
             review_clean_score, review_service_score, review_location_score,
             review_room_score, review_sleep_score, review_value_score)
        in source.execute("SELECT * FROM reviews")]

# Fix: release the DB handle once the rows are materialised -- the original
# kept the connection open for the entire (multi-hour) run.
source.close()
conn.close()

# Build a balanced sample: 2000 training / 500 test reviews for each of the
# five rating values.  r[2] is the review rating.
per_rating_splits = [train_test_split([r for r in data if r[2] == rating],
                                      train_size=2000, test_size=500,
                                      random_state=0)
                     for rating in (1, 2, 3, 4, 5)]

# Concatenate the per-rating partitions (in rating order 1..5, matching the
# seeded shuffle below), then permute once so classes are interleaved.
train_pool = []
test_pool = []
for rating_train, rating_test in per_rating_splits:
    train_pool += rating_train
    test_pool += rating_test

train_data = shuffle(train_pool, random_state=0)
test_data = shuffle(test_pool, random_state=0)

# Split the (title, text, rating) triples into parallel sequences.
train_titles, train_review, train_target = zip(*train_data)
test_titles, test_review, test_target = zip(*test_data)

del per_rating_splits, train_pool, test_pool

# Normalise the DB rating values to floats for now (cast to int later,
# after preprocessing).
train_target = [float(r) for r in train_target]
test_target = [float(r) for r in test_target]


# pre processing of all data
t0 = datetime.now()
# Text clean-up pipeline: expand contractions, spell-correct, then mark
# negation scopes.  Each stage emits token lists (output="tokens"), which is
# why the vectorizers further down pass tokenizer=lambda x: x.
preprocess = Pipeline([('contract', RemoveContractions(spell_check=False, output="tokens")),
                       ('spelling', SpellingTransformer(tokenize=False, output="tokens")),
                       ('negations', NegationTransformer(tokenize=False, output="tokens"))])
# NOTE(review): fit_transform is also applied to the TEST sets below.  That
# is only sound if these project transformers are stateless -- confirm in
# models.py; otherwise the test data should go through preprocess.transform().
train_titles = preprocess.fit_transform(train_titles)
train_review = preprocess.fit_transform(train_review)
test_titles = preprocess.fit_transform(test_titles)
test_review = preprocess.fit_transform(test_review)
# Ratings were cast to float earlier; the classifiers are given ints.
train_target = [int(r) for r in train_target]
test_target = [int(r) for r in test_target]
print "preprocessing took", datetime.now() - t0  # 2:45:25


t0 = datetime.now()
# ---- Review-TITLE transformers ----
# The grid search below was run once to choose the TfidfVectorizer settings
# that are now hard-coded a few lines down; kept for reference.
# clf = GridSearchCV(Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
#                                                              stop_words=nltk.corpus.stopwords.words('english'))),
#                              ("clf", SVC(C=1000, gamma=0.01))]),
#                    param_grid={'transformer__binary': (True, False),
#                                'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
#                                'transformer__min_df': (1, 2),
#                                'transformer__max_df': (0.2, 0.5, 0.8, 1.0),
#                                'transformer__use_idf': (True, False),
#                                'transformer__norm': ('l1', 'l2', None)
#                                }, scoring='f1', verbose=5)
# clf.fit(train_titles, train_target)
# title_tfidf_vectorizer = clf.best_estimator_.named_steps["transformer"]
# Identity tokenizer / lowercase=False because the preprocessing pipeline
# already produced clean token lists.  Settings are the best_params_ from
# the commented-out grid search above.
title_tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                         stop_words=nltk.corpus.stopwords.words('english'),
                                         ngram_range=(1, 1), norm="l1", use_idf=False, binary=True,
                                         min_df=1, max_df=0.5)
title_tfidf_vectorizer.fit(train_titles, train_target)
print "titles:"  # , clf.best_params_
# title_doc_vectorizer = DocVectorizer(n_features=65, tokenize=False, lemmatize=False)
# Project transformer; presumably builds document embeddings of the listed
# dimensionalities (34/37/41/44) -- confirm semantics in models.py.
title_doc_vectorizer = MultiDocVectorizer(n_features=(34, 37, 41, 44), tokenize=False, lemmatize=False)
title_doc_vectorizer.fit(train_titles, train_target)
print "doc vectorizer"
# Project transformer producing sentiment-oriented features from the titles.
title_sent_vectorizer = SentimentTransformer(vectorizer="tfidf", tokenize=False, lemmatize=True)
title_sent_vectorizer.fit(train_titles, train_target)
print "sent vectorizer"
# ---- Review-BODY transformers ----
# Same one-off grid search as for the titles; kept for reference.
# clf = GridSearchCV(Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
#                                                              stop_words=nltk.corpus.stopwords.words('english'))),
#                              ("clf", SVC(C=1000, gamma=0.01))]),
#                    param_grid={'transformer__binary': (True, False),
#                                'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
#                                'transformer__min_df': (0, 1, 2),
#                                'transformer__max_df': (0.2, 0.5, 0.8, 1.0),
#                                'transformer__use_idf': (True, False),
#                                'transformer__norm': ('l1', 'l2', None)
#                                }, scoring='f1', verbose=5)
# clf.fit(train_review, train_target)
# review_tfidf_vectorizer = clf.best_estimator_.named_steps["transformer"]
# Identity tokenizer: review bodies are already token lists from the
# preprocessing pipeline.  Settings are the grid search's best_params_.
review_tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                          stop_words=nltk.corpus.stopwords.words('english'),
                                          ngram_range=(1, 2), norm="l2", use_idf=True, binary=True,
                                          min_df=1, max_df=0.5)
review_tfidf_vectorizer.fit(train_review, train_target)
# print "review:", clf.best_params_
# review_doc_vectorizer = DocVectorizer(n_features=100, tokenize=False, lemmatize=True)
# Project transformer; presumably document embeddings of sizes 100 and 260
# -- confirm semantics in models.py.
review_doc_vectorizer = MultiDocVectorizer(n_features=(100, 260), tokenize=False, lemmatize=True)
review_doc_vectorizer.fit(train_review, train_target)
print "doc vectorizer"
# Sentiment-oriented features from the review bodies (project transformer).
review_sent_vectorizer = SentimentTransformer(vectorizer="tfidf", tokenize=False, lemmatize=True)
review_sent_vectorizer.fit(train_review, train_target)
print "sent vectorizer"
print "fitting of transformers", datetime.now() - t0


t0 = datetime.now()
all_train_features = sparse.hstack((title_tfidf_vectorizer.transform(train_titles),
                                    title_doc_vectorizer.transform(train_titles),
                                    title_sent_vectorizer.transform(train_titles),
                                    review_tfidf_vectorizer.transform(train_review),
                                    review_doc_vectorizer.transform(train_review),
                                    review_sent_vectorizer.transform(train_review),))

print "training transformation", datetime.now() - t0

t0 = datetime.now()
all_test_features = sparse.hstack((title_tfidf_vectorizer.transform(test_titles),
                                   title_doc_vectorizer.transform(test_titles),
                                   title_sent_vectorizer.transform(test_titles),
                                   review_tfidf_vectorizer.transform(test_review),
                                   review_doc_vectorizer.transform(test_review),
                                   review_sent_vectorizer.transform(test_review),))

print "testing transformation", datetime.now() - t0

gs_clf = GridSearchCV(SVC(), param_grid={'C': (1e2, 5e2, 1e3, 5e3, 1e4), 'gamma': (5e-2, 1e-2, 5e-3, 1e-3)},
                      scoring='f1', verbose=5)
gs_clf.fit(all_train_features, train_target)
z_test_5class = gs_clf.predict(all_test_features)

print classification_report(test_target, z_test_5class)
print confusion_matrix(test_target, z_test_5class)

# from matplotlib import pyplot
# pyplot.figure()
#
# a, b = zip(*[(t.parameters['C'], t.mean_validation_score) for t in gs_clf.grid_scores_ ])
# pyplot.plot(a, b)
# pyplot.show()

gs_clf = GridSearchCV(SVC(), param_grid={'C': (1e2, 5e2, 1e3, 5e3, 1e4), 'gamma': (5e-2, 1e-2, 5e-3, 1e-3)},
                      scoring='f1', verbose=5)
gs_clf.fit(all_train_features, [1 if r > 3 else -1 for r in train_target])
z_test_2class = gs_clf.predict(all_test_features)

print classification_report([1 if r > 3 else -1 for r in test_target], z_test_2class)
print confusion_matrix([1 if r > 3 else -1 for r in test_target], z_test_2class)


for c_name, clf in [("SVM(RBF)\t", SVC(C=1000, gamma=0.01)),
                    ("SVM(Linear)\t", LinearSVC(dual=False, tol=1e-3)),
                    ("Naive Bayes", MultinomialNB()),
                    ("Max Entropy", LogisticRegression()),
                    ]:
    clf.fit(all_train_features, train_target)
    clf_res_5class = clf.predict(all_test_features)
    print "Classifier:", c_name, "on Ratings"
    print classification_report(test_target, clf_res_5class)
    print confusion_matrix(test_target, clf_res_5class)
    clf.fit(all_train_features, [1 if r > 3 else -1 for r in train_target])
    clf_res_2class = clf.predict(all_test_features)
    print "Classifier:", c_name, "on Binary"
    print classification_report(test_target, clf_res_2class)
    print confusion_matrix(test_target, clf_res_2class)
