import nltk
import sqlite3
import numpy
from scipy import sparse
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.svm import SVC
from sklearn.neighbors import NearestCentroid
from sklearn.feature_selection import SelectKBest, chi2
from datetime import datetime
__author__ = 'panagiotis'
from models import ColumnSelector
from models import DocVectorizer, SentimentTransformer
from models import SpellingTransformer, RemoveContractions, NegationTransformer


from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as mae

#
# titles = Pipeline([('extract', ColumnSelector(0)),
#                    ('preprocess', preprocess),
#                    ('features', FeatureUnion([
#                        ('tfidf', TfidfVectorizer(tokenizer=nltk.word_tokenize,
#                                                  stop_words=nltk.corpus.stopwords.words('english'),
#                                                  binary=True, ngram_range=(1, 1),
#                                                  min_df=2, max_df=0.2, use_idf=True, norm='l2')),
#                        ('aspects', DocVectorizer(n_features=100, window=5, sample=1e-5)),
#                        ('sentiment', SentimentTransformer(vectorizer=CountVectorizer(tokenizer=nltk.word_tokenize,
#                                                                                      stop_words=nltk.corpus.stopwords.words('english'),
#                                                                                      ngram_range=(1, 1), min_df=2)))
#                    ]))])
#
# reviews = Pipeline([('extract', ColumnSelector(1)),
#                    ('preprocess', preprocess),
#                    ('features', FeatureUnion([
#                        ('tfidf', TfidfVectorizer(tokenizer=nltk.word_tokenize,
#                                                  stop_words=nltk.corpus.stopwords.words('english'),
#                                                  binary=True, ngram_range=(1, 2),
#                                                  min_df=2, max_df=0.7, use_idf=True, norm='l2')),
#                        ('aspects', DocVectorizer(n_features=260, window=10, sample=1e-3)),
#                        ('sentiment', SentimentTransformer(vectorizer=TfidfVectorizer(tokenizer=nltk.word_tokenize,
#                                                                                      stop_words=nltk.corpus.stopwords.words('english'),
#                                                                                      ngram_range=(1, 1), min_df=2)))
#                    ]))])
#
# gs_clf = Pipeline([('features', FeatureUnion([('title', titles),
#                                               ('body', reviews)
#                                               ])),
#                    ('clf', SVC(C=1000, gamma=0.01))
#                    # ('clf', NearestCentroid())
#                    ])
#

# load review data
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

# Fetch only the three columns this script uses instead of SELECT * —
# the original pulled all 19 columns of each row just to discard 16.
# Selecting by name also makes the code immune to schema column order.
data = list(source.execute(
    "SELECT review_title, review_text, review_rating FROM reviews"))

# Rows are fully materialized above; release the DB handles instead of
# leaking them for the lifetime of the script.
source.close()
conn.close()

# Per-class sample sizes for the balanced split.
TRAIN_PER_CLASS = 2000
TEST_PER_CLASS = 200

# Build a class-balanced train/test split: sample a fixed number of
# reviews from each rating class (1-5) so no rating dominates training.
# r[2] is the review_rating column.
train_parts = []
test_parts = []
for rating in (1, 2, 3, 4, 5):
    class_rows = [r for r in data if r[2] == rating]
    tr, ts = train_test_split(class_rows, train_size=TRAIN_PER_CLASS,
                              test_size=TEST_PER_CLASS, random_state=0)
    train_parts.append(tr)
    test_parts.append(ts)

train_data = shuffle([row for part in train_parts for row in part], random_state=0)
test_data = shuffle([row for part in test_parts for row in part], random_state=0)

train_titles, train_review, train_target = zip(*train_data)
test_titles, test_review, test_target = zip(*test_data)

# Ratings come back from sqlite as ints/strings; cast to float for the
# regression-style metrics (mae) and classifiers below.
train_target = [float(r) for r in train_target]
test_target = [float(r) for r in test_target]


t0 = datetime.now()

# Text-normalization pipeline applied before any vectorization:
# expand contractions -> spelling correction -> negation marking.
# Each step is configured to emit token lists (output="tokens"), so the
# downstream vectorizers receive pre-tokenized input (hence their
# tokenizer=lambda x: x below).
preprocess = Pipeline([('contract', RemoveContractions(spell_check=False, output="tokens")),
                       ('spelling', SpellingTransformer(tokenize=False, output="tokens")),
                       ('negations', NegationTransformer(tokenize=False, output="tokens"))])

# NOTE(review): fit_transform is also called on the *test* sets. If any of
# these transformers learns state from its input this leaks test data;
# presumably they are stateless text filters, but confirm — otherwise the
# test sets should use .transform() with the pipeline fitted on train only.
train_titles = preprocess.fit_transform(train_titles)
train_review = preprocess.fit_transform(train_review)
test_titles = preprocess.fit_transform(test_titles)
test_review = preprocess.fit_transform(test_review)

print "preprocessing took", datetime.now() - t0
t0 = datetime.now()

# Fit the six feature extractors on the training split only.
# tokenizer=lambda x: x with lowercase=False because the preprocessing
# pipeline above already emits token lists.
# Titles: unigrams with an aggressive max_df (0.2) — titles are short and
# repetitive, so very common terms carry little signal.
title_tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                         stop_words=nltk.corpus.stopwords.words('english'),
                                         binary=True, ngram_range=(1, 1),
                                         min_df=2, max_df=0.2, use_idf=True, norm='l2')
title_tfidf_vectorizer.fit(train_titles)
print "1"
# Paragraph-vector style embedding of titles (project transformer).
title_doc_vectorizer = DocVectorizer(n_features=100, window=5, sample=1e-5, tokenize=False)
title_doc_vectorizer.fit(train_titles)
print "2"
# Sentiment features over titles using a count-based internal vectorizer.
title_sent_vectorizer = SentimentTransformer(vectorizer="count", tokenize=False)
title_sent_vectorizer.fit(train_titles)
print "3"
# Reviews: uni+bigrams with a looser max_df (0.7) than titles.
review_tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                          stop_words=nltk.corpus.stopwords.words('english'),
                                          binary=True, ngram_range=(1, 2),
                                          min_df=2, max_df=0.7, use_idf=True, norm='l2')
review_tfidf_vectorizer.fit(train_review)
print "4"
review_doc_vectorizer = DocVectorizer(n_features=260, window=10, sample=1e-3, tokenize=False)
review_doc_vectorizer.fit(train_review)
print "5"
# NOTE(review): this one uses min_df=1 and a tfidf internal vectorizer,
# unlike the title sentiment transformer (count-based) — confirm the
# asymmetry is intentional and not a leftover from experimentation.
review_sent_vectorizer = SentimentTransformer(vectorizer=TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                                         stop_words=nltk.corpus.stopwords.words('english'),
                                                                         ngram_range=(1, 1), min_df=1))
review_sent_vectorizer.fit(train_review)
print "6"

all_train_features = (title_tfidf_vectorizer.transform(train_titles).toarray(),
                      title_doc_vectorizer.transform(train_titles),
                      title_sent_vectorizer.transform(train_titles),
                      review_tfidf_vectorizer.transform(train_review).toarray(),
                      review_doc_vectorizer.transform(train_review),
                      review_sent_vectorizer.transform(train_review),)

print "training transformation", datetime.now() - t0
t0 = datetime.now()

all_test_features = (title_tfidf_vectorizer.transform(test_titles).toarray(),
                     title_doc_vectorizer.transform(test_titles),
                     title_sent_vectorizer.transform(test_titles),
                     review_tfidf_vectorizer.transform(test_review).toarray(),
                     review_doc_vectorizer.transform(test_review),
                     review_sent_vectorizer.transform(test_review),)

print "testing transformation", datetime.now() - t0
t0 = datetime.now()


results = dict()

# Each experiment: name -> indices into the six-element feature tuples
# (0: title tfidf, 1: title doc embedding, 2: title sentiment,
#  3: review tfidf, 4: review doc embedding, 5: review sentiment).
# List of tuples keeps the original execution order deterministic.
experiments = [
    ("all_title_features", (0, 1, 2)),
    ("all_review_features", (3, 4, 5)),
    ("all_off_the_features", (0, 1, 2, 3, 4, 5)),
    ("title_doc_and_sent_features", (1, 2)),
    ("review_doc_and_sent_features", (4, 5)),
    # BUG FIX: the original stacked index 4 (review doc embedding) here,
    # but the review *tfidf* features are at index 3.
    ("all_tfidf_features", (0, 3)),
]

# Train a fresh SVC per feature combination and store test predictions.
for name, idx in experiments:
    gs_clf = SVC(C=1000, gamma=0.01)
    gs_clf.fit(numpy.hstack([all_train_features[i] for i in idx]), train_target)
    results[name] = gs_clf.predict(numpy.hstack([all_test_features[i] for i in idx]))






# gs_clf.fit(sparse.hstack(all_train_features), train_target)
#
# print "classifier training", datetime.now() - t0
# t0 = datetime.now()
#
# z_test = gs_clf.predict(sparse.hstack(all_test_features))
#
# print "classifier prediction", datetime.now() - t0
# t0 = datetime.now()










# print 10*"-", " scale 1-5 ", 10*"-"
# print confusion_matrix(test_target, z_test)
# print "mae", mae(test_target, z_test)
#
#
# Y_bin = [1 if r > 3 else -1 for r in test_target]
# Z_bin = [1 if r > 3 else -1 for r in z_test]
#
#
# print confusion_matrix(Y_bin, Z_bin)
# print "mae", mae(Y_bin, Z_bin)
# print classification_report(Y_bin, Z_bin)

for name in results:
    print 10*"-", " positive-negative ", 10*"-"
    print "-", name, "-"
    print classification_report(test_target, results[name])







# tttt_clf.fit(X_train[:50], y_train[:50])
# print confusion_matrix(y_test, tttt_clf.predict(X_test))
#
# SWN = gs_clf.named_steps['features'].transformer_list[0][1].named_steps['features'].transformer_list[2][1]
# SRN = gs_clf.named_steps['features'].transformer_list[1][1].named_steps['features'].transformer_list[2][1]
# tttt_clf.fit_transform(X_train[:50], y_train[:50])
#
# tttt_clf.transform(X_test[:10])
#
# all_data_X = [(r[0], r[1]) for r in data]
# all_data_y = [r[2] for r in data]
# print classification_report(all_data_y, gs_clf.predict(all_data_X))
# all_data_y_bin = [1 if r[2] > 3 else -1 for r in data]
# print classification_report(all_data_y, [1 if r > 3 else -1 for r in gs_clf.predict(all_data_X)])

