import nltk
import sqlite3
import numpy
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from datetime import datetime
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as mae
__author__ = 'panagiotis'
from models import DocVectorizer, SentimentTransformer
from models import SpellingTransformer, RemoveContractions, NegationTransformer


# ---------------------------------------------------------------------------
# Load review data from the local SQLite database.
# ---------------------------------------------------------------------------
database_file = "Hotels_g189413_Crete.db"
# database_path = "/home/panagiotis/Projects/Thesis/datasets/"
database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

# Each row of `reviews` is an 18-column record; only columns 10-12
# (review_title, review_text, review_rating) are used downstream, so keep
# just that slice of every row.  NOTE(review): this relies on the column
# order of the table, exactly as the original 18-name unpack did.
data = [row[10:13] for row in source.execute("SELECT * FROM reviews")]

# The connection is not needed past this point — close it instead of
# leaking it for the lifetime of the script.
conn.close()

# ---------------------------------------------------------------------------
# Build a balanced train/test split: for each star rating (1-5) take a
# fixed-size stratum of 20 training and 5 test reviews (reproducible via
# random_state=0), then shuffle each side and unzip into parallel
# (titles, review texts, ratings) sequences.
# ---------------------------------------------------------------------------
train_strata = []
test_strata = []
for rating in (1, 2, 3, 4, 5):
    # r[2] is the review_rating field of each (title, text, rating) tuple
    tr, ts = train_test_split([r for r in data if r[2] == rating],
                              train_size=20, test_size=5, random_state=0)
    train_strata += tr
    test_strata += ts

train_titles, train_review, train_target = zip(*shuffle(train_strata, random_state=0))
test_titles, test_review, test_target = zip(*shuffle(test_strata, random_state=0))

# pre processing of all data
t0 = datetime.now()
preprocess = Pipeline([('contract', RemoveContractions(spell_check=False, output="tokens")),
                       ('spelling', SpellingTransformer(tokenize=False, output="tokens")),
                       ('negations', NegationTransformer(tokenize=False, output="tokens"))])
train_titles = preprocess.fit_transform(train_titles)
train_review = preprocess.fit_transform(train_review)
test_titles = preprocess.fit_transform(test_titles)
test_review = preprocess.fit_transform(test_review)
train_target = [float(r) for r in train_target]
test_target = [float(r) for r in test_target]
print "preprocessing took", datetime.now() - t0  # 2:45:25


t0 = datetime.now()
clf = GridSearchCV(Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                             stop_words=nltk.corpus.stopwords.words('english'))),
                             ("clf", SVC(C=1000, gamma=0.01))]),
                   param_grid={'transformer__binary': (True,),  # False
                               'transformer__ngram_range': ((1, 2),),  # (1, 1),
                               'transformer__min_df': (1,),  # 2, 5,
                               'transformer__max_df': (0.2,),  # 0.5, 0.9,
                               'transformer__use_idf': (False,),  # True,
                               'transformer__norm': ('l1',)  # 'l2',
                               })
clf.fit(train_titles, train_target)
title_tfidf_vectorizer = clf.best_estimator_.named_steps["transformer"]
print "titles:", clf.best_params_
title_doc_vectorizer = DocVectorizer(n_features=41, window=5, sample=1e-5, tokenize=False)
title_doc_vectorizer.fit(train_titles, train_target)
print "doc vectorizer"
title_sent_vectorizer = SentimentTransformer(vectorizer=CountVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                                        stop_words=nltk.corpus.stopwords.words('english'),
                                                                        ngram_range=(1, 1), min_df=2))

title_sent_vectorizer.fit(train_titles, train_target)
print "sent vectorizer"
clf = GridSearchCV(Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                             stop_words=nltk.corpus.stopwords.words('english'))),
                             ("clf", SVC(C=1000, gamma=0.01))]),
                   param_grid={'transformer__binary': (True, False),
                               'transformer__ngram_range': ((1, 1), (1, 2), (1, 5)),
                               'transformer__min_df': (1, 2, 5),
                               'transformer__max_df': (0.2, 0.5, 0.9),
                               'transformer__use_idf': (True, False),
                               'transformer__norm': ('l1', 'l2')
                               })
clf.fit(train_review, train_target)
review_tfidf_vectorizer = clf.best_estimator_.named_steps["transformer"]
print "review:", clf.best_params_
review_doc_vectorizer = DocVectorizer(n_features=260, window=10, sample=1e-3, tokenize=False)
review_doc_vectorizer.fit(train_review, train_target)
print "doc vectorizer"
review_sent_vectorizer = SentimentTransformer(vectorizer=TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                                         stop_words=nltk.corpus.stopwords.words('english'),
                                                                         ngram_range=(1, 1), min_df=2))

review_sent_vectorizer.fit(train_review, train_target)
print "sent vectorizer"
print "fitting of transformers", datetime.now() - t0


t0 = datetime.now()
all_train_features = (title_tfidf_vectorizer.transform(train_titles).toarray(),
                      title_doc_vectorizer.transform(train_titles),
                      title_sent_vectorizer.transform(train_titles),
                      review_tfidf_vectorizer.transform(train_review).toarray(),
                      review_doc_vectorizer.transform(train_review),
                      review_sent_vectorizer.transform(train_review),)

print "training transformation", datetime.now() - t0

t0 = datetime.now()
all_test_features = (title_tfidf_vectorizer.transform(test_titles).toarray(),
                     title_doc_vectorizer.transform(test_titles),
                     title_sent_vectorizer.transform(test_titles),
                     review_tfidf_vectorizer.transform(test_review).toarray(),
                     review_doc_vectorizer.transform(test_review),
                     review_sent_vectorizer.transform(test_review),)

print "testing transformation", datetime.now() - t0


results = dict()

# gs_clf = SVC(C=1000, gamma=0.01)
# gs_clf.fit(numpy.hstack(all_train_features[:3]), train_target)
# results["all_title_features"] = gs_clf.predict(numpy.hstack(all_test_features[:3]))
#
# gs_clf = SVC(C=1000, gamma=0.01)
# gs_clf.fit(numpy.hstack(all_train_features[3:]), train_target)
# results["all_review_features"] = gs_clf.predict(numpy.hstack(all_test_features[3:]))

gs_clf = SVC(C=1000, gamma=0.01)
gs_clf.fit(numpy.hstack(all_train_features), train_target)
results["all_of_the_features"] = gs_clf.predict(numpy.hstack(all_test_features))

# gs_clf = SVC(C=1000, gamma=0.01)
# gs_clf.fit(numpy.hstack(all_train_features[1:3]), train_target)
# results["title_doc_and_sent_features"] = gs_clf.predict(numpy.hstack(all_test_features[1:3]))
#
#
# gs_clf = SVC(C=1000, gamma=0.01)
# gs_clf.fit(numpy.hstack(all_train_features[4:6]), train_target)
# results["review_doc_and_sent_features"] = gs_clf.predict(numpy.hstack(all_test_features[4:6]))
#
# gs_clf = SVC(C=1000, gamma=0.01)
# gs_clf.fit(numpy.hstack((all_train_features[0], all_train_features[4])), train_target)
# results["all_tfidf_features"] = gs_clf.predict(numpy.hstack((all_test_features[0], all_test_features[4])))

# for name in results:
#     print 10*"-", " positive-negative ", 10*"-"
#     print "-", name, "-"
#     print classification_report(test_target, results[name])

for name in results:
    print 10*"-", " positive-negative ", 10*"-"
    print "-", name, "-"
    print classification_report([1 if r > 3 else -1 for r in test_target], [1 if r > 3 else -1 for r in results[name]])

