__author__ = 'panagiotis'

import sqlite3

import nltk
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as mae
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import SVC

from models.sentiformer import SWN
from models.sentiformer import RepeatReplacer, RemoveContractions
from models.sentiformer import tokenize_titles, tokenize_negations


# load review data
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

data = [(review_text, review_title, review_rating)
        for (review_id, hotel_id, hotel_star, hotel_rating,
             average_clean_score, average_service_score, average_location_score,
             average_room_score, average_sleep_score, average_value_score,
             review_title, review_text, review_rating,
             review_clean_score, review_service_score, review_location_score,
             review_room_score, review_sleep_score, review_value_score)
        in source.execute("SELECT * FROM reviews")]


t1, _ = train_test_split([r for r in data if r[2] == 1], train_size=1000, test_size=0.01, random_state=0)
t2, _ = train_test_split([r for r in data if r[2] == 2], train_size=1000, test_size=0.01, random_state=0)
t3, _ = train_test_split([r for r in data if r[2] == 3], train_size=1000, test_size=0.01, random_state=0)
t4, _ = train_test_split([r for r in data if r[2] == 4], train_size=1000, test_size=0.01, random_state=0)
t5, _ = train_test_split([r for r in data if r[2] == 5], train_size=1000, test_size=0.01, random_state=0)

train_data, test_data = train_test_split(t1+t2+t3+t4+t5, train_size=4500, test_size=500, random_state=1)
# train_data, test_data = train_test_split(data, train_size=4500, test_size=500, random_state=0)

X_train = [r[0] for r in train_data]
X_test = [r[0] for r in test_data]

# Y_train = [1 if r[2] > 3 else -1 for r in train_data]
# Y_test = [1 if r[2] > 3 else -1 for r in test_data]

Y_train = [r[2] for r in train_data]
Y_test = [r[2] for r in test_data]

# Y_train = [-1 if r[2] < 3 else 1 if r[2] > 3 else 0 for r in train_data]
# Y_test = [-1 if r[2] < 3 else 1 if r[2] > 3 else 0 for r in test_data]


text_clf = Pipeline([('contract', RemoveContractions()),
                     ('repeats', RepeatReplacer()),
                     # ('sentiwordnet', SWN(CountVectorizer())),
                     ('features', FeatureUnion([('sentiwordnet', SWN()),
                                                ('vect', TfidfVectorizer(tokenizer=nltk.word_tokenize,
                                                                         stop_words=nltk.corpus.stopwords.words('english'),
                                                                         binary=False, ngram_range=(1, 3),
                                                                         min_df=2, max_df=0.7,
                                                                         use_idf=True, norm='l2'
                                                                         )),
                                                ])),
                     ('clf', SVC()),
                     ])

parameters = {
    'features__sentiwordnet__vectorizer': (CountVectorizer(binary=True), CountVectorizer(binary=False), TfidfVectorizer()),
    'clf__C': (5e2,),
    'clf__gamma': (1e-3,),
}

gs_clf = GridSearchCV(text_clf, parameters, verbose=3)
gs_clf = gs_clf.fit(X_train, Y_train)

# get best parameters from grid search
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
for param_name in sorted(parameters.keys()):
    print("%s: %r" % (param_name, best_parameters[param_name]))
print "mae", score

Z_test = gs_clf.predict(X_test)

from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as mae

print 10*"-", " scale 1-5 ", 10*"-"
print confusion_matrix(Y_test, Z_test)
print "mae", mae(Y_test, Z_test)
print classification_report(Y_test, Z_test)

Y_bin = [1 if r > 3 else -1 for r in Y_test]
Z_bin = [1 if r > 3 else -1 for r in Z_test]

print 10*"-", " positive-negative ", 10*"-"
print confusion_matrix(Y_bin, Z_bin)
print "mae", mae(Y_bin, Z_bin)
print classification_report(Y_bin, Z_bin)

