import nltk
import sqlite3
import numpy
from collections import defaultdict
from scipy import sparse
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
import csv
from datetime import datetime
__author__ = 'panagiotis'
from models import SpellingTransformer, RemoveContractions, NegationTransformer
from sklearn.metrics import classification_report, precision_score, recall_score, f1_score


# load review data
# Load (title, text, rating) triples for every review from the SQLite dump.
# Selecting the three columns explicitly (instead of "SELECT *" plus a
# 19-name tuple unpack) keeps this robust against column-order or schema
# changes in the reviews table.
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

data = source.execute(
    "SELECT review_title, review_text, review_rating FROM reviews").fetchall()
conn.close()  # the connection is never used again below; don't leak it

# xtr1, xts1 = train_test_split([(r[0], r[1], 1) for r in data if r[2] > 3], train_size=5000, test_size=1000, random_state=0)
# xtr2, xts2 = train_test_split([(r[0], r[1], 0) for r in data if r[2] < 4], train_size=5000, test_size=1000, random_state=0)
#
# train_titles, train_review, train_target = zip(*shuffle(xtr1 + xtr2, random_state=0))
# test_titles, test_review, test_target = zip(*shuffle(xts1 + xts2, random_state=0))

# Build a class-balanced split: 2000 train / 200 test reviews per rating
# class (1-5). One loop replaces the five copy-pasted train_test_split
# calls; per-class chunks are concatenated in rating order (1..5) and then
# shuffled, exactly as before.
per_class_train = []
per_class_test = []
for rating in (1, 2, 3, 4, 5):
    tr, ts = train_test_split([r for r in data if r[2] == rating],
                              train_size=2000, test_size=200, random_state=0)
    per_class_train.append(tr)
    per_class_test.append(ts)

train_data = shuffle([r for chunk in per_class_train for r in chunk], random_state=0)
test_data = shuffle([r for chunk in per_class_test for r in chunk], random_state=0)

train_titles, train_review, train_target = zip(*train_data)
test_titles, test_review, test_target = zip(*test_data)

del per_class_train, per_class_test, tr, ts

# NOTE(review): these floats are converted straight back to int right after
# preprocessing below; the round-trip is redundant but kept so intermediate
# state matches the original script.
train_target = [float(r) for r in train_target]
test_target = [float(r) for r in test_target]


# pre processing of all data
# Text-normalisation pipeline: expand contractions, correct spelling, then
# mark negation scopes.  The output="tokens" arguments show each stage
# emits token lists for the next one.
t0 = datetime.now()
preprocess = Pipeline([('contract', RemoveContractions(spell_check=False, output="tokens")),
                       ('spelling', SpellingTransformer(tokenize=False, output="tokens")),
                       ('negations', NegationTransformer(tokenize=False, output="tokens"))])
train_titles = preprocess.fit_transform(train_titles)
train_review = preprocess.fit_transform(train_review)
# NOTE(review): fit_transform (not transform) is also applied to the TEST
# sets -- harmless if these transformers are stateless, otherwise this is
# train/test leakage; confirm against the models module.
test_titles = preprocess.fit_transform(test_titles)
test_review = preprocess.fit_transform(test_review)
# Ratings back to ints (they were floated in the split block above).
train_target = [int(r) for r in train_target]
test_target = [int(r) for r in test_target]
print "preprocessing took", datetime.now() - t0  # 2:45:25


# results[featureset][clf_name] accumulates, in order:
#   [grid scores, test-set (P, R, F1) triples, best fitted transformer]
# binary[featureset][clf_name] holds the same test metrics after collapsing
# the 1-5 ratings to positive (>3) / negative.
results = defaultdict(lambda: defaultdict(list))
binary = defaultdict(lambda: defaultdict(list))

# Grid-search a TF-IDF + classifier pipeline on the review TITLES only.
for clf_name, clf in [("SVM(RBF)", SVC()),
                      #("SVM(Linear)", LinearSVC()),
                      #("MultinomialNB", MultinomialNB()),
                      #("MaximumEntropy", LogisticRegression())
                      ]:
    pipeline = Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                         stop_words=nltk.corpus.stopwords.words('english'))),
                         ("clf", clf)])
    grid = {'transformer__binary': (True, False),
            'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
            'transformer__min_df': (1, 2),
            'transformer__max_df': (0.2, 0.5, 0.8),
            'transformer__use_idf': (True, False),
            'transformer__norm': ('l1', 'l2', None)}
    gs_clf = GridSearchCV(pipeline, param_grid=grid, scoring='f1', verbose=5)
    gs_clf.fit(train_titles, train_target)
    predicted = gs_clf.predict(test_titles)
    results["titles_only"][clf_name].append(gs_clf.grid_scores_)
    results["titles_only"][clf_name].append([(precision_score(test_target, predicted),
                                              recall_score(test_target, predicted),
                                              f1_score(test_target, predicted))])
    results["titles_only"][clf_name].append(gs_clf.best_estimator_.named_steps["transformer"])
    # Same predictions scored as a binary positive/negative task.
    true_bin = [1 if r > 3 else -1 for r in test_target]
    pred_bin = [1 if r > 3 else -1 for r in predicted]
    binary["titles_only"][clf_name].append([(precision_score(true_bin, pred_bin),
                                             recall_score(true_bin, pred_bin),
                                             f1_score(true_bin, pred_bin))])


# Grid-search the same TF-IDF + classifier pipeline on the review BODIES.
# Bookkeeping mirrors the titles loop: grid scores, test-set (P, R, F1),
# best fitted transformer, plus binarised (>3 positive) scores in `binary`.
for clf_name, clf in [("SVM(RBF)", SVC()),
                      #("SVM(Linear)", LinearSVC()),
                      #("MultinomialNB", MultinomialNB()),
                      #("MaximumEntropy", LogisticRegression())
                      ]:
    pipeline = Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                         stop_words=nltk.corpus.stopwords.words('english'))),
                         ("clf", clf)])
    grid = {'transformer__binary': (True, False),
            'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
            'transformer__min_df': (1, 2),
            'transformer__max_df': (0.2, 0.5, 0.8),
            'transformer__use_idf': (True, False),
            'transformer__norm': ('l1', 'l2', None)}
    gs_clf = GridSearchCV(pipeline, param_grid=grid, scoring='f1', verbose=5)
    gs_clf.fit(train_review, train_target)
    predicted = gs_clf.predict(test_review)
    results["reviews_only"][clf_name].append(gs_clf.grid_scores_)
    results["reviews_only"][clf_name].append([(precision_score(test_target, predicted),
                                               recall_score(test_target, predicted),
                                               f1_score(test_target, predicted))])
    results["reviews_only"][clf_name].append(gs_clf.best_estimator_.named_steps["transformer"])
    # Same predictions scored as a binary positive/negative task.
    true_bin = [1 if r > 3 else -1 for r in test_target]
    pred_bin = [1 if r > 3 else -1 for r in predicted]
    binary["reviews_only"][clf_name].append([(precision_score(true_bin, pred_bin),
                                              recall_score(true_bin, pred_bin),
                                              f1_score(true_bin, pred_bin))])

# Export the grid-search results of the 1-5 rating experiment: one CSV per
# featureset/classifier with one row per parameter setting plus its mean
# f-measure, then drop the bulky grid scores to free memory.
for featureset in results.keys():
    for clf_name in results[featureset].keys():
        rows = []
        for params, mean_score, _ in results[featureset][clf_name][0]:
            # strip the "transformer__" pipeline prefix from parameter names
            row = {key.split("__")[1]: params[key] for key in params}
            row["fmeasure"] = mean_score
            rows.append(row)
        if rows:  # guard: empty grid scores used to crash on temp[0]
            with open(featureset + clf_name + '_ratings.csv', 'w') as csvfile:
                # sorted fieldnames give a deterministic column order
                # (py2 dict key order is arbitrary)
                writer = csv.DictWriter(csvfile, fieldnames=sorted(rows[0].keys()))
                writer.writeheader()
                writer.writerows(rows)
        results[featureset][clf_name][0] = None  # grid scores now live in the CSV


# t0 = datetime.now()
# Fuse title and review TF-IDF features (horizontal sparse stack of the two
# best fitted transformers) and train plain, non-grid-searched classifiers
# on the combined matrix.
# NOTE(review): the grid-search loops above only ran for "SVM(RBF)", so
# results["titles_only"]["SVM(Linear)"] etc. are empty defaultdict lists
# here and the [-1] lookups below raise IndexError -- confirm the
# classifier lists are meant to match across the three loops.
for clf_name, clf in [#("SVM(RBF)", SVC()),
                      ("SVM(Linear)", LinearSVC()),
                      ("MultinomialNB", MultinomialNB()),
                      ("MaximumEntropy", LogisticRegression())]:
    clf.fit(sparse.hstack((results["titles_only"][clf_name][-1].transform(train_titles), results["reviews_only"][clf_name][-1].transform(train_review))), train_target)
    # "nope" is a placeholder where the other featuresets store grid scores,
    # keeping list positions aligned across featuresets.
    results["titles_and_reviews"][clf_name].append("nope")
    results["titles_and_reviews"][clf_name].append([(precision_score(test_target, res),
                                                    recall_score(test_target, res),
                                                    f1_score(test_target, res)) for res in [clf.predict(sparse.hstack((results["titles_only"][clf_name][-1].transform(test_titles), results["reviews_only"][clf_name][-1].transform(test_review))))]])
    # Placeholder transformer so [-1] indexing stays uniform.
    results["titles_and_reviews"][clf_name].append(TfidfVectorizer())
    binary["titles_and_reviews"][clf_name].append([(precision_score([1 if r > 3 else -1 for r in test_target], [1 if r > 3 else -1 for r in res]),
                                                    recall_score([1 if r > 3 else -1 for r in test_target], [1 if r > 3 else -1 for r in res]),
                                                    f1_score([1 if r > 3 else -1 for r in test_target], [1 if r > 3 else -1 for r in res])) for res in [clf.predict(sparse.hstack((results["titles_only"][clf_name][-1].transform(test_titles), results["reviews_only"][clf_name][-1].transform(test_review))))]])


# Report the multiclass (precision, recall, f1) triples per featureset/classifier.
for featureset in results.keys():
    for clf_name in results[featureset].keys():
        print featureset, clf_name
        print results[featureset][clf_name][1]
        # print results[featureset][clf_name][-1].get_params()

# Same report for the binarised (positive/negative) scores.
for featureset in binary.keys():
    for clf_name in binary[featureset].keys():
        print featureset, clf_name
        print binary[featureset][clf_name][0]


# replace some of the rating-3 reviews with reviews of rating 4 and 5,
# then run the experiments again for binary classification


# Augment the data for the binary experiment: draw 1000 extra train / 100
# extra test high-rated (>3) reviews, relabel them as 5, keep only half of
# the rating-3 reviews, then collapse targets to +1 (positive) / -1.
extra_train, extra_test = train_test_split([r for r in data if r[2] > 3], train_size=1000, test_size=100, random_state=0)
et_titles, et_review, et_ratings = zip(*extra_train)
es_titles, es_review, es_ratings = zip(*extra_test)
# Run the extra reviews through the same preprocessing pipeline
# (same call order as the main data: train titles/review, test titles/review).
et_titles = preprocess.fit_transform(et_titles)
et_review = preprocess.fit_transform(et_review)
es_titles = preprocess.fit_transform(es_titles)
es_review = preprocess.fit_transform(es_review)
et_target = [5 for _ in et_ratings]
es_target = [5 for _ in es_ratings]

# Merge extras with the existing sets; keep at most 1000/100 rating-3 rows.
merged = zip(et_titles, et_review, et_target) + zip(train_titles, train_review, train_target)
kept = [r for r in merged if r[2] == 3][:1000] + [r for r in merged if r[2] != 3]
train_titles, train_review, train_target = zip(*kept)

merged = zip(es_titles, es_review, es_target) + zip(test_titles, test_review, test_target)
kept = [r for r in merged if r[2] == 3][:100] + [r for r in merged if r[2] != 3]
test_titles, test_review, test_target = zip(*kept)

del extra_train, extra_test, et_titles, et_review, et_ratings, et_target, \
    es_titles, es_review, es_ratings, es_target, merged, kept
train_target = [1 if r > 3 else -1 for r in train_target]
test_target = [1 if r > 3 else -1 for r in test_target]

# n_results[featureset][clf_name] mirrors `results` for the binary task:
#   [grid scores, test-set (P, R, F1) triples, best fitted transformer]
n_results = defaultdict(lambda: defaultdict(list))

# Grid-search TF-IDF + classifier pipelines on the TITLES for the binary
# (positive/negative) targets.
for clf_name, clf in [#("SVM(RBF)", SVC()),
                      ("SVM(Linear)", LinearSVC()),
                      ("MultinomialNB", MultinomialNB()),
                      ("MaximumEntropy", LogisticRegression())]:
    pipeline = Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                         stop_words=nltk.corpus.stopwords.words('english'))),
                         ("clf", clf)])
    grid = {'transformer__binary': (True, False),
            'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
            'transformer__min_df': (1, 2),
            'transformer__max_df': (0.2, 0.5, 0.8),
            'transformer__use_idf': (True, False),
            'transformer__norm': ('l1', 'l2', None)}
    gs_clf = GridSearchCV(pipeline, param_grid=grid, scoring='f1', verbose=5)
    gs_clf.fit(train_titles, train_target)
    predicted = gs_clf.predict(test_titles)
    n_results["titles_only"][clf_name].append(gs_clf.grid_scores_)
    n_results["titles_only"][clf_name].append([(precision_score(test_target, predicted),
                                                recall_score(test_target, predicted),
                                                f1_score(test_target, predicted))])
    n_results["titles_only"][clf_name].append(gs_clf.best_estimator_.named_steps["transformer"])


# Grid-search TF-IDF + classifier pipelines on the review BODIES for the
# binary targets; same bookkeeping as the binary titles loop.
for clf_name, clf in [#("SVM(RBF)", SVC()),
                      ("SVM(Linear)", LinearSVC()),
                      ("MultinomialNB", MultinomialNB()),
                      ("MaximumEntropy", LogisticRegression())]:
    pipeline = Pipeline([("transformer", TfidfVectorizer(tokenizer=lambda x: x, lowercase=False,
                                                         stop_words=nltk.corpus.stopwords.words('english'))),
                         ("clf", clf)])
    grid = {'transformer__binary': (True, False),
            'transformer__ngram_range': ((1, 1), (1, 2), (1, 3)),
            'transformer__min_df': (1, 2),
            'transformer__max_df': (0.2, 0.5, 0.8),
            'transformer__use_idf': (True, False),
            'transformer__norm': ('l1', 'l2', None)}
    gs_clf = GridSearchCV(pipeline, param_grid=grid, scoring='f1', verbose=5)
    gs_clf.fit(train_review, train_target)
    predicted = gs_clf.predict(test_review)
    n_results["reviews_only"][clf_name].append(gs_clf.grid_scores_)
    n_results["reviews_only"][clf_name].append([(precision_score(test_target, predicted),
                                                 recall_score(test_target, predicted),
                                                 f1_score(test_target, predicted))])
    n_results["reviews_only"][clf_name].append(gs_clf.best_estimator_.named_steps["transformer"])


# Export the grid-search results of the binary experiment: one CSV per
# featureset/classifier with one row per parameter setting plus its mean
# f-measure, then drop the bulky grid scores to free memory.
for featureset in n_results.keys():
    for clf_name in n_results[featureset].keys():
        rows = []
        for params, mean_score, _ in n_results[featureset][clf_name][0]:
            # strip the "transformer__" pipeline prefix from parameter names
            row = {key.split("__")[1]: params[key] for key in params}
            row["fmeasure"] = mean_score
            rows.append(row)
        if rows:  # guard: empty grid scores used to crash on temp[0]
            with open(featureset + clf_name + '_binary.csv', 'w') as csvfile:
                # sorted fieldnames give a deterministic column order
                # (py2 dict key order is arbitrary)
                writer = csv.DictWriter(csvfile, fieldnames=sorted(rows[0].keys()))
                writer.writeheader()
                writer.writerows(rows)
        n_results[featureset][clf_name][0] = None  # grid scores now live in the CSV


# Fuse the best title/review TF-IDF features (horizontal sparse stack) and
# train plain, non-grid-searched classifiers for the binary task.
for clf_name, clf in [#("SVM(RBF)", SVC()),
                      ("SVM(Linear)", LinearSVC()),
                      ("MultinomialNB", MultinomialNB()),
                      ("MaximumEntropy", LogisticRegression())]:
    title_vec = n_results["titles_only"][clf_name][-1]
    review_vec = n_results["reviews_only"][clf_name][-1]
    clf.fit(sparse.hstack((title_vec.transform(train_titles),
                           review_vec.transform(train_review))), train_target)
    predicted = clf.predict(sparse.hstack((title_vec.transform(test_titles),
                                           review_vec.transform(test_review))))
    # "nope" is a placeholder where the other featuresets store grid scores,
    # keeping list positions aligned across featuresets.
    n_results["titles_and_reviews"][clf_name].append("nope")
    n_results["titles_and_reviews"][clf_name].append([(precision_score(test_target, predicted),
                                                       recall_score(test_target, predicted),
                                                       f1_score(test_target, predicted))])
    # Placeholder transformer so [-1] indexing stays uniform.
    n_results["titles_and_reviews"][clf_name].append(TfidfVectorizer())


for featureset in results.keys():
    for clf_name in n_results[featureset].keys():
        print featureset, clf_name
        print n_results[featureset][clf_name][1]
        print n_results[featureset][clf_name][-1].get_params()

