import sqlite3
import nltk
import random
import numpy
from sklearn.cross_validation import train_test_split
__author__ = 'panagiotis'
from gensim.models import Word2Vec, Doc2Vec
from gensim.models.doc2vec import LabeledSentence


# load review data
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
try:
    # Only three columns of the `reviews` table are ever used downstream
    # (r[0]=review_id, r[1]=review_title, r[2]=review_rating).  Selecting
    # them explicitly avoids unpacking all 19 columns of `SELECT *` and
    # no longer breaks if the table layout changes.
    data = list(conn.execute(
        "SELECT review_id, review_title, review_rating FROM reviews"))
finally:
    # Release the database handle — the rest of the script only needs `data`.
    conn.close()


# Drop reviews with an empty title — the title is the text we model.
data = [r for r in data if len(r[1]) > 0]

# Draw a balanced sample of 1000 reviews per star rating (1..5).
# The per-rating splits and the concatenation order match the original
# t1+t2+t3+t4+t5 construction, so random_state=0 keeps results reproducible.
balanced = []
for rating in xrange(1, 6):
    subset = [r for r in data if r[2] == rating]
    taken, _ = train_test_split(subset, train_size=1000, test_size=0.01,
                                random_state=0)
    balanced.extend(taken)

train_data, test_data = train_test_split(balanced, train_size=4500,
                                         test_size=500, random_state=0)

# Tokenized titles labeled with their review id (looked up again after
# doc2vec training); binary sentiment target: rating > 3 -> +1, else -1.
X_train = [LabeledSentence(words=nltk.word_tokenize(r[1]), labels=[r[0]]) for r in train_data]
Y_train = [1 if r[2] > 3 else -1 for r in train_data]
X_test = [LabeledSentence(words=nltk.word_tokenize(r[1]), labels=[r[0]]) for r in test_data]
Y_test = [1 if r[2] > 3 else -1 for r in test_data]

from sklearn.linear_model import SGDClassifier

for dimensions in xrange(420, 430, 1):

    d2v = Doc2Vec(size=dimensions, min_count=1, window=10, sample=1e-3, negative=5)
    d2v.build_vocab(X_train+X_test)

    for permutation in xrange(0, 20):
        random.shuffle(X_train)
        d2v.train(X_train)
    nX_train = numpy.vstack([d2v[label] for label in [r[0] for r in train_data]])

    for permutation in xrange(0, 20):
        random.shuffle(X_test)
        d2v.train(X_test)
    nX_test = numpy.vstack([d2v[label] for label in [r[0] for r in test_data]])

    lr = SGDClassifier(loss='log', penalty='l1')
    lr.fit(nX_train, Y_train)

    print "Number of Features", dimensions, 'Test Accuracy: %.2f'%lr.score(nX_test, Y_test)

print "done"


