
"""
@This file will be used to predict the accuracy of our classifier

@@@@@ Example link:
http://scikit-learn.org/0.11/auto_examples/index.html
"""
import numpy as np
import pylab
import matplotlib
import sklearn
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import MySQLdb

# --- MySQL connection settings -------------------------------------------
# NOTE(review): credentials are hardcoded in source; move to a config file
# or environment variables before sharing/deploying this script.
port=3306
user="root"
password="123"
database="isec2014"
# Tables holding the labelled training rows: questions that were migrated
# off serverfault vs. those that were not.
table_migrated = "train_migrated_serverfault"
table_not_migrated = "train_not_migrated_serverfault"


# Open the connection and a cursor; used only by the disabled experiment
# code further down in this file.
db1= MySQLdb.connect(host="localhost", user=user, passwd=password,db=database, port=port)
select_cursor = db1.cursor()
# Bag-of-words counter and TF-IDF transformer. NOTE(review): `vectorizer`
# is rebound to a TfidfVectorizer later in the script, and `transformer`
# is only referenced from the commented-out code below.
vectorizer = CountVectorizer(min_df=1)
transformer = TfidfTransformer()

train_data =['hi', 'ho ho', 'hi hi', 'ho ho']
train_target = [0,1,0,1]
test_data = ['ho', 'hi ho hi']
test_target = [0, 1]


vectorizer = TfidfVectorizer(min_df=1)
x_train=vectorizer.fit_transform(train_data)
y_train = train_target

x_test = vectorizer.fit_transform(test_data)
y_test = test_target
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()

knn.fit(x_train, y_train)
KNeighborsClassifier(algorithm='auto', leaf_size=1, metric='minkowski',
           n_neighbors=1, p=2, weights='uniform')
predict = knn.predict(x_test)

print "y test", predict

"""

word_counts = vectorizer.fit_transform(train_data)
counts_array= word_counts.toarray()
tfidf = transformer.fit_transform(counts_array)

print "tf-idf=", tfidf.toarray() 
"""
"""
str1 = "select f1_title,migrated from "+table_migrated
select_cursor.execute(str1)

data_0 = select_cursor.fetchall()
print"data_0=", data_0 

#Randomize and take small subset of data_0

str_not = "select f1_title,migrated from "+table_not_migrated
select_cursor.execute(str_not)
data_1   = select_cursor.fetchall()
print "data_1 = ", data_1

#Randomize and take small subset of data_1

all_data  = np.array(data_0+ data_1)
print "data = ", all_data

feature_data = all_data[:,[0]]
print "feature data =", feature_data

class_data =  all_data[:,[1]]

object = len(all_data)
print "object=", object

vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')

#train_feature = vectorizer.transform(feature_data[:2])
vect = ['hi', 'hmm', 'hoas']
train_feature = vectorizer.transform(vect)
train_class =  class_data[:2]

test_feature =transform( feature_data[2:])
test_class =  class_data[2:]

print "train", train_feature, "target = ", train_class

print "test", test_feature, "target=", test_class

from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()

gnb.fit(train_feature, train_class)
predict_class = gnb.predict(test_feature)

print "predict class", predict_class

#from sklearn.neighbors import KNeighborsClassifier
#knn = KNeighborsClassifier()
#knn.fit(train_feature, train_class)
#KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
#           n_neighbors=1, p=2, weights='uniform')
#knn.predict(test_feature)


# Transforming link:http://scikit-learn.org/stable/auto_examples/mlcomp_sparse_document_classification.html


"""
