"""
@This file will be used to classify data of serverfault for numeric features
@Cross validation 70-30, 10-fold

"""
import numpy as np
import pylab
import matplotlib
import sklearn
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import MySQLdb
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import HashingVectorizer
import scipy.sparse
from sklearn import cross_validation
#from preprocess import pp

#"""
port=3306
user="root"
password="123"
database="isec2014"
table_migrated = "train_migrated_serverfault_aug13"
table_not_migrated = "train_not_migrated_serverfault_aug13"

"""
site_name = "serverfault"
port=3307
user="sangeetal"
password="sangeetal"
database="sangeeta"
table_migrated = "train_migrated_"+site_name+"_aug13"
table_not_migrated = "train_not_migrated_"+site_name+"_aug13"
#"""
random_seed_val = 0

db1= MySQLdb.connect(host="localhost", user=user, passwd=password,db=database, port=port)
select_cursor = db1.cursor()

def clean(val):
    """Text-cleaning hook; currently a no-op that returns *val* unchanged."""
    return val

#Read data from migrated table
str_m = "select f1_title, f1_body, f1_title_len, f1_body_len, f1_reputation, f1_account_age from "\
+ table_migrated

print "strm = ", str_m
select_cursor.execute(str_m)
m_title_body = list()
m_other_feature = list()
m_data = list()
target = list()
not_m_data = list()

k=0
migrated_data = select_cursor.fetchall()
for temp_data in migrated_data:
    #q_body_title = temp_data[0]+" "+ temp_data[1]
    #print "q body title", q_body_title
    #clean_q=clean(q_body_title)
    #m_title_body.append(clean_q)
    
    temp_f_val = list()
    title_len = temp_data[2]
    body_len = temp_data[3]
    reputation = temp_data[4]
    account_age = temp_data[5]
    
    temp_f_val.append(title_len)
    temp_f_val.append(body_len)
    temp_f_val.append(reputation)
    temp_f_val.append(account_age) 
    m_other_feature.append(temp_f_val)
      
    target.append(1)
    #if k>=1:
    #    k=1
    #    break
    #k=1

#tfidf = Pipeline([
#    ('vectorizer', HashingVectorizer(ngram_range=(1,4), non_negative=True)),
#    ('tfidf', TfidfTransformer()),
#])

"""
vectorizer = TfidfVectorizer(min_df=1)

x_title_body=vectorizer.fit_transform(m_title_body)



print "x_title shape", x_title_body.shape
m_other_feature_array = np.asarray(m_other_feature)
print m_other_feature_array.shape

m_data = np.hstack([x_title_body.toarray(), m_other_feature_array])
print m_data
"""

# Seed NumPy's global RNG so estimators that draw from it are reproducible.
np.random.seed(random_seed_val)
# NOTE(review): `m_data` is still an empty list at this point and `indices`
# is never read afterwards -- this permutation looks like leftover dead code.
indices = np.random.permutation(len(m_data))    

str_not_m = "select f1_title, f1_body, f1_title_len, f1_body_len, f1_reputation, f1_account_age from "\
+ table_not_migrated

print "str not m=", str_not_m
select_cursor.execute(str_not_m)

not_migrated_data = select_cursor.fetchall()
not_m_other_feature = list()
for temp_data in not_migrated_data:
    #q_body_title = temp_data[0]+" "+ temp_data[1]
    #print "q body title", q_body_title
    #clean_q=clean(q_body_title)
    #m_title_body.append(clean_q)
    
    temp_f_val = list()
    title_len = temp_data[2]
    body_len = temp_data[3]
    reputation = temp_data[4]
    account_age = temp_data[5]
    
    temp_f_val.append(title_len)
    temp_f_val.append(body_len)
    temp_f_val.append(reputation)
    temp_f_val.append(account_age) 
    not_m_other_feature.append(temp_f_val)
      
    target.append(0)
    
m_data = m_other_feature
not_m_data = not_m_other_feature

total_data = m_data + not_m_data
#print "m data", m_data, "not m data", not_m_data

cv = cross_validation.ShuffleSplit(len(target), n_iter=10, test_size=0.30, random_state=0)

knn = KNeighborsClassifier(algorithm='auto', leaf_size=1, metric='minkowski',
           n_neighbors=3, p=2, weights='uniform')
dt  =DecisionTreeClassifier(max_depth=5)
rf =  RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
ada =    AdaBoostClassifier()
svc =     SVC(kernel="linear", C=0.025)

dt_score = cross_validation.cross_val_score(dt,np.asarray(total_data), np.asarray(target), cv=cv)
rf_score = cross_validation.cross_val_score(rf,np.asarray(total_data), np.asarray(target), cv=cv)
ada_score = cross_validation.cross_val_score(ada,np.asarray(total_data), np.asarray(target), cv=cv)
svc_score = cross_validation.cross_val_score(svc,np.asarray(total_data), np.asarray(target), cv=cv)
knn_score = cross_validation.cross_val_score(knn,np.asarray(total_data), np.asarray(target), cv=cv)
#rf_score = cross_validation.cross_val_score(rf,x_data_tfidf.toarray(), target_arr.toarray(), cv=cv)
#dt_score = cross_validation.cross_val_score(dt,x_data_tfidf.toarray(), target_arr.toarray(), cv=cv)




print "dt acc", dt_score.mean()
print "rf acc", rf_score.mean()
print "ada acc", ada_score.mean()
print "svc acc", svc_score.mean()
print "knn acc", knn_score.mean()
"""

select_cursor.execute(str_m)
m_data = list()
target = list()
not_m_data = list()

migrated_data = select_cursor.fetchall()
for temp_data in migrated_data:
    q_body_title = temp_data[0]+" "+ temp_data[1]
    clean_q=clean(q_body_title)
    m_data.append(clean_q)
    target.append(1)
 
#np.random.seed(0)
#indices = np.random.permutation(len(m_data))    
str_not_m = "select f1_title, f1_body from "+ table_not_migrated
select_cursor.execute(str_not_m)

not_migrated_data = select_cursor.fetchall()

np.random.seed(1)
indices = np.random.permutation(len(not_migrated_data))[:len(m_data)]


count=0
for temp_data in not_migrated_data:
    #if count in indices:
    #    print "count=", count
    q_body_title = temp_data[0]+" "+ temp_data[1]
    clean_q=clean(q_body_title)
    not_m_data.append(clean_q)
    target.append(0)
    count= count+1
    if count >= len(m_data):
        break; 


total_data = m_data +not_m_data

total_data_arr = np.asarray(total_data)
target_arr = np.asarray(target)


#print "m_tuple_count=", len(total_tuple)
#print "len = ", len(target)


np.random.seed(1)
new_indices = np.random.permutation(len(total_data))

half_tuple = (len(total_data))/2
x_data =total_data_arr[new_indices[:half_tuple]]
y_train = target_arr[new_indices[:half_tuple]]

x_test_data = total_data_arr[new_indices[half_tuple:]] 
y_test = target_arr[new_indices[half_tuple:]]

print "len=  ", len(x_data), "type = ", type(x_data), "target len = ", len(y_train)
print "len =  ", len(x_test_data), "type = ", type(x_test_data), "target len ", len(y_test)

vectorizer = TfidfVectorizer(min_df=1)
x_train=vectorizer.fit_transform(x_data)
x_test = vectorizer.transform(x_test_data)

print "len=  ", x_test.shape
print "len = ", x_train.shape

knn = KNeighborsClassifier()
knn.fit(x_train, y_train)
KNeighborsClassifier(algorithm='auto', leaf_size=1, metric='minkowski',
           n_neighbors=3, p=2, weights='uniform')
predict_knn = knn.predict(x_test)
print(classification_report(y_test, predict_knn))
print accuracy_score(y_test, predict_knn)


dt = tree.DecisionTreeClassifier()
dt.fit(x_train.toarray(), y_train)

predict_dt = dt.predict(x_test.toarray())
print(classification_report(y_test, predict_dt))
print accuracy_score(y_test, predict_dt)
"""

"""
iris_X_train = iris_X[indices[:-10]]
#print "y train=", iris.target
iris_y_train = iris_y[indices[:-10]]
iris_X_test  = iris_X[indices[-10:]]
iris_y_test  = iris_y[indices[-10:]]

    
    
m_data_array =  np.asarray(m_data)   
"""

