import numpy as np
import csv
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier,RandomForestClassifier,GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn import preprocessing,grid_search,svm,tree
from sklearn.cross_validation import cross_val_score

def load_data_csv(file):
	"""Read a CSV file and return its rows as a 2-D numpy array of strings.

	Parameters:
		file: path to the CSV file. (Name kept for caller compatibility,
			even though it shadows the builtin.)

	Returns:
		numpy.ndarray of shape (n_rows, n_cols), dtype str; callers cast
		columns to float/int themselves.
	"""
	# 'with' guarantees the handle is closed even if parsing fails
	# (the original left the file object open).
	with open(file) as fh:
		rows = list(csv.reader(fh))
	return np.asarray(rows)

# ---- Training data -------------------------------------------------------
# Each row: 410 float features followed by one integer class label.
data = load_data_csv('task2/train_data.csv')

X = data[:, :410].astype(float)
Y = data[:, 410].astype(int)

# Class-distribution sanity check. np.bincount works for any number of
# classes (the original hard-coded exactly 3 bins, and then aborted the
# whole script with a leftover debug `import sys; sys.exit()`, which made
# all of the training/prediction code below unreachable — removed).
labelNum = np.bincount(Y).tolist()
print(labelNum)

# ---- Test data -----------------------------------------------------------
# Column 0 is the sample id; the remaining columns are float features.
test = load_data_csv('task2/test_feature_data.csv')
test = test[:, 1:].astype(float)

# Optional PCA dimensionality reduction, currently disabled:
# pca = PCA(n_components=200)
# pca.fit(X)
# X = pca.transform(X)
# test = pca.transform(test)

# Models tried earlier (scored with cross_val_score): DecisionTree,
# RandomForest(n_estimators=100), ExtraTrees(n_estimators=500),
# AdaBoost(n_estimators=100). Gradient boosting below performed best.
# (The original kept the winning line twice — once commented, once live.)
clf = GradientBoostingClassifier(n_estimators=500, max_depth=6, learning_rate=0.2)
clf = clf.fit(X, Y)

# ---- Prediction + submission file ---------------------------------------
pp = clf.predict(test)
print(pp)

# Write one "<1-based row id>,<predicted label>" line per test sample.
# 'with' closes the file even if a write raises (the original used an
# unguarded open/close pair and a hand-rolled counter instead of enumerate).
with open("task2_pre.csv", "w") as out:
	for row_id, label in enumerate(pp, start=1):
		out.write("%d,%d\n" % (row_id, label))


