import numpy as np
import csv
from sklearn.decomposition import PCA,KernelPCA
from sklearn.ensemble import ExtraTreesClassifier,RandomForestClassifier,GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn import preprocessing,grid_search
import sys
from sklearn.feature_selection import SelectKBest as skb
from sklearn.pipeline import Pipeline

def load_data_csv(file):
	"""Read the CSV at path *file* and return all rows as a 2-D numpy
	array of strings (numpy picks the string dtype).

	Fixes: the original never closed the file handle (it leaked until
	garbage collection); a context manager closes it even on error.
	NOTE(review): the parameter name shadows the Python 2 `file`
	builtin; kept unchanged for keyword-call compatibility.
	"""
	with open(file) as handle:
		rows = list(csv.reader(handle))
	return np.asarray(rows)

# --- Load training data ---
# From the slicing below: each row has 129 feature columns followed by
# the remaining label columns (multi-label, cast to int).
data = load_data_csv('task1/train_data.csv')

X=data[:,:129].astype(float)	# feature matrix
Y=data[:,129:].astype(int)	# label matrix; the "-12" filter below suggests 12 labels in {-1, 1} -- TODO confirm

print "shape of X: ", X.shape
print "shape of Y: ", Y.shape

# --- Load test features ---
# Column 0 is dropped; presumably a sample id -- verify against the file.
test = load_data_csv('task1/test_feature_data.csv')
test = test[:,1:].astype(float)
print "shape of test: ",test.shape

'''
pca = PCA(n_components=110)
pca.fit(X)
X=pca.transform(X)
test=pca.transform(test)
'''

XX=[]
YY=[]
for x,y in zip(X,Y):
	#print len(y)
	if sum(y) != -12:
		XX.append(x)
		YY.append(y)
X=np.asarray(XX)
Y=np.asarray(YY)
print "shape of X: ", X.shape
print "shape of Y: ", Y.shape

ret = []
size = 12
# NOTE(review): the original imported sklearn.cross_validation and built
# a KFold object here but never used it; that dead (and long-deprecated)
# code has been removed.
for i in range(size):
	# One-vs-rest: train one independent binary classifier per label column.
	y = Y[:, i]
	clf = Pipeline([
		('feature_selection', skb(k=100)),  # keep the 100 best-scoring features
		('classification', SVC(C=2.0, kernel='rbf', probability=True, gamma=1e-3)),
	])
	clf.fit(X, y)
	pp = clf.predict_proba(test)
	# Column 1 is presumably P(label == +1): predict_proba columns follow
	# sorted classes_, so with labels {-1, 1} index 1 is the positive
	# class -- confirm against the training labels.
	ret.append(pp[:, 1])

# --- Write predictions to task1_pre.csv ---
# One row per test sample: 1-based sample id, then one probability per
# label column, 6 decimal places.
print(len(ret))     # number of label columns predicted
print(len(ret[0]))  # number of test samples

# BUG FIX: the row count was hard-coded to 204; derive it from the
# predictions so the script works for any test-set size. The file is
# now opened with a context manager so it is closed even on error.
n_samples = len(ret[0])
with open("task1_pre.csv", "w") as f:
	for k in range(n_samples):
		f.write('%d' % (k + 1))
		for i in range(size):
			f.write(',%.6f' % (ret[i][k]))
		f.write('\n')





