import numpy as np
import numpy
import csv
from sklearn.decomposition import PCA,KernelPCA,SparsePCA
from sklearn.ensemble import ExtraTreesClassifier,RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from sklearn import svm,tree,neighbors
from sklearn.metrics import f1_score
from sklearn import preprocessing
from sklearn.neural_network import BernoulliRBM
#import theano
#import theano.tensor as T
#from theano.tensor.shared_randomstreams import RandomStreams
#from dA import dA
import matplotlib.pyplot as plt

def plot2d(X, Y, ax):
    """Scatter 2-D points on the matplotlib axes *ax*, one marker per class.

    Label 0 -> red circle ('ro'), label 1 -> blue star ('b*'),
    anything else -> green square ('gs').
    """
    markers = {0: 'ro', 1: 'b*'}
    for point, label in zip(X, Y):
        style = markers.get(label, 'gs')
        ax.plot(point[0], point[1], style)

def dae_trans(X,learning_rate=0.00003, training_epochs=15,
            batch_size=1,corruption_level=0.1,nv=2,nh=2):
    """Train a denoising autoencoder (theano `dA`) on X via minibatch SGD.

    Requires the theano / dA / RandomStreams imports commented out at the
    top of this file to be re-enabled; as shipped, calling this raises
    NameError.  Returns the trained dA instance (callers apply
    ``.transform`` themselves, see the disabled ``dae_l1`` below).

    Parameters: X data matrix; learning_rate SGD step size;
    training_epochs passes over the data; batch_size minibatch size;
    corruption_level input-corruption probability; nv/nh number of
    visible/hidden units.
    """
    #from dA import dA
    # Move the data into a theano shared variable; floatX keeps the dtype
    # consistent with the device configuration.
    train_set_x = theano.shared(numpy.asarray(X,dtype=theano.config.floatX), borrow=True)
    index = T.lscalar() # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    # NOTE(review): Python-2 integer division -- under Python 3 this is a
    # float and xrange below fails; this function is Py2-only as written.
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    da = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
            n_visible=nv, n_hidden=nh)

    # Symbolic reconstruction cost plus the SGD parameter-update pairs.
    cost, updates = da.get_cost_updates(corruption_level=corruption_level,
                                        learning_rate=learning_rate)
    # Compiled training step: evaluates cost on minibatch `index` and
    # applies the updates in place.
    train_da = theano.function([index], cost, updates=updates,
         givens={x: train_set_x[index * batch_size:
                                  (index + 1) * batch_size]})
    for epoch in xrange(training_epochs):
        # go through training set once, collecting per-batch costs
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(train_da(batch_index))
        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
    return da#.transform(X,corruption_level=corruption_level)

def load_data_csv(file):
    """Read a CSV file and return its rows as a 2-D numpy array of strings.

    Parameters: file -- path to the CSV file.
    Returns: np.ndarray of shape (n_rows, n_cols); callers cast the
    columns to float/int themselves.

    (Parameter is named `file` for backward compatibility even though it
    shadows the Py2 builtin.)
    """
    # `with` guarantees the handle is closed -- the original leaked it.
    with open(file) as fh:
        data = [row for row in csv.reader(fh)]
    return np.asarray(data)

# ---- Load training features/labels and the test feature matrix ----
data = load_data_csv('adult/train_data.csv')

# Columns 0..409 are features; column 410 is the integer class label.
X=data[:,:410].astype(float)
Y=data[:,410].astype(int)

test = load_data_csv('adult/test_feature_data.csv')

# Drop the leading column of the test file (presumably a row id --
# verify against the file format) and cast the rest to float.
test = test[:,1:].astype(float)

# ---- Disabled preprocessing experiments, kept for reference ----
# Standardization trial:
'''
scaler = preprocessing.StandardScaler().fit(X)
X=scaler.transform(X)
test=scaler.transform(test)
'''

# Linear + kernel PCA projection trial:
'''
pca=PCA(n_components=100)
pca.fit(X)
xx=pca.transform(X)
tt=pca.transform(test)


kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
kpca.fit(xx)
xx=kpca.transform(xx)
tt=kpca.transform(tt)
'''

# Denoising-autoencoder front end trial (needs the theano imports above):
'''
def dae_l1(X,test,corruption_level=0.0,nv=410,nh=400):
    dae_l1=dae_trans(X,corruption_level=corruption_level,nv=nv,nh=nh,training_epochs=5,learning_rate=0.00001)
    X=dae_l1.transform(X,corruption_level=corruption_level)[0]
    test=dae_l1.transform(test,corruption_level=corruption_level)[0]
    #l2=dae_trans(l1,Y,corruption_level=corruption_level)[0]
    #plot2d(l2,Y,ax)
    return X,test
'''

#X, test=dae_l1(X,test,0,400,400)

def mrmrF(data,c):
    from mrmr_local import mrmr
    fn = ['F%d' % n for n in range(data.shape[1])]
    assert data.shape==(len(c),len(fn))

    mrmrout = mrmr(data,fn,c,threshold=0.5)

    R = mrmrout['mRMR']
    print 'Order \t Fea \t Name \t Score'
    for i in range(len(R['Fea'])):
        print '%d \t %d \t %s \t %f' % \
              (i, R['Fea'][i], fn[R['Fea'][i]], R['Score'][i])

#mrmrF(X,Y)
# Load the precomputed mRMR feature ranking (1-based indices in the file)
# and shift to 0-based column indices for numpy fancy indexing.
feature_rank = np.loadtxt('task2_rank.txt', delimiter = '\t').astype(int)
feature_rank = feature_rank - 1

def subset(data, feature_rank, num=10):
    """Return the columns of *data* selected by the first *num* entries of *feature_rank*."""
    top_features = feature_rank[:num]
    return data[:, top_features]

#fig, ax=plt.subplots(1,4)
#(ax1,ax2,ax3,ax4)=ax
#plot2d(X,Y,ax1)

#num=350
#X=subset(X,feature_rank,num)
#test=subset(test,feature_rank,num)
#plot2d(X,Y,ax2)

#X, test=dae_l1(X,test,0,330,10)
#plot2d(X,Y,ax3)
#plt.show()

# ---- Model selection: grid-search a GradientBoostingClassifier ----
# NOTE(review): Python-2 only (print statements, `print >> f`) and uses
# sklearn APIs (cross_validation, grid_search) removed in modern releases.
weights='uniform'#'distance' # 'uniform'  (leftover from a kNN trial; unused below)
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.metrics import make_scorer
kf = KFold(len(Y), n_folds=3)
from sklearn.grid_search import GridSearchCV
#model=ExtraTreesClassifier(n_estimators=100) # Accuracy: 0.72 (+/- 0.04)
#model=svm.SVC()
#model=ExtraTreesClassifier(n_estimators=500)
#model=RandomForestClassifier(n_estimators=100)

# Baseline model; superseded by the GridSearchCV estimator assigned below.
model=GradientBoostingClassifier(n_estimators=10,max_depth=3,learning_rate=0.1,random_state=2)

# Macro-averaged F1 weighs every class equally regardless of imbalance.
f1_scorer = make_scorer(f1_score,average='macro')
#accuracy = cross_validation.cross_val_score(model, X, Y, cv=kf, scoring=f1_scorer)
#accuracy = cross_validation.cross_val_score(model, X, Y, cv=kf, scoring='f1')
#print accuracy
#print("Accuracy: %0.2f (+/- %0.2f)" % (accuracy.mean(), accuracy.std() * 2))

# Only learning_rate is actually swept; n_estimators and max_depth are fixed.
param_grid={'n_estimators':[300],'max_depth':[6],'learning_rate':[0.3,0.4,0.6,0.8]}

# 10-fold CV over the grid, fanned out across 64 worker processes.
model = GridSearchCV(GradientBoostingClassifier(),cv=10,
        param_grid=param_grid,scoring=f1_scorer,n_jobs=64)

model=model.fit(X,Y)

print(model.best_estimator_)
# Report mean CV score and spread for each grid point.
# NOTE(review): prints scores.std() / 2 where * 2 (a ~95% interval) was
# likely intended -- confirm before relying on the +/- figure.
for params, mean_score, scores in model.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() / 2, params))

# Predict the held-out test set and write "id,label" rows, ids starting at 1.
pp = model.predict(test)
print pp
k=1
f = open("task3_out_cheetah2.csv", "w")
for p in pp:
	print >> f,"%d,%d" %(k,p)
	k += 1
f.close()

