from sklearn.linear_model import LogisticRegression
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score

def lr_model(x,y,valx, valy, C, log_file=None, **kwargs):
	"""Fit a class-balanced logistic regression and report KS/AUC on train and validation data.

	Fits ``LogisticRegression(C=C, class_weight='balanced', **kwargs)`` on
	``(x, y)``, prints the KS statistic and ROC-AUC for both the training
	set and the validation set ``(valx, valy)``, and draws both ROC curves
	on a single figure. The figure is shown interactively when ``log_file``
	is falsy, otherwise it is saved to ``log_file``.

	Returns:
		tuple: (validation probabilities, training probabilities, fitted model).
	"""
	clf = LogisticRegression(C=C,class_weight='balanced',**kwargs)
	clf.fit(x,y)
	print(kwargs)

	# Training-set scores and metrics.
	train_scores = clf.predict_proba(x)[:, 1]
	train_fpr, train_tpr, _ = roc_curve(y, train_scores)
	# KS statistic = maximum vertical gap between the TPR and FPR curves.
	print('trainks : ', abs(train_fpr - train_tpr).max())
	train_auc = roc_auc_score(y, train_scores)
	print('train_auc_score:', train_auc)

	# Validation-set scores and metrics.
	val_scores = clf.predict_proba(valx)[:, 1]
	val_fpr, val_tpr, _ = roc_curve(valy, val_scores)
	print('valks : ', abs(val_fpr - val_tpr).max())
	val_auc = roc_auc_score(valy, val_scores)
	print('val_auc_score:', val_auc)

	# Overlay both ROC curves with the chance diagonal for reference.
	fig, ax = plt.subplots()
	ax.plot(train_fpr, train_tpr, label='dev')
	ax.plot(val_fpr, val_tpr, label='val')
	ax.plot([0, 1], [0, 1], 'k--')
	ax.set_xlabel('False positive rate')
	ax.set_ylabel('True positive rate')
	ax.set_title('ROC Curve\n trainroc:{}\n test_roc:{}'.format(round(train_auc, 2), round(val_auc, 2)))
	ax.legend(loc='best')
	if log_file:
		fig.savefig(log_file)
	else:
		fig.show()
	return val_scores, train_scores, clf
	
	
def lr_model_validation(model,x, y, log_file=None):
	"""Score an already-fitted classifier on (x, y) and report KS/AUC.

	Prints the KS statistic and ROC-AUC of ``model`` on the supplied data
	and plots a single ROC curve (labelled 'oot'). The figure is shown
	interactively when ``log_file`` is falsy, otherwise saved to ``log_file``.

	Returns:
		array: predicted positive-class probabilities for ``x``.
	"""
	scores = model.predict_proba(x)[:, 1]
	fpr, tpr, _ = roc_curve(y, scores)
	# KS statistic = maximum vertical gap between the TPR and FPR curves.
	print('trainks : ', abs(fpr - tpr).max())
	auc = roc_auc_score(y, scores)
	print('train_auc_score:', auc)

	# Single ROC curve with the chance diagonal for reference.
	fig, ax = plt.subplots()
	ax.plot(fpr, tpr, label='oot')
	ax.plot([0, 1], [0, 1], 'k--')
	ax.set_xlabel('False positive rate')
	ax.set_ylabel('True positive rate')
	ax.set_title('ROC Curve\n trainroc:{}'.format(round(auc, 2)))
	ax.legend(loc='best')
	if log_file:
		fig.savefig(log_file)
	else:
		fig.show()
	return scores
	
def xgb_model(x, y, valx, valy, log_file=None,estimators=400):
	"""Fit an XGBoost classifier and report KS/AUC on train and validation data.

	Trains an ``XGBClassifier`` with fixed regularised hyperparameters
	(shallow trees, strong L2, low learning rate) on ``(x, y)``, prints
	the KS statistic and ROC-AUC for both the training set and the
	validation set ``(valx, valy)``, and plots both ROC curves. The figure
	is shown interactively when ``log_file`` is falsy, otherwise saved to
	``log_file``.

	Args:
		x, y: training features and labels.
		valx, valy: validation features and labels.
		log_file: optional path; when given the ROC figure is saved there.
		estimators: number of boosting rounds (n_estimators).

	Returns:
		tuple: (fitted model, validation probabilities).
	"""
	# NOTE: the original passed class_weight='balanced', which is not an
	# XGBClassifier parameter and was silently ignored; scale_pos_weight
	# is the xgboost equivalent and is kept at its explicit value of 1.
	model = xgb.XGBClassifier(learning_rate=0.05, n_estimators=estimators, max_depth=2,
	                          min_child_weight=1, subsample=1, nthread=1,
	                          scale_pos_weight=1, random_state=37, n_jobs=1, reg_lambda=300)
	model.fit(x, y)

	# Training-set metrics; keep a separate name so it is not clobbered
	# by the validation predictions below.
	y_pred_train = model.predict_proba(x)[:, 1]
	fpr_dev, tpr_dev, _ = roc_curve(y, y_pred_train)
	# KS statistic = maximum vertical gap between the TPR and FPR curves.
	train_ks = abs(fpr_dev - tpr_dev).max()
	train_roc_val = roc_auc_score(y, y_pred_train)
	print('train_ks : ',train_ks)
	print('train_auc_score:', train_roc_val)

	# Validation-set metrics.
	y_pred = model.predict_proba(valx)[:, 1]
	fpr_val, tpr_val, _ = roc_curve(valy, y_pred)
	val_ks = abs(fpr_val - tpr_val).max()
	test_roc_val = roc_auc_score(valy, y_pred)
	print('val_ks :', val_ks)
	print('val_auc_score: ', test_roc_val)

	# Overlay both ROC curves with the chance diagonal for reference.
	fig, ax = plt.subplots()
	ax.plot(fpr_dev, tpr_dev, label='dev')
	ax.plot(fpr_val, tpr_val, label='val')
	ax.plot([0, 1], [0, 1], 'k--')
	ax.set_xlabel('False positive rate')
	# Fixed label/title typos: 'rate1' -> 'rate', 'Cunve' -> 'Curve'.
	ax.set_ylabel('True positive rate')
	ax.set_title('ROC Curve\n train roc:{}\n test roc:{}'.format(round(train_roc_val,2),round(test_roc_val,2)))
	ax.legend(loc= 'best')
	if not log_file:
		fig.show()
	else:
		fig.savefig(log_file)
	return model, y_pred 
