from datetime import datetime
from csv import DictReader
from sklearn.metrics import log_loss
import scipy.sparse as sp
import ftrl
train = './data/train.csv'  # path to the training CSV
test = './data/test.csv'    # path to the test CSV (not read in this chunk)
alpha = 0.1  # learning rate
beta = 0.9   # smoothing parameter for adaptive learning rate
L1 = 1.     # L1 regularization, larger value means more regularized
L2 = 1.     # L2 regularization, larger value means more regularized
epoch=1  # number of passes over the training data
D = 2 ** 20  # number of hash buckets used by the feature-hashing trick
holdafter = 9   # data after date N (exclusive) are used as validation
holdout = None   # if set, every t-th row is used as validation instead
def data_encode(path, D):
	"""Stream rows of an Avazu-style CSV as hashed feature index lists.

	Args:
		path: CSV file with an 'id' column, an optional 'click' column
			(the label), an 'hour' column formatted YYMMDDHH, and any
			number of further categorical columns.
		D: number of hash buckets (dimension of the feature space).

	Yields:
		(t, date, ID, x, y): row index t, day-of-month parsed from
		'hour', the row id, the list of hashed feature indices, and the
		click label (1. for a click, 0. otherwise or when absent).

	NOTE(review): builtin hash() is salted per process (PYTHONHASHSEED),
	so indices are not reproducible across runs — confirm whether a
	stable hash (e.g. hashlib) is wanted here.
	"""
	# open via a context manager so the handle is closed when the
	# generator is exhausted or garbage-collected (original leaked it)
	with open(path) as infile:
		for t, row in enumerate(DictReader(infile)):
			ID = row['id']
			del row['id']

			# label: present only in training data
			y = 0.
			if 'click' in row:
				if row['click'] == '1':
					y = 1.
				del row['click']

			# 'hour' is YYMMDDHH: keep the day for the train/validation
			# split, reduce the feature itself to the hour of day
			date = int(row['hour'][4:6])
			row['hour'] = row['hour'][6:]

			# hashing trick: one bucket index per key_value pair
			x = [abs(hash(key + '_' + value)) % D for key, value in row.items()]

			yield t, date, ID, x, y
start = datetime.now()
model = ftrl.FtrlProximal(alpha, beta, L1, L2)
for e in range(epoch):
	loss = 0.
	count = 0
	for t, date, ID, x, y in data_encode(train, D):  # data is a generator
		# NOTE(review): csr_matrix(x) builds a 1 x len(x) matrix whose
		# entries are the hashed indices themselves, NOT a one-hot row
		# over D columns — confirm this is what FtrlProximal expects.
		x = sp.csr_matrix(x)
		y_ = model.predict(x)
		if (holdafter and date > holdafter) or (holdout and t % holdout == 0):
			# validation row: accumulate logloss. Wrap the single sample
			# in lists and pin labels=[0, 1] — sklearn's log_loss rejects
			# bare scalars / single-label batches (original would raise).
			# float(y_) assumes predict returns a scalar-like probability
			# — TODO confirm against the ftrl module.
			loss += log_loss([y], [float(y_)], labels=[0, 1])
			count += 1
		else:
			# training row: update the learner with the click label
			model.fit(x, y)
	# guard against an empty validation split (original raised
	# ZeroDivisionError when no row matched the holdout condition)
	avg_loss = loss / count if count else float('nan')
	print('Epoch %d finished, validation logloss: %f, elapsed time: %s' % (
		e, avg_loss, str(datetime.now() - start)))



