# -*- coding: utf-8 -*-

# 导入pyspark
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
import numpy as np

# 导入线性回归
from pyspark.mllib.regression import LinearRegressionWithSGD
# 导入决策树回归
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel

# 导入可视化
import matplotlib
matplotlib.use('Agg') # 不回显
import matplotlib.pyplot as plt
# import matplotlib.pylab as plb

# 导入数据库操作
import MySQLdb

# Build a {value: unique index} dict for one categorical column of the dataset.
def get_mapping(rdd, idx):
	# Project the column, deduplicate, pair each distinct value with an
	# index, and collect the mapping to the driver as a dict.
	column = rdd.map(lambda row: row[idx])
	return column.distinct().zipWithIndex().collectAsMap()

# Build the linear-model feature vector for one record:
# one-hot encoded categorical columns (indices 2-8) followed by the
# raw numerical columns (indices 10-13) as floats.
def extract_features(record, cat_mappings=None, total_cat_len=None):
	# Backward-compatible injection points: default to the module-level
	# `mappings` / `cat_len` globals the original relied on.
	if cat_mappings is None:
		cat_mappings = mappings
	if total_cat_len is None:
		total_cat_len = cat_len
	cat_vec = np.zeros(total_cat_len)
	step = 0
	# BUG FIX: the original iterated the global RDD slice `records[2:9]`
	# instead of this record's fields.
	for i, field in enumerate(record[2:9]):
		m = cat_mappings[i]
		idx = m[field]
		# `step` offsets each column's one-hot segment within cat_vec.
		cat_vec[idx + step] = 1
		step = step + len(m)
	# NOTE(review): the disabled driver code builds mappings for columns
	# 2-9 and numericals from [11:15]; this function uses [2:9] / [10:14]
	# — confirm the intended column ranges against the dataset schema.
	num_vec = np.array([float(field) for field in record[10:14]])
	# BUG FIX: np.concatenate requires a sequence of arrays; the original
	# np.concatenate(cat_vec, num_vec) passed num_vec as the axis argument.
	return np.concatenate((cat_vec, num_vec))

# Build the decision-tree feature vector: raw columns 2-13 cast to float.
# (Trees consume the categorical codes directly, no one-hot encoding.)
def extract_features_dt(record):
	return np.array([float(value) for value in record[2:14]])

# Extract the regression target: the rental count in the last column.
def extract_label(record):
	raw_count = record[-1]
	return float(raw_count)

# Squared error between a prediction and the actual value.
def squared_error(actual, pred):
	diff = pred - actual
	return diff * diff

# Absolute error between a prediction and the actual value.
def abs_error(actual, pred):
	# np.absolute is the canonical name for the np.abs alias.
	return np.absolute(pred - actual)

# Squared difference of the log-transformed (x + 1) values; the building
# block of RMSLE. Symmetric in its two arguments.
def squared_log_error(pred, actual):
	diff = np.log(pred + 1) - np.log(actual + 1)
	return diff ** 2

# Measure how maxDepth / maxBins affect decision-tree performance:
# train on `train`, score RMSLE on `test`.
def evaluate_dt(train, test, maxDepth, maxBins):
	model = DecisionTree.trainRegressor(train, {}, impurity = 'variance', maxDepth = maxDepth, maxBins = maxBins)
	predictions = model.predict(test.map(lambda p: p.features))
	labels = test.map(lambda p: p.label)
	pairs = labels.zip(predictions)
	mean_sle = pairs.map(lambda pair: squared_log_error(pair[0], pair[1])).mean()
	return np.sqrt(mean_sle)

# Train a decision-tree regressor, plot predicted vs. actual values for the
# first 20 test points, persist MSE/RMSLE to MySQL, and return the model.
# NOTE: despite the original "return rmsle" comment, this returns the MODEL
# (the caller saves it), and the metrics go to the database instead.
def evaluate_dt_draw(train, test, maxDepth, maxBins):
	# Train the DecisionTree model.
	model = DecisionTree.trainRegressor(train, {}, impurity = 'variance', maxDepth = maxDepth, maxBins = maxBins)
	preds = model.predict(test.map(lambda p: p.features))
	actual = test.map(lambda p: p.label)
	# Plot: actuals on the diagonal, predictions as red crosses.
	plt.figure(num=1, figsize=(8,6))
	plt.title('Decision Tree Regression Model', size=14)
	plt.xlabel('Actuals', size=14)
	plt.ylabel('Predicts', size=14)
	actual_take = actual.take(20)
	preds_take = preds.take(20)
	plt.plot(actual_take, actual_take)
	plt.plot(actual_take, preds_take, "r+")
	# Annotate each plotted point with "(actual, pred)".
	# BUG FIX: take(20) returns UP TO 20 elements; the original
	# range(0, 20) loop raised IndexError on smaller test sets.
	# zip stops at the shorter list.
	for a, p in zip(actual_take, preds_take):
		textShow = str((int(a), round(p, 2)))
		plt.text(a, p, textShow, color="red", fontsize=6)

	plt.savefig('DecisionTree.png', format='png')
	# Compute MSE and RMSLE over the full test set.
	tp = actual.zip(preds)
	mse = tp.map(lambda pair: squared_error(pair[0], pair[1])).mean()
	rmsle = np.sqrt(tp.map(lambda pair: squared_log_error(pair[0], pair[1])).mean())
	# Values are program-generated numbers (not user input), but a
	# parameterized query via the DB helper would still be safer.
	sql = "insert into queue(name, status, mse, rmsle) values('%s', %d, %f, %f)"%("DecisionTreeRegression", 1, mse, rmsle)
	insertNewRecordForDecisionTreeRegression(sql)
	return model

# Execute an INSERT statement against the local sparkDemo MySQL database.
# Errors are printed (best-effort logging, as in the original) rather than
# raised; always returns 1 so callers are unaffected.
def insertNewRecordForDecisionTreeRegression(sql):
	# Connect to the database.
	try:
		conn = MySQLdb.connect(host='localhost',user='root',passwd='root',db='sparkDemo')
	except Exception as e:
		# BUG FIX: the original printed the error and fell through,
		# hitting a NameError on the unbound `conn`; bail out instead.
		print(e)
		return 1
	try:
		# Cursor object for executing statements.
		cursor = conn.cursor()
		try:
			cursor.execute(sql)
			# Commit only after a successful execute (the original
			# committed unconditionally, even after a failure).
			conn.commit()
		except Exception as e:
			print(e)
		finally:
			# Close the cursor even if execute raised (the original
			# leaked it on error).
			cursor.close()
	finally:
		# Always release the connection.
		conn.close()
	return 1

# ---- main script: Spark context and data loading ----
sc = SparkContext("yarn-client", "Decision Tree Regression Spark App")

# Load the dataset (bike-sharing hourly data, header already stripped).
path = "hdfs://192.168.119.141:9100/data/bike/hour_noheader.csv" # dataset location on HDFS
raw_data = sc.textFile(path) # raw text RDD
# num_data = raw_data.count() # number of rows
records = raw_data.map(lambda x: x.split(",")) # split each line on commas
# first = records.first() # first row
# print first # show the first row
# print num_data # show the row count
records.cache() # cache the RDD: it is reused below to build feature vectors

# Linear model (exploratory code, left disabled; only the decision tree runs)
# Linear-model data preparation
# mappings = [get_mapping(records, i) for i in range(2, 10)] # mappings for columns 2-9
# cat_len = sum(map(len, mappings)) # total distinct categorical values (columns 2-9)
# num_len = len(records.first()[11:15]) # number of numerical columns (indices 11-14)
# total_len = num_len + cat_len
# print "total_len:"
# print total_len

# Build feature vectors for the linear model
# data = records.map(lambda r: LabeledPoint(extract_label(r), extract_features(r)))
# first_point = data.first()
# print "Raw data: " + str(first[2:])
# print data.first
# print "Label: " + str(first_point.label)
# print "Linear Model feature vector:\n" + str(first_point.features)
# print "Linear Model feature vector length: " + str(len(first_point.features))

# Decision-tree model
# Decision-tree data preparation: label = last column, features = raw
# columns 2-13 as floats (trees take categorical codes directly).
data_dt = records.map(lambda r: LabeledPoint(extract_label(r), extract_features_dt(r)))
# first_point_dt = data_dt.first()
# print "Decision Tree feature vector: " + str(first_point_dt.features)
# print "Decision Tree feature vector length: " + str(len(first_point_dt.features))

# Decision-tree model construction (exploratory, disabled)
# dt_model = DecisionTree.trainRegressor(data_dt, {}) # tune parameters here
# preds = dt_model.predict(data_dt.map(lambda p: p.features))
# actual = data_dt.map(lambda p: p.label)
# true_vs_predicted_dt = actual.zip(preds)
# # actual = data_dt.map(lambda p: p.)
# # true_actual_predicted_dt = true_actual_predicted_dt.zip()
# print "Decision Tree predictions: " + str(true_vs_predicted_dt.take(5))
# print "Decision Tree depth: " + str(dt_model.depth())
# print "Decision Tree number of nodes: " + str(dt_model.numNodes())

# Decision-tree error metrics (disabled)
# mse_dt = true_vs_predicted_dt.map(lambda (t, p): squared_error(t, p)).mean() # mean squared error
# mae_dt = true_vs_predicted_dt.map(lambda (t, p): abs_error(t, p)).mean() # mean absolute error
# rmsle_dt = np.sqrt(true_vs_predicted_dt.map(lambda (t, p): squared_log_error(t, p)).mean()) # root mean squared log error
# print "Decision Tree - Mean Squared Error: %2.4f" % mse_dt
# print "Decision Tree - Mean Absolute Error: %2.4f" % mae_dt
# print "Decision Tree - Root Mean Squared Log Error: %2.4f" % rmsle_dt

# Target-distribution inspection (disabled)
# targets = records.map(lambda r: float(r[-1])).collect()
# plt.hist(targets, bins = 40, normed = True)
# plt.show()
# fig = plt.gcf()
# fig.set_size_inches(16, 10)
# plt.savefig('x.png', format='png')

# Plotting example
# mu =0.0
# sigma =2.0
# samples = np.random.normal(loc=mu, scale=sigma, size=1000)
# plt.figure(num=1, figsize=(8,6))
# plt.title('Plot 2', size=14)
# plt.xlabel('value', size=14)
# plt.ylabel('counts', size=14)
# plt.hist(samples, bins=40, range=(-10,10))
# plt.text(-9,100, r'$\mu$ = 0.0, $\sigma$ = 2.0', size=16)
# plt.savefig('plot2.png', format='png')

# Model evaluation: the parameters that matter for a decision tree are
# tree depth and the maximum number of bins (disabled sweep below).
# Data preparation
# data_with_idx_dt = data_dt.zipWithIndex().map(lambda (k, v): (v, k))
# test_dt = data_with_idx_dt.samples(False, 0.2, 42)
# train_dt = data_with_idx_dt.subtractByKey(test_dt)
# train_data_dt = train_dt.map(lambda (idx, p): p)
# test_data_dt = test_dt.map(lambda (idx, p): p)

# Parameter grids
# depthParams = [1, 2, 3, 4, 5, 10, 20]
# binParams = [2, 4, 8, 16, 32, 64, 100]

# Max tree depth sweep
# depthMetrics = [evaluate_dt(train_data_dt, test_data_dt, param, 32) for param in depthParams]
# print depthParams
# print depthMetrics
# plt.plot(depthParams, depthMetrics)
# fig = plt.gcf()

# Max bins sweep
# binMetrics = [evaluate_dt(train_data_dt, test_data_dt, 5, param) for param in binParams]
# print binParams
# print binMetrics
# plt.plot(binParams, binMetrics)
# fig = plt.gcf()

# train/test preparation: key each point by its index, sample 20% for
# test (seed 42), and take the remainder as train.
# NOTE(review): the tuple-unpacking lambdas below are Python 2 only.
data_with_idx_dt = data_dt.zipWithIndex().map(lambda (k, v): (v, k))
test_dt = data_with_idx_dt.sample(False, 0.2, 42)
train_dt = data_with_idx_dt.subtractByKey(test_dt)
train_data_dt = train_dt.map(lambda (idx, p): p)
test_data_dt = test_dt.map(lambda (idx, p): p)

# Train (depth 10, 16 bins), plot, persist metrics, then save the model.
model = evaluate_dt_draw(train_data_dt, test_data_dt, 10, 16)
path = "DecisionTreeModel"
model.save(sc, path)

sc.stop() # shut down the SparkContext