import pyspark
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
import numpy as np
from pyspark.mllib.evaluation import RegressionMetrics
import time
# Spark setup: local master, application name "duty 3"; quiet the log output.
sc = SparkContext("local", "duty 3")
sc.setLogLevel("ERROR")
# Load the raw stock CSV from the local filesystem.
stockrdd = sc.textFile('file:///home/allen/myspark/stock_data.csv')
# Drop the header row, then split each remaining line into its fields.
header = stockrdd.first()
rDate = stockrdd.filter(lambda x: x !=header)
lines = rDate.map(lambda x: x.split(","))
def convert_float(v):
    """Parse a single CSV field into a float."""
    parsed = float(v)
    return parsed
def process_features(line):
    """Extract the numeric feature columns from a split CSV row.

    Takes the fields at indices 3..6 (``line[3:7]``) and parses each
    one into a float; returns them as a list.
    """
    return [float(field) for field in line[3:7]]
def process_label(line):
    """Return the regression label: the row's last field, as a float."""
    raw = line[-1]
    return float(raw)
# NOTE(review): the result of this call is discarded — looks like a leftover
# sanity check that the label column parses; consider removing.
process_label(lines.first())
# Build LabeledPoint(label, features) records for MLlib.
labelpointRDD = lines.map(lambda r: LabeledPoint(process_label(r), process_features(r)))
# Random 7:1:2 split (Spark normalizes the weights) into train/validation/test.
(trainData, validationData, testData) = labelpointRDD.randomSplit([7,1,2])
# Cache all three splits; each is reused by multiple actions below.
trainData.persist()
validationData.persist()
testData.persist()
# Train a variance-impurity decision-tree regressor (no categorical features).
model = DecisionTree.trainRegressor(trainData, categoricalFeaturesInfo={}, impurity="variance", maxDepth=5, maxBins=32, 
                            minInstancesPerNode=1, minInfoGain=0.0)
## Model-evaluation helper.
def RMSE(model, validationData):
    """Root-mean-squared error of `model` on `validationData`.

    Parameters
    ----------
    model : trained regression model exposing ``predict(features_rdd)``
    validationData : RDD of LabeledPoint (has ``.features`` and ``.label``)

    Returns
    -------
    float : sqrt of the mean squared difference between predictions
        and true labels.
    """
    # Predict on the features only.
    predict = model.predict(validationData.map(lambda p: p.features))
    # Pair each prediction with its true label.
    predict_real = predict.zip(validationData.map(lambda p: p.label))
    # mean() is a single Spark action; the original sum()/count() pair
    # scanned the RDD twice for the same result.
    rmse = np.sqrt(predict_real.map(lambda p: (p[0] - p[1]) ** 2).mean())
    return rmse

## Evaluate the model on the validation set. The metric is RMSE
## (the original comment mislabeled it as "accuracy").
rmse =  RMSE(model, validationData)
# Show one sample record and the sizes of the three splits.
print(labelpointRDD.first())
print("训练集样本个数："+str(trainData.count()) + "验证集样本个数："+str(validationData.count())+ "测试集样本个数："+str(testData.count()))
print("均方误差RMSE="+str(rmse))


# 创建trainEvaluateModel函数包含训练与评估功能，并计算训练评估的时间。

# def trainEvaluateModel(trainData, validationData, maxDepthParm, maxBinsParm, minInstancesPerNodeParm, minInfoGainParm):
#     startTime = time.time()
#     ## 创建并训练模型
#     model = DecisionTree.trainRegressor(trainData, categoricalFeaturesInfo={}, impurity="variance", maxDepth=maxDepthParm, 
#                                         maxBins=maxBinsParm, minInstancesPerNode=minInstancesPerNodeParm, minInfoGain=minInfoGainParm)
#     ## 计算RMSE
#     rmse = RMSE(model, validationData)
#     duration = time.time() - startTime   # 持续时间
#     print("训练评估：参数"+ ",  maxDepth="+str(maxDepthParm)+",  maxBins="+str(maxBinsParm)+ 
#           ", minInstancesPerNode="+str(minInstancesPerNodeParm) +", minInfoGainParm="+str(minInfoGainParm)+"\n"
#          "===>消耗时间="+str(duration)+",  均方误差RMSE="+str(rmse))
#     return rmse, duration, maxDepthParm, maxBinsParm, minInstancesPerNodeParm, minInfoGainParm, model


# ## 定义函数gridSearch网格搜索最佳参数组合
# def gridSearch(trainData, validationData, maxDepthList, maxBinsList, minInstancesPerNodeList, minInfoGainList):
#     metrics = [trainEvaluateModel(trainData, validationData, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
#           for maxDepth in maxDepthList
#           for maxBins in maxBinsList
#           for minInstancesPerNode in minInstancesPerNodeList
#           for minInfoGain in minInfoGainList]
#     # 按照RMSE从小到大排序，返回最小RMSE的参数组合
#     sorted_metrics = sorted(metrics, key=lambda k:k[0], reverse=False)
#     best_parameters = sorted_metrics[0]
#     print("最佳参数组合："+"maxDepth="+str( best_parameters[2]) + 
#          ",  maxBins="+str( best_parameters[3])+",  minInstancesPerNode="+str( best_parameters[4])+
#           ", minInfoGain="+str(best_parameters[5])+"\n"+
#          ",  均方误差RMSE="+str( best_parameters[0]))
#     return  best_parameters
# ## 参数组合
# maxDepthList = [3, 5, 10,20,25]
# maxBinsList = [30, 50,100,200]
# minInstancesPerNodeList=[1,3,5,10,20]
# minInfoGainList=[0.0,0.3,0.5]

# ## 调用函数返回最佳参数组合
# best_parameters = gridSearch(trainData, validationData, maxDepthList, maxBinsList, minInstancesPerNodeList, minInfoGainList)