"""
本算法为决策树算法
"""
#total_loan,year_of_loan,interest,monthly_payment,work_type,employer_type,,work_year,house_exist,house_loan_status,censor_status,marriage,offsprings,use,debt_loan_ratio,del_in_18month,scoring_low,scoring_high,pub_dero_bankrup,early_return,early_return_amount,early_return_amount_3mon,recircle_b,recircle_u,initial_list_status,title,policy_code,f0,f1,f2,f3,f4,f5,is_default
from __future__ import print_function
import sys
from operator import add
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.conf import SparkConf
from pyspark.sql import Row
# --- Spark setup and raw-data loading ------------------------------------
spark=SparkSession.builder.appName("predict").getOrCreate()
sc = SparkContext.getOrCreate()
## Read the CSV file as a plain-text RDD (one string per line).
print("开始导入数据...")
rawData = sc.textFile("file:///workspace/bdkit-demo/spark-python-demo/src/doc2.csv")
print("-------------------------------------------------------------------")
print(type(rawData))
header = rawData.first()  # the first line is the column-name header row
## Drop the header row, keeping only data records.
rData = rawData.filter(lambda x: x != header)
print("-------------------------------------------------------------------")
## Show the first 2 data records as a sanity check.
print(rData.take(2))
## Split each line on commas into a list of string fields.
lines = rData.map(lambda x: x.split(","))
print("共有："+str(lines.count())+"项数据")
# --- Feature processing --------------------------------------------------
# NOTE(review): numpy is imported here but not used by the visible code
# below — presumably left over from the commented-out RMSE math; confirm
# before removing.
import numpy as np
print("处理特征值")
def convert_float(v):
    """Coerce a single raw CSV field (string) to a float value."""
    numeric = float(v)
    return numeric
def process_features(line):
    """Build the numeric feature vector from one parsed CSV record.

    Args:
        line: list of string fields for one CSV row; the feature columns
            occupy indices 0..32 and the label is the last field.

    Returns:
        list[float]: the first 33 fields converted to floats.
    """
    # Bug fix: the original wrapped each field as `(value)` — a no-op that
    # left the features as strings, even though convert_float() above was
    # written for exactly this conversion. Convert explicitly so malformed
    # fields fail here with a clear ValueError instead of deep inside MLlib.
    return [float(value) for value in line[0:33]]
## --- Label (prediction target) processing -------------------------------
print("处理label")
def process_label(line):
    """Extract the target label (the last CSV field) as an int."""
    raw_label = line[-1]
    return int(raw_label)
# Smoke-test label extraction on the first record (result is discarded).
process_label(lines.first())

print("0000000000000000000000000000000000000000")

## Build the LabeledPoint RDD: (label, feature-vector) pairs for MLlib.
from pyspark.mllib.regression import LabeledPoint
labelpointRDD = lines.map(lambda r: LabeledPoint(process_label(r), \
                                                 process_features(r)))
print("1111111111111111111111111111111111111111")
# Force evaluation of the first element as a sanity check (result discarded).
labelpointRDD.first()
print("2222222222222222222222222222222222222222")
## Randomly split into training / validation sets with an 8:2 weighting.
(trainData, validationData) = labelpointRDD.randomSplit([8,2])
print("训练集样本个数："+str(trainData.count()) + "验证集样本个数："+str(validationData.count()))
print("3333333333333333333333333333333333333333")
"""
# 将数据暂存在内存中，加快后续运算效率
trainData.persist()
validationData.persist()
"""
print("4444444444444444444444444444444444444444")
## 使用决策数模型进行训练
from pyspark.mllib.tree import DecisionTree
"""model = DecisionTree.trainRegressor(trainData, categoricalFeaturesInfo={}, impurity="variance", maxDepth=5, maxBins=32, 
                            minInstancesPerNode=1, minInfoGain=0.0)"""
model = DecisionTree.trainClassifier(trainData, numClasses=2,categoricalFeaturesInfo={}, impurity="entropy", maxDepth=5, maxBins=32)
print("5555555555555555555555555555555555555555")
## 对模型进行评估
#import numpy as np
from pyspark.mllib.evaluation import RegressionMetrics

## 定义模型评估函数
def RMSE(model, validationData):
    """Evaluate a trained classifier on a validation set.

    NOTE: despite the historical name, this returns the misclassification
    (error) rate, not a root-mean-square error — for 0/1 integer labels the
    squared difference (pred - label)**2 is exactly 1 on each mismatch.

    Args:
        model: trained MLlib model exposing predict(features_rdd).
        validationData: RDD of LabeledPoint-like records with .features
            and .label attributes.

    Returns:
        float: fraction of validation records predicted incorrectly.

    Side effects:
        Saves the (prediction, label) pairs to result.txt; Spark's
        saveAsTextFile raises if that output path already exists.
    """
    ## Predict on the feature vectors of the validation set.
    predict = model.predict(validationData.map(lambda p: p.features))
    ## Pair each prediction with the true label: (prediction, label).
    predict_real = predict.zip(validationData.map(lambda p: p.label))
    print("--------------------------------")
    print(type(predict_real))
    print(predict_real)
    predict_real.saveAsTextFile('file:///workspace/bdkit-demo/spark-python-demo/src/result.txt')
    ## Mean of squared int differences == error rate for binary labels.
    error_rate = predict_real.map(lambda p: (int(p[0]) - int(p[1]))**2).sum() / predict_real.count()
    return error_rate

## 调用函数求模型在验证集上的准确率
#rmse =  RMSE(model, validationData)
#print("均方误差RMSE="+str(rmse))
error_rate =  RMSE(model, validationData)
print("error rate="+str(error_rate))