from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
# coding: utf-8  -- NOTE(review): inert here; Python only honors a coding declaration on line 1 or 2, so non-ASCII comments below may break parsing under Python 2.

# In[9]:
if __name__ == "__main__":
	# Entry point: train decision-tree classifiers on a tab-separated
	# training file stored on HDFS, first with numeric features only,
	# then with a one-hot-encoded category column added.
	# Run Spark locally, using all available cores.
	sparkConf = SparkConf().setMaster("local[*]") 
	sc = SparkContext(conf = sparkConf)
	# NOTE(review): the SparkSession is built on top of the already-created
	# SparkContext, so the builder's master('local') and config() settings
	# are ignored when a context exists — confirm which configuration is
	# intended to win.
	spark = SparkSession.builder.master('local').appName("DecisionTree").config("spark.some.config.option", "some-value").getOrCreate()
	# (notebook-export artifact: a stray "# coding: utf-8" header)

	# In[3]:


	# Base HDFS path of the project data ("XX" is a placeholder host).
	Path = "hdfs://XX:8020/u01/bigdata/"
	raw_data = sc.textFile(Path + "data/train_noheader.tsv")


	# In[4]: -- exploratory notebook cells. Bare expressions (e.g. `numRaw`)
	# displayed their value in Jupyter but are no-ops in a plain script,
	# so they are printed explicitly here.


	#raw_data.take(2)


	# In[5]:


	# Total number of input lines in the TSV file.
	numRaw = raw_data.count()
	print(numRaw)


	# In[6]:


	# NOTE(review): raw_data is an RDD of raw text lines, not key/value
	# pairs; countByKey() would use the first CHARACTER of each line as the
	# "key", which is almost certainly not intended — call disabled.
	#raw_data.countByKey()


	# In[7]:


	# Split every tab-separated line into a list of fields.
	records = raw_data.map(lambda line: line.split('\t'))
	print(records.first())


	# In[8]:


	# Number of fields in the first row.
	print(len(records.first()))


	# In[9]:


	# Materialise all rows on the driver for row-wise preprocessing below.
	data = records.collect()
	print(data[0])


	# In[10]:


	numColumns = len(data[0])
	print(numColumns)


	# In[11]:


	from pyspark.ml.linalg import Vectors
	from pyspark.ml.classification import DecisionTreeClassifier


	# In[12]:


	# Build (label, feature-vector) pairs: strip double quotes from every
	# field, take the last column as the 0/1 label, and convert columns
	# 5..numColumns-1 to floats with missing values ("?") mapped to 0.0.
	data1 = []
	for row in data:
		cleaned = [field.replace('"', "") for field in row]
		target = int(cleaned[-1])
		values = [0.0 if v == "?" else float(v) for v in cleaned[4: numColumns-1]]
		data1.append((target, Vectors.dense(values)))


	# In[13]:


	data1[0]


	# In[14]:


	df = spark.createDataFrame(data1, ["label", "features"])
	df.show(2)


	# In[15]:


	df.printSchema()


	# In[16]:


	# Keep the DataFrame in memory — it is scanned again by the indexer
	# fit and the random split below.
	df.cache()


	# In[17]:


	from pyspark.ml.feature import VectorIndexer
	# Automatically flag feature columns with <= 24 distinct values as
	# categorical so the tree treats them accordingly.
	featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=24).fit(df)


	# In[18]:


	(trainData, testData) = df.randomSplit([0.8, 0.2], seed=1234L) # fixed seed: the same train/test split is produced on every run (1234L is Python 2 long-literal syntax)


	# In[19]:


	trainData.count()


	# In[20]:


	testData.count()


	# In[21]:


	# Decision tree: depth 5, entropy (information-gain) split criterion,
	# consuming the indexed feature column produced by featureIndexer.
	dt = DecisionTreeClassifier(maxDepth=5, labelCol="label", featuresCol="indexedFeatures", impurity="entropy")


	# In[22]:


	from pyspark.ml import Pipeline
	# Pipeline: index categorical features, then fit the tree.
	pipeline = Pipeline(stages = [featureIndexer, dt])
	model = pipeline.fit(trainData)


	# In[23]:


	# Sanity check: predict on a single row (the first sample's features).
	test0 = spark.createDataFrame([(data1[0][1],)], ["features"])
	result = model.transform(test0)
	result.show()


	# In[24]:


	result.select(['prediction']).show()


	# In[25]:


	# Raw feature values of the first sample (dense vector -> plain list).
	firstRaw = list(data1[0][1])
	firstRaw[0]


	# In[26]:


	firstRaw[1]


	# In[27]:


	# Rebuild a dense vector from the list for a round-trip prediction.
	predictData = Vectors.dense(firstRaw)
	predictData


	# In[28]:


	predictRaw = spark.createDataFrame([(predictData,)], ["features"])
	predictResult = model.transform(predictRaw)
	predictResult.show()


	# In[29]:


	# Predict the whole held-out test split.
	predictResultAll = model.transform(testData)
	predictResultAll.select(['prediction']).show()


	# In[30]:


	# Bring predictions to the driver as a plain Python list via pandas.
	df_predict = predictResultAll.select(['prediction']).toPandas()
	dtPredict = list(df_predict.prediction)
	dtPredict[:10]


	# In[31]:


	# Pull the true test labels to the driver via pandas.
	testRaw = testData.count()
	df_test = testData.select(['label']).toPandas()
	testLabel = list(df_test.label)
	testLabel[:10]


	# In[32]:


	# Count predictions that match the true label.
	dtTotalCorrect = sum(1 for idx in range(testRaw) if dtPredict[idx] == testLabel[idx])
	dtTotalCorrect


	# In[33]:


	# Accuracy of the first model on the held-out split.
	1.0 * dtTotalCorrect / testRaw


	# In[34]:


	# --- Part 2: rebuild the dataset, this time one-hot encoding the
	# page-category column (field 4) alongside the numeric features.
	# The re-imports and re-reads below are notebook-export artifacts that
	# recompute the same values as above.
	from pyspark.ml.linalg import Vectors
	from pyspark.ml.feature import VectorIndexer
	from pyspark.ml import Pipeline
	raw_data = sc.textFile(Path + "data/train_noheader.tsv")
	numRaw = raw_data.count()
	records = raw_data.map(lambda line: line.split('\t'))
	data = records.collect()
	numColumns = len(data[0])
	data1 = []


	# In[35]:


	# Distinct category values (column 4), sorted for a stable index order.
	category = records.map(lambda x: x[3].replace("\"", ""))
	categories = sorted(category.distinct().collect())
	categories


	# In[36]:


	numCategories = len(categories)
	numCategories


	# In[37]:


	def transform_category(x, cats=None):
		"""One-hot encode a category value.

		Generalized from the original, which hard-coded the module-level
		``categories``/``numCategories``; passing ``cats`` explicitly makes
		the function self-contained while the default keeps old callers
		working.

		Args:
			x: the category string to encode; must appear in *cats*.
			cats: ordered list of all known categories. Defaults to the
				module-level ``categories`` list.

		Returns:
			A list of ints, all 0 except a single 1 at the category's index.

		Raises:
			ValueError: if *x* is not a known category.
		"""
		if cats is None:
			cats = categories
		markCategory = [0] * len(cats)
		markCategory[cats.index(x)] = 1
		return markCategory


	# In[38]:


	for i in range(numRaw):
		trimmed = [ each.replace('"', "") for each in data[i] ]
		label = int(trimmed[-1])
		cate = transform_category(trimmed[3])
		features = cate + map(lambda x: 0.0 if x == "?" else x, trimmed[4: numColumns-1]) #只取第5到27列
		c = (label, Vectors.dense(map(float, features)))
		data1.append(c)


	# In[39]:


	df = spark.createDataFrame(data1, ["label", "features"])
	df.cache()


	# In[40]:


	featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=24).fit(df)


	# In[41]:


	(trainData, testData) = df.randomSplit([0.8, 0.2], seed=1234L) #seed=1234L表示每次生成的训练集和测试集总行数不变
	trainData.count()


	# In[42]:


	testData.count()


	# In[43]:


	# Same tree settings as before, now trained on the richer features.
	dt = DecisionTreeClassifier(maxDepth=5, labelCol="label", featuresCol="indexedFeatures", impurity="entropy")


	# In[44]:


	pipeline = Pipeline(stages = [featureIndexer, dt])
	model = pipeline.fit(trainData) # train the model


	# In[45]:


	# BUG FIX: the original accuracy loop reused `dtPredict`, the
	# predictions of the FIRST model on the FIRST test split, so it never
	# measured this model at all. Score this model on its own test set.
	predictResultAll = model.transform(testData)
	dtPredict = list(predictResultAll.select(['prediction']).toPandas().prediction)

	dtTotalCorrect = 0
	testRaw = testData.count()
	df_test = testData.select(['label']).toPandas()
	testLabel = list(df_test.label)

	# Count predictions that match the true label.
	for i in range(testRaw):
		if dtPredict[i] == testLabel[i]:
			dtTotalCorrect += 1

	# Accuracy of the category-augmented model.
	1.0 * dtTotalCorrect / testRaw


	# In[46]:


	# Cross-validation and parameter-grid utilities.
	from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
	# Binary-classification evaluator.
	from pyspark.ml.evaluation import BinaryClassificationEvaluator
	evaluator = BinaryClassificationEvaluator() # create the evaluator
	# Parameter grid: try tree depths 4, 5 and 6.
	paramGrid = ParamGridBuilder().addGrid(dt.maxDepth, [4,5,6]).build()
	# Configure cross-validation over the whole pipeline.
	crossval = CrossValidator(estimator=pipeline,
					  estimatorParamMaps=paramGrid,
					  evaluator=evaluator,
					  numFolds=2)  # use 3+ folds in practice

	# Fit with cross-validation and choose the best set of parameters.
	cvModel = crossval.fit(trainData)
	# Score the cross-validated model on the held-out test set.
	predictResultAll = cvModel.transform(testData)
	df_predict = predictResultAll.select(['prediction']).toPandas()
	dtPredict = list(df_predict.prediction)

	dtTotalCorrect = 0
	testRaw = testData.count();
	df_test = testData.select(['label']).toPandas()
	testLabel = list(df_test.label)

	# Count matches between predictions and true labels.
	for i in range(testRaw):
		if dtPredict[i] == testLabel[i]:
			dtTotalCorrect += 1

	# Accuracy of the cross-validated model (bare expression: notebook
	# artifact, value is discarded when run as a script).
	1.0 * dtTotalCorrect / testRaw


	# In[47]:


	# Inspect the winning tree model (stage 1 of the best pipeline:
	# [featureIndexer, dt]). The bare expressions of the notebook export
	# were no-ops in a script, so the values are printed explicitly.
	bestModel = cvModel.bestModel.stages[1]
	print(bestModel)


	# In[48]:


	print(bestModel.numFeatures)


	# In[49]:


	print(bestModel.depth)


	# In[50]:


	print(bestModel.numNodes)
