# -*- coding: utf-8 -*-
# __author__ = 'Yuanjiang Huang'
# yuanjiang.huang@socialcredits.cn

import sys, os
import json
import traceback
from scpy.logger import get_logger
from pyspark import SparkContext
from pyspark import SparkConf

logger = get_logger(__file__)
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
	CURRENT_PATH = CURRENT_PATH + '/'
from pyspark.mllib.linalg import Vectors
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.param import Param, Params
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import Row

master_url = 'local'
# master_url = 'spark://192.168.31.70:32789'
# Module-level Spark setup: one local SparkContext and SQLContext shared by
# every demo function below. NOTE: constructed at import time (side effect).
conf = SparkConf().setMaster(master_url).setAppName('Demo')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)

# http://spark.apache.org/docs/latest/ml-guide.html
# Submitted via spark04.sh

def test():
	"""Demo of the spark.ml Estimator / Transformer parameter API.

	Fits two logistic-regression models on a tiny hand-built DataFrame —
	the second with parameters overridden through a paramMap (a plain
	dict) — then prints the second model's predictions on a small
	test set.
	"""
	# Training data: (label, features) rows.
	training_df = sqlContext.createDataFrame([
		(1.0, Vectors.dense([0.0, 1.1, 0.1])),
		(0.0, Vectors.dense([2.0, 1.0, -1.0])),
		(0.0, Vectors.dense([2.0, 1.3, 1.0])),
		(1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"])

	# LogisticRegression is an Estimator: fit() produces a Model.
	estimator = LogisticRegression(maxIter=10, regParam=0.01)
	print("LogisticRegression parameters:\n" + estimator.explainParams() + "\n")

	# First model: trained with the parameters currently set on the estimator.
	model1 = estimator.fit(training_df)

	# A fitted Model remembers the (name: value) parameter pairs used during
	# fit(); the names are unique IDs for this LogisticRegression instance.
	print("Model 1 was fit using parameters: ")
	print(model1.extractParamMap())

	# Parameters may alternatively be supplied as a dict keyed by Param objects.
	overrides = {estimator.maxIter: 20}
	overrides[estimator.maxIter] = 30  # overwrite the maxIter set just above
	overrides.update({estimator.regParam: 0.1, estimator.threshold: 0.55})

	# paramMaps are ordinary dicts, so they combine with copy() + update().
	output_rename = {estimator.probabilityCol: "myProbability"}
	combined = overrides.copy()
	combined.update(output_rename)

	# The paramMap passed to fit() overrides anything set earlier via set*().
	model2 = estimator.fit(training_df, combined)
	print("Model 2 was fit using parameters: ")
	print(model2.extractParamMap())

	# Unseen (label, features) rows for scoring.
	test_df = sqlContext.createDataFrame([
		(1.0, Vectors.dense([-1.0, 1.5, 1.3])),
		(0.0, Vectors.dense([3.0, 2.0, -0.1])),
		(1.0, Vectors.dense([0.0, 2.2, -1.5]))], ["label", "features"])

	# transform() reads only the 'features' column; the probability column is
	# emitted as "myProbability" because probabilityCol was renamed above.
	scored = model2.transform(test_df)
	selected_cols = scored.select("features", "label", "myProbability", "prediction")
	for row in selected_cols.collect():
		print(row)

def test2():



	# Prepare training documents from a list of (id, text, label) tuples.
	LabeledDocument = Row("id", "text", "label")
	training = sqlContext.createDataFrame([
	    (0L, "a b c d e spark", 1.0),
	    (1L, "b d", 0.0),
	    (2L, "spark f g h", 1.0),
	    (3L, "hadoop mapreduce", 0.0)], ["id", "text", "label"])

	# Configure an ML pipeline, which consists of tree stages: tokenizer, hashingTF, and lr.
	tokenizer = Tokenizer(inputCol="text", outputCol="words")
	hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
	lr = LogisticRegression(maxIter=10, regParam=0.01)
	pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])

	# Fit the pipeline to training documents.
	model = pipeline.fit(training)

	# Prepare test documents, which are unlabeled (id, text) tuples.
	test = sqlContext.createDataFrame([
	    (4L, "spark i j k"),
	    (5L, "l m n"),
	    (6L, "mapreduce spark"),
	    (7L, "apache hadoop")], ["id", "text"])

	# Make predictions on test documents and print columns of interest.
	prediction = model.transform(test)
	selected = prediction.select("id", "text", "prediction")
	for row in selected.collect():
	    print(row)


def test3():
	"""Demo of spark.ml PCA: project 5-d vectors onto 3 principal components."""
	from pyspark.ml.feature import PCA
	from pyspark.mllib.linalg import Vectors

	# One-column DataFrame of 5-dimensional vectors (one sparse, two dense).
	rows = [
		(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
		(Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
		(Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),),
	]
	frame = sqlContext.createDataFrame(rows, ["features"])

	# Fit a PCA model keeping the top 3 principal components, then show
	# the projected vectors without truncating the output.
	reducer = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
	projected = reducer.fit(frame).transform(frame).select("pcaFeatures")
	projected.show(truncate=False)
if __name__ == "__main__":
	# Script entry point: only the PCA demo runs by default.
	test3()

