# -*- coding: utf-8 -*-
# __author__ = 'Yuanjiang Huang'
# yuanjiang.huang@socialcredits.cn

import sys, os
from pyspark import SparkContext
from pyspark import SparkConf


# Directory containing this script, used to build file:// URLs for the
# sample data files below. When the script is launched from its own
# directory dirname() is empty, hence the conditional trailing slash.
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH = CURRENT_PATH + '/'

# transformation: map, flatMap, filter, distinct, sample, cartesian, union, intersection, subtract
def test1(sc):
	nums = sc.parallelize([1, 2, 3, 4])
	squared = nums.map(lambda x: x * x).collect()
	for num in squared:
		print "%i " % (num)

def test_map_flatmap(sc):
	lines = sc.parallelize(["hello world", "hi"])
	words1 = lines.flatMap(lambda line:line.split(" "))
	words2 = lines.map(lambda line:line.split(" "))
	print words1.collect() #['hello', 'world', 'hi']
	print words2.collect() #[['hello', 'world'], ['hi']]

def test_cartesian(sc):
	# Cartesian product between two RDDs, returns all possible pairs
	# Cartesian product is very expensive for large RDDs
	rdd1 = sc.parallelize(["a","b"])
	rdd2 = sc.parallelize(["c","d"])
	rdd3 = rdd1.cartesian(rdd2)
	print rdd3.collect() #[('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd')]

# ------------------------------------------------------------------------------
# action:
# 1. reduce: The most common action on basic RDDs you will likely use is reduce() , which takes
#	a function that operates on two elements of the type in your RDD and returns a new
#	element of the same type.
# reduce passes pairs of RDD elements to the input function; each newly
# produced value is combined with the next RDD element in turn, until only
# a single value remains.

def test_reduce(sc):
	# reduce() repeatedly combines pairs of elements with the given
	# function until a single value remains; here it sums the RDD.
	nums = sc.parallelize([1, 2, 3, 3])
	sum = nums.reduce(lambda x, y: x+y)
	print sum #9
	nums2 = sc.parallelize([(1,1), (1,3), (2,3), (3,3)])

	# reduceByKey
	# per-key reduce over an RDD of (k, v) pairs
	sum2 = nums2.reduceByKey(lambda x,y:x+y).collect()
	print sum2 #[(1,4),(2,3),(3,3)]

	# count() suffers from the restriction that all of your data must fit on a single machine, (unit test)
	# so be careful
	print nums.count() # 4
	print nums.countByValue() #{1: 1, 2: 1, 3: 2}
	print nums2.countByKey() #{1: 2, 2: 1, 3: 1}
	print nums.take(2) #[1, 2]
	print nums.top(2) #[3, 3]
	print nums2.take(2) #[(1, 1), (1, 3)] -- take() keeps partition order
	print nums2.top(2) #[(3, 3), (2, 3)] -- top() sorts descending

# 2. fold: Similar to reduce() is fold() , which also takes a function with the same signature as
# needed for reduce() , but in addition takes a “zero value” to be used for the initial call
# on each partition. The zero value you provide should be the identity element for your
# operation; that is, applying it multiple times with your function should not change
# the value (e.g., 0 for +, 1 for *, or an empty list for concatenation).

def test_fold(sc):
	nums = sc.parallelize([1, 2, 3, 3])
	sum = nums.fold(1,lambda x,y:x*y)
	print sum #18

# 3 aggregate():
# # Both fold() and reduce() require that the return type of our result be the same type as that of the elements in the RDD we are operating over.
# The aggregate() function frees us from the constraint of having the return be the
# same type as the RDD we are working on. With aggregate() , like fold() , we supply
# an initial zero value of the type we want to return. We then supply a function to combine the elements from our
# RDD with the accumulator. Finally, we need to supply a
# second function to merge two accumulators, given that each node accumulates its
# own results locally.

# Aggregate the elements of each partition, and then the results for all the partitions, using given combine functions and a neutral "zero value".
#  This function can return a different result type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U and one operation
# for merging two U's, as in scala.
# TraversableOnce. Both of these functions are allowed to modify and return their first argument instead of creating a new U to avoid memory allocation.

def func1(x, y):
	"""seqOp for aggregate(): fold one RDD element y into accumulator x.

	x is a (running_sum, count, untouched) triple; y is a single element.
	Returns a new triple with the sum and the count updated; x[2] is
	carried through unchanged.
	"""
	# Debug trace. %-formatted single-argument print works identically as
	# a Python 2 print statement and a Python 3 print() call, unlike the
	# original `print 'func1',x,' ',y` statement form.
	print('func1 %s   %s' % (x, y))
	return (x[0] + y, x[1] + 1, x[2])

def func2(x, y):
	"""combOp for aggregate(): merge two partial accumulator triples.

	Both x and y are (sum, count, extra) triples produced by func1 on
	different partitions; the merge is element-wise addition.
	"""
	# %-formatted print is valid on both Python 2 and 3 (the original
	# used the Python-2-only `print a, b` statement form).
	print('func2 %s %s' % (x, y))
	return (x[0] + y[0], x[1] + y[1], x[2] + y[2])

def test_aggregate(sc):
	nums = sc.parallelize([1, 2, 3, 3, 5])
	# (0, 0, 0) is the zero value and also fixes the shape of the result:
	# slot 0 accumulates the sum, slot 1 counts elements, slot 2 is unused.
	# The first lambda (seqOp, func1) folds each element into the
	# accumulator inside a partition, so the result type can differ from
	# the element type of nums. The second lambda (combOp, func2) merges
	# the per-partition accumulators produced by the first step.
	sumCount = nums.aggregate((0, 0, 0),
	                          (lambda acc, value: func1(acc,value)),
	                          (lambda acc1, acc2: func2(acc1,acc2)))
	#average = sum / count
	print sumCount #(14, 5, 0)
	print sumCount[0] / float(sumCount[1]) #2.8

def test_foreach(sc):
	data = 'file:///'+CURRENT_PATH+'/data/README.txt'
	nums = sc.parallelize([1, 2, 3, 3, 5])
	# perform action on each element of nums, but return nothing.
	num2= nums.foreach(lambda x:x+1)
	# num2 is a None
	print num2 #None


def pair_RDD(sc):
	nums = sc.parallelize([1, 2, 3, 5, 3])
	#通过map创建(k,v)
	nums_pair = nums.map(lambda x:(x,x))
	print nums_pair.collect() #[(1, 1), (2, 2), (3, 3), (3, 3), (5, 5)]
	print nums_pair.keys().collect()
	print nums_pair.values().collect()
	print nums_pair.sortByKey().collect()
	print nums_pair.groupByKey().collect()
	# reduceByKey is a transformation, rather than a action.
	print nums_pair.reduceByKey(lambda x,y:x+y).collect() #[(1, 1), (2, 2), (3, 6), (5, 5)]

def test_word_count(sc):
	data = 'file:///'+CURRENT_PATH+'/data/README.txt'
	text_file = sc.textFile(data)
	flatMap_res = text_file.flatMap(lambda line: line.split())
	print 'flatMap_res'+'*'*100
	print flatMap_res.collect()
	map_res = flatMap_res.map(lambda word: (word, 1))
	print 'map_res'+'*'*100
	print map_res.collect()
	print 'reduceByKey_res'+'*'*100
	reduceByKey_res = map_res.reduceByKey(lambda a, b: a+b)
	print reduceByKey_res.collect()

	print 'overall'+'*'*100
	res = text_file.flatMap(lambda line: line.split()).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
	print res.collect()

def test_DataFrames(sc):
	"""Basic DataFrame operations over a JSON file via SQLContext.

	See spark.apache.org/docs/latest/sql-programming-guide.html#dataframes:
	Spark SQL is a module for structured data processing. DataFrames can be
	built from structured files, Hive tables, external databases, or
	existing RDDs, and also serve as a distributed SQL query engine.
	Full API: spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
	"""
	from pyspark.sql import SQLContext
	sql_ctx = SQLContext(sc)
	frame = sql_ctx.read.json('file:///'+CURRENT_PATH+'/data/people.json')
	frame.show()
	frame.printSchema()
	frame.select("name").show()
	frame.select(frame['name'], frame['age'] + 1).show()
	frame.filter(frame['age'] > 21).show()
	frame.groupBy("age").count().show()

def test_DataFrames2(sc):
	from pyspark.sql import SQLContext,Row
	sqlContext = SQLContext(sc)
	# Load a text file and convert each line to a Row.
	lines = sc.textFile('file:///'+CURRENT_PATH+'/data/people.txt')
	parts = lines.map(lambda l: l.split(","))
	print parts.collect() # [[u'Michael', u' 29'], [u'Andy', u' 30'], [u'Justin', u' 19']]
	people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))

	# Infer the schema, and register the DataFrame as a table.
	schemaPeople = sqlContext.createDataFrame(people)
	schemaPeople.registerTempTable("people")

	# SQL can be run over DataFrames that have been registered as a table.
	teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")

	# The results of SQL queries are RDDs and support all the normal RDD operations.
	teenNames = teenagers.map(lambda p: "Name: " + p.name)
	for teenName in teenNames.collect():
		print(teenName)

def test_DataFrames3(sc):
	"""Attach an explicitly-built schema to an RDD of tuples, then query via SQL."""
	from pyspark.sql import SQLContext
	from pyspark.sql.types import StructField,StructType,StringType

	# sc is the already-running SparkContext.
	sql_ctx = SQLContext(sc)

	# Parse each line of the text file into a (name, age) tuple.
	raw_lines = sc.textFile('file:///'+CURRENT_PATH+'/data/people.txt')
	records = raw_lines.map(lambda l: l.split(","))
	people = records.map(lambda rec: (rec[0], rec[1].strip()))

	# The schema is described by a space-separated field list;
	# StructField(name, dataType, nullable) builds one column each.
	schemaString = "name age"
	schema = StructType(
		[StructField(col, StringType(), True) for col in schemaString.split()])

	# Apply the schema to the tuple RDD and register the result as a table.
	people_df = sql_ctx.createDataFrame(people, schema)
	people_df.registerTempTable("people")

	# SQL can be run over any registered DataFrame.
	results = sql_ctx.sql("SELECT name FROM people")

	# Query results behave like RDDs and support the usual operations.
	for label in results.map(lambda row: "Name: " + row.name).collect():
		print(label)


def test_DataFrames_json(sc):
	"""Let Spark SQL infer a DataFrame schema directly from JSON input."""
	from pyspark.sql import SQLContext
	sql_ctx = SQLContext(sc)

	# read.json accepts either a single text file or a directory of them.
	people = sql_ctx.read.json('file:///'+CURRENT_PATH+'/data/people.json')

	# Show the schema Spark inferred from the JSON documents.
	people.printSchema()
	# root
	#  |-- age: integer (nullable = true)
	#  |-- name: string (nullable = true)

	# Register the DataFrame so it can be queried with SQL.
	people.registerTempTable("people")

	# SQL statements run through the SQLContext's sql() method.
	teenagers = sql_ctx.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")

	# Alternatively, build a DataFrame from an RDD[String] holding one
	# JSON object per string.
	anotherPeopleRDD = sc.parallelize([
	  '{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}'])
	anotherPeople = sql_ctx.jsonRDD(anotherPeopleRDD)

if __name__ == "__main__":
	# Run locally; swap in a spark:// master URL to target a cluster.
	master_url = 'local'
	# master_url = 'spark://192.168.31.70:32789'
	conf = SparkConf().setMaster(master_url).setAppName('Demo')
	sc = SparkContext(conf=conf)
	# Removed the pointless `if True:` wrapper. Uncomment one of the
	# calls below to run a different demo.
	# test_fold(sc)
	# test_foreach(sc)
	# pair_RDD(sc)
	# test_word_count(sc)
	# test_DataFrames3(sc)
	test_DataFrames_json(sc)