from pyspark.sql import SparkSession
# Standard-library imports (built into Python).
from operator import add
import re
"""
@version:
@software: PyCharm
@file: 
@time: 
"""

# Create (or reuse) a local SparkSession for this demo script.
spark = SparkSession.builder.master(master="local").appName("test").getOrCreate()

sc = spark.sparkContext
# Demo: finding distinct values in an RDD.
wordList = ['cat', 'elephant', 'rat', 'rat', 'cat']

# Distribute the word list over 4 partitions.
wordsRDD = sc.parallelize(wordList, 4)

# Get the distinct (key, value) pairs in wordsRDD -> distinct()
uniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct()
# Fixed typo in the printed label: "disticnt" -> "distinct".
print("distinctRDD去除重复的（key,value）:{}".format(uniqueWords.collect()))

##############################################################

# Count how many distinct words wordsRDD contains -> count()
countUniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct().count()
print("获取wordsRDD中的不重复的个数:{}".format(countUniqueWords))

##############################################################

# Mini exercise: compute the average number of occurrences per word.

wordsCount2 = [('cat', 2), ('elephant', 1), ('rat', 2)]
wordCountRDD2 = sc.parallelize(wordsCount2)
# NOTE: Python 3 dropped tuple unpacking in lambdas (lambda (x, y): y),
# which is why the count is extracted by index instead.
totalCount1 = wordCountRDD2.map(lambda pair: pair[1]).reduce(add)
# Total occurrences (5) divided by the number of distinct pairs (3).
average1 = totalCount1 / wordCountRDD2.distinct().count()

print("小作业: 计算每个字平均出现几次:{}".format(average1))


##############################################################

# Demo: passing an RDD to a function as an argument.
# A function that operates on an RDD.
def wordCount(wordListRDD):
    """Build a pair RDD of word counts from an RDD of words.

    Args:
        wordListRDD (RDD of str): An RDD consisting of words.

    Returns:
        RDD of (str, int): An RDD consisting of (word, count) tuples.
    """
    # Pair every word with 1, then sum the 1s per word.
    paired = wordListRDD.map(lambda w: (w, 1))
    return paired.reduceByKey(lambda x, y: x + y)


# Feed an RDD into wordCount() and print the aggregated (word, count) pairs.
wordsRDD3 = sc.parallelize(['cat', 'elephant', 'rat', 'rat', 'cat'], 4)
print("将rdd作为参数进行分词计数:{}".format(wordCount(wordsRDD3).collect()))

##############################################################

# Demo: using a plain Python callable (operator.add) inside PySpark operations.
wordsRDD4 = sc.parallelize(['cat', 'elephant', 'rat', 'rat', 'cat'], 4)

# (word, 1) pairs -> per-word counts -> grand total of all occurrences.
wordCounts4 = wordsRDD4.map(lambda w: (w, 1)).reduceByKey(add)
totalCount4 = wordCounts4.map(lambda kv: kv[1]).reduce(add)
print("调用python中add函数执行：{}".format(totalCount4))


##############################################################

