import findspark

# Point pyspark at the local Spark installation before the pyspark imports
# below; findspark.init() adds Spark's python/ directory to sys.path.
# NOTE(review): hard-coded, machine-specific path — consider falling back to
# the SPARK_HOME environment variable so the script is portable.
findspark.init("D:/software/spark/spark-2.3.2-bin-hadoop2.7")

from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import Row


# Build (or reuse, if one already exists) the application's SparkSession.
builder = SparkSession.builder
builder = builder.appName("PythonWordCount")
spark = builder.getOrCreate()


# --- RDD demo: read a text file line by line and print its contents. ---

# The SparkContext backing the session; the RDD APIs hang off of it.
sc = spark.sparkContext

# NOTE(review): hard-coded, machine-specific Windows path — parameterize it
# (sys.argv or an environment variable) before reusing this script elsewhere.
PEOPLE_PATH = "D:\\software\\spark\\spark-2.3.2-bin-hadoop2.7\\examples\\src\\main\\resources\\People.txt"

# textFile() yields an RDD with one element per line of the file.
lines = sc.textFile(PEOPLE_PATH)

# collect() pulls the entire dataset back to the driver — fine for this tiny
# demo file, but dangerous on real data sets.
print(lines.collect())

# Example kept for reference: turn the CSV-ish lines into Rows, register a
# temp view, and query it with Spark SQL (assumes "name,age" lines — confirm
# against the input file before enabling).
# parts = lines.map(lambda l: l.split(","))
# people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# schemaPeople = spark.createDataFrame(people)
# schemaPeople.createOrReplaceTempView("people")
# teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")

spark.stop()
