# import findspark
#
# findspark.init()
# from pyspark import SparkContext
#
# # Create a SparkContext
# sc = SparkContext(appName="PySparkDemo")
#
# # Perform operations using the SparkContext
# data = [1, 2, 3, 4, 5]
# rdd = sc.parallelize(data)
# squared_rdd = rdd.map(lambda x: x**2)
# result = squared_rdd.collect()
#
# # Print the result
# print(result)
#
# # Stop the SparkContext
# sc.stop()


# import findspark
# findspark.init()

from pyspark import SparkConf, SparkContext
from pyspark import cloudpickle
from pyspark.serializers import read_int, PickleSerializer
from pyspark import accumulators
# 创建SparkConf对象
# conf = SparkConf().setAppName('MyApp').setMaster('local')
#
# sc=SparkContext(conf=conf).getOrCreate()
# sc.version
#
# print(sc.version)
#
# # 导包
# from pyspark import SparkConf, SparkContext
#
# #  设置环境变量
# import os
# #  设置pyspark 解析器
# # os.environ['PYSPARK_PYTHION'] = 'D:\\devsoft\\Python3114'
# # 创建SparkConf类对象
# #   运行模式setMaster()可以设置分布式集群
# #	setAppName()  设置conf名称
# conf = SparkConf().setMaster("local[*]").setAppName("test_spark_app")
#
# # 基于SparkConf类对象创建SparkContext类对象做为入口   执行环境入口对象 sparkcontext
# sc = SparkContext(conf=conf)
# # 打印PySpark的运行版本
# print(sc.version)
# # rdd对象  通过sparkcontext的parallelize方法 把python数据容器（list、tuple、set、dict、str）转换为RDD对象
# # rdd = sc.parallelize(数据容器对象)
# #  读取文件  转换成rdd对象
# # rdd = sc.textFile(文件路径)
# #  输出RDD对象
# #  print(rdd)  不会打印输出，  print打印只能打印python对象   rdd.collect() 把rdd 转换成python对象
# # print(rdd.collect())
#
# # 停止SparkContext对象的运行(停止Pyspark程序)
# sc.stop()



from pyspark import SparkConf, SparkContext

# Demo: start a local Spark context, print its version, and shut it down.
# local[*] = run locally using all available CPU cores.
conf = SparkConf().setMaster("local[*]").setAppName("test_spark_app")
sc = SparkContext(conf=conf)
try:
    # Print the PySpark runtime version.
    print(sc.version)
finally:
    # Always release the context (and its backing JVM) even if the
    # body above raises — otherwise the SparkContext leaks.
    sc.stop()




from pyspark import SparkConf, SparkContext

# Second demo run: build the configuration step by step instead of
# chaining, then create the context, show the version, and stop it.
app_conf = SparkConf()
app_conf.setMaster("local[*]")
app_conf.setAppName("test_spark_app")
spark_ctx = SparkContext(conf=app_conf)
print(spark_ctx.version)
spark_ctx.stop()


