# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.


def print_hi(name):
    """Print a short greeting addressed to *name*."""
    # Set a breakpoint here (Ctrl+F8 in PyCharm) to inspect the value.
    greeting = f'Hi, {name}'
    print(greeting)


# Script entry point: runs the greeting only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/


from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession

#  设置环境变量
import os

#  Select the Python interpreter used by PySpark workers
#  (NOTE: fixed typo — the env var is PYSPARK_PYTHON, not PYSPARK_PYTHION)
# os.environ['PYSPARK_PYTHON'] = 'D:\\devsoft\\Python3114'
# Create a SparkConf object.
#   setMaster() selects the run mode ("local[*]" here; a cluster URL works too).
#   setAppName() names the application as shown in the Spark UI.
conf = SparkConf().setMaster("local[*]").setAppName("test_spark_app")

# Build the SparkContext from the conf — the entry point for the RDD API.
sc = SparkContext(conf=conf)
# Print the PySpark runtime version.
print(sc.version)

# RDD notes:
#   rdd = sc.parallelize(container)  # converts a Python container
#       (list / tuple / set / dict / str) into an RDD
#   rdd = sc.textFile(path)          # reads a file into an RDD
#   print(rdd) only shows the RDD object; use rdd.collect() to pull the
#   data back into a Python list first, e.g. print(rdd.collect()).

# Stop the SparkContext (shuts down this PySpark program's RDD session).
# FIX: the original comment announced the stop but never made the call,
# leaking the context while a SparkSession is created further below.
sc.stop()






from pyspark.sql import SparkSession

# Build (or reuse) a SparkSession with Hive support enabled,
# running locally on all available cores.
spark = (
    SparkSession.builder
    .master("local[*]")
    .appName("My Spark Application")
    .enableHiveSupport()
    .getOrCreate()
)

# Local CSV input (Windows path; on a Unix-like system this would be
# "file:///path/to/your/local/file.csv").
file_path = "file:///D:\\data\\UserBehavior.csv"
df = spark.read.format("csv").load(file_path)

# Dump the DataFrame rows to stdout, then report the total row count.
df.show()
print(df.count())

# Release all resources held by the session.
spark.stop()













# ss = SparkSession.builder.appName("test").master("local[*]").getOrCreate();
#
# from pyspark.sql import *
# df = ss.read.csv("D:\\000\\ordercomment.csv", inferSchema = True, header = True)
#
# path = "D:/000/ordercomment.csv";
#
# df = ss.read.format("csv").load(path);
#
# df.show(10)
#
# print(df.count())
