# mysql入门
# from pymysql import Connection
# coon=Connection(
#     host="localhost",
#     port=3306,
#     user="root",
#     password="123456",
#     autocommit=True
# )
# # print(coon.get_server_info())
# cursor=coon.cursor()  #游标
# coon.select_db("test")  #use test
# # cursor.execute("create table test_pymysql(id int);")
# # cursor.execute("select * from student")
# # result=cursor.fetchall()
# # for r in result:
# #     print(r)
#
# cursor.execute("insert into student values(20,'xxl',15,'男');")
# # coon.commit()
# coon.close()
# ==================================================================
# # MySQL综合案例
# from file_define import *
# from data_define import *
# from pymysql import Connection
# text_file_reader=TextFileReader("C:\\python的学习\\python_learning_-hm\\2011年1月销售数据.txt")
# json_file_reader=JsonFileReader("C:\\python的学习\\python_learning_-hm\\2011年2月销售数据JSON.txt")
# jan_data=text_file_reader.read_data()
# feb_data=json_file_reader.read_data()
#
# all_data=jan_data+feb_data
# conn=Connection(
#     host="localhost",
#     port=3306,
#     user="root",
#     password="123456",
#     autocommit=True
# )
# conn.select_db("py_sql")
# cursor=conn.cursor()
# for r in all_data:
#     sql=(f"insert into orders(order_date,order_id,money,province)"
#          f"values('{r.date}','{r.order_id}',{r.money},'{r.province}')")
#     # print(sql)
#     cursor.execute(sql)
# cursor.close()
#
# from pymysql import Connection
# import json
# conn=Connection(
#     host="localhost",
#     port=3306,
#     user="root",
#     password="root",
#     autocommit=True
# )
# conn.select_db("py_sql")
# cursor=conn.cursor()
# cursor.execute("select * from orders")
# result=cursor.fetchall()
# result=str(result)
# f=open("C:/test.txt","w",encoding="UTF-8")
# f.write(json.dumps(result))
# f.close()
# ==============================================================
# spark
# spark
from pyspark import SparkConf, SparkContext

# Run Spark locally using all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("test_spark")
sc = SparkContext(conf=conf)

try:
    # parallelize() distributes a driver-side Python iterable into an RDD.
    rdd1 = sc.parallelize([1, 2, 3, 4, 5])   # list
    rdd2 = sc.parallelize((1, 2, 3, 4, 5))   # tuple
    rdd3 = sc.parallelize({1, 2, 3, 4, 5})   # set (element order not guaranteed)
    # A dict contributes only its KEYS to the RDD; values are dropped.
    rdd4 = sc.parallelize({"key1": "values1", "key2": "values2"})
    # A string is split into one element per character: 'x', 'x', 'l'.
    rdd6 = sc.parallelize('xxl')
    # textFile() yields one RDD element per line of the file.
    # NOTE(review): hard-coded path; raises at collect() time if the file is absent.
    rdd5 = sc.textFile("C:\\2024test.txt")

    # collect() pulls the entire RDD back to the driver — fine for a demo,
    # dangerous for large datasets.
    print(rdd1.collect())
    print(rdd2.collect())
    print(rdd3.collect())
    print(rdd4.collect())
    print(rdd5.collect())
    print(rdd6.collect())
finally:
    # Always release the SparkContext, even when a collect() raises
    # (e.g. missing input file) — the original leaked it on error.
    sc.stop()
