import pymysql
from pyspark.context import SparkContext
import time

# Driver-side Spark entry point (default master/config).
sc = SparkContext()

# Read the students CSV file; each RDD element is one line of text
# ("id,name,age,sex,clazz").
# NOTE(review): relative path is resolved against the driver's working
# directory — confirm where this script is launched from.
lines_rdd = sc.textFile("../../data/students.txt")

def to_mysql(lines):
    """Insert one partition of student records into MySQL.

    Each element of *lines* is a CSV string "id,name,age,sex,clazz".
    Called via RDD.foreachPartition, so exactly one database connection
    is opened per partition (not per row).

    Fixes over the previous version:
      * one cursor for the whole partition instead of a new cursor per row
        (the old per-row cursors were never closed);
      * one executemany + one commit per partition instead of a commit per
        row — one transaction per partition, far fewer round trips;
      * try/except/finally so the connection is always closed and a failed
        partition is rolled back instead of leaking the connection;
      * no shadowing of the builtin ``id``.
    """
    # One connection per partition keeps connect overhead off the per-row path.
    con = pymysql.connect(host="master", port=3306, user="root",
                          password="123456", database="myemployees")
    try:
        # Parse the whole partition first: (id, name, age, sex, clazz).
        rows = []
        for line in lines:
            parts = line.split(",")
            rows.append((parts[0], parts[1], int(parts[2]), parts[3], parts[4]))

        if rows:
            # Parameterized executemany: values are escaped by the driver
            # (no SQL injection) and the inserts are batched in one call.
            with con.cursor() as cursor:
                cursor.executemany(
                    "insert into students(id,name,age,sex,clazz) values(%s,%s,%s,%s,%s)",
                    rows,
                )
        # Single commit: the partition succeeds or fails as one transaction.
        con.commit()
    except Exception:
        # Undo any partial work before propagating to Spark's task retry.
        con.rollback()
        raise
    finally:
        # Always release the connection, even when a row fails to parse.
        con.close()


# Write the RDD to MySQL: to_mysql runs once per partition on the
# executors, so each partition opens exactly one database connection.
lines_rdd.foreachPartition(to_mysql)
