import os

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *

# Point Spark at the local Hadoop installation (needed for winutils on Windows).
os.environ['HADOOP_HOME'] = 'D:\\hadoop-2.9.2'

# Driver: build (or reuse) a single-threaded local Spark session.
builder = SparkSession.builder
builder = builder.master('local')
builder = builder.appName('HelloSpark')
spark = builder.getOrCreate()

# person = spark.sparkContext.parallelize([(0, "Lucy", 0), (1, "Lily", 0), (2, "Tim", 2), (3, "Danial", 0)]) \
#     .toDF(["id", "name", "cityId"])
#
# cities = spark.sparkContext.parallelize([(0, "Beijing"), (1, "Shanghai"), (2, "Guangzhou")]).toDF(["id", "name"])

# Shared JDBC connection settings for the MySQL ``hr`` database.
# NOTE(security): credentials are hardcoded in source; move them to
# environment variables or a config file before sharing/deploying.
_JDBC_OPTIONS = {
    "url": "jdbc:mysql://master:3306/hr",
    "user": "root",
    "password": "mysqlroot",
}


def read_hr_table(table):
    """Load one table from the ``hr`` MySQL database as a DataFrame.

    Centralizes the JDBC connection options that were previously
    duplicated for every table read.

    :param table: name of the table to read, e.g. ``"employees"``.
    :return: a ``pyspark.sql.DataFrame`` backed by the JDBC source.
    """
    return spark.read.format('jdbc') \
        .options(dbtable=table, **_JDBC_OPTIONS) \
        .load()


employees_df = read_hr_table("employees")
jobs_df = read_hr_table("jobs")

# Join each employee to their job row on job_id, then print name + title.
joined_df = employees_df.join(jobs_df, employees_df['job_id'] == jobs_df['job_id'])
joined_df.select(
    employees_df['first_name'],
    employees_df['last_name'],
    jobs_df['job_title'],
).show()
