# -*- coding: utf-8 -*-
"""
etl_job.py
~~~~~~~~~~

This Python module contains an example Apache Spark ETL job definition
that implements best practices for production ETL jobs. It can be
submitted to a Spark cluster (or locally) using the 'spark-submit'
command found in the '/bin' directory of all Spark distributions
(necessary for running any Spark job, locally or otherwise). For
example, this example script can be executed as follows,

    $SPARK_HOME/bin/spark-submit \
    --master spark://localhost:7077 \
    --py-files packages.zip \
    --files path/to/library_job_template.json \
    jobs/etl_job.py

where packages.zip contains Python modules required by the ETL job (in
this example it contains a class to provide access to Spark's logger),
which need to be made available to each executor process on every node
in the cluster; library_job_template.json is a text file sent to the
cluster, containing a JSON object with all of the configuration
parameters required by the ETL job; and, etl_job.py contains the Spark
application to be executed by a driver process on the Spark master node.

"""
import socket
import sys

# Report the interpreter version up front to help diagnose driver/executor
# Python mismatches from the cluster logs.
print(f"当前python版本：{sys.version}")


def get_host_ip():
    """Return the IP address of the local machine.

    Opens a UDP socket "connected" to a public address (no packets are
    actually sent for a UDP connect) and reads back the local endpoint
    the OS selected, which is the outbound-facing IP of this host.

    :return: local IP address as a string, e.g. ``'192.168.1.10'``.
    :raises OSError: if no route to the probe address is available.
    """
    # NOTE: the original version created the socket inside a try block with a
    # bare `finally: s.close()`; if socket creation itself failed, `s` was
    # unbound and the finally clause raised NameError, masking the real error.
    # A context manager closes the socket safely in every case.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]


# Log the host address so cluster logs identify which node ran this driver.
print(f"当前服务器地址：{get_host_ip()}")

from bdp.pyjob.JobExecutor import JobExecutor
from bdp.pyspark.spark import start_spark

def main():
    """Main ETL script definition.

    Expects exactly two command line arguments:
      1. path to the job template JSON file (also shipped to the cluster
         via ``files=``),
      2. path of the execution file handed to :class:`JobExecutor`.

    :return: None
    """
    # sys.argv[0] is the script name, so three entries means two arguments.
    if len(sys.argv) != 3:
        # Original message showed only one argument; two are required.
        sys.exit('Usage: etl_job.py <library_job_template.json> <exec_filename>'
                 ' -- exactly two command line arguments are required')

    print('开始进入pyspark执行程序')

    job_json_filename = sys.argv[1]
    exec_filename = sys.argv[2]

    # Start Spark application and get Spark session, logger and config.
    spark, log, library_job_template, environment = start_spark(
        app_name='my_etl_job',
        master='yarn',
        files=[job_json_filename]
    )

    # Log that the main ETL job is starting.
    log.warn('etl_job is up-and-running')

    # Run the Spark pipeline.
    executor = JobExecutor(spark, log, library_job_template, exec_filename, environment)
    try:
        executor.init()
        executor.run()
    finally:
        # Log completion and terminate the Spark application. The nested
        # try/finally guarantees spark.stop() runs even if executor.destroy()
        # raises (the original version would have leaked the Spark session).
        log.warn('etl_job is finished')
        try:
            executor.destroy()
        finally:
            spark.stop()


# Entry point for the PySpark ETL application when launched via spark-submit;
# the guard prevents execution on import.
if __name__ == '__main__':
    main()
