import sys
import os
import argparse
from pyspark.sql import SparkSession

# Absolute path to the inbound customer CSV file to ingest.
# NOTE(review): a previous assignment pointing at C:\study\... was dead code
# (it was immediately overwritten by this one) and has been removed.
file_path = r'E:\bigdata\projects\Etl_Spark_Py\sample_files\inbound\customer.csv'


def handle_commandline(args_in):
    """Parse command-line arguments for the ingest script.

    :param args_in: argument strings (typically ``sys.argv[1:]``)
    :type args_in: sequence of str
    :return: parsed arguments exposing ``filename`` and ``is_clean_up``
    :rtype: argparse.Namespace

    Command sample: ``ingest_file_to_stage.py -s D_INDIV.csv``
    """
    # Fixes vs. original: the docstring wrongly claimed a tuple return
    # (parse_args returns a Namespace), and both help texts were the
    # placeholder string 'Description'.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--filename', type=str, required=True,
                        help='name of the source file to ingest')
    parser.add_argument('-c', '--is_clean_up', type=str, required=False,
                        help='whether to clean up the inbound file after ingest')
    return parser.parse_args(args_in)


class NoRecordException(Exception):
    """Raised when an input file yields no records to process."""

    def __init__(self, message):
        # Pass the message to the base class so str(exc) and the traceback
        # display it. The original called Exception.__init__(self) with no
        # arguments, which left str(exc) empty.
        super().__init__(message)
        # Kept for backward compatibility with callers that read .message.
        self.message = message


def main(argv):
    """Entry point: ingest the configured customer CSV file via Spark.

    :param argv: command-line arguments (currently only echoed; see note)
    :return: 1 on success, -1 when the configured path is missing or not a file

    NOTE(review): ``handle_commandline`` is defined but never called, so
    ``argv`` is ignored beyond being printed — presumably the input file
    should come from the ``-s`` option; confirm before wiring it up.
    """
    print(argv)
    # os.path.isfile already implies existence, so the original's extra
    # os.path.exists check was redundant.
    if os.path.isfile(file_path):
        process_file_by_spark(file_path)
        return 1
    # Grammar fix in the user-facing message ("is not exist" -> "does not exist").
    print('file path does not exist or is not a file\n')
    return -1


def process_file_by_spark(file_location):
    """Read the CSV at *file_location* into a Spark DataFrame and display it.

    (Translated from the original Chinese comment: read the file with a
    DataFrame, then write it into Hive or HBase — the write step is not
    implemented yet.)

    :param file_location: path to the CSV file; the first row is the header
    """
    spark = SparkSession.builder.appName('Spark_ETL').getOrCreate()
    try:
        customer_data = spark.read.csv(file_location, header=True)
        customer_data.show()
    finally:
        # Release the driver/JVM resources even if the read or show fails;
        # the original never stopped the session (resource leak) and ended
        # with a stray `pass`.
        spark.stop()


if __name__ == "__main__":
    main(sys.argv[1:])
