# coding=utf-8

import os, time, json, pyspark

from pyspark import SparkContext, SparkConf
import argparse
import sys

from pyspark.sql import SparkSession

# AWS connection settings used by the S3A and Snowflake helpers below.
aws_region = "us-east-2"
# SECURITY(review): long-lived AWS credentials are hard-coded in source and
# have been committed to the repository. They should be considered leaked:
# rotate them, and load replacements from the environment, an EC2/EMR
# instance profile, or a secrets manager instead of embedding them here.
access_id = "AKIAIOFGB7H4GDSE5NJQ"
access_key = "MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat"


class CommonEMR(object):
    """Shared bootstrap helper for PySpark jobs running on EMR.

    Parses the job's command-line arguments (``--env``, ``--parameters``),
    builds a SparkContext / SparkSession pre-configured for S3A access in
    ``aws_region``, and provides static helpers for resolving the custom
    ``ptadmin-emr://`` URL scheme, JDBC property strings, and misc value
    conversions used by downstream jobs.
    """

    def __init__(self, app_name, confs=None):
        """Parse CLI arguments and start Spark.

        :param app_name: Spark application name.
        :param confs: optional iterable of ``(key, value)`` pairs applied to
            the SparkConf. Defaults to no extra settings.
            (A ``None`` sentinel is used instead of a mutable ``[]`` default,
            which would be shared across all instances.)
        """
        self.app_name = app_name
        # resolve input arguments
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-e", "--env", type=str, choices=["prod", "integration", "testing", "debug"], default="prod", help="testing | prod")
        # usage: --parameters xxx:=111 yyy:=222 (":=" is the key/value separator)
        self.parser.add_argument("-p", "--parameters", nargs='+', type=str, help="input parameters for prod")
        self.args = self.parser.parse_args()
        self.input_parameters = self.get_input_parameter_properties(self.args.parameters)

        self.sc = self.setup_spark_context(confs if confs is not None else [])
        self.sparkSession = self.setup_spark_session()

    def setup_spark_context(self, confs):
        """Create the SparkContext with S3A/V4-signing support.

        :param confs: iterable of ``(key, value)`` SparkConf pairs.
        :return: the initialized SparkContext (also stored on ``self.sc``).
        """
        conf = SparkConf().setAppName(self.app_name)
        # Fail fast: do not let YARN re-attempt a structurally broken job.
        conf.set("spark.yarn.maxAppAttempts", 1)
        for k, v in confs:
            print("setting spark conf k=%s, v=%s" % (k, v))
            conf.set(k, v)

        self.sc = SparkContext(conf=conf)
        # V4 request signing is required by newer AWS regions (e.g. us-east-2).
        self.sc.setSystemProperty("com.amazonaws.services.s3.enableV4", "true")
        self._setup_hadoop_conf(self.sc)
        return self.sc

    def exec_sql_read(self, sql, jdbc_property):
        """Run ``sql`` against a JDBC source and return the result DataFrame.

        :param sql: SQL query text (passed as the ``query`` option).
        :param jdbc_property: dict with keys ``type``, ``host``, ``database``,
            ``username``, ``password`` (as produced by
            :meth:`resolve_ptadmin_schema`).
        :raises Exception: if ``type`` is anything other than ``postgresql``.
        """
        jdbc_type = jdbc_property["type"]
        jdbc_host = jdbc_property["host"]
        jdbc_db = jdbc_property["database"]
        jdbc_user = jdbc_property["username"]
        jdbc_pwd = jdbc_property["password"]
        if jdbc_type == "postgresql":
            jdbc_driver = "org.postgresql.Driver"
        else:
            raise Exception("support only postgresql driver")

        url = "jdbc:%s://%s/%s" % (jdbc_type, jdbc_host, jdbc_db)
        return self.sparkSession.read \
            .format("jdbc") \
            .option("url", url) \
            .option("query", sql) \
            .option("user", jdbc_user) \
            .option("password", jdbc_pwd) \
            .option("driver", jdbc_driver) \
            .load()

    def setup_spark_session(self):
        """Build (or fetch) a SparkSession configured for S3A access."""
        spark_session = SparkSession.builder.appName(self.app_name)\
            .config("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")\
            .config("com.amazonaws.services.s3.enableV4", "true")\
            .config("fs.s3a.access.key", access_id)\
            .config("fs.s3a.secret.key", access_key)\
            .config("fs.s3a.path.style.access", "true")\
            .config("fs.s3a.connection.maximum", "100000")\
            .config("fs.s3a.endpoint", "s3." + aws_region + ".amazonaws.com")\
            .getOrCreate()
        return spark_session

    @staticmethod
    def get_output_path_properties(output_path, enable_timestamp):
        """Resolve one or many output paths into property dicts.

        :param output_path: a path string or list of path strings; each must
            use the ``ptadmin-emr://``, ``s3://`` or ``s3a://`` scheme.
        :param enable_timestamp: if truthy, a timestamp suffix is appended to
            each s3 path (see :meth:`append_timestamp`).
        :return: list of property dicts (``{"s3": path}`` or the dict from
            :meth:`resolve_ptadmin_schema`).
        :raises Exception: on an unsupported scheme or argument type.
        """
        # Normalize: a single string behaves exactly like a one-item list.
        if isinstance(output_path, str):
            output_path = [output_path]
        elif not isinstance(output_path, list):
            raise Exception("type must be list or str")

        output_path_properties = []
        for path in output_path:
            if path.startswith("ptadmin-emr://"):
                output_path_properties.append(CommonEMR.resolve_ptadmin_schema(path))
            elif path.startswith(("s3://", "s3a://")):
                if enable_timestamp:
                    path = CommonEMR.append_timestamp(path)
                output_path_properties.append({"s3": path})
            else:
                raise Exception("only support schema ptadmin-emr://, s3:// or s3a://")
        return output_path_properties

    @staticmethod
    def get_input_parameter_properties(input_parameters):
        """Parse ``key:=value`` parameter strings into a dict.

        :param input_parameters: ``None``, a single ``"k:=v"`` string, or a
            list of them (as produced by ``--parameters``).
        :return: ``{key: value}`` with both sides stripped of whitespace.
        :raises Exception: if the argument is neither None, str nor list.
        """
        input_parameters_map = {}
        if input_parameters is None:
            return input_parameters_map

        if isinstance(input_parameters, str):
            input_parameters = [input_parameters]
        elif not isinstance(input_parameters, list):
            raise Exception("type must be list or str")

        for p in input_parameters:
            print("resolving parameter = %s" % p)
            # maxsplit=1 so values may themselves contain ":=".
            k, v = p.split(":=", 1)
            input_parameters_map[k.strip()] = v.strip()
        return input_parameters_map

    @staticmethod
    def get_data_path_properties(data_path):
        """Resolve one or many input data paths into property dicts.

        Same scheme rules as :meth:`get_output_path_properties`, but without
        timestamp handling.
        """
        if isinstance(data_path, str):
            data_path = [data_path]
        elif not isinstance(data_path, list):
            raise Exception("type must be list or str")

        data_path_properties = []
        for path in data_path:
            print("data_path = %s" % path)
            if path.startswith("ptadmin-emr://"):
                data_path_properties.append(CommonEMR.resolve_ptadmin_schema(path))
            elif path.startswith(("s3://", "s3a://")):
                data_path_properties.append({"s3": path})
            else:
                raise Exception("only support schema ptadmin-emr://, s3:// or s3a://")
        return data_path_properties

    @staticmethod
    def resolve_jdbc_url(jdbc_url):
        """Split a ``'<json-properties>;<jdbc-url>'`` string.

        Example input:
            ``'{"user": "u", "password": "p", "driver": "org.postgresql.Driver"};jdbc:postgresql://10.0.24.4/amzdata'``

        :return: ``(url, properties_dict)``.

        SECURITY: the properties segment is parsed with ``json.loads`` rather
        than ``eval`` — it must be valid JSON (double-quoted keys/values),
        which the documented format already is.
        """
        # maxsplit=1 keeps any ';' inside the JDBC URL intact.
        properties_json, url = jdbc_url.split(';', 1)
        properties = json.loads(properties_json)
        print("jdbc url=%s" % url)
        print("jdbc properties=%s" % properties)
        return url, properties

    @staticmethod
    def resolve_ptadmin_schema(ptadmin_emr_url):
        """Parse a ``ptadmin-emr://`` URL into a nested property dict.

        Supported forms:
            ``ptadmin-emr://jdbc/<type>/[<user>:<pwd>@]<host>/<db>[/<schema>[/<table>]]``
            ``ptadmin-emr://batch/<batchname>``

        :return: ``{"jdbc": {...}}`` or ``{"batch": {"batchname": ...}}``;
            an empty dict for an unrecognized schema type.
        """
        schema_properties = {}
        # parts: [0] jdbc|batch, [1] <type>, [2] <user>:<pwd>@<host>,
        #        [3] <db>, [4] <schema>, [5] <table>
        parts = ptadmin_emr_url.replace("ptadmin-emr://", "").split("/")
        schema_type = parts[0]
        if "jdbc" in schema_type:
            jdbc_properties = {"type": parts[1]}
            if "@" in parts[2]:
                credentials, host = parts[2].split("@", 1)
                # maxsplit=1 so passwords may contain ':'.
                user, pwd = credentials.split(":", 1)
                jdbc_properties["username"] = user
                jdbc_properties["password"] = pwd
                jdbc_properties["host"] = host
            else:
                # BUG FIX: previously "host" was silently omitted when no
                # credentials were embedded, causing a KeyError downstream
                # in exec_sql_read / get_classic_jdbc_properties.
                jdbc_properties["host"] = parts[2]
            jdbc_properties["database"] = parts[3]
            if len(parts) > 4:
                jdbc_properties["schema"] = parts[4]
            # BUG FIX: <table> is optional per the URL format; the old code
            # indexed parts[5] whenever a schema was present (IndexError).
            if len(parts) > 5:
                jdbc_properties["table"] = parts[5]
            schema_properties["jdbc"] = jdbc_properties
        elif "batch" in schema_type:  # ptadmin-emr://batch/batch_xxxxxx
            schema_properties["batch"] = {"batchname": parts[1]}

        return schema_properties

    @staticmethod
    def get_classic_jdbc_properties(jdbc_property):
        """Convert a parsed jdbc property dict to classic Spark JDBC args.

        :param jdbc_property: dict with ``type``, ``host``, ``database``,
            ``username``, ``password`` and ``table`` keys.
        :return: ``(url, table_name, properties)`` suitable for
            ``DataFrameReader.jdbc`` / ``DataFrameWriter.jdbc``.
        """
        url = "jdbc:%s://%s/%s" % (
            jdbc_property["type"], jdbc_property["host"], jdbc_property["database"])
        properties = {
            "user": jdbc_property["username"],
            "password": jdbc_property["password"],
            # NOTE: driver is hard-coded; only postgresql is supported here.
            "driver": "org.postgresql.Driver",
        }
        return url, jdbc_property["table"], properties

    @staticmethod
    def append_timestamp(file_path):
        """Insert a ``_YYYY_MM_DD_HH_MM_SS`` suffix before the extension."""
        timestamp = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time()))
        filename_without_ext, ext = os.path.splitext(file_path)
        # ext already includes the leading '.' (os.path.splitext contract).
        return "%s_%s%s" % (filename_without_ext, timestamp, ext)

    @staticmethod
    def get_snowflake_options(sfAccount, sfUser, sfPassword, sfDatabase="EMR_OUTPUT", sfSchema="PUBLIC", tempdir="s3a://pt-snowflake"):
        """Build the option dict for the spark-snowflake connector.

        AWS keys come from the module-level credentials; ``tempdir`` is the
        S3 staging location used by the connector.
        """
        return {
            "sfURL": "%s.us-east-1.snowflakecomputing.com" % sfAccount,
            "sfAccount": sfAccount,
            "sfUser": sfUser,
            "sfPassword": sfPassword,
            "sfDatabase": sfDatabase,
            "sfSchema": sfSchema,
            "sfWarehouse": "",
            "awsAccessKey": access_id,
            "awsSecretKey": access_key,
            "tempdir": tempdir,
        }

    @staticmethod
    def _setup_hadoop_conf(sc):
        """Apply S3A settings to the JVM-side Hadoop configuration.

        See https://stackoverflow.com/questions/43454117 for why both the
        enableV4 flag and the region-specific endpoint are required.
        """
        hadoop_conf = sc._jsc.hadoopConfiguration()
        hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
        hadoop_conf.set("com.amazonaws.services.s3.enableV4", "true")
        hadoop_conf.set("fs.s3a.access.key", access_id)
        hadoop_conf.set("fs.s3a.secret.key", access_key)
        hadoop_conf.set("fs.s3a.path.style.access", "true")
        hadoop_conf.set("fs.s3a.connection.maximum", "100000")
        hadoop_conf.set("fs.s3a.endpoint", "s3." + aws_region + ".amazonaws.com")

    @staticmethod
    def convert_date(x):
        """Convert a raw scrape timestamp to ``YYYY-mm-dd HH:MM:SS``.

        The input is digits whose first 10 characters are the epoch seconds
        and the remainder fractional — TODO confirm against the scraper.
        Returns ``"NA"`` (and logs) on any parse failure.
        """
        try:
            ts = str(x)[:10] + "." + str(x)[10:]
            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(ts)))
        except Exception:
            print("scrape_date value error: %s" % (x))
            return "NA"

    @staticmethod
    def convert_price(x):
        """Strip '$' and ',' from a price string; return -1 on failure."""
        try:
            return x.replace("$", "").replace(",", "")
        except Exception:
            print("convert_price value error: %s" % (x))
            return -1