import sys
import time
import pymysql
from pyspark.sql import HiveContext
from pyspark import SparkConf, SparkContext, SQLContext
from pyspark.sql import SparkSession


# Launch example:
# /usr/bin/spark-submit --jars "/home/engyne/spark/ojdbc7.jar" --master local  /home/engyne/spark/spark_test.py
# conf = SparkConf().setAppName('inc_dd_openings')
# Hive-enabled Spark session. The MySQL JDBC connector jar is put on the
# driver classpath so getDF() can read source tables over JDBC.
sqlContext = SparkSession.builder.appName("inc_dd_openings").config("hive.metastore.uris","thrift://hadoop001:9083").config("spark.some.config.option","some-value").config("spark.driver.extraClassPath","/opt/cloudera/parcels/CDH/lib/spark/jars/mysql-connector-java-8.0.21.jar").enableHiveSupport().getOrCreate()


# sc = SparkContext(conf=conf)
# sqlContext = HiveContext(conf)

# Python 2 only: force the default string encoding to UTF-8 so implicit
# str<->unicode conversions do not raise UnicodeDecodeError. Both calls
# fail on Python 3 (no reload builtin, no setdefaultencoding).
reload(sys)
sys.setdefaultencoding("utf-8")

# Connection settings for the MySQL database holding the job-control table.
mysql_url = "192.168.**.**"  # hostname/IP (masked); despite the name, not a full URL
mysql_port = 3306
mysql_user = "root"
mysql_password = "123456"
mysql_db = "test"




# NOTE(review): these get_df_* constants are not used by the active code
# (getDF() hardcodes its own connection values) — confirm before removing.
# NOTE(review): the driver names Oracle but the URL is MySQL — looks stale.
get_df_url = "jdbc:mysql://192.168.168.168:3306/test"
get_df_driver = "oracle.jdbc.driver.OracleDriver"
get_df_user = "test"
get_df_password = "test#6"
# insert		update		delete
def conMysqlDB_exec(sqlStr):
    """Execute a write statement (INSERT/UPDATE/DELETE) against MySQL.

    Commits on success and rolls back on any error.
    Returns True on success, False on failure (best-effort; the error is
    printed, not raised).
    """
    db = pymysql.Connect(host=mysql_url,
                         port=mysql_port,
                         user=mysql_user,
                         password=mysql_password,
                         database=mysql_db)
    try:
        cursor = db.cursor()
        try:
            cursor.execute(sqlStr)
            db.commit()
            result = True
        except Exception:
            # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
            print("---->MySqlError: execute error")
            result = False
            db.rollback()
    finally:
        # BUG FIX: original wrote `db.close` without parentheses, which never
        # called close(); `finally` also guarantees closure on every path.
        db.close()
    return result


# select
def conMysqlDB_fetchall(sqlStr):
    """Run a SELECT against MySQL and return all rows.

    Returns the cursor's fetchall() result (tuple of row tuples), or the
    empty list if the query fails (best-effort; the error is printed).
    """
    db = pymysql.Connect(host=mysql_url,
                         port=mysql_port,
                         user=mysql_user,
                         password=mysql_password,
                         database=mysql_db)
    results = []
    try:
        cursor = db.cursor()
        try:
            cursor.execute(sqlStr)
            results = cursor.fetchall()
        except Exception:
            # was a bare `except:`; also fixed the "fecth" typo in the message
            print("---->MySqlError: unable to fetch data")
    finally:
        # BUG FIX: original wrote `db.close` without parentheses, which never
        # called close(); `finally` also guarantees closure on every path.
        db.close()
    return results


#  import
def importData(type):
    """Import every enabled job (status=1) listed in the spark_job table.

    type -- import mode: "append" for an incremental import driven by the
            job's check column / last value, "import-overwrite" to overwrite
            an existing Hive table, anything else (normally "import") to
            create the table from scratch.

    Results for each job are collected and printed at the end; a failure in
    one job does not stop the remaining jobs.
    """
    time_start = time.time()
    findJobSql = "SELECT * FROM spark_job where status=1"
    result = conMysqlDB_fetchall(findJobSql)
    resultInfoList = []
    for val in result:
        # assumes spark_job row layout: [id, database, table, partition
        # column, partition column type, check column, last value]
        # -- TODO confirm against the actual table definition
        databaseName = val[1]
        tableName = val[2]
        partitionColumnName = val[3]
        partitionColumnDesc = val[4]
        checkColumn = val[5]
        lastValue = val[6]

        sqlContext.sql("use %s" % databaseName)

        if type == "append":
            # Pull only rows newer than the recorded high-water mark.
            df = getDF("(select * from %s where to_char(%s, 'yyyy-MM-dd')>'%s')" % (tableName, checkColumn, lastValue))
            try:
                # reduce(max) raises ValueError on an empty RDD (no new data).
                # NOTE(review): max compares whole rows (tuple order), not the
                # check column specifically — verify this picks the intended row.
                nowLastValue = df.rdd.reduce(max)[checkColumn]
                o2hBase(df, databaseName, tableName, partitionColumnName, partitionColumnDesc, True, "")
                # NOTE(review): jobs are read from `spark_job` but this updates
                # `job` — confirm which control table is intended.
                updataJobSql = "UPDATE job SET last_value='%s' WHERE table_name='%s'" % (nowLastValue, tableName)
                if conMysqlDB_exec(updataJobSql):
                    print("---->SUCCESS: incremental import success")
                    resultInfoList.append("SUCCESS: %s import success" % tableName)
            except ValueError:
                print("---->INFO: No new data added!")
                resultInfoList.append("INFO: %s   ValueError(No new data added!)" % tableName)
            except Exception:
                # was a bare `except:`; narrowed so Ctrl-C / SystemExit propagate
                print("---->ERROR: other error")
                resultInfoList.append("ERROR: %s has other error" % tableName)

        else:
            df = getDF(tableName)
            df.show(20)
            o2hBase(df, databaseName, tableName, partitionColumnName, partitionColumnDesc, False, type)
            print("---->INFO: import success")
            resultInfoList.append("SUCCESS: %s import success" % tableName)
    print("RESULT:")
    for info in resultInfoList:
        print(info)
    time_end = time.time()
    print("---->INFO: time cost", (time_end - time_start) / 60, "m")


def max(a, b):
    """Return the larger of a and b.

    Deliberately shadows the builtin: this two-argument form is used as the
    combiner function in df.rdd.reduce(max).
    """
    return a if a > b else b


def getDF(tableName):
    """Load a MySQL table (or a parenthesized subquery string) as a DataFrame.

    tableName is passed straight through as the JDBC `dbtable` option, so it
    may be either a bare table name or "(select ...)".
    """
    reader = (sqlContext.read.format("jdbc")
              .option("url", "jdbc:mysql://192.168.168.168:168/test?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai")
              .option("driver", "com.mysql.cj.jdbc.Driver")
              .option("dbtable", tableName)
              .option("user", "test")
              .option("password", "test#6"))
    return reader.load()


# o2h
def o2hBase(df, databaseName, tableName, partitionColumnName, partitionColumnDesc, isIncremental, type):
    """Write `df` into the Hive table `databaseName.tableName`.

    partitionColumnName/partitionColumnDesc -- dynamic-partition column and
        its Hive type; pass "" for an unpartitioned table.
    isIncremental -- True appends (insert into); False creates or overwrites
        depending on `type` ("import-overwrite" overwrites, otherwise CTAS /
        create-then-insert).
    """
    sqlContext.sql("use %s" % databaseName)
    schema = df.schema
    cnSql, columnNameSql = getTableDesc(df, partitionColumnName)

    # Strip embedded newlines/carriage returns from string cells so rows do
    # not break Hive's line-delimited text format (Python 2: both str and
    # unicode cells are handled).
    rdd = df.rdd.map(
        lambda x: [(x[i].replace("\n", "").replace("\r", "")
                    if isinstance(x[i], (unicode, str)) else x[i])
                   for i in range(len(x))])
    df = sqlContext.createDataFrame(rdd, schema)
    df.createOrReplaceTempView("temp{}".format(tableName))

    if partitionColumnName == "":
        if isIncremental:
            saveSql = "insert into table {} select * from temp{}".format(tableName, tableName)
        else:
            if type == "import-overwrite":
                saveSql = "insert overwrite table {} select * from temp{}".format(tableName, tableName)
            else:
                saveSql = "create table {0} as select * from temp{1}".format(tableName, tableName)
    else:
        sqlContext.sql("set hive.exec.dynamic.partition=true")
        sqlContext.sql("set hive.exec.dynamic.partition.mode=nonstrict")
        sqlContext.sql("SET hive.exec.max.dynamic.partitions=100000")
        sqlContext.sql("SET hive.exec.max.dynamic.partitions.pernode=100000")
        if isIncremental:
            saveSql = "insert into table %s partition(%s) SELECT %s,%s FROM temp%s" % (
                tableName, partitionColumnName, cnSql, partitionColumnName, tableName)
        else:
            if type != "import-overwrite":
                # dynamic partition create table
                createTableSql = "create table %s (%s)PARTITIONED BY (%s %s) row format delimited fields terminated by '\t'  LINES TERMINATED BY '\n'" % (
                    tableName, columnNameSql, partitionColumnName, partitionColumnDesc)
                sqlContext.sql(createTableSql)
                print("---->INFO: dynamic partition create success")
            # BUG FIX: the original passed ONE tuple to str.format(), leaving
            # four of the five {} placeholders unfilled -> IndexError at
            # runtime. Arguments must be passed individually.
            saveSql = "insert overwrite table {} partition({}) SELECT {},{} FROM temp{}".format(
                tableName, partitionColumnName, cnSql, partitionColumnName, tableName)
    sqlContext.sql(saveSql)
    sqlContext.catalog.dropTempView("temp{}".format(tableName))


def getTableDesc(sqlDF, partitionColumnName):
    """Build column lists for Hive DDL/DML, excluding the partition column.

    sqlDF               -- DataFrame whose .dtypes yields (name, type) pairs.
    partitionColumnName -- column to exclude ("" excludes nothing).

    Returns (cnSql, columnNameSql):
      cnSql         -- comma-separated column names,        e.g. "a,b"
      columnNameSql -- comma-separated "name type" pairs,   e.g. "a int,b string"

    BUG FIX: the original appended a comma after every element except the
    last DataFrame column, so when the partition column happened to be the
    last column the result carried a trailing comma ("a,b,"), producing
    invalid SQL. Joining filtered lists cannot leave a stray separator.
    """
    names = []
    typedNames = []
    for colName, colType in sqlDF.dtypes:
        if colName != partitionColumnName:
            names.append(colName)
            typedNames.append(colName + " " + colType)
    return ",".join(names), ",".join(typedNames)


if __name__ == '__main__':
    # Modes accepted by importData:
    #   "import"           -- general import (create the Hive table)
    #   "import-overwrite" -- general import, overwrite the existing table
    #   "append"           -- incremental import (check column / last value)
    importData("import")
