#!/home/hadoop/anaconda2/bin/python
# -*- coding:utf-8 -*-
import logging
import os
from enum import Enum

from .setting import GAVIAL_HIVE_DATAWOUSE
from .tabledef import TableDef

try:
  from pyspark import SparkContext
  from pyspark.sql import SparkSession
except ImportError:
  pass


# How a dataset is persisted into its Hive table:
#   TABLE_OVERWRITE - replace the whole table's data
#   PART_OVERWRITE  - rewrite only the partitions present in the dataset
#   APPEND          - add rows without removing anything
SaveMode = Enum('SaveMode', [
  ('TABLE_OVERWRITE', 'table_overwrite'),
  ('PART_OVERWRITE', 'part_overwrite'),
  ('APPEND', 'append'),
])


# Hive DDL template: {0}=database, {1}=table, {2}=comma-joined partition
# spec such as "dt='20200101',hour=5" (see spec building in HiveClient).
ADD_PARTITION_SQL_TEMPLATE = "ALTER TABLE {0}.{1} ADD IF NOT EXISTS PARTITION({2})"


class HiveClient(object):
  """Persists Spark DataFrames as ORC files and registers them as Hive tables."""

  # Text types differ between Python 2 (str/unicode) and 3 (str only).
  # The previous `isinstance(value, unicode)` check raised NameError on Py3.
  try:
    _TEXT_TYPES = (str, unicode)  # noqa: F821 -- Python 2
  except NameError:
    _TEXT_TYPES = (str,)  # Python 3

  @staticmethod
  def _partition_spec(part_dict):
    """Render a {column: value} dict as a Hive PARTITION(...) spec string.

    Text values are single-quoted, everything else is formatted as-is,
    e.g. {'dt': '20200101', 'hour': 5} -> "dt='20200101',hour=5".

    :param part_dict: dict of partition column -> value for one partition.
    :return: comma-joined "key=value" / "key='value'" spec string.
    """
    specs = []
    for key, value in part_dict.items():
      if isinstance(value, HiveClient._TEXT_TYPES):
        specs.append("{0}='{1}'".format(key, value))
      else:
        specs.append("{0}={1}".format(key, value))
    return ",".join(specs)

  @staticmethod
  def _remove_partition_dirs(hdfs_client, orc_path, partition_rows):
    """Delete the HDFS directory of every partition about to be rewritten.

    No-op when the table's ORC root does not exist yet.
    """
    if not hdfs_client.exists(orc_path):
      return
    logging.info("file exists")
    for row in partition_rows:
      for key, value in row.asDict().items():
        # Hive lays out partitions as <root>/<key>=<value> directories.
        rm_path = os.path.join(orc_path, "{0}={1}".format(key, value))
        logging.info("rm_path:%s", rm_path)
        hdfs_client.rm(rm_path)

  @staticmethod
  def save_dataset_to_table(df, database, table, partitions, save_mode,
      hdfs_client):
    """Write `df` as ORC under the warehouse path and register it in Hive.

    The data lands at <db_location>/<table>.orc; the table is created with
    CREATE TABLE IF NOT EXISTS and, when partitioned, every partition
    present in `df` is registered via ALTER TABLE ... ADD PARTITION.

    :param df: Spark DataFrame to persist.
    :param database: target Hive database name.
    :param table: target Hive table name.
    :param partitions: list of partition column names ([] for unpartitioned).
    :param save_mode: a SaveMode member selecting overwrite/append semantics.
    :param hdfs_client: client exposing exists()/rm() used to clear
        partition directories for PART_OVERWRITE.
    :raises TypeError: if save_mode is not a SaveMode or partitions is not
        a list.
    """
    if not isinstance(save_mode, SaveMode):
      raise TypeError("save_mode must be a SaveMode, got %r" % (save_mode,))
    if not isinstance(partitions, list):
      raise TypeError("partitions must be a list, got %r" % (partitions,))
    db_location = GAVIAL_HIVE_DATAWOUSE.format(database)
    orc_path = "{0}/{1}.orc".format(db_location, table)
    # Only TABLE_OVERWRITE replaces data wholesale; PART_OVERWRITE appends
    # after the affected partition directories have been removed below.
    mode = "overwrite" if save_mode is SaveMode.TABLE_OVERWRITE else "append"
    spark = SparkSession.builder.master(
      'yarn').enableHiveSupport().getOrCreate()
    if partitions:
      partition_rows = df.select(partitions).distinct().collect()
      # One output partition per distinct Hive partition value combination.
      df = df.repartition(len(partition_rows), partitions)
      logging.info("orcPath:%s", orc_path)
      if save_mode is SaveMode.PART_OVERWRITE:
        logging.info("part_overwrite")
        HiveClient._remove_partition_dirs(hdfs_client, orc_path,
                                          partition_rows)
      df.write.partitionBy(partitions).format("orc").mode(mode).save(
        orc_path)
      logging.info(df.schema)
      table_def = TableDef(df.schema, table, database, partitions,
                           orc_path)
      spark.sql(table_def.build_create_table_sql())
      logging.info("create table if not exist:%s.%s.%s",
                   db_location, database, table)
      for row in partition_rows:
        logging.info(type(row))
        add_partition_sql = ADD_PARTITION_SQL_TEMPLATE.format(
          database, table, HiveClient._partition_spec(row.asDict()))
        logging.info("add partition:%s", add_partition_sql)
        spark.sql(add_partition_sql)
    else:
      table_def = TableDef(df.schema, table, database, [], orc_path)
      df.write.format("orc").mode(mode).save(orc_path)
      create_sql = table_def.build_create_table_sql()
      logging.info(create_sql)
      spark.sql(create_sql)
      logging.info("create table if not exist:%s.%s.%s",
                   db_location, database, table)
