#!/home/hadoop/anaconda2/bin/python
# -*- coding:utf-8 -*-
import logging
import re
from datetime import datetime
from datetime import timedelta

try:
  from .api import GavialApi
  # from hdfs3 import HDFileSystem
  import pyarrow as pa
  from .krbgavial import krbContext
  # from krbcontext import krbContext
  from .hiveclient import HiveClient
  from pyarrow import filesystem
except ImportError:
  print("import error")
  raise
from .properties import Properties

from .setting import *
import six

try:
  from pyspark import SparkContext
  from pyspark.sql import SparkSession
except ImportError:
  pass

# Java SimpleDateFormat tokens (used in job-config time macros) mapped to
# their Python strftime equivalents; consumed by Context._date_format_to_py.
DATE_FORMAT_MAPPING = {
  "yy": "%y",
  "yyyy": "%Y",
  "MM": "%m",
  "dd": "%d",
  "HH": "%H",
  "mm": "%M"
}


def singleton(cls):
  """Class decorator that caches a single instance per decorated class.

  The first call to the decorated class constructs the instance with the
  supplied arguments; every later call returns that same cached instance
  and silently ignores its arguments.

  Note: the previous outer ``*args, **kw`` parameters were unused (they
  were shadowed by the inner wrapper's own parameters) and were dropped;
  decorator usage (``@singleton``) is unaffected.
  """
  instances = {}

  def _singleton(*args, **kw):
    # Construct lazily, on the first call only.
    if cls not in instances:
      instances[cls] = cls(*args, **kw)
    return instances[cls]

  return _singleton


@singleton
class Context(object):
  """Singleton runtime context for a scheduled job.

  Two modes, selected by the presence of a ``local-debug.properties``
  file in the working directory:

  * local debug: configuration is read from that properties file;
  * production: configuration comes from ``runtime.properties`` plus the
    Gavial RPC service, with optional Kerberos authentication driven by
    the Hadoop site XML files.

  Lazily exposes a Spark session, an HDFS (or local) filesystem client
  and a Hive client.
  """

  def __init__(self):
    self._client = None
    # Lazily-created HiveClient.  Must be initialized here: previously it
    # was never set, so get_hive_client() raised AttributeError on its
    # first call.
    self._hive_client = None
    self.schedule_id = None
    self.spark_session = None
    self.local_logger = None
    self.spark_logger = None
    # Defaults so later steps never hit an unset attribute (previously
    # self.config/self.principal stayed unset when the RPC lookup failed
    # and _replace_runtime_variable() crashed with AttributeError).
    self.config = {}
    self.principal = None
    self.keytab_file = None
    # A local-debug.properties file in the cwd switches on debug mode.
    self.local_debug = os.path.exists("local-debug.properties")
    self.hdfs_conf = {}
    self.hadoop_enabled = True
    # Both modes require HADOOP_HOME; check once up front.
    self.hadoop_home = os.getenv('HADOOP_HOME')
    if not self.hadoop_home:
      raise Exception("HADOOP_HOME not set")
    if self.local_debug:
      debug_file = os.path.join(os.getcwd(), 'local-debug.properties')
      job_config = (Properties(debug_file).get_properties()
                    if os.path.exists(debug_file) else {})
      self.principal = job_config.get('principal', 'test')
      self.job_type = job_config.get('job.type', 'command')
      self.config = job_config
      if 'spark' == self.job_type:
        self._init_spark_session()
      self._init_logger()
    else:
      self._parse_hdfs_conf()
      properties = Properties('runtime.properties').get_properties()
      if "hadoop.enabled" in properties:
        self.hadoop_enabled = (
            "true" == properties.get("hadoop.enabled"))
      rpc_server = properties.get('api.server')
      rpc_port = properties.get('api.server.port')
      self.job_type = properties.get("job.type")
      if 'spark' == self.job_type:
        self._init_spark_session()
      self._init_logger()
      # Pull the azkaban runtime identifiers from the environment; every
      # RPC call below needs them.
      self._fill_runtime_args()
      logging.info('rpcServer={},rpcPort={}'.format(
        rpc_server, rpc_port))
      if rpc_server and rpc_port:
        self._gavial_api = GavialApi(rpc_server, rpc_port)
        job_config = self._gavial_api.get_job_config(
          self.task_name, self.job_name, self.flow_exec_id)
        if job_config:
          self.principal = job_config['principal']
          self.config = job_config
          self._replace_reference_job_outparams()
          self._gavial_api.save_job_runtime_config(
            self.task_name, self.job_name, self.flow_exec_id,
            self.config)
        else:
          logging.info('get job config failed!')

      if self._is_kerberos_enabled():
        self._set_keytab_file()
        # Per-execution ticket cache so concurrent executions don't
        # clobber each other's credentials.
        ccache = TICKET_CACHE_FORMAT.format(
          self.task_name, self.job_name, self.flow_exec_id)
        self._krb_session = krbContext(
          using_keytab=True,
          principal=self.principal,
          keytab_file=self.keytab_file,
          ccache_file=ccache)
      logging.info('HADOOP_HOME:{}'.format(self.hadoop_home))
    # Expand [T,...] time macros in config values (both modes).
    self._replace_runtime_variable()

  def _init_spark_session(self):
    """Create (or reuse) the SparkSession with Hive support: local
    master when debugging, yarn otherwise."""
    try:
      from pyspark.sql import SparkSession
    except ImportError:
      # Fall back to the module-level import of SparkSession, if any.
      pass
    master = 'local[*]' if self.local_debug else 'yarn'
    self.spark_session = SparkSession.builder.master(
      master).enableHiveSupport().getOrCreate()

  def get_spark_session(self):
    """Return the SparkSession created in __init__ (None for non-spark jobs)."""
    return self.spark_session

  def get_logger(self):
    """Return the log4j logger for spark jobs, the local logger otherwise."""
    return self.spark_logger if self._is_spark_job() else self.local_logger

  def get_hive_client(self):
    """Return a lazily-created, cached HiveClient."""
    if self._hive_client is None:
      self._hive_client = HiveClient()
    return self._hive_client

  def isHadoopEnabled(self):
    """Return whether hadoop access is enabled for this job."""
    return self.hadoop_enabled

  def get_file_system(self, driver='libhdfs'):
    """Return a cached filesystem client.

    Local debug uses a pyarrow LocalFileSystem; production connects to
    HDFS, authenticating through the Kerberos ticket cache when kerberos
    is enabled.  Returns None when hadoop is disabled.
    """
    if self._client is None:
      if self.local_debug:
        self._client = filesystem.LocalFileSystem()
      elif not self.hadoop_enabled:
        logging.info(
          "hadoop not enabled,can not create hdfs filesystem")
        return None
      elif self._is_kerberos_enabled():
        self._krb_session.init_with_keytab()
        ticket_cache = TICKET_CACHE_FORMAT.format(
          self.task_name, self.job_name, self.flow_exec_id)
        # Explicit check instead of the previous `assert`, which is
        # stripped when Python runs with -O.
        if not os.path.exists(ticket_cache):
          raise Exception(
            'ticket cache not found: {}'.format(ticket_cache))
        self._client = pa.hdfs.connect(
          user=self.principal,
          kerb_ticket=ticket_cache,
          driver=driver)
      else:
        self._client = pa.hdfs.connect(
          user=self.principal, driver=driver)
    return self._client

  def _init_logger(self):
    """Initialize logging: log4j through the Spark JVM gateway for spark
    jobs, a console `logging` logger otherwise."""
    if "spark" == self.job_type:
      log4j = self.spark_session._jvm.org.apache.log4j
      self.spark_logger = log4j.LogManager.getLogger(__name__)
    else:
      local_logger = logging.getLogger('local_logger')
      local_logger.setLevel(logging.INFO)
      # Guard against attaching a second console handler if this ever
      # runs twice, which would duplicate every log line.
      if not local_logger.handlers:
        log_formatter = logging.Formatter(
          '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
        )
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(log_formatter)
        local_logger.addHandler(console_handler)
      self.local_logger = local_logger

  def _fill_runtime_args(self):
    """Read azkaban runtime identifiers from environment variables set by
    the scheduler (for spark jobs, first exported from the executor's
    extra java options)."""
    if self._is_spark_job():
      spark = SparkSession.builder.master(
        'yarn').enableHiveSupport().getOrCreate()
      spark_config = spark.sparkContext.getConf()
      java_options = spark_config.get('spark.executor.extraJavaOptions')
      logging.info(
        'getJavaOptions spark.executor.extraJavaOptions={}'.format(
          java_options))
      self._set_java_options_sys_property(java_options)
    self.flow_name = os.getenv('azkaban.flowid', '')
    self.flow_exec_id = os.getenv('azkaban.execid', '')
    self.runtime_job_id = os.getenv('azkaban.jobid', '')
    self.job_name = os.getenv('azkaban.jobname', '')
    self.task_name = os.getenv('azkaban.projectname', '')
    logging.info("getEnv result:[azkaban.flowid = {},azkaban.execid = {}, \
            azkaban.jobid = {},azkaban.jobname = {},azkaban.projectname = {}]".
                 format(self.flow_name, self.flow_exec_id,
                        self.runtime_job_id, self.job_name,
                        self.task_name))

  def _replace_reference_job_outparams(self):
    """Resolve ``${job.param}`` references in the job config against the
    output params of upstream jobs in the same execution.

    Raises:
      Exception: if a referenced job or param has no recorded output.
    """
    pattern = re.compile(r'^\$\{.+\}$', re.IGNORECASE)
    params = self._gavial_api.get_task_output_params(
      self.task_name, self.flow_exec_id)
    for key in self.config:
      value = self.config[key]
      if value is None:
        continue
      if isinstance(value, six.string_types):
        stripped = value.strip()
        if pattern.match(stripped):
          # Drop the leading '${' and trailing '}'.  (Was
          # `value[2:value.length() - 1]`; Python strings have no
          # .length(), so every reference raised AttributeError.)
          ref = stripped[2:-1]
          # Split on the first '.' only, so param names may themselves
          # contain dots.  (Was `ref.split("\\.", 2)`, a Java regex-split
          # idiom that in Python splits on a literal backslash+dot and
          # never matched.)
          job_name, _, param_name = ref.partition('.')
          # .get() so a missing job/param raises the intended Exception
          # instead of a bare KeyError.
          if params.get(job_name) is None or \
              params[job_name].get(param_name) is None:
            raise Exception(
              'no output param found for job :{}'.format(
                job_name))
          self.config[key] = params[job_name][param_name]

  def _replace_runtime_variable(self):
    """Expand ``[T,<unit>,<offset>,<format>]`` time macros in config values.

    <unit> is Y/M/D/H (anything else shifts by nothing), <offset> is a
    signed integer, <format> uses Java-style tokens (e.g. yyyyMMddHHmm).
    Y and M are approximated as 365 and 30 days respectively.
    """
    # Raw string: the previous non-raw pattern relied on Python passing
    # through invalid escapes like '\[' (a DeprecationWarning since 3.6).
    pattern = re.compile(r'\[T,[Y,M,D,H,m],-?\d+,y*M*d*H*m*\]')
    for key in self.config:
      value = self.config[key]
      if not value or type(value) not in [str, bytes, bytearray]:
        continue
      matches = pattern.findall(value)
      if not matches:
        continue
      for matched in matches:
        # Strip '[' and ']' and pull out unit, offset and date format.
        parts = matched[1:-1].split(',')
        unit = parts[1]
        offset = int(parts[2])
        moment = datetime.now()
        if unit == 'Y':
          moment = moment + timedelta(offset * 365)
        elif unit == 'M':
          moment = moment + timedelta(offset * 30)
        elif unit == 'D':
          moment = moment + timedelta(days=offset)
        elif unit == 'H':
          moment = moment + timedelta(hours=offset)
        py_format = self._date_format_to_py(parts[3])
        value = value.replace(matched, moment.strftime(py_format))
      self.config[key] = value

  def _date_format_to_py(self, date_format):
    """Translate a Java-style date format (run-length tokens such as
    'yyyyMMdd') into a Python strftime format string.

    Runs of the same character are grouped and looked up in
    DATE_FORMAT_MAPPING; unknown runs are passed through verbatim.
    """
    py_format = ''
    run = ''
    for ch in date_format:
      if not run or run[-1] == ch:
        run += ch
      else:
        py_format += DATE_FORMAT_MAPPING.get(run, run)
        run = ch
    if run:
      py_format += DATE_FORMAT_MAPPING.get(run, run)
    return py_format

  def _set_keytab_file(self):
    """Locate this job's keytab file ('<job>.keytab*') in the cwd.

    Leaves self.keytab_file as None when no matching file exists
    (previously the attribute stayed unset in that case and the log call
    below raised AttributeError).
    """
    self.keytab_file = None
    prefix = '{}.keytab'.format(self.job_name)
    for name in os.listdir('.'):
      if name.startswith(prefix):
        self.keytab_file = name
        break
    logging.info('getKeyTabFile:{}'.format(self.keytab_file))

  def _set_java_options_sys_property(self, java_options):
    """Export each '-Dkey=value' pair from the executor java options as
    an environment variable.

    Raises:
      Exception: if java_options is None.
    """
    if java_options is None:
      raise Exception('spark.driver.extraJavaOptions is None')
    cleaned = java_options.replace('-D', '').replace('"', '')
    for pair in cleaned.split(' '):
      logging.info('pair={}'.format(pair))
      # Split on the first '=' only so values containing '=' survive
      # intact; skip empty or malformed tokens (previously they raised
      # IndexError).
      name, sep, val = pair.partition('=')
      if name and sep:
        os.environ[name] = val

  def _is_spark_job(self):
    """Return True when this job runs on Spark."""
    return self.job_type == 'spark'

  def save_state(self, plugin_state):
    """Persist plugin state via the RPC service (no-op in debug mode).

    Raises:
      Exception: if plugin_state is not a dict.
    """
    if self.local_debug:
      return
    if not isinstance(plugin_state, dict):
      raise Exception("pluginState must be type of dict!")
    self._gavial_api.save_job_state(self.task_name, self.job_name,
                                    self.schedule_id, plugin_state)

  def save_table_info(self, ds_name, db_name, table_name, table_desc=''):
    """Register produced-table metadata via the RPC service (no-op in
    debug mode)."""
    if self.local_debug:
      return
    self._gavial_api.save_table_info(self.task_name, ds_name, db_name,
                                     table_name, table_desc)

  def output_values(self, output_values):
    """Publish this job's output params for downstream jobs (printed
    locally in debug mode).

    Raises:
      Exception: if output_values is not a dict.
    """
    if self.local_debug:
      print("local save output values", output_values)
      return
    if not isinstance(output_values, dict):
      raise Exception("output_values must be type of dict!")
    self._gavial_api.save_job_output_param(
      self.task_name, self.job_name, self.flow_exec_id, output_values)

  def publish_event(self, message):
    """Send an event message via the RPC service (no-op in debug mode).

    Raises:
      Exception: if message is not a dict.
    """
    if self.local_debug:
      return
    if not isinstance(message, dict):
      raise Exception("message must be type of dict")
    self._gavial_api.send_message(self.task_name, self.job_name,
                                  self.flow_exec_id, message)

  def _is_kerberos_enabled(self):
    """Return True when the Hadoop site config declares kerberos auth."""
    return "kerberos" == self.hdfs_conf.get(
      'hadoop.security.authentication')

  def _parse_hdfs_conf(self):
    """Load every <property> name/value pair from core-site.xml and
    hdfs-site.xml under $HADOOP_HOME/etc/hadoop into self.hdfs_conf."""
    import xml.etree.ElementTree as ET

    for conf_file in ["core-site.xml", "hdfs-site.xml"]:
      tree = ET.parse(
        os.path.join(self.hadoop_home, 'etc/hadoop', conf_file))
      for prop in tree.getroot().findall('./property'):
        name = prop.find("./name").text
        # NOTE(review): an empty <value/> yields None here — assumed
        # acceptable downstream; confirm if empty values occur.
        self.hdfs_conf[name] = prop.find("./value").text
