#!/home/hadoop/anaconda2/bin/python
# -*- coding:utf-8 -*-
import logging

try:
  from pyspark.sql.types import *
except ImportError:
  pass


class TableDef(object):
  """Builds a Hive ``CREATE EXTERNAL TABLE`` statement (ORC storage) from a
  Spark ``StructType``.

  Columns listed in ``partitions`` are emitted in the ``partitioned by(...)``
  clause instead of the regular column list.

  Attributes:
    create_table_sql: the most recently generated DDL string (set by
      build_create_table_sql).
  """

  def __init__(self, struct_type, table_name, hive_schema, partitions,
      root_location):
    """
    Args:
      struct_type: Spark StructType describing all columns, including any
        partition columns.
      table_name: unqualified target table name.
      hive_schema: Hive database/schema the table belongs to.
      partitions: list/tuple of partition column names; may be empty.
      root_location: storage directory for the external table (LOCATION).

    Raises:
      TypeError: if partitions is not a list or tuple.
    """
    self.struct_type = struct_type
    self.table_name = table_name
    self.hive_schema = hive_schema
    # Explicit raise instead of assert: asserts are stripped under
    # "python -O", which would silently accept a bad partitions value.
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions must be a list or tuple, got {0}".format(
        type(partitions).__name__))
    self.partitions = partitions
    self.root_location = root_location
    self.template = "create external table if not exists {0} ({1}) {2} stored as orc LOCATION '{3}'"
    self.partition_template = "partitioned by({0})"

  def build_create_table_sql(self):
    """Return the full DDL string; also cached on self.create_table_sql."""
    qualified_name = "{0}.{1}".format(self.hive_schema, self.table_name)
    # An empty (or falsy) partition list yields an unpartitioned table.
    if self.partitions:
      partition_clause = self.partition_template.format(
        self.build_partitions())
    else:
      partition_clause = ""
    self.create_table_sql = self.template.format(
      qualified_name, self.build_columns(), partition_clause,
      self.root_location)
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.info("create table sql = %s", self.create_table_sql)
    return self.create_table_sql

  def build_columns(self):
    """Return "name type,..." for every non-partition column."""
    return ",".join(
      "{0} {1}".format(field.name, field.dataType.typeName())
      for field in self.struct_type.fields
      if field.name not in self.partitions)

  def build_partitions(self):
    """Return "name type,..." for the partition columns, in the given order.

    Raises:
      ValueError: if a partition name has no matching field of a supported
        (integer/string/short/long) type in struct_type.
    """
    part_fields = []
    for name in self.partitions:
      for field in self.struct_type.fields:
        if field.name == name and isinstance(
            field.dataType,
            (IntegerType, StringType, ShortType, LongType)):
          part_fields.append(field)
          break
      else:
        # Replaces the former bare assert with a message naming the culprit.
        raise ValueError(
          "partition column '{0}' not found in schema or has an "
          "unsupported type".format(name))
    return ",".join(
      "{0} {1}".format(field.name, field.dataType.typeName())
      for field in part_fields)
