/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package code.google.pigfly;

import java.io.IOException;
import java.io.Serializable;
import java.util.Objects;

import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.pig.impl.logicalLayer.schema.Schema;

/**
 * TableInfo - communicates table information to {@link PigflyInputFormat}
 * and {@link HCatOutputFormat}.
 */
public class TableInfo implements Serializable {

  private static final long serialVersionUID = 1L;

  /** The db and table names. */
  private final String databaseName;
  private final String tableName;

  /** The table schema: data columns and partition columns. */
  private final Schema dataColumns;
  private final Schema partitionColumns;

  /** The hive metastore table being read from / written to. */
  private final Table table;

  /** The storer info. Final: the class is immutable and has no setter. */
  private final StorerInfo storerInfo;

  /**
   * Initializes a new TableInfo instance to be used with
   * {@link PigflyInputFormat} for reading data from a table.
   *
   * @param databaseName the db name; {@code null} falls back to
   *        {@link MetaStoreUtils#DEFAULT_DATABASE_NAME}
   * @param tableName the table name
   * @param dataColumns schema of columns which contain data
   * @param partitionColumns schema of partition columns
   * @param storerInfo information about storage descriptor
   * @param table hive metastore table class
   */
  TableInfo(String databaseName, String tableName, Schema dataColumns,
      Schema partitionColumns, StorerInfo storerInfo, Table table) {
    this.databaseName =
        (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME
            : databaseName;
    this.tableName = tableName;
    this.dataColumns = dataColumns;
    this.partitionColumns = partitionColumns;
    this.storerInfo = storerInfo;
    this.table = table;
  }

  /**
   * Gets the value of databaseName
   * 
   * @return the databaseName
   */
  public String getDatabaseName() {
    return databaseName;
  }

  /**
   * Gets the value of tableName
   * 
   * @return the tableName
   */
  public String getTableName() {
    return tableName;
  }

  /**
   * @return return schema of data columns as defined in meta store
   */
  public Schema getDataColumns() {
    return dataColumns;
  }

  /**
   * @return schema of partition columns
   */
  public Schema getPartitionColumns() {
    return partitionColumns;
  }

  /**
   * @return the storerInfo
   */
  public StorerInfo getStorerInfo() {
    return storerInfo;
  }

  /**
   * minimize dependency on hive classes so this is package private this should
   * eventually no longer be used
   * 
   * @return hive metastore representation of table
   */
  Table getTable() {
    return table;
  }

  /**
   * create an HCatTableInfo instance from the supplied Hive Table instance
   *
   * <p>NOTE(review): not yet implemented — the schema/storer extraction
   * helpers have not been ported, so this currently always returns
   * {@code null}. Callers must be prepared for a {@code null} result until
   * the TODO below is completed.
   *
   * @param table to create an instance from
   * @return HCatTableInfo, or {@code null} (unimplemented)
   * @throws IOException if the table metadata cannot be converted
   */
  static TableInfo valueOf(Table table) throws IOException {
    // TODO: port the HCatalog helpers and build the instance roughly as:
    //   dataColumns      = extractSchemaFromStorageDescriptor(table.getSd())
    //   storerInfo       = extractStorerInfo(table.getSd(), table.getParameters())
    //   partitionColumns = getPartitionColumns(table)
    //   return new TableInfo(table.getDbName(), table.getTableName(),
    //       dataColumns, partitionColumns, storerInfo, table);
    return null;
  }

  /**
   * Two TableInfo instances are equal iff all six fields are equal
   * (null-safe).
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    TableInfo other = (TableInfo) o;
    return Objects.equals(databaseName, other.databaseName)
        && Objects.equals(tableName, other.tableName)
        && Objects.equals(dataColumns, other.dataColumns)
        && Objects.equals(partitionColumns, other.partitionColumns)
        && Objects.equals(table, other.table)
        && Objects.equals(storerInfo, other.storerInfo);
  }

  /** Hash over the same six fields compared by {@link #equals(Object)}. */
  @Override
  public int hashCode() {
    return Objects.hash(databaseName, tableName, dataColumns,
        partitionColumns, table, storerInfo);
  }

}
