/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package code.google.pigfly.mapreduce;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.ReflectionUtils;

import code.google.pigfly.PartitionInfo;
import code.google.pigfly.PigflyMetaStore;

public class PigflyInputFormat<K extends WritableComparable, V extends Writable>
    extends InputFormat<K, V> {

  public static final Log LOG = LogFactory.getLog(PigflyInputFormat.class);

  /**
   * HiveInputSplit encapsulates an InputSplit together with the name of the
   * InputFormat class that should read it. It extends FileSplit so that the
   * "map.input.file" property is populated correctly in MapTask for the
   * wrapped split.
   */
  public static class HiveInputSplit extends FileSplit implements Configurable {

    /** The wrapped split; FileSplit accessors delegate to it when possible. */
    private InputSplit inputSplit;
    /** Fully-qualified class name of the InputFormat that produced the split. */
    private String inputFormatClassName;
    /** Injected via setConf(); required by readFields() to resolve classes. */
    private Configuration conf;

    public HiveInputSplit() {
      // FileSplit has no public no-arg constructor; pass placeholder values.
      super((Path) null, 0, 0, (String[]) null);
    }

    public HiveInputSplit(InputSplit inputSplit, String inputFormatClassName) {
      // FileSplit has no public no-arg constructor; pass placeholder values.
      super((Path) null, 0, 0, (String[]) null);
      this.inputSplit = inputSplit;
      this.inputFormatClassName = inputFormatClassName;
    }

    /** @return the wrapped InputSplit */
    public InputSplit getInputSplit() {
      return inputSplit;
    }

    /** @return class name of the InputFormat that should read this split */
    public String inputFormatClassName() {
      return inputFormatClassName;
    }

    @Override
    public Path getPath() {
      if (inputSplit instanceof FileSplit) {
        return ((FileSplit) inputSplit).getPath();
      }
      // NOTE(review): Hadoop's Path("") constructor throws
      // IllegalArgumentException, so non-FileSplit wrapped splits will fail
      // here — confirm the intended fallback for this case.
      return new Path("");
    }

    /** The position of the first byte in the file to process. */
    @Override
    public long getStart() {
      if (inputSplit instanceof FileSplit) {
        return ((FileSplit) inputSplit).getStart();
      }
      return 0;
    }

    @Override
    public String toString() {
      return inputFormatClassName + ":" + inputSplit.toString();
    }

    @Override
    public long getLength() {
      try {
        return inputSplit.getLength();
      } catch (IOException e) {
        throw new RuntimeException(e);
      } catch (InterruptedException e) {
        // Restore the interrupt flag before propagating as unchecked.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }

    @Override
    public String[] getLocations() throws IOException {
      try {
        return inputSplit.getLocations();
      } catch (InterruptedException ie) {
        // Restore the interrupt flag before converting to IOException.
        Thread.currentThread().interrupt();
        throw new IOException(ie);
      }
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      // Must mirror write(): the class name is written with
      // WritableUtils.writeString (vint length + bytes), so it has to be
      // read with WritableUtils.readString. The previous in.readUTF() used
      // an incompatible length encoding and corrupted deserialization.
      String inputSplitClassName = WritableUtils.readString(in);
      try {
        // Assumes setConf() has been called first (the MR framework does
        // this for Configurable splits) — conf resolves the class name.
        inputSplit =
            (InputSplit) ReflectionUtils.newInstance(
                conf.getClassByName(inputSplitClassName), conf);
      } catch (Exception e) {
        // Chain the cause rather than flattening it to getMessage().
        throw new IOException(
            "Cannot create an instance of InputSplit class = "
                + inputSplitClassName, e);
      }
      ((Writable) inputSplit).readFields(in);
      inputFormatClassName = in.readUTF();
    }

    @Override
    public void write(DataOutput out) throws IOException {
      WritableUtils.writeString(out, inputSplit.getClass().getName());
      ((Writable) inputSplit).write(out);
      out.writeUTF(inputFormatClassName);
    }

    @Override
    public Configuration getConf() {
      return conf;
    }

    @Override
    public void setConf(Configuration conf) {
      this.conf = conf;
    }
  }

  /**
   * Configures the job input to read the given table location with no
   * partition filter.
   *
   * @param job the job being configured
   * @param location table location understood by the pigfly metastore
   * @throws IOException if the table cannot be resolved
   */
  public static void setInputLocation(Job job, String location)
      throws IOException {
    setInputLocation(job, location, null);
  }

  /**
   * Configures the job input to read the given table location.
   *
   * <p>TODO(pigfly): work in progress — currently only resolves the table
   * from the metastore. It must still: enumerate partitions honoring
   * {@code partitionFilter} and a max-partition limit, build a
   * {@link PartitionInfo} entry per partition (or a single entry with empty
   * partition values for non-partitioned tables), and serialize the
   * resulting job info into the job configuration.
   *
   * @param job the job being configured
   * @param location table location understood by the pigfly metastore
   * @param partitionFilter partition filter expression; not yet honored
   * @throws IOException if the table cannot be resolved
   */
  public static void setInputLocation(Job job, String location,
      String partitionFilter) throws IOException {
    // Resolve the table now so an invalid location fails fast at job-setup
    // time; the Table and the derived PartitionInfo list will be consumed
    // once partition enumeration is implemented.
    Table table = PigflyMetaStore.getInstance().getTable(location);
    List<PartitionInfo> partInfoList = new ArrayList<PartitionInfo>();
  }

  /**
   * TODO(pigfly): unwrap the {@link HiveInputSplit}, instantiate its
   * underlying InputFormat, push down projections and filters, and wrap the
   * inner reader in a PigflyRecordReader.
   */
  @Override
  public org.apache.hadoop.mapreduce.RecordReader<K, V> createRecordReader(
      org.apache.hadoop.mapreduce.InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    // Fail fast with a clear message instead of returning null, which would
    // otherwise surface later as an opaque NullPointerException inside the
    // MapReduce framework.
    throw new UnsupportedOperationException(
        "PigflyInputFormat.createRecordReader is not implemented yet");
  }

  /**
   * TODO(pigfly): compute real splits from the configured input location.
   */
  @Override
  public List<org.apache.hadoop.mapreduce.InputSplit>
      getSplits(JobContext arg0) throws IOException, InterruptedException {
    // Return an empty list rather than null: the framework iterates the
    // result, and a null return would NPE during job submission.
    return new ArrayList<org.apache.hadoop.mapreduce.InputSplit>();
  }
}
