/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.nhncorp.neptune.examples.upload.addtablet;

import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

import com.nhncorp.neptune.client.Cell;
import com.nhncorp.neptune.client.DirectUploader;
import com.nhncorp.neptune.client.NTable;
import com.nhncorp.neptune.client.Row;
import com.nhncorp.neptune.common.conf.NConfiguration;
import com.nhncorp.neptune.common.util.FileUtil;
import com.nhncorp.neptune.parallel.hadoop.AbstractTabletInputFormat;
import com.nhncorp.neptune.tablet.TableSchema;

public class UploadJob {
  public static final int INIT_TABLET_NUM = 10;
  public static final Log LOG = LogFactory.getLog(UploadJob.class.getName()); 
  
  public static final DecimalFormat KEY_DF1 = new DecimalFormat("0000");
  public static final DecimalFormat KEY_DF2 = new DecimalFormat("0000000000");
  
  /**
   * Builds and runs a MapReduce job that bulk-uploads tab-separated records
   * from {@code inputPath} into the Neptune table {@code tableName},
   * creating the table (with "title" and "contents" columns) if it does not
   * already exist.
   *
   * @param inputPath HDFS path holding the input records
   * @param tableName target Neptune table name
   * @throws IOException if table creation, HDFS access, or the job fails
   */
  public void runJob(String inputPath, String tableName)
      throws IOException {
    JobConf jobConf = new JobConf(UploadJob.class);
    // Ship the Neptune/Pleiades configuration files and jars to the task
    // nodes; paths are hard-coded under /user/hadoop on HDFS.
    DistributedCache.addFileToClassPath(new Path("/user/hadoop/neptune-site.xml"), jobConf);
    DistributedCache.addFileToClassPath(new Path("/user/hadoop/pleiades-site.xml"), jobConf);
    DistributedCache.addArchiveToClassPath(new Path("/user/hadoop/neptune-1.0-dev.jar"), jobConf);
    DistributedCache.addArchiveToClassPath(new Path("/user/hadoop/jgroups-all.jar"), jobConf);
    DistributedCache.addArchiveToClassPath(new Path("/user/hadoop/pleiades-0.2.1.jar"), jobConf);

    NConfiguration conf = new NConfiguration();
    TableSchema tableInfo = new TableSchema(tableName);
    tableInfo.addColumn("title");
    tableInfo.addColumn("contents");

    if (!NTable.existsTable(conf, tableName)) {
      NTable.createTable(conf, tableInfo);
    }
    jobConf.setJobName("UploadJob_" + tableName + "(" + new Date() + ")");
    Path tempOutputPath = new Path("UploadJob_" + System.currentTimeMillis());

    FileSystem fs = FileSystem.get(jobConf);
    // Clear any max-row-key scratch data left over from a previous run
    // before the map tasks start writing new entries there.
    FileUtil.delete(fs, getMaxRowKeyPath(tableName), true);
    
    // <MAP>
    FileInputFormat.addInputPath(jobConf, new Path(inputPath));
    jobConf.setMapperClass(UploadMap.class);
    jobConf.setMapOutputKeyClass(Text.class);
    jobConf.setMapOutputValueClass(Text.class);
    jobConf.setInputFormat(TextInputFormat.class);
    jobConf.set(AbstractTabletInputFormat.OUTPUT_TABLE, tableName);
    // Install the custom partitioner so the developer-defined partitioning
    // strategy is used instead of the default hash partitioner.
    jobConf.setPartitionerClass(UploadPartitioner.class);
    // Speculation and retries are disabled — presumably because a re-run
    // task would upload the same rows again through the DirectUploader.
    // NOTE(review): confirm this is the intent.
    jobConf.setMapSpeculativeExecution(false);
    jobConf.setMaxMapAttempts(0);
    // </MAP>

    // <REDUCE>
    jobConf.setReducerClass(UploadReduce.class);
    jobConf.setOutputKeyClass(Text.class);
    jobConf.setOutputValueClass(Text.class);
    FileOutputFormat.setOutputPath(jobConf, tempOutputPath);
    // One reduce task per initial tablet (see parseRecord's partitioning).
    jobConf.setNumReduceTasks(INIT_TABLET_NUM);
    jobConf.setMaxReduceAttempts(0);
    // </REDUCE>

    // Run Job
    JobClient.runJob(jobConf);

    // Delete the (unused) reduce output and the max-row-key scratch data.
    FileUtil.delete(fs, tempOutputPath, true);
    FileUtil.delete(fs, getMaxRowKeyPath(tableName), true);
  }

  /**
   * Splits a tab-separated record and replaces its first token with a
   * synthetic row key of the form {@code PPPP_HHHHHHHHHH}, where PPPP is
   * the partition number ({@code hash % INIT_TABLET_NUM}) and HHHHHHHHHH
   * is the non-negative hash of the original first token.
   *
   * @param record one tab-separated input line; callers expect at least two
   *               fields (key and value)
   * @return the token array with {@code tokens[0]} rewritten to the row key
   */
  public static String[] parseRecord(String record) {
    String[] tokens = record.split("\t");
    int raw = tokens[0].hashCode();
    // Math.abs(Integer.MIN_VALUE) is still negative, which would yield a
    // negative partition and a sign character in the formatted key. Map
    // that single value to 0; all other hashes keep their previous key.
    int hash = (raw == Integer.MIN_VALUE) ? 0 : Math.abs(raw);
    int partition = hash % INIT_TABLET_NUM;

    String key = KEY_DF1.format(partition) + "_" + KEY_DF2.format(hash);

    tokens[0] = key;

    return tokens;
  }

  /**
   * Reducer that bulk-loads rows into Neptune through a DirectUploader.
   * <p>
   * Each reduce task lazily registers a new tablet whose end row key is the
   * maximum row key recorded by the map side for this partition, then
   * streams every incoming row through the uploader.
   */
  static class UploadReduce implements
      Reducer<WritableComparable, Writable, WritableComparable, Writable> {
    private DirectUploader uploader;
    private JobConf jobConf;
    // Lazily initialized on the first reduce() call so a tablet is only
    // created for partitions that actually receive data.
    private boolean first = true;
    NTable ntable;
    public void reduce(WritableComparable key, 
        Iterator<Writable> values, 
        OutputCollector<WritableComparable, Writable> collector, 
        Reporter reporter) throws IOException {
      if(first) {
        init();
        first = false;
      }
      
      Row.Key rowKey = new Row.Key(key.toString());
      
      Row row = new Row(rowKey);
      while(values.hasNext()) {
        // NOTE(review): only the "title" column is uploaded; the "contents"
        // column declared in the table schema is never written. getBytes()
        // also uses the platform default charset — confirm both are intended.
        row.addCell("title", new Cell(Cell.Key.EMPTY_KEY, 
            values.next().toString().getBytes()));
      }
      uploader.put(row);
    }

    /** Creates this partition's tablet and opens the uploader. */
    private void init() throws IOException {
      String maxRowKey = getMaxRowKey(jobConf);
      String tableName = jobConf.get(AbstractTabletInputFormat.OUTPUT_TABLE);
      
      NConfiguration conf = new NConfiguration();
      ntable = NTable.openTable(conf, tableName);
      
      // Create the upload tablet, bounded by this partition's max row key.
      LOG.info("add tablet:" + maxRowKey);
      ntable.addTablet(new Row.Key(maxRowKey));
      
      // Open the uploader for the single column this reducer writes.
      uploader = ntable.openDirectUploader(new String[]{"title"});      
    }
    
    public void configure(JobConf jobConf) {
      this.jobConf = jobConf;
    }

    public void close() throws IOException {
      // A reduce task that received no input never ran init(), so the
      // uploader may still be null; guard instead of throwing an NPE.
      if (uploader != null) {
        uploader.close();
      }
    }
    
    /**
     * Reads back the maximum row key that the map side stored for this
     * reduce partition (see UploadMap.close()).
     * <p>
     * Assumes the task id ends with "NNNNNN_A" (six-digit task number plus
     * attempt suffix), e.g. "attempt_..._r_000005_0" — TODO confirm.
     *
     * @throws IOException if no max-row-key entry exists for this partition
     */
    private String getMaxRowKey(JobConf jobConf) throws IOException {
      String tableName = jobConf.get(AbstractTabletInputFormat.OUTPUT_TABLE);
      FileSystem fs = FileSystem.get(jobConf);
      Path parentPath = getMaxRowKeyPath(tableName);
      
      String taskId = jobConf.get("mapred.task.id");
      String taskId2 = taskId.substring(taskId.length() - 8);
      int taskNum = Integer.parseInt(taskId2.substring(0,6));  
      
      Path rowKeyInfoPath = new Path(parentPath, String.valueOf(taskNum));
      FileStatus[] paths = fs.listStatus(rowKeyInfoPath);
      if(paths == null || paths.length == 0) {
        throw new IOException("No max rowkey info:" + rowKeyInfoPath);
      }
      
      Arrays.sort(paths);
      
      // The lexicographically greatest directory name is the max row key.
      return paths[paths.length - 1].getPath().getName();
    }
  }

  /**
   * Mapper that rewrites each tab-separated input line into a
   * (rowKey, value) pair via {@link UploadJob#parseRecord(String)} and, on
   * close, records each partition's maximum row key to HDFS for the
   * reducers.
   */
  public static class UploadMap implements
      Mapper<WritableComparable, Writable, WritableComparable, Writable> {
    private JobConf jobConf;
    
    public void map(WritableComparable key, Writable value,
        OutputCollector<WritableComparable, Writable> collector,
        Reporter reporter) throws IOException {
      String record = value.toString();

      String[] parsedData = UploadJob.parseRecord(record);

      // Skip lines without a tab-separated value field: with
      // setMaxMapAttempts(0) a single malformed record would otherwise
      // fail the entire job on an ArrayIndexOutOfBoundsException.
      if (parsedData.length < 2) {
        reporter.incrCounter("UploadJob", "malformed_records", 1);
        return;
      }

      // Split into key and value and send to the reducer.
      collector.collect(new Text(parsedData[0]), new Text(parsedData[1]));
    }

    public void configure(JobConf jobConf) {
      this.jobConf = jobConf;
    }

    public void close() throws IOException {
      // Store each partition's max Row.Key in HDFS (as a directory name).
      // The reducers use this information to create a tablet before
      // uploading (see UploadReduce.init()).
      String tableName = jobConf.get(AbstractTabletInputFormat.OUTPUT_TABLE);
      FileSystem fs = FileSystem.get(jobConf);
      Path parentPath = getMaxRowKeyPath(tableName);
      
      int numPartitions = jobConf.getNumReduceTasks();
      for(int i = 0; i < numPartitions; i++) {
        // Per-partition max keys are published by UploadPartitioner through
        // job-conf properties within this task's JVM.
        String maxRowKey = jobConf.get(UploadPartitioner.MAX_KEY_PREFIX + "." + i);
        if(maxRowKey != null) {
          Path partitionPath = new Path(parentPath, String.valueOf(i));
          fs.mkdirs(new Path(partitionPath, maxRowKey));
        }
      }
    }
  }
  
  /**
   * Returns the scratch path under which map tasks record the maximum row
   * key observed per partition for {@code tableName}.
   */
  static Path getMaxRowKeyPath(String tableName) {
    String scratchDirName = "UploadJob_" + tableName + "_maxrow";
    return new Path(scratchDirName);
  }
}
