﻿/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif


#include "hadoop/Pipes.hh"
#include "hadoop/SerialUtils.hh"
#include "hadoop/StringUtils.hh"

#include <map>
#include <vector>

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <QtCore/QMutex>
#include <QtCore/QProcessEnvironment>
#include <iostream>
#include <fstream>

#include <glog/logging.h>


#include <QtNetwork/QTcpSocket>
#include <QtNetwork/QHostAddress>
#include "JobConfImpl.h"
#include "PingThread.h"
#include "TextProtocol.h"
#include "BinaryProtocol.h"
using std::map;
using std::string;
using std::vector;

using namespace HadoopUtils;

namespace HadoopPipes {


 
  // Delimiters used by the text wire protocol: '\t' separates fields
  // within a record and '\n' terminates the record.
  const char* TextProtocol::delim = "\t\n";

  
  
  
  /**
   * Define a context object to give to combiners that will let them
   * go through the values and emit their results correctly.
   */
  class CombineContext: public ReduceContext {
  private:
    ReduceContext* baseContext;
    Partitioner* partitioner;
    int numReduces;
    UpwardProtocol* uplink;
    bool firstKey;
    bool firstValue;
    map<string, vector<string> >::iterator keyItr;
    map<string, vector<string> >::iterator endKeyItr;
    vector<string>::iterator valueItr;
    vector<string>::iterator endValueItr;

  public:
    CombineContext(ReduceContext* _baseContext,
                   Partitioner* _partitioner,
                   int _numReduces,
                   UpwardProtocol* _uplink,
                   map<string, vector<string> >& data) {
      baseContext = _baseContext;
      partitioner = _partitioner;
      numReduces = _numReduces;
      uplink = _uplink;
      keyItr = data.begin();
      endKeyItr = data.end();
      firstKey = true;
      firstValue = true;
    }

    virtual const JobConf* getJobConf() {
      return baseContext->getJobConf();
    }

    virtual const std::string& getInputKey() {
      return keyItr->first;
    }

    virtual const std::string& getInputValue() {
      return *valueItr;
    }

    virtual void emitRecord(const std::string& key, const std::string& value) {
      if (partitioner != NULL) {
        uplink->partitionedOutput(partitioner->partition(key, numReduces),
                                  key, value);
      } else {
        uplink->output(key, value);
      }
    }

    virtual void progress() {
      baseContext->progress();
    }

    virtual void setStatus(const std::string& status) {
      baseContext->setStatus(status);
    }

    bool nextKey() {
      if (firstKey) {
        firstKey = false;
      } else {
        ++keyItr;
      }
      if (keyItr != endKeyItr) {
        valueItr = keyItr->second.begin();
        endValueItr = keyItr->second.end();
        firstValue = true;
        return true;
      }
      return false;
    }

    virtual bool nextValue() {
      if (firstValue) {
        firstValue = false;
      } else {
        ++valueItr;
      }
      return valueItr != endValueItr;
    }
    
    virtual Counter* getCounter(const std::string& group, 
                               const std::string& name) {
      return baseContext->getCounter(group, name);
    }

    virtual void incrementCounter(const Counter* counter, uint64_t amount) {
      baseContext->incrementCounter(counter, amount);
    }
  };

  /**
   * A RecordWriter that will take the map outputs, buffer them up and then
   * combine then when the buffer is full.
   */
  class CombineRunner: public RecordWriter {
  private:
    map<string, vector<string> > data;
    int64_t spillSize;
    int64_t numBytes;
    ReduceContext* baseContext;
    Partitioner* partitioner;
    int numReduces;
    UpwardProtocol* uplink;
    Reducer* combiner;
  public:
    CombineRunner(int64_t _spillSize, ReduceContext* _baseContext, 
                  Reducer* _combiner, UpwardProtocol* _uplink, 
                  Partitioner* _partitioner, int _numReduces) {
      numBytes = 0;
      spillSize = _spillSize;
      baseContext = _baseContext;
      partitioner = _partitioner;
      numReduces = _numReduces;
      uplink = _uplink;
      combiner = _combiner;
    }

    virtual void emitRecord(const std::string& key,
                      const std::string& value) {
      numBytes += key.length() + value.length();
      data[key].push_back(value);
      if (numBytes >= spillSize) {
        spillAll();
      }
    }

    virtual void close() {
      spillAll();
    }

  private:
    void spillAll() {
      CombineContext context(baseContext, partitioner, numReduces, 
                             uplink, data);
      while (context.nextKey()) {
        combiner->reduce(context);
      }
      data.clear();
      numBytes = 0;
    }
  };


  class TaskContextImpl: public MapContext, public ReduceContext, 
                         public DownwardProtocol {
  private:
    bool done;
    JobConf* jobConf;
    std::string key;
    const std::string* newKey;
    const std::string* value;
    bool hasTask;
    bool isNewKey;
    bool isNewValue;
    std::string* inputKeyClass;
    std::string* inputValueClass;
    std::string status;
    float progressFloat;
    uint64_t lastProgress;
    bool statusSet;
    Protocol* protocol;
    UpwardProtocol *uplink;
    std::string* inputSplit;
    RecordReader* reader;
    Mapper* mapper;
    Reducer* reducer;
    RecordWriter* writer;
    Partitioner* partitioner;
    int numReduces;
    const Factory* factory;
    QMutex mutexDone;
    std::vector<int> registeredCounterIds;

  public:

    TaskContextImpl(const Factory& _factory) {
      statusSet = false;
      done = false;
      newKey = NULL;
      factory = &_factory;
      jobConf = NULL;
      inputKeyClass = NULL;
      inputValueClass = NULL;
      inputSplit = NULL;
      mapper = NULL;
      reducer = NULL;
      reader = NULL;
      writer = NULL;
      partitioner = NULL;
      protocol = NULL;
      isNewKey = false;
      isNewValue = false;
      lastProgress = 0;
      progressFloat = 0.0f;
      hasTask = false;
    }

    void setProtocol(Protocol* _protocol, UpwardProtocol* _uplink) {
      protocol = _protocol;
      uplink = _uplink;
    }

    virtual void start(int protocol) {
      if (protocol != 0) {
        throw Error("Protocol version " + toString(protocol) + 
                    " not supported");
      }
    }

    virtual void setJobConf(std::vector<std::string> values) {
      int len = values.size();
      JobConfImpl* result = new JobConfImpl();
	  //len必须是偶数。因为values里面是key1,value1,key2,value2这样的序列
      HADOOP_ASSERT(len % 2 == 0, "Odd length of job conf values");
      for(int i=0; i < len; i += 2) {
        result->set(values[i], values[i+1]);
      }
      jobConf = result;
    }

    virtual void setInputTypes(string keyType, string valueType) {
      inputKeyClass = new string(keyType);
      inputValueClass = new string(valueType);
    }

    virtual void runMap(string _inputSplit, int _numReduces, bool pipedInput) {
      inputSplit = new std::string(_inputSplit);
      reader = factory->createRecordReader(*this);
      HADOOP_ASSERT((reader == NULL) == pipedInput,
                    pipedInput ? "RecordReader defined when not needed.":
                    "RecordReader not defined");
      if (reader != NULL) {
        value = new string();
      }
      mapper = factory->createMapper(*this);
      numReduces = _numReduces;
      if (numReduces != 0) { 
        reducer = factory->createCombiner(*this);
        partitioner = factory->createPartitioner(*this);
      }
      if (reducer != NULL) {
        int64_t spillSize = 100;
        if (jobConf->hasKey("mapreduce.task.io.sort.mb")) {
          spillSize = jobConf->getInt("mapreduce.task.io.sort.mb");
        }
        writer = new CombineRunner(spillSize * 1024 * 1024, this, reducer, 
                                   uplink, partitioner, numReduces);
      }
      hasTask = true;
    }

    virtual void mapItem(const string& _key, const string& _value) {
      newKey = &_key;
      value = &_value;
      isNewKey = true;
    }

    virtual void runReduce(int reduce, bool pipedOutput) {
      reducer = factory->createReducer(*this);
      writer = factory->createRecordWriter(*this);
      HADOOP_ASSERT((writer == NULL) == pipedOutput,
                    pipedOutput ? "RecordWriter defined when not needed.":
                    "RecordWriter not defined");
      hasTask = true;
    }

    virtual void reduceKey(const string& _key) {
      isNewKey = true;
      newKey = &_key;
    }

    virtual void reduceValue(const string& _value) {
      isNewValue = true;
      value = &_value;
    }
    
    virtual bool isDone() {
      QMutexLocker locker(&mutexDone);
      bool doneCopy = done;
      return doneCopy;
    }

    virtual void close() {
      QMutexLocker locker(&mutexDone);
      done = true;
    }

    virtual void abort() {
      throw Error("Aborted by driver");
    }

    void waitForTask() {
      while (!done && !hasTask) {
        protocol->nextEvent();
      }
    }

    bool nextKey() {
      if (reader == NULL) {
        while (!isNewKey) {
          nextValue();
          if (done) {
            return false;
          }
        }
        key = *newKey;
      } else {
        if (!reader->next(key, const_cast<string&>(*value))) {
	  QMutexLocker locker(&mutexDone);
          done = true;
          return false;
        }
        progressFloat = reader->getProgress();
      }
      isNewKey = false;
      if (mapper != NULL) {
        mapper->map(*this);
      } else {
        reducer->reduce(*this);
      }
      return true;
    }

    /**
     * Advance to the next value.
     */
    virtual bool nextValue() {
      if (isNewKey || done) {
        return false;
      }
      isNewValue = false;
      progress();
      protocol->nextEvent();
      return isNewValue;
    }

    /**
     * Get the JobConf for the current task.
     */
    virtual JobConf* getJobConf() {
      return jobConf;
    }

    /**
     * Get the current key. 
     * @return the current key or NULL if called before the first map or reduce
     */
    virtual const string& getInputKey() {
      return key;
    }

    /**
     * Get the current value. 
     * @return the current value or NULL if called before the first map or 
     *    reduce
     */
    virtual const string& getInputValue() {
      return *value;
    }

    /**
     * Mark your task as having made progress without changing the status 
     * message.
     */
    virtual void progress() {
      if (uplink != 0) {
        uint64_t now = getCurrentMillis();
        if (now - lastProgress > 1000) {
          lastProgress = now;
          if (statusSet) {
            uplink->status(status);
            statusSet = false;
          }
          uplink->progress(progressFloat);
        }
      }
    }

    /**
     * Set the status message and call progress.
     */
    virtual void setStatus(const string& status) {
      this->status = status;
      statusSet = true;
      progress();
    }

    /**
     * Get the name of the key class of the input to this task.
     */
    virtual const string& getInputKeyClass() {
      return *inputKeyClass;
    }

    /**
     * Get the name of the value class of the input to this task.
     */
    virtual const string& getInputValueClass() {
      return *inputValueClass;
    }

    /**
     * Access the InputSplit of the mapper.
     */
    virtual const std::string& getInputSplit() {
      return *inputSplit;
    }

    virtual void emitRecord(const string& key, const string& value) {
      progress();
      if (writer != NULL) {
        writer->emitRecord(key, value);
      } else if (partitioner != NULL) {
        int part = partitioner->partition(key, numReduces);
        uplink->partitionedOutput(part, key, value);
      } else {
        uplink->output(key, value);
      }
    }

    /**
     * Register a counter with the given group and name.
     */
    virtual Counter* getCounter(const std::string& group, 
                               const std::string& name) {
      int id = registeredCounterIds.size();
      registeredCounterIds.push_back(id);
      uplink->registerCounter(id, group, name);
      return new Counter(id);
    }

    /**
     * Increment the value of the counter with the given amount.
     */
    virtual void incrementCounter(const Counter* counter, uint64_t amount) {
      uplink->incrementCounter(counter, amount); 
    }

    void closeAll() {
      if (reader) {
        reader->close();
      }
      if (mapper) {
        mapper->close();
      }
      if (reducer) {
        reducer->close();
      }
      if (writer) {
        writer->close();
      }
    }

    virtual ~TaskContextImpl() {
      delete jobConf;
      delete inputKeyClass;
      delete inputValueClass;
      delete inputSplit;
      if (reader) {
        delete value;
      }
      delete reader;
      delete mapper;
      delete reducer;
      delete writer;
      delete partitioner;
    }
  };


  /**
   * Run the assigned task in the framework.
   * The user's main function should set the various functions using the 
   * set* functions above and then call this.
   * 如果设置了mapreduce.pipes.command.port环境变量，那么从它所指向的port读/写数据。
   * 如果设置了mapreduce.pipes.commandfile环境变量，那么从它所指向的文件读/写数据。
   * 否则，从stdin/stdout读写数据。
   * @return true, if the task succeeded.
   */
  bool runTask(const Factory& factory) {
    try {
      TaskContextImpl* context = new TaskContextImpl(factory);
      Protocol* connection;
      QString portStr =QProcessEnvironment::systemEnvironment().value("mapreduce.pipes.command.port");
      QFile* inFile = NULL;
      QFile* outFile = NULL;
      char *bufin = NULL;
      char *bufout = NULL;
	  QTcpSocket* socket=NULL;
      if (!portStr.isEmpty()) {
		 socket=new QTcpSocket();
		 socket->connectToHost(QHostAddress::LocalHost, portStr.toInt());
		 const int Timeout = 5 * 1000;
		 if (!socket->waitForConnected(Timeout)) {
			 throw HadoopUtils::Error("problem connecting command socket");  //这个会被下面catch
		 }
        connection = new BinaryProtocol(socket, context, socket);
      } else if (QProcessEnvironment::systemEnvironment().contains("mapreduce.pipes.commandfile")) {
        const QString filename = QProcessEnvironment::systemEnvironment().value("mapreduce.pipes.commandfile");
        const QString outFilename = filename + ".out";
	inFile=new QFile(filename);
	if (!inFile->open(QIODevice::ReadOnly))
	  throw HadoopUtils::Error("打开输入文件失败"); 
	outFile=new QFile(outFilename);
	if (!outFile->open(QIODevice::WriteOnly | QIODevice::Truncate))
	  throw HadoopUtils::Error("打开输出文件失败");        
        connection = new BinaryProtocol(inFile, context, outFile);
      } else {
        //connection = new TextProtocol(stdin, context, stdout);
	inFile=new QFile();
	if (!inFile->open(stdin,QIODevice::ReadOnly))
	  throw HadoopUtils::Error("打开输入文件失败"); 
	outFile=new QFile();
	if (!outFile->open(stdout,QIODevice::WriteOnly))
	  throw HadoopUtils::Error("打开输出文件失败");        
        connection = new BinaryProtocol(inFile, context, outFile);
      }
      context->setProtocol(connection, connection->getUplink());
      PingThread pingThread(context);
      pingThread.start();
      context->waitForTask();
      while (!context->isDone()) {
        context->nextKey();
      }
      context->closeAll();
      connection->getUplink()->done();
      pingThread.wait();
      delete context;
      delete connection;
      if (socket != NULL) {
		socket->disconnectFromHost();
		if (socket->state() == QAbstractSocket::UnconnectedState ||
			socket->waitForDisconnected(-1))
			LOG(INFO)<<"已从服务器断开socket连接";
		delete socket;
      }
      if (inFile != NULL) {
        inFile->close();
		delete inFile;
      }
      if (outFile != NULL) {
		  outFile->close();
		  delete outFile;
      } 
      delete bufin;
      delete bufout;
      return true;
    } catch (std::runtime_error& err) {
      LOG(FATAL)<<"Hadoop Pipes Exception:"<<err.what();
      return false;
    }
  }

/** 每5秒测试下服务器能不能连上 
 * TODO: 改成QT的异步网络方法。
 * 首先用  QTcpSocket socket; 建立socket
 * 然后用 socket.connectToHost(hostName, portNumber);连接服务器
 * 然后执行 this->exec();
 * 让this->context_的isDone()来通知quit()。利用QT的signal/slot机制。
 */
void PingThread::run(){
    QString portStr = QProcessEnvironment::systemEnvironment().value("mapreduce.pipes.command.port");
	if(portStr.isEmpty()) return;
    int MAX_RETRIES = 3;
    int remaining_retries = MAX_RETRIES;
    while (!this->context_->isDone()) {
      try{
		this->sleep(5);
        int sock = -1;
		  QTcpSocket socket;
		  socket.connectToHost(QHostAddress::LocalHost, portStr.toInt());
		  const int Timeout = 5 * 1000;
		  if (!socket.waitForConnected(Timeout)) {
			  throw HadoopUtils::Error("problem connecting command socket");  //这个会被下面catch
		  }
		  socket.disconnectFromHost();
		  if (socket.state() == QAbstractSocket::UnconnectedState || socket.waitForDisconnected(10000))
				remaining_retries = MAX_RETRIES;        
      } catch (Error& err) {
        if (!this->context_->isDone()) {
          fprintf(stderr, "Hadoop Pipes Exception: in ping %s\n", 
                err.getMessage().c_str());
          remaining_retries -= 1;
          if (remaining_retries == 0) {
			//TODO: emit signals，友好退出
            exit(1);
          }
        } else {
          return;
        }
      }
    }
    return ;
  }


}



