/////////////////////////////////////////////////////////////////////////////
// Original code from libhdfs3. Copyright (c) 2013 - 2014, Pivotal Inc.
// All rights reserved. Author: Zhanwei Wang
/////////////////////////////////////////////////////////////////////////////
//  Modifications by Kumo Inc.
// Copyright (C) Kumo Inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//



#ifndef _HDFS_LIBHDFS3_CLIENT_PIPELINE_H_
#define _HDFS_LIBHDFS3_CLIENT_PIPELINE_H_

#include <kmhdfs/client/file_system_inter.h>
#include <memory>
#include <kmhdfs/network/buffered_socket_reader.h>
#include <kmhdfs/network/tcp_socket.h>
#include <kmhdfs/client/packet.h>
#include <kmhdfs/client/packet_pool.h>
#include <kmhdfs/client/pipeline_ack.h>
#include <kmhdfs/server/datanode_info.h>
#include <kmhdfs/server/located_block.h>
#include <kmhdfs/server/namenode.h>
#include <kmhdfs/common/session_config.h>
#include <kmhdfs/common/thread.h>

#include <vector>
#include <deque>

namespace Hdfs {
namespace Internal {

enum BlockConstructionStage {
    /**
     * Each regular stage is listed directly before its matching
     * recovery stage; getRecoveryStage depends on this ordering,
     * so do not rearrange the enumerators.
     */
    // pipeline set up for block append
    PIPELINE_SETUP_APPEND = 0,
    // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
    PIPELINE_SETUP_APPEND_RECOVERY = 1,
    // data streaming
    DATA_STREAMING = 2,
    // pipeline setup for failed data streaming recovery
    PIPELINE_SETUP_STREAMING_RECOVERY = 3,
    // close the block and pipeline
    PIPELINE_CLOSE = 4,
    // Recover a failed PIPELINE_CLOSE
    PIPELINE_CLOSE_RECOVERY = 5,
    // pipeline set up for block creation
    PIPELINE_SETUP_CREATE = 6
};

/**
 * Translate a BlockConstructionStage into its symbolic name.
 * @param stage the stage to name.
 * @return a static string naming the stage, or "UNKNOWN STAGE" for any
 *         value that does not correspond to a known stage.
 */
static inline const char * StageToString(BlockConstructionStage stage) {
    // Table order mirrors the enumerator values 0..6 declared above.
    static const char * const kStageNames[] = {
        "PIPELINE_SETUP_APPEND",
        "PIPELINE_SETUP_APPEND_RECOVERY",
        "DATA_STREAMING",
        "PIPELINE_SETUP_STREAMING_RECOVERY",
        "PIPELINE_CLOSE",
        "PIPELINE_CLOSE_RECOVERY",
        "PIPELINE_SETUP_CREATE"
    };
    // Negative values wrap to a large unsigned index and fall through
    // to the unknown case, matching the original switch default.
    const unsigned index = static_cast<unsigned>(stage);

    if (index < sizeof(kStageNames) / sizeof(kStageNames[0])) {
        return kStageNames[index];
    }

    return "UNKNOWN STAGE";
}

class Packet;
class OutputStreamImpl;

/**
 * Abstract interface of the block write pipeline: setup, data
 * transfer, close, and failover.
 */
class Pipeline {
public:

    /**
     * Virtual destructor so concrete pipelines are destroyed correctly
     * when deleted through a Pipeline pointer.
     */
    virtual ~Pipeline() = default;

    /**
     * send all data and wait for all ack.
     */
    virtual void flush() = 0;

    /**
     * send LastPacket and close the pipeline.
     * @param lastPacket the final packet of the block.
     * @return the located block describing the completed block.
     */
    virtual std::shared_ptr<LocatedBlock> close(std::shared_ptr<Packet> lastPacket) = 0;

    /**
     * send a packet, retry on error until fatal.
     * @param packet the packet to send.
     */
    virtual void send(std::shared_ptr<Packet> packet) = 0;
};

/**
 * Concrete Pipeline: connects to the block's datanodes, streams
 * packets, tracks acks, and rebuilds the pipeline on failure.
 */
class PipelineImpl : public Pipeline {
public:
    /**
     * Construct and setup the pipeline.
     * NOTE(review): this overload takes no append flag although the
     * original comment said "for append" — confirm against the
     * implementation which write path (create vs append) it serves.
     */
    PipelineImpl(const char * path, SessionConfig & conf,
                 std::shared_ptr<FileSystemInter> filesystem, int checksumType, int chunkSize,
                 int replication, int64_t bytesSent, PacketPool & packetPool,
                 std::shared_ptr<LocatedBlock> lastBlock, int64_t fileId);

    /**
     * Construct and setup the pipeline.
     * @param append true to set up the pipeline for append to an
     *        existing block, false otherwise.
     */
    PipelineImpl(bool append, const char * path, const SessionConfig & conf,
                 std::shared_ptr<FileSystemInter> filesystem, int checksumType, int chunkSize,
                 int replication, int64_t bytesSent, PacketPool & packetPool,
                 std::shared_ptr<LocatedBlock> lastBlock, int64_t fileId);

    /**
     * send all data and wait for all ack.
     */
    void flush() override;

    /**
     * send LastPacket and close the pipeline.
     * @param lastPacket the final packet of the block.
     * @return the located block describing the completed block.
     */
    std::shared_ptr<LocatedBlock> close(std::shared_ptr<Packet> lastPacket) override;

    /**
     * send a packet, retry on error until fatal.
     * @param packet the packet to send.
     */
    void send(std::shared_ptr<Packet> packet) override;

    /**
     * get the size of bytes has sent.
     * @return bytesSent
     */
    int64_t getBytesSent();

    /**
     * wait for acks from datanode.
     * @param force
     */
    void waitForAcks(bool force);

    /**
     * Is pipeline closed. Return true if pipeline is closed.
     * @return bool
     */
    bool isClosed();

    /**
     * Open the block output stream to the datanode.
     * @param token the block access token.
     * @param gs presumably the block generation stamp — confirm
     *        against the implementation.
     * @param recovery true when re-opening the stream for recovery.
     */
    void createBlockOutputStream(const Token & token, int64_t gs, bool recovery);

private:
    // Replace a failed datanode in the pipeline; boolean result
    // semantics inferred from the name — confirm in the .cpp.
    bool addDatanodeToPipeline(const std::vector<DatanodeInfo> & excludedNodes);
    // Build the pipeline for the append or recovery path.
    void buildForAppendOrRecovery(bool recovery);
    // Build the pipeline for a newly allocated block.
    void buildForNewBlock();
    void checkPipelineWithReplicas();
    void checkResponse(bool wait);
    // Ask the namenode for the next block, excluding the given nodes.
    void locateNextBlock(const std::vector<DatanodeInfo> & excludedNodes);
    void processAck(PipelineAck & ack);
    void processResponse();
    // Re-send packets that have not been acked yet.
    void resend();
    void transfer(const ExtendedBlock & blk, const DatanodeInfo & src,
                  const std::vector<DatanodeInfo> & targets,
                  const Token & token);
    int findNewDatanode(const std::vector<DatanodeInfo> & original);

protected:
    static void checkBadLinkFormat(const std::string & node);

protected:
    // NOTE: member order is load-bearing for constructor initializer
    // lists in the .cpp — do not reorder.
    BlockConstructionStage stage;
    bool canAddDatanode;
    int blockWriteRetry;
    int checksumType;
    int chunkSize;
    int connectTimeout;
    int errorIndex;
    int readTimeout;
    int replication;
    int writeTimeout;
    int64_t bytesAcked; //the size of bytes the ack received.
    int64_t bytesSent; //the size of bytes has sent.
    PacketPool & packetPool;
    std::shared_ptr<BufferedSocketReader> reader;
    std::shared_ptr<FileSystemInter> filesystem;
    std::shared_ptr<LocatedBlock> lastBlock;
    std::shared_ptr<Socket> sock;
    std::deque<std::shared_ptr<Packet> > packets; // packets sent but not yet fully acked
    std::string clientName;
    std::string path;
    std::vector<DatanodeInfo> nodes;
    std::vector<std::string> storageIDs;
    int64_t fileId;

};

/**
 * Pipeline specialization for striped blocks.
 */
class StripedPipelineImpl : public PipelineImpl {
public:
    /**
     * Construct and setup the striped pipeline.
     * NOTE(review): the original comment said "for append" — likely
     * copy-pasted from PipelineImpl; confirm against the implementation.
     */
    StripedPipelineImpl(const char * path, SessionConfig & conf,
                 std::shared_ptr<FileSystemInter> filesystem, int checksumType, int chunkSize,
                 int replication, int64_t bytesSent, PacketPool & packetPool,
                 std::shared_ptr<LocatedBlock> lastBlock, int64_t fileId);

private:
    // Build the pipeline for the given newly allocated block.
    // NOTE(review): hides (does not override) the parameterless,
    // non-virtual PipelineImpl::buildForNewBlock().
    void buildForNewBlock(std::shared_ptr<LocatedBlock> block);

private:
    // NOTE(review): shadows the protected PipelineImpl::lastBlock
    // member — base-class code and this class operate on different
    // objects; confirm this is intentional.
    std::shared_ptr<LocatedBlock> lastBlock;
};

}
}

#endif /* _HDFS_LIBHDFS3_CLIENT_PIPELINE_H_ */
