/////////////////////////////////////////////////////////////////////////////
// Original code from libhdfs3. Copyright (c) 2013 - 2014, Pivotal Inc.
// All rights reserved. Author: Zhanwei Wang
/////////////////////////////////////////////////////////////////////////////
//  Modifications by Kumo Inc.
// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//



#ifndef _HDFS_LIBHDFS3_CLIENT_OUTPUTSTREAMIMPL_H_
#define _HDFS_LIBHDFS3_CLIENT_OUTPUTSTREAMIMPL_H_

#include <atomic>
#include <kmhdfs/common/checksum.h>
#include <turbo/times/time.h>
#include <kmhdfs/common/exception_internal.h>
#include <kmhdfs/client/file_system.h>
#include <memory>
#include <kmhdfs/client/output_stream_inter.h>
#include <kmhdfs/client/packet_pool.h>
#include <kmhdfs/client/permission.h>
#include <kmhdfs/client/pipeline.h>
#include <kmhdfs/server/located_block.h>
#include <kmhdfs/common/session_config.h>
#include <kmhdfs/common/thread.h>
#ifdef MOCK
#include "PipelineStub.h"
#endif

namespace Hdfs {
namespace Internal {
/**
 * A output stream used to write data to hdfs.
 */
class OutputStreamImpl: public OutputStreamInter {
public:
    OutputStreamImpl();

    ~OutputStreamImpl();

    /**
     * To create or append a file.
     * @param fs hdfs file system.
     * @param path the file path.
     * @param pair the located last block and file status returned by the
     *        create/append RPC; consumed by initAppend when appending.
     * @param flag creation flag, can be Create, Append or Create|Overwrite.
     * @param permission create a new file with given permission.
     * @param createParent if the parent does not exist, create it.
     * @param replication create a file with given number of replication.
     * @param blockSize  create a file with given block size.
     * @param fileId the HDFS file id assigned by the namenode.
     */
    void open(std::shared_ptr<FileSystemInter> fs, const char * path,
              std::pair<std::shared_ptr<LocatedBlock>, std::shared_ptr<Hdfs::FileStatus>> & pair,
              int flag, const Permission & permission, bool createParent, int replication,
              int64_t blockSize, int64_t fileId);

    /**
     * To append data to file.
     * @param buf the data used to append.
     * @param size the data size.
     */
    void append(const char * buf, int64_t size);

    /**
     * Flush all data in buffer and waiting for ack.
     * Will block until get all acks.
     */
    void flush();

    /**
     * return the current file length.
     * @return current file length.
     */
    int64_t tell();

    /**
     * @ref OutputStream::sync
     */
    void sync();

    /**
     * close the stream.
     */
    void close();

    /**
     * Output a readable string of this output stream.
     */
    std::string toString();

    /**
     * Keep the last error of this stream.
     * @param error the error to be kept.
     */
    void setError(const exception_ptr & error);

protected:
    // Append one checksum chunk (data + its checksum) into currentPacket.
    void appendChunkToPacket(const char * buf, int size);
    // Buffer/packetize `size` bytes from `buf`; called by append() after
    // argument validation.
    void appendInternal(const char * buf, int64_t size);
    // Rethrow lastError (if any) so the caller fails fast on a broken stream.
    void checkStatus();
    // Flush remaining packets and shut down the current write pipeline.
    void closePipeline();
    // Ask the namenode to finalize the file; if throwError is false,
    // failures are presumably swallowed (best-effort close) — confirm in .cpp.
    void completeFile(bool throwError);
    // Derive packetSize/chunksPerPacket from chunkSize and configuration.
    void computePacketChunkSize();
    // Common implementation behind flush()/sync(); needSync selects a
    // datanode fsync as well as a flush.
    void flushInternal(bool needSync);
    //void heartBeatSenderRoutine();
    // Set up stream state (lastBlock, cursor, checksum position) from the
    // located block / file status returned by the append RPC.
    void initAppend(std::pair<std::shared_ptr<LocatedBlock>, std::shared_ptr<FileStatus>> & pair);
    // Worker for open(); same parameters, invoked with error handling set up.
    void openInternal(std::shared_ptr<FileSystemInter> fs, const char * path,
                      std::pair<std::shared_ptr<LocatedBlock>, std::shared_ptr<Hdfs::FileStatus>> & pair,
                      int flag, const Permission & permission, bool createParent, int replication,
                      int64_t blockSize, int64_t fileId);
    // Reset all stream state to the freshly-constructed defaults.
    void reset();
    // Hand a filled packet to the pipeline for transmission.
    void sendPacket(std::shared_ptr<Packet> packet);
    // Create (or re-create) the datanode write pipeline for the current block.
    void setupPipeline();

    PacketPool packets;           // reusable packet buffers
    //std::atomic<bool> heartBeatStop;
    bool closed;                  // true once close() has completed
    bool isAppend;                // stream opened with Append flag
    bool syncBlock;               // request block-level sync on close (per config)
    //condition_variable condHeartBeatSender;
    exception_ptr lastError;      // first/last failure, rethrown by checkStatus()
    int checksumSize;             // bytes per checksum value
    int chunkSize;                // bytes of data covered by one checksum
    int chunksPerPacket;          // chunks packed into one packet
    int closeTimeout;             // timeout (presumably ms — confirm) for close
    int heartBeatInterval;        // pipeline heartbeat interval (heartbeat sender currently disabled above)
    int packetSize;               // max payload bytes per packet
    int position; //cursor in buffer
    int replication;              // target replica count for the file
    int64_t blockSize; //max size of block
    int64_t bytesWritten; //the size of bytes has be written into packet (not include the data in chunk buffer).
    int64_t cursor; //cursor in file.
    int64_t lastFlushed; //the position last flushed
    int64_t nextSeqNo;            // sequence number for the next packet
    mutex mut;                    // guards stream state across API calls
    std::shared_ptr<Checksum> checksum;           // checksum calculator for chunk data
    std::shared_ptr<FileSystemInter> filesystem;  // owning file system (namenode RPCs)
    std::shared_ptr<LocatedBlock> lastBlock;      // block currently being written
    std::shared_ptr<Packet> currentPacket;        // packet being filled
    std::shared_ptr<Pipeline> pipeline;           // datanode write pipeline
    std::shared_ptr<SessionConfig> conf;          // session configuration
    std::string path;                             // file path in HDFS
    int64_t fileId;                               // namenode-assigned file id
    std::vector<char> buffer;                     // chunk staging buffer (see `position`)
    turbo::Time lastSend;                         // time of last packet send
    //thread heartBeatSender;

    friend class Pipeline;
#ifdef MOCK
private:
    Hdfs::Mock::PipelineStub * stub;
#endif
};

}
}

#endif /* _HDFS_LIBHDFS3_CLIENT_OUTPUTSTREAMIMPL_H_ */
