/////////////////////////////////////////////////////////////////////////////
// Original code from libhdfs3. Copyright (c) 2013 - 2014, Pivotal Inc.
// All rights reserved. Author: Zhanwei Wang
/////////////////////////////////////////////////////////////////////////////
//  Modifications by Kumo Inc.
// Copyright (C) Kumo Inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//



#include <cassert>

#include <kmhdfs/proto/client_datanode_protocol.pb.h>
#include <kmhdfs/server/datanode.h>
#include <kmhdfs/common/exception.h>
#include <kmhdfs/common/exception_internal.h>
#include <kmhdfs/server/rpc_helper.h>

#define DATANODE_VERSION 1
#define DATANODE_PROTOCOL "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"
#define BLOCK_TOKEN_KIND "HDFS_BLOCK_TOKEN"

using namespace google::protobuf;

namespace Hdfs {
namespace Internal {

/**
 * Construct a client-side proxy for the ClientDatanodeProtocol on a single datanode.
 *
 * @param host  Datanode host name or address used to build the server endpoint.
 * @param port  Datanode IPC port.
 * @param c     Session configuration forwarded to the RPC channel on each call.
 * @param a     RPC authentication info (copied; used for every invocation).
 *
 * The protocol descriptor is fixed (version 1, ClientDatanodeProtocol, HDFS
 * block-token kind). The token service is cleared because datanode RPC uses
 * block tokens rather than a namenode delegation-token service string.
 */
DatanodeImpl::DatanodeImpl(const std::string & host, uint32_t port,
                           const SessionConfig & c, const RpcAuth & a) :
    auth(a), client(RpcClient::getClient()), conf(c), protocol(
        DATANODE_VERSION, DATANODE_PROTOCOL, BLOCK_TOKEN_KIND), server(host, port) {
    server.setTokenService("");
}

/**
 * Execute one RPC call against this datanode over a (possibly cached) channel.
 *
 * @param call   The RPC call descriptor to invoke.
 * @param reuse  When true, the channel is closed softly (close(false)) so the
 *               underlying connection can be reused by later calls; when false
 *               it is torn down (close(true)).
 *
 * Error handling:
 *  - HdfsFailoverException: datanodes have no HA configuration, so a failover
 *    wrapper here must carry the real cause as a nested exception. The channel
 *    is force-closed, then the nested exception is rethrown. If there is no
 *    nested exception, rethrow_if_nested returns and the assert fires in debug
 *    builds; in release builds control falls through to the final close below.
 *    // NOTE(review): that fallthrough calls close() on an already-closed
 *    // channel — presumed harmless, but confirm RpcChannel::close is idempotent.
 *  - Any other exception: force-close the channel and propagate unchanged.
 */
void DatanodeImpl::invoke(const RpcCall & call, bool reuse) {
    RpcChannel & channel = client.getChannel(auth, protocol, server, conf);

    try {
        channel.invoke(call);
    } catch (const HdfsFailoverException & e) {
        //Datanode do not have HA configuration.
        channel.close(true);
        Hdfs::rethrow_if_nested(e);
        assert(false && "HdfsFailoverException should be always a wrapper of other exception");
    } catch (...) {
        channel.close(true);
        throw;
    }

    channel.close(!reuse);
}

/**
 * Ask the datanode for the visible (readable) length of a replica.
 *
 * @param b  The block whose replica length is queried.
 * @return   The replica's visible length in bytes, as reported by the datanode.
 * @throws   ReplicaNotFoundException or HdfsIOException, unwrapped from the
 *           server-side RPC exception; other RpcCall failures propagate as-is.
 */
int64_t DatanodeImpl::getReplicaVisibleLength(const ExtendedBlock & b) {
    try {
        GetReplicaVisibleLengthRequestProto request;
        GetReplicaVisibleLengthResponseProto response;
        Build(b, request.mutable_block());
        // One-shot query: do not keep the channel open for reuse.
        invoke(RpcCall(true, "getReplicaVisibleLength", &request, &response), false);
        return response.length();
    } catch (const HdfsRpcServerException & e) {
        UnWrapper<ReplicaNotFoundException, HdfsIOException> unwraper(e);
        unwraper.unwrap(__FILE__, __LINE__);
    }

    // unwrap() is expected to always throw; without this guard, control could
    // fall off the end of a non-void function (undefined behavior) if it ever
    // returned. Assert in debug builds, return a sentinel in release builds.
    assert(false && "unwrap should have thrown");
    return -1;
}

void DatanodeImpl::getBlockLocalPathInfo(const ExtendedBlock & block,
        const Token & token, BlockLocalPathInfo & info) {
    try {
        ExtendedBlock eb;
        GetBlockLocalPathInfoRequestProto request;
        GetBlockLocalPathInfoResponseProto response;
        Build(block, request.mutable_block());
        Build(token, request.mutable_token());
        invoke(RpcCall(true, "getBlockLocalPathInfo", &request, &response), true);
        Convert(eb, response.block());
        info.setBlock(eb);
        info.setLocalBlockPath(response.localpath().c_str());
        info.setLocalMetaPath(response.localmetapath().c_str());
    } catch (const HdfsRpcServerException & e) {
        UnWrapper<ReplicaNotFoundException, HdfsIOException> unwraper(e);
        unwraper.unwrap(__FILE__, __LINE__);
    }
}

}
}
