// Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
//     http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sys/types.h>                  // O_CREAT
#include <fcntl.h>                      // open
#include <gflags/gflags.h>              // DEFINE_*
#include <butil/sys_byteorder.h>        // butil::NetToHost32
#include <brpc/controller.h>            // brpc::Controller
#include <brpc/server.h>                // brpc::Server
#include <braft/raft.h>                 // braft::Node braft::StateMachine
#include <braft/storage.h>              // braft::SnapshotWriter
#include <braft/util.h>                 // braft::AsyncClosureGuard
#include "kv.pb.h"                   // BlockService
#include <dirent.h>
#include "rocksdb/db.h"

// Command-line flags controlling raft behavior, networking, and on-disk
// layout. Each replication group stores its data under a subdirectory of
// --data_path; all groups share one listen --port.
DEFINE_bool(check_term, true, "Check if the leader changed to another term");
DEFINE_bool(disable_cli, false, "Don't allow raft_cli access this node");
DEFINE_bool(log_applied_task, false, "Print notice log when a task is applied");
DEFINE_int32(election_timeout_ms, 1000, 
            "Start election in such milliseconds if disconnect with the leader");
DEFINE_int32(port, 8200, "Listen port of this peer");
DEFINE_int32(snapshot_interval, 30, "Interval between each snapshot");
DEFINE_string(conf, "", "Initial configuration of the replication group");
DEFINE_string(data_path, "./data", "Path of data stored on");
DEFINE_string(group, "Block", "Id of the replication group");
DEFINE_int32(group_count, 3, "default of group count");

namespace example {
class Block;

// Operation names carried in KVRequest::op (and in batch sub-requests).
const std::string OP_PUT    = "put";
const std::string OP_GET    = "get";
const std::string OP_DELETE = "delete";
const std::string OP_BATCH  = "batch";
const std::string OP_LIST   = "list";

// Implements Closure which encloses RPC stuff
// Closure that carries the RPC context of a mutation while it travels
// through the raft log; Run() responds to the client once the task is
// applied or fails.
class BlockClosure : public braft::Closure {
  public:
    BlockClosure(Block* block,
                 const KVRequest* request,
                 KVResponse* response,
                 butil::IOBuf* data,
                 google::protobuf::Closure* done)
        : _block(block)
        , _request(request)
        , _response(response)
        , _data(data)
        , _done(done) {}
    ~BlockClosure() {}

    // Accessors for the enclosed RPC objects.
    const KVRequest* request() const { return _request; }
    KVResponse* response() const { return _response; }
    butil::IOBuf* data() const { return _data; }

    // Invoked by braft when the task is executed or fails.
    void Run();

  private:
    // Disable explicitly delete. None of these pointers are owned here
    // except _done, which Run() invokes exactly once.
    Block* _block;
    const KVRequest* _request;
    KVResponse* _response;
    butil::IOBuf* _data;
    google::protobuf::Closure* _done;
};

// Implementation of example::Block as a braft::StateMachine.
class Block : public braft::StateMachine {
  public:
    Block()
      : _node(NULL)
        , _leader_term(-1)
  {}
    ~Block() {
      delete _node;
    }

    // Starts this node
    int init_db(std::string group_name){
      std::string path= FLAGS_data_path + "/" + group_name;
      _data_path = path + "/engine";
      if (!butil::CreateDirectory(butil::FilePath(_data_path))) {
        LOG(ERROR) << "Fail to create directory " << FLAGS_data_path;
        return -1;
      }

      _db_options.create_if_missing = true;

      rocksdb::DB* rocksdb;
      rocksdb::Status s = rocksdb::DB::Open(_db_options, _data_path, &rocksdb);
      if( !s.ok()){
        LOG(ERROR) << "Fail to open rocksdb " << FLAGS_data_path;
      }
      _rocksdb = std::shared_ptr<rocksdb::DB>(rocksdb);
      return 0;

    }
    int start(std::string group_name) {
      init_db(group_name);

      //_fd = new SharedFD(fd);
      butil::EndPoint addr(butil::my_ip(), FLAGS_port);
      braft::NodeOptions node_options;
      if (node_options.initial_conf.parse_from(FLAGS_conf) != 0) {
        LOG(ERROR) << "Fail to parse configuration `" << FLAGS_conf << '\'';
        return -1;
      }

      std::string path= FLAGS_data_path + "/" + group_name;
      node_options.election_timeout_ms = FLAGS_election_timeout_ms;
      node_options.fsm = this;
      node_options.node_owns_fsm = false;
      node_options.snapshot_interval_s = FLAGS_snapshot_interval;
      std::string prefix = "local://" + path;
      node_options.log_uri = prefix + "/log";
      node_options.raft_meta_uri = prefix + "/raft_meta";
      node_options.snapshot_uri = prefix + "/snapshot";
      node_options.disable_cli = FLAGS_disable_cli;

      braft::Node* node = new braft::Node(group_name, braft::PeerId(addr));
      if (node->init(node_options) != 0) {
        LOG(ERROR) << "Fail to init raft node";
        delete node;
        return -1;
      }
      _node = node;
      return 0;
    }

    // Impelements Service methods
    void put(const KVRequest* request,
        KVResponse* response,
        google::protobuf::Closure* done) {
      brpc::ClosureGuard done_guard(done);
      // Serialize request to the replicated write-ahead-log so that all the
      // peers in the group receive this request as well.
      // Notice that _value can't be modified in this routine otherwise it
      // will be inconsistent with others in this group.

      // Serialize request to IOBuf
      const int64_t term = _leader_term.load(butil::memory_order_relaxed);
      if (term < 0) {
        return redirect(response);
      }
      butil::IOBuf log;
      const uint32_t meta_size_raw = butil::HostToNet32(request->ByteSize());
      log.append(&meta_size_raw, sizeof(uint32_t));
      butil::IOBufAsZeroCopyOutputStream wrapper(&log);
      if (!request->SerializeToZeroCopyStream(&wrapper)) {
        LOG(ERROR) << "Fail to serialize request";
        response->set_success(false);
        return;
      }
      // Apply this log as a braft::Task
      braft::Task task;
      task.data = &log;
      // This callback would be iovoked when the task actually excuted or
      // fail
      task.done = new BlockClosure(this, request, response, NULL, done_guard.release());
      if (FLAGS_check_term) {
        // ABA problem can be avoid if expected_term is set
        task.expected_term = term;
      }
      // Now the task is applied to the group, waiting for the result.
      return _node->apply(task);
    }

    void get(const KVRequest *request, KVResponse* response) {
      // In consideration of consistency. GetRequest to follower should be 
      // rejected.
      if (!is_leader()) {
        // This node is a follower or it's not up-to-date. Redirect to
        // the leader if possible.
        return redirect(response);
      }

      if(_rocksdb){
        LOG(ERROR) <<"db not initialized";
        response->set_success(false);
        return;
      }

      // This is the leader and is up-to-date. It's safe to respond client
      // scoped_fd fd = get_fd();

      rocksdb::Status s = _rocksdb->Get(rocksdb::ReadOptions(), request->key(), response->mutable_value());
      if(!s.ok()){
        LOG(ERROR) <<"failed to get key:" << request->key();
        response->set_success(false);
        return;
      }

      response->set_success(true);
    }

    void List(const ListRequest *request, ListResponse* response){
      if (!is_leader()) {
        // This node is a follower or it's not up-to-date. Redirect to
        // the leader if possible.
        //return redirect(response);
        response->set_success(false);
        return;
      }

      if(_rocksdb){
        LOG(ERROR) <<"db not initialized";
        response->set_success(false);
        return;
      }

      std::unique_ptr<rocksdb::Iterator> iter(_rocksdb->NewIterator(rocksdb::ReadOptions()));
      int max_count = request->has_max_count()? request->max_count(): 1000;
      for(iter->Seek(request->start_from()); iter->Valid() && max_count > 0; iter->Next(), max_count--){

        response->add_keys(iter->key().data(), iter->key().size());
        if(request->has_with_value() && request->with_value()){
          response->add_values(iter->value().data(), iter->value().size());
        }
      }

      if(iter->status().ok()){
        response->set_success(true);
      }else{
        response->set_success(false);
      }

    }

    void Exist(const KVRequest *request, KVResponse* response){
      if (!is_leader()) {
        // This node is a follower or it's not up-to-date. Redirect to
        // the leader if possible.
        return redirect(response);
      }

      if(_rocksdb){
        LOG(ERROR) <<"db not initialized";
        response->set_success(false);
        return;
      }

      bool exists = _rocksdb-> KeyMayExist(rocksdb::ReadOptions(), request->key(), response->mutable_value());
      if(!exists){
        LOG(ERROR) <<"failed to KeyMayExist:" << request->key();
        response->set_success(false);
        return;
      }

      response->set_success(true);
    }


    bool is_leader() const 
    { return _leader_term.load(butil::memory_order_acquire) > 0; }

    // Shut this node down.
    void shutdown() {
      if (_node) {
        _node->shutdown(NULL);
      }
    }

    // Blocking this thread until the node is eventually down.
    void join() {
      if (_node) {
        _node->join();
      }
    }

  private:

    friend class BlockClosure;

    void redirect(KVResponse* response) {
      response->set_success(false);
      if (_node) {
        braft::PeerId leader = _node->leader_id();
        if (!leader.is_empty()) {
          response->set_redirect(leader.to_string());
        }
      }
    }

    int apply_one(const KVRequest* request, KVResponse* response){
      if(request->op() == OP_PUT){
        rocksdb::Status s = _rocksdb->Put(_write_options, 
            request->key(),request->value());
        if(!s.ok()){
          LOG(ERROR) << "Failed to put, key:" << request->key()
            <<" value:" << request->value();
          return -1;
        }

        return 0;
      }

      if(request->op() == OP_DELETE){
        rocksdb::Status s = _rocksdb->Delete(_write_options, request->key());
        if(!s.ok()){
            LOG(ERROR) << "Failed to put, key:" << request->key()
              <<" value:" << request->value();
            return -1;
        }

         return 0;
      }

      if(request->op() == OP_BATCH){
        rocksdb::WriteBatch rocks_batch;
        bool batch_valid = false;
        int del_count = 0;
        int put_count = 0;

        for(int i=0; i< request->batch_size(); i++){
          if(request->batch(i).op() == OP_PUT){
            rocks_batch.Put(request->batch(i).key(),
                            request->batch(i).value());
            put_count++;
            batch_valid = true;
            continue;
          }

          if(request->batch(i).op() == OP_DELETE){
            rocks_batch.Delete(request->batch(i).key());
            del_count++;
            batch_valid = true;
            continue;
          }
        }
       
        if( !batch_valid ){
             return -1;
        }

        rocksdb::Status s = _rocksdb->Write(_write_options, &rocks_batch);
        if(!s.ok()){
          LOG(ERROR) << "Failed to put, key:" << request->key()
            <<" value:" << request->value();
          return -1;
        }

        return 0;
      }
       LOG(ERROR) << "Unknow request op :" << request->op() ;
       return -1;
    }
    // @braft::StateMachine
    void on_apply(braft::Iterator& iter) {
      // A batch of tasks are committed, which must be processed through 
      // |iter|
      for (; iter.valid(); iter.next()) {
        const KVRequest* request = NULL;
        KVResponse* response = NULL;
        // This guard helps invoke iter.done()->Run() asynchronously to
        // avoid that callback blocks the StateMachine
        braft::AsyncClosureGuard closure_guard(iter.done());

        if (iter.done()) {
          // This task is applied by this node, get value from this
          // closure to avoid additional parsing.
          BlockClosure* c = dynamic_cast<BlockClosure*>(iter.done());
          request  = c->request();
          response = c->response();
        } else {
          // Have to parse BlockRequest from this log.
          uint32_t meta_size = 0;
          butil::IOBuf saved_log = iter.data();
          saved_log.cutn(&meta_size, sizeof(uint32_t));
          // Remember that meta_size is in network order which hould be
          // covert to host order
          meta_size = butil::NetToHost32(meta_size);
          butil::IOBuf meta;
          saved_log.cutn(&meta, meta_size);
          butil::IOBufAsZeroCopyInputStream wrapper(meta);
          KVRequest request;
          CHECK(request.ParseFromZeroCopyStream(&wrapper));
          //data.swap(saved_log);
          //offset = request.offset();
          //chunkid = request.chunkid();
        }

        int status =  apply_one(request, response);
        if(status!=0){
          LOG(ERROR) <<"apply one failed";
        }
        // The purpose of following logs is to help you understand the way
        // this StateMachine works.
        // Remove these logs in performance-sensitive servers.
        LOG_IF(INFO, FLAGS_log_applied_task) 
          << " op: " << request->op()
          << " key:" << request->key()
          << " value:" << request->value();
      }
    }

    struct SnapshotArg {
      std::string data_path;
      braft::SnapshotWriter* writer;
      braft::Closure* done;
    };

    static int link_overwrite(const char* old_path, const char* new_path) {
      if (::unlink(new_path) < 0 && errno != ENOENT) {
        PLOG(ERROR) << "Fail to unlink " << new_path;
        return -1;
      }
      return ::link(old_path, new_path);
    }

    static void *save_snapshot(void* arg) {
      SnapshotArg* sa = (SnapshotArg*) arg;
      std::unique_ptr<SnapshotArg> arg_guard(sa);
      // Serialize StateMachine to the snapshot
      brpc::ClosureGuard done_guard(sa->done);
      // Sync buffered data before
      LOG(INFO) << "Saving snapshot to " ;
      int rc;

      DIR *pDir = opendir(sa->data_path.c_str());
      if(pDir == NULL){
        LOG(ERROR) << "open dir:" << sa->data_path << " failed!";
        return NULL;
      }

      struct dirent* pEnt = readdir(pDir);

      while(pEnt){
        char* name = pEnt->d_name;
        if(std::string(name) == "." || std::string(name) == ".."){
          pEnt = readdir(pDir);
          continue;
        }
        std::string fpath = sa->data_path + "/" + std::string(name);
        int fd = ::open(fpath.c_str(), O_RDWR, 0644);
        if( fd < 0){
          LOG(ERROR) << "open fpath:" << fpath << " failed!";
          return NULL;
        }
        LOG(INFO) << "fsync file:" << fpath;

        for (; (rc = ::fdatasync(fd)) < 0 && errno == EINTR ;) {}
        if (rc < 0) {
          sa->done->status().set_error(EIO, "Fail to sync fd=%d : %m",
              fd);
          return NULL;
        }

        std::string snapshot_path = sa->writer->get_path() + "/" + std::string(name);
        LOG(INFO) << "link file:" << fpath << " to snapshot path:"<< snapshot_path;            

        if (link_overwrite(fpath.c_str(), snapshot_path.c_str()) != 0) {
          sa->done->status().set_error(EIO, "Fail to link data : %m");
          return NULL;
        }

        LOG(INFO) << "add snapshot meta file:" << name;
        if (sa->writer->add_file(name) != 0) {
          sa->done->status().set_error(EIO, "Fail to add file to writer");
          return NULL;
        }
        pEnt = readdir(pDir);

      }
      return NULL;
    }

    void on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) {
      // Save current StateMachine in memory and starts a new bthread to avoid
      // blocking StateMachine since it's a bit slow to write data to disk
      // file.
      SnapshotArg* arg = new SnapshotArg;
      arg->writer = writer;
      arg->data_path = _data_path;
      arg->done = done;
      bthread_t tid;
      bthread_start_urgent(&tid, NULL, save_snapshot, arg);
    }

    int on_snapshot_load(braft::SnapshotReader* reader) {
      // Load snasphot from reader, replacing the running StateMachine
      CHECK(!is_leader()) << "Leader is not supposed to load snapshot";

      // reset fd

      return 0;
    }

    void on_leader_start(int64_t term) {
      _leader_term.store(term, butil::memory_order_release);
      LOG(INFO) << "Node becomes leader";
    }
    void on_leader_stop(const butil::Status& status) {
      _leader_term.store(-1, butil::memory_order_release);
      LOG(INFO) << "Node stepped down : " << status;
    }

    void on_shutdown() {
      LOG(INFO) << "This node is down";
    }
    void on_error(const ::braft::Error& e) {
      LOG(ERROR) << "Met raft error " << e;
    }
    void on_configuration_committed(const ::braft::Configuration& conf) {
      LOG(INFO) << "Configuration of this group is " << conf;
    }
    void on_stop_following(const ::braft::LeaderChangeContext& ctx) {
      LOG(INFO) << "Node stops following " << ctx;
    }
    void on_start_following(const ::braft::LeaderChangeContext& ctx) {
      LOG(INFO) << "Node start following " << ctx;
    }
    // end of @braft::StateMachine

  private:
    mutable butil::Mutex _fd_mutex;
    braft::Node* volatile _node;
    butil::atomic<int64_t> _leader_term;

    uint64_t _group;
    std::string _data_path;
    std::shared_ptr<rocksdb::DB> _rocksdb;
    rocksdb::Options _db_options;
    rocksdb::WriteOptions _write_options;
};

void BlockClosure::Run() {
  // Destroy this closure automatically when Run() finishes.
  std::unique_ptr<BlockClosure> self_guard(this);
  // Respond to the RPC when this scope is left.
  brpc::ClosureGuard done_guard(_done);
  if (!status().ok()) {
    // The task failed to apply; point the client at the current leader.
    _block->redirect(_response);
  }
}

// Implements example::BlockService if you are using brpc.
class BlockServiceImpl : public KVService {
  public:
    explicit BlockServiceImpl()  {}
    void put(::google::protobuf::RpcController* controller,
        const ::example::KVRequest* request,
        ::example::KVResponse* response,
        ::google::protobuf::Closure* done) {
      std::string group_name = "group_" + std::to_string(request->group());
      Block* block = _map[group_name];
      block->put(request, response, done);
    }
    void get(::google::protobuf::RpcController* controller,
        const ::example::KVRequest* request,
        ::example::KVResponse* response,
        ::google::protobuf::Closure* done) {
      brpc::ClosureGuard done_guard(done);
      std::string group_name = "group_" + std::to_string(request->group());
      Block* block = _map[group_name];
      block->get(request, response);
    }
    void init(){
      LOG(INFO) << "init group count:" << FLAGS_group_count;
      for(int count = 0; count < FLAGS_group_count; count++){
        Block* block = new Block();

        if(block->start(get_group_name(count))!=0){
          LOG(ERROR) << "Fail to start Block";
          return;
        }
        _map[get_group_name(count)] =  block;
      }
    }
    void stop(){	
      std::string group_name;
      Block* block;
      for(int count=0; count< FLAGS_group_count; count++){
        std::string group_name = "group_" + std::to_string(count);
        block = _map[group_name];
        block->shutdown();
        block->join();
      }
    }

    std::string get_group_name(int count){
      std::string group_name = "group_" + std::to_string(count);
      return group_name;
    }
  private:
    //Block* _block;
    std::map<std::string, Block*> _map;
};

}  // namespace example

// Wires everything together: one brpc::Server hosting both the KV service
// and the raft service, then FLAGS_group_count raft groups on top of it.
int main(int argc, char* argv[]) {
  GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
  butil::AtExitManager exit_manager;

  // Generally you only need one Server.
  brpc::Server server;
  // FIX: the original also constructed an unused `example::Block block;`
  // here; the service creates its own Blocks in init().
  example::BlockServiceImpl service;

  // Add your service into RPC server.
  if (server.AddService(&service, 
        brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
    LOG(ERROR) << "Fail to add service";
    return -1;
  }
  // raft can share the same RPC server. Notice the second parameter, because
  // adding services into a running server is not allowed and the listen
  // address of this server is impossible to get before the server starts. You
  // have to specify the address of the server.
  if (braft::add_service(&server, FLAGS_port) != 0) {
    LOG(ERROR) << "Fail to add raft service";
    return -1;
  }

  // It's recommended to start the server before Block is started to avoid
  // the case that it becomes the leader while the service is unreachable by
  // clients.
  // Notice that default options of server are used here. Check out details
  // from the doc of brpc if you would like change some options.
  if (server.Start(FLAGS_port, NULL) != 0) {
    LOG(ERROR) << "Fail to start Server";
    return -1;
  }

  // It's ok to start the Blocks now.
  service.init();

  LOG(INFO) << "Block service is running on " << server.listen_address();
  // Wait until 'CTRL-C' is pressed, then Stop() and Join() the service.
  while (!brpc::IsAskedToQuit()) {
    sleep(1);
  }
  LOG(INFO) << "Block service is going to quit";

  // Stop the blocks before the server so in-flight tasks can finish.
  service.stop();
  server.Stop(0);

  // Wait until all the processing tasks are over.
  server.Join();
  return 0;
}
