#include "dicom/sync_manager.h"

#include <algorithm>
#include <vector>

#include <gwbase/base/logging.h>
#include <gwbase/base/thread.h>
#include <surbasic/AESHelper.h>
#include <surbasic/AesFile.h>
#include <surbasic/surbasic.h>

#include "dicom/gateway.h"
#include "dicom/session.h"
#include "dicom/upload/cloud_uploader.h"

namespace dicom {

static const int kRetryDelay = 10;
static const int kBigFileThreshhold = 10 * 1024 * 1024;    // 10M
static const int kBigFileThreshhold2 = 100 * 1024 * 1024;  // 100M

enum SyncTaskState {
  kSyncNotStart = 0,
  kSyncing = 1,
  kSyncFailed = 2,
};

void SyncPerUserRetryWorker::Start() {
  worker_.reset(
      (new Thread(boost::bind(&SyncPerUserRetryWorker::PollRetryTasks, this),
                  "sync_retry")));
  worker_->Start();
}

void SyncPerUserRetryWorker::PollRetryTasks() {
  while (true) {
    sleep(1);

    // check whether whehter user login
    SDBasicToken c;
    if (!GetGateway()->session_mgr()->GetUserCredentialByEmail(user_, &c)) {
      LOG_TRACE << "User not login, do NOT schedule retry task: " << user_;
      continue;
    }

    Mutex g(mu_);
    LOG_DEBUG << "total " << waiting_retry_tasks_.size()
              << " tasks to schedule";
    while (!waiting_retry_tasks_.empty()) {
      Timestamp now = Timestamp::Now();
      Task* t = waiting_retry_tasks_[0];
      LOG_DEBUG << "Latest tetry task time: " << t->next_retry_time.ToISO8601();
      if (t->next_retry_time > now) {
        break;
      }
      mgr_->ReaddTask(t);
      std::pop_heap(waiting_retry_tasks_.begin(), waiting_retry_tasks_.end(),
                    TaskCompareRetryTime);
      waiting_retry_tasks_.pop_back();
      delete t;
    }
  }
}

void SyncPerUserRetryWorker::ScheduleRetry(Task& t, int delay) {
  LockGuard g(mu_);
  Task* rt = new Task();
  rt->id = t.id;
  rt->user = t.user;
  rt->next_retry_time = Timestamp::AfterNSeconds(delay);
  waiting_retry_tasks_.push_back(rt);
  std::push_heap(waiting_retry_tasks_.begin(), waiting_retry_tasks_.end(),
                 TaskCompareRetryTime);
}

SyncManager::SyncManager(const DicomConf* conf) {
  std::string dbdir = conf->GetDbDir();
  std::string syncdb = path::Join(dbdir, "sync_tasks.db");

  Status s = db::Database::Open(syncdb, &db_);
  if (!s.ok()) {
    LOG_FATAL << "failed to open sync task database: " << syncdb;
  }

  db_->RunSql(
      "create table if not exists sync_task_tab ("
      " local_id varchar primary key, "
      " user varchar, "
      " state int default 0, "
      " last_sync_time integer default 0)");
}

void SyncManager::AddNewUser(const std::string& user) {
  LockGuard lock(mu_);
  if (retry_workers_.find(user) != retry_workers_.end()) return;

  LOG_INFO << "Start sync retry scheduler for user: " << user;
  SyncPerUserRetryWorker* worker = new SyncPerUserRetryWorker(this, user);
  retry_workers_.insert(std::make_pair(user, worker));
  worker->Start();
}

void SyncManager::LoadPendingTasks() {
  db::StmtPtr stmt = db_->Prepare("update sync_task_tab set state = ?");
  stmt->BindInt32(kSyncNotStart);
  stmt->Step();
  stmt.reset();

  stmt = db_->Prepare("select local_id, user from sync_task_tab");
  stmt->Step();
  for (; !stmt->eof(); stmt->Step()) {
    Task t;
    t.id = stmt->GetText(0);
    t.user = stmt->GetText(1);
    t.running = false;
    AddNewUser(t.user);
    ScheduleRetry(t, kRetryDelay);
  }
}

void SyncManager::AddNewTask(const std::string& user, const std::string& id,
                             const std::string& pid) {
  {
    LockGuard g(mu_);
    db::StmtPtr stmt = db_->Prepare(
        "replace into sync_task_tab ("
        "local_id, "
        "user, "
        "state, "
        "last_sync_time) "
        "values "
        "(?, ?, ?, ?)");
    stmt->BindText(id);
    stmt->BindText(user);
    stmt->BindInt32(kSyncNotStart);
    stmt->BindInt64(Timestamp::Now().MicroSecondsSinceEpoch());
    stmt->Step();
  }

  // File maybe change immediately, upload it with some delay
  Task t;
  t.id = id;
  t.user = user;
  t.running = false;
  ScheduleRetry(t, 10);
}

void SyncManager::DeleteTask(const std::string& id) {
  LOG_INFO << "SYNC: delete sync task: " << id;
  LockGuard g(mu_);
  db::StmtPtr stmt =
      db_->Prepare("delete from sync_task_tab where local_id = ?");
  stmt->BindText(id);
  stmt->Step();
}

void SyncManager::GetNextTask(Task* t) {
  // ::sleep(10000);
  LockGuard g(mu_);
  while (tasks_.empty()) {
    not_empty_.Wait(&mu_);
  }

  assert(!tasks_.empty());
  (*t) = tasks_.front();
  tasks_.pop_front();
}

void SyncManager::ScheduleRetry(Task& t, int delay) {
  LockGuard lock(mu_);
  assert(retry_workers_.find(t.user) != retry_workers_.end());
  SyncPerUserRetryWorker* worker = retry_workers_.find(t.user)->second;
  worker->ScheduleRetry(t, delay);
  LOG_INFO << "Retry sync task: " << t.id << " after " << delay << " seconds";
}

void SyncManager::ReaddTask(Task* t) {
  tasks_.push_back(*t);
  not_empty_.Signal();
}

void SyncManager::RunWorkerLoop() {
  LOG_TRACE << "start sync worker";
  while (true) {
    Task t;
    GetNextTask(&t);

    // Do upload work;
    t.running = true;

    SDBasic::tree::TreeEntity node;
    SDBasicToken c;
    LRUFileCache* cache = GetGateway()->cache();
    DmvDBPtr dmv = GetGateway()->dmv_mgr()->GetDB(t.user);
    FileObject fo;
    DirObject d;
    LRUFileCache::Handle* h = NULL;
    SDBasic::time::Timestamp mtime;
    bool retry = true;
    bool success = false;
    Status s;

    assert(!t.user.empty());
    if (!GetGateway()->session_mgr()->GetUserCredentialByEmail(t.user, &c)) {
      LOG_ERROR << "user not login, try sync " << t.id << " later";
      goto done_upload;
    }

    if (dmv->GetFileByID(t.id, &fo) == false) {
      // file not exists or be deleted, we don't retry it
      this->DeleteTask(t.id);
      retry = false;
      goto done_upload;
    }

    LOG_INFO << "SYNC: start to sync file " << t.id << " - " << fo.name;
    if (dmv->GetDirByID(fo.parentId, &d) == false) {
      dmv->DeleteFile(t.id);
      cache->Erase(t.id);
      this->DeleteTask(t.id);
      retry = false;
      LOG_ERROR << "SYNC: can't find parent dir of file: " << t.id;
      goto done_upload;
    }

    h = cache->Lookup(t.id);
    if (h == NULL) {
      LOG_ERROR << "SYNC: file " << t.id << " missing";
      goto done_upload;
    }

    success = this->UploadFileToCloudV2(c.surdoc_key, dmv, fo,
                                        cache->FilePath(h), &node);

  done_upload:
    if (h != NULL) cache->Release(h);

    if (success && node.id.empty()) {
      LOG_ERROR << "SYNC: upload OK but cloud id is empty, what happens: "
                << fo.local_id;
      success = false;
    }

    if (success) {
      // map local id to cloud id and set local file removable
      dmv->SetFileCloudId(t.id, node.id);
      cache->MarkAsRemovable(t.id);
      DeleteTask(t.id);
      // cache->Erase(t.id);
      LOG_INFO << "SYNC: sync file successfully: " << t.id << " - " << fo.name
               << " - " << node.id;
    } else {
      LOG_ERROR << "SYNC: failed to sync file: " << t.id << " - " << fo.name;
      if (retry) {
        ScheduleRetry(t, kRetryDelay);
      }
    }
  }
}

bool SyncManager::UploadBigFile(uint64_t surdoc_key, FileObject& fo,
                                const std::string& parent_cloud_id,
                                const std::string& enc_file_path,
                                const HashEntry* hash,
                                SDBasic::tree::TreeEntity* node) {
  LRUFileCache* cache = GetGateway()->cache();
  HashCache* hcache = GetGateway()->hcache();
  ResumeInfoDb* rdb = GetGateway()->resume_info_db();
  CloudUploader uploader(cache, hcache);
  Status s;

  SDBasic::token::AccessToken token;
  SDBasic::TokenManager::get().GetToken(surdoc_key, token);

  // First, get the resume location from local database
  ResumeInfo resume_info;
  if (rdb->GetResumeInfo(fo.local_id, hash->src_md5, &resume_info) == false) {
    resume_info.fid = fo.local_id;
    resume_info.src_md5 = hash->src_md5;
  }

  // OK, we should get resume offset from server, the offset
  // in local database may be incorrect.
  s = uploader.GetBigFileResumeInfo(token, fo, hash, &resume_info);
  if (!s.ok()) {
    goto failed_upload;
  }
  rdb->UpdateResumeInfo(&resume_info);

  // Now, we upload file from resume offset
  s = uploader.DoUploadBigFile(token, fo, parent_cloud_id, enc_file_path, hash,
                               &resume_info, node);
  if (!s.ok()) {
    goto failed_upload;
  }
  rdb->DeleteResumeInfo(fo.local_id, hash->src_md5);
  return true;

failed_upload:
  resume_info.failed_count++;
  // Oops: we failed with this resume location too many times,
  // cloud server maybe not work properly with this location,
  // so we delete it, and try to get a new one next time
  if (resume_info.failed_count > 10) {
    rdb->DeleteResumeInfo(fo.local_id, hash->src_md5);
  } else {
    // Maybe we failed to get a resume location, in this
    // case, we should not add this to database
    if (!resume_info.location.empty()) {
      rdb->UpdateResumeInfo(&resume_info);
    }
  }

  // resume upload API may fail in unexpected way (e.g., server misbehavior),
  // so we try to upload it as a normal file if it is not too big (>100M)
  if (!s.ok() && hash->enc_size < kBigFileThreshhold2) {
    s = uploader.DoUploadSmallFile(token, fo, parent_cloud_id, enc_file_path,
                                   hash, node);
  }

  return s.ok();
}

bool SyncManager::UploadFileToCloudV2(int64_t surdoc_key, DmvDBPtr& dmv,
                                      FileObject& fo,
                                      const std::string& enc_file_path,
                                      SDBasic::tree::TreeEntity* node) {
  LRUFileCache* cache = GetGateway()->cache();
  HashCache* hcache = GetGateway()->hcache();
  CloudUploader uploader(cache, hcache);

  bool rapid = false;
  Status s;
  HashEntry rapidHash;

  // Ensure the parent dir is created on cloud
  std::string parent_cloud_id;
  s = uploader.CreateDirInCloud(surdoc_key, fo.parentId, dmv, &parent_cloud_id);
  if (!s.ok()) {
    LOG_ERROR << "SYNC: failed to create parent dir of file : " << fo.local_id;
    return false;
  }

  LOG_INFO << "parent dir created successfully!";

  SDBasic::token::AccessToken token;
  SDBasic::TokenManager::get().GetToken(surdoc_key, token);
  s = uploader.CheckRapidUpload(surdoc_key, fo, enc_file_path, &rapid,
                                &rapidHash);
  if (!s.ok()) {
    LOG_ERROR << "SYNC: failed to check whether we can rapid upload file : "
              << fo.local_id;
    return false;
  }

  if (rapid) {
    s = uploader.DoRapidUpload(token, fo, parent_cloud_id, &rapidHash, node);
    return s.ok();
  } else {
    HashEntry localHash;
    if (hcache->GetHashEntry(fo.digest, fo.hex_key, &localHash) == false) {
      LOG_ERROR << "SYNC: failed to get local hash of file : " << fo.local_id;
      return false;
    }

    if (!localHash.IsComplete()) {
      LOG_ERROR << "SYNC: local hash of file is incomplete: " << fo.local_id;
      return false;
    }

    if (localHash.enc_size < kBigFileThreshhold) {
      s = uploader.DoUploadSmallFile(token, fo, parent_cloud_id, enc_file_path,
                                     &localHash, node);
      return s.ok();
    } else {
      return UploadBigFile(surdoc_key, fo, parent_cloud_id, enc_file_path,
                           &localHash, node);
    }
  }

  LOG_ERROR << "we should not reach here, what happens?";
  return false;
}

bool SyncManager::UploadFileToCloud(int64_t token, FileObject& fo,
                                    const std::string& parent_cloud_id,
                                    const std::string& enc_file_path,
                                    TreeEntity* node) {
  LRUFileCache* cache = GetGateway()->cache();
  std::string tmp_file;
  // decrypt file to tmp file
  if (cache->AllocTmpFile("SYNC_WORKER", 0, &tmp_file) == false) {
    LOG_ERROR << "SYNC: failed to allocate temp file for uploading: "
              << fo.local_id;
    return false;
  }

  if (SDBasic::aes::AESHelper::DecAndUncompressFile(
          SDBasic::bytearray::ByteArray::fromHexStr(fo.hex_key), enc_file_path,
          tmp_file) == false) {
    LOG_ERROR << "SYNC: failed to open AES file of " << fo.local_id << " : "
              << fo.hex_key;
    return false;
  }

  // upload plain file to surcloud
  SDBasic::time::Timestamp mtime =
      SDBasic::time::Timestamp::FromTimeT(fo.modifiedTime.ToTimeT());
  bool success =
      SDBasic::Upload(token, tmp_file, parent_cloud_id, fo.name, mtime, *node);

  if (!success) {
    LOG_ERROR << "SYNC: failed to upload file to surcloud: " << fo.local_id;
  }

  if (success && node->id.empty()) {
    LOG_ERROR << "SYNC: upload OK but cloud id is empty, what happens: "
              << fo.local_id;
    success = false;
  }

  if (!tmp_file.empty()) cache->ReleaseTmpFile("SYNC_WORKER", tmp_file);

  return success;
}

void SyncManager::StartWorkers(int n) {
  for (int i = 0; i < n; ++i) {
    ThreadPtr p(
        new Thread(boost::bind(&SyncManager::RunWorkerLoop, this), "syncer"));
    p->Start();
  }
}

SyncManager* NewSyncManager(const DicomConf* conf) {
  SyncManager* sm = NULL;
  sm = new SyncManager(conf);
  return sm;
}
}
