#include "dicom/upload/cloud_uploader.h"

#include <memory>

#include "dicom/storage/encrypt.h"
#include "dicom/storage/decrypt.h"
#include <surbasic/surbasic.h>
#include <surbasic/AESHelper.h>
#include <surbasic/BackupFile.h>
#include <surbasic/FileKeys.h>
#include <surbasic/EciesHelper.h>
#include <surbasic/Base64Helper.h>

using namespace SDBasic;

namespace dicom {

// Populates |b| with everything the upload APIs need to send the encrypted
// file at |enc_file_path| (described by |fo|) into the cloud folder
// |cloud_parent_id|. |hash| must carry the complete hash info of both the
// source and the encrypted file, plus the hex-encoded storage key.
// Returns a non-ok Status only when the server-issued private key in
// |token| cannot be parsed.
static Status FilleBackupFileInfo(const AccessToken& token,
                                  const FileObject& fo,
                                  const std::string& cloud_parent_id,
                                  const std::string& enc_file_path,
                                  const HashEntry* hash, BackupFile* b) {
  b->name = fo.name;
  b->id = "";
  b->parentid = cloud_parent_id;
  b->fullpath = "";
  b->rootpath = "";
  b->uploadtime = time::Timestamp::Now();

  b->ondup = OVERWRITE;
  b->convert = NONDOC;  // no document conversion

  // The following fields have no meaning in our case.
  b->type = BACKUP_STREAM;
  b->status = START_BACKUP;
  b->createtype = FILE_CREATE_NEW;
  b->support = "";

  // Source (plaintext) file info.
  b->modifytime = time::Timestamp::FromTimeT(fo.modifiedTime.ToTimeT());
  b->size = hash->src_size;
  b->digest = hash->src_digest;

  // Resume-upload state; only meaningful for big files, reset here.
  b->uploadid = "";
  b->offset = 0;

  // Info used in encryption upload mode (our case).
  b->sha512 = hash->src_sha512;
  b->encFilePath = enc_file_path;
  b->encFileSize = hash->enc_size;
  b->encSha1 = hash->enc_digest;
  b->sharekey =
      SDBasic::EncryptStorageKeyToShareKey(hash->src_sha512, hash->hexkey);

  // Create the personal key. unique_ptr guarantees the ECC helper is
  // released on every path (the raw delete leaked on exceptional exit).
  std::unique_ptr<ecc::EciesHelper> ecc(
      ecc::EciesHelper::CreateFromPrivateKeyReturnedByServer(
          token.privatekey));
  if (ecc == nullptr) {
    return Status(Status::kOther, "failed to parse private key");
  }
  b->personalkey = SDBasic::GeneratePersonalKey(ecc.get(), hash->hexkey);

  // DON'T upload this plain key to server
  b->storagekey = base64::UrlBase64Helper::Encode(
      bytearray::ByteArray::fromHexStr(hash->hexkey));

  // Enterprise API fields, no meaning in our case.
  b->sharetype = SHARE_NONE;
  b->sharerootid = "";
  return Status();
}

// Decrypts |enc_file_path| (AES key taken from fo.hex_key) into a temp
// file allocated from |fcache|. On success *tmp_file names the decrypted
// file, which the caller must release back to the cache. On failure the
// temp slot is released, *tmp_file is cleared, and false is returned.
static bool DecryptFileToTmpFile(LRUFileCache* fcache, const FileObject& fo,
                                 const std::string& enc_file_path,
                                 std::string* tmp_file) {
  // Reserve space for the decrypted copy first.
  const bool allocated = fcache->AllocTmpFile("SYNC_WORKER", 0, tmp_file);
  if (!allocated) {
    LOG_ERROR << "SYNC: failed to allocate temp file for uploading: "
              << fo.local_id;
    return false;
  }

  // Decrypt and decompress into the freshly allocated temp file.
  const bool decrypted = SDBasic::aes::AESHelper::DecAndUncompressFile(
      SDBasic::bytearray::ByteArray::fromHexStr(fo.hex_key), enc_file_path,
      *tmp_file);
  if (!decrypted) {
    LOG_ERROR << "SYNC: failed to open AES file of " << fo.local_id << " : "
              << fo.hex_key;

    // Give the slot back so the cache does not leak capacity.
    fcache->ReleaseTmpFile("SYNC_WORKER", *tmp_file);
    tmp_file->clear();
    return false;
  }

  return true;
}

// Both caches are borrowed, not owned; they must outlive this uploader.
CloudUploader::CloudUploader(LRUFileCache* fcache, HashCache* hcache)
    : fcache_(fcache), hcache_(hcache) {}

// Determines whether the encrypted file at |enc_file_path| can be
// "rapid-uploaded" (registered on the server by hash alone, no transfer).
// On success *rapid says whether a matching server copy was found and, if
// so, *rapidHash holds the matching server hash entry.
//
// Flow: (1) get or compute the local hash entry; (2) ask the server for
// entries with the same source digest/sha512; (3) try a fast check that
// avoids re-encrypting; (4) fall back to decrypting into a temp file and
// validating each server entry.
Status CloudUploader::CheckRapidUpload(uint64_t surdoc_key,
                                       const FileObject& fo,
                                       const std::string& enc_file_path,
                                       bool* rapid, HashEntry* rapidHash) {
  Status s;
  *rapid = false;

  // Try the hash cache first; a miss means we must stream-decrypt the
  // encrypted file once to compute all digests.
  HashEntry localHash;
  bool ok = hcache_->GetHashEntry(fo.digest, fo.hex_key, &localHash);
  if (!ok) {
    LOG_INFO << "Calculate hash info of file: " << fo.local_id;
    FileReader r;
    NullWriter w;  // discard the plaintext; we only want the hashes
    s = r.Open(enc_file_path);
    if (!s.ok()) {
      LOG_ERROR << s;
      return Status(Status::kOther, "failed to open encryted file");
    }

    Decryptor dec;
    s = dec.DoDecryptAndHash(fo.hex_key, &r, &w, &localHash);
    r.Close();
    w.Close();

    if (!s.ok()) {
      LOG_ERROR << s;
      return s;
    }

    // Cache the freshly computed entry for the next attempt.
    localHash.status = HASH_LOCAL;
    hcache_->UpdateHashEntry(&localHash);
  }

  assert(localHash.IsComplete());

  // Get file list with same hash from server
  SameHashChecker checker(hcache_);
  ServerHashEntryList list;
  s = checker.GetServerHashEntryListFromServer(surdoc_key, localHash.src_digest,
                                               localHash.src_sha512, &list);
  if (!s.ok()) {
    LOG_ERROR << s;
    return s;
  }

  // No server copy with the same source hash: a normal upload is required.
  if (list.empty()) {
    *rapid = false;
    return s;
  }

  // Fast check: since we already got the hash info of file when encrypted
  // with hash->hexkey, we don't need to encrypt it again to validate
  // the server hash entry with same hexkey.
  *rapid = checker.CanRapidUploadFastCheck(&list, localHash.src_md5, rapidHash);
  if (*rapid == true) {
    assert(rapidHash->IsComplete());
    return s;
  }

  // Now, we decrypt the file, and validate each entry in
  // ServerHashEntryList to find the first one with a correct enc_digest.
  std::string tmp_file;
  if (DecryptFileToTmpFile(fcache_, fo, enc_file_path, &tmp_file) == false) {
    return Status(Status::kOther, "failed to decrypt file");
  }

  *rapid = checker.CanRapidUploadSlowCheck(&list, tmp_file, rapidHash);
  if (*rapid == true) {
    // OK, we found one. rapidHash contains the hash info of the file when
    // encrypted with the server entry's hexkey, so we can use this info
    // to do a rapid upload.
    assert(rapidHash->IsComplete());
  }
  // Release the temp file regardless of the slow-check outcome.
  fcache_->ReleaseTmpFile("SYNC_WORKER", tmp_file);
  return s;
}

// Registers the file on the server using only its hash info (|hash| must
// match a copy the server already stores) — no file data is transferred.
// On success *node holds the server-side tree entry parsed from the
// response.
Status CloudUploader::DoRapidUpload(SDBasic::token::AccessToken& token,
                                    const FileObject& fo,
                                    const std::string& cloud_parent_id,
                                    const HashEntry* hash,
                                    SDBasic::tree::TreeEntity* node) {
  BackupFile bfile;
  // No encrypted file path is needed: nothing is uploaded.
  Status s = FilleBackupFileInfo(token, fo, cloud_parent_id, "", hash, &bfile);
  if (!s.ok()) {
    return s;
  }

  Request req;
  req.Init();
  std::string response;
  if (req.Enc_RapidUpload(token, bfile, response) != code::SD_SUCCESSED) {
    return Status(Status::kOther, "failed to rapid upload file");
  }

  // The response JSON carries the created node's metadata.
  CJsonParse parser(response);
  if (!parser.IsValid() || !parser.ParseNode(*node)) {
    LOG_ERROR << "invalid response: " << response;
    return Status(Status::kOther, "failed to parse result");
  }

  return s;
}

// Uploads the encrypted file at |enc_file_path| in one request (small-file
// path, no resume support). |hash| must be complete. On success *node
// holds the server-side tree entry parsed from the response.
Status CloudUploader::DoUploadSmallFile(SDBasic::token::AccessToken& token,
                                        const FileObject& fo,
                                        const std::string& cloud_parent_id,
                                        const std::string& enc_file_path,
                                        const HashEntry* hash,
                                        SDBasic::tree::TreeEntity* node) {
  assert(hash->IsComplete());

  BackupFile bfile;
  Status s = FilleBackupFileInfo(token, fo, cloud_parent_id, enc_file_path,
                                 hash, &bfile);
  if (!s.ok()) {
    return s;
  }

  Request req;
  req.Init();
  std::string response;
  if (req.Enc_UploadSmallFile(token, bfile, response) != code::SD_SUCCESSED) {
    return Status(Status::kOther, "failed to upload small file");
  }

  // The response JSON carries the created node's metadata.
  CJsonParse parser(response);
  if (!parser.IsValid() || !parser.ParseNode(*node)) {
    LOG_ERROR << "invalid response: " << response;
    return Status(Status::kOther, "failed to parse result");
  }

  return s;
}

// Obtains (or refreshes) the resume state for a big-file upload: a resume
// location (allocated once and reused across retries via |resume_info|)
// and the server-known byte offset to continue from. Only hash->enc_size
// is consulted. Returns a non-ok Status if either server call fails.
Status CloudUploader::GetBigFileResumeInfo(SDBasic::token::AccessToken& token,
                                           const FileObject& fo,
                                           const HashEntry* hash,
                                           ResumeInfo* resume_info) {
  Status s;
  code::SD_CODE c;

  Request req;
  req.Init();

  if (resume_info->location.empty()) {
    LOG_INFO << "Request a resume location for file: " << fo.local_id;
    // First we need to get a resume id from the server.
    std::string location;
    c = req.Enc_GetResumeAddress(token, SHARE_NONE, "", location);
    if (c != code::SD_SUCCESSED || location.empty()) {
      LOG_ERROR << "failed to get resume addr for big file: " << fo.local_id;
      return Status(Status::kOther, "failed to get resume addr");
    }
    resume_info->location = location;
  } else {
    LOG_INFO << "Use existing resume location for file: " << fo.local_id;
  }

  // Initialize defensively: don't propagate an indeterminate value if the
  // API reports success without writing the out-param.
  int64_t offset = 0;
  c = req.Enc_GetResumeOffset(token, resume_info->location, hash->enc_size,
                              SHARE_NONE, "", offset);
  if (c != code::SD_SUCCESSED) {
    LOG_ERROR << "failed to get resume offset of big file: " << fo.local_id;
    return Status(Status::kOther, "failed to get resume offset");
  }
  resume_info->offset = offset;
  return Status();
}

// Uploads (or resumes uploading) a big encrypted file through the resume
// API. |resume_info| must already carry a location obtained from
// GetBigFileResumeInfo; |hash| must be complete. On success *node holds
// the server-side tree entry parsed from the response.
Status CloudUploader::DoUploadBigFile(SDBasic::token::AccessToken& token,
                                      const FileObject& fo,
                                      const std::string& cloud_parent_id,
                                      const std::string& enc_file_path,
                                      const HashEntry* hash,
                                      ResumeInfo* resume_info,
                                      SDBasic::tree::TreeEntity* node) {
  assert(hash->IsComplete());

  BackupFile bfile;
  Status s = FilleBackupFileInfo(token, fo, cloud_parent_id, enc_file_path,
                                 hash, &bfile);
  if (!s.ok()) {
    return s;
  }

  Request req;
  req.Init();

  assert(!resume_info->location.empty());

  // Carry over the resume-upload state so the server continues where the
  // previous attempt stopped.
  bfile.uploadid = resume_info->location;
  bfile.offset = resume_info->offset;

  // The resume API does not support OVERWRITE on duplicates, so switch to
  // RENAME just for this request.
  bfile.ondup = RENAME;  // OVERWRITE is not supported here

  LOG_INFO << "resume addr: " << resume_info->location;
  std::string response;
  code::SD_CODE c =
      req.Enc_ResumeUploadFile(token, resume_info->location, &bfile, response);
  if (c != code::SD_SUCCESSED) {
    LOG_ERROR << "failed to upload big file: " << fo.local_id;
    return Status(Status::kOther, "failed to upload big file");
  }

  // The response JSON carries the created node's metadata.
  CJsonParse parser(response);
  if (!parser.IsValid() || !parser.ParseNode(*node)) {
    LOG_ERROR << "invalid response: " << response;
    return Status(Status::kOther, "failed to parse result");
  }

  return s;
}

// Ensures the local directory |local_id| exists on the cloud, creating
// missing ancestors first (depth-first up the parent chain). On success
// *cloud_id holds the directory's cloud id, and the mapping is persisted
// in |db|.
// NOTE(review): termination relies on every parent chain reaching a dir
// that either already has a cloud_id or is absent from the DB — a cyclic
// parentId chain would recurse forever. TODO confirm the DB guarantees
// this invariant.
static Status CreateFolderRecursive(uint64_t surdoc_key,
                                    const std::string& local_id, DmvDBPtr& db,
                                    std::string* cloud_id) {
  Status s;
  DirObject d;
  bool ok = db->GetDirByID(local_id, &d);
  if (!ok) {
    return Status(Status::kOther, "local dir node not found");
  }

  // Base case: already mirrored on the cloud.
  if (!d.cloud_id.empty()) {
    LOG_DEBUG << "dir already created on cloud";
    *cloud_id = d.cloud_id;
    return s;
  }

  // Make sure the parent exists on the cloud before creating this dir.
  std::string parent_cloud_id;
  s = CreateFolderRecursive(surdoc_key, d.parentId, db, &parent_cloud_id);
  if (!s.ok()) {
    return s;
  }

  SDBasic::TreeEntity node;
  bool ret = SDBasic::CreateFolder(surdoc_key, parent_cloud_id, d.name, node);
  if (ret == false) {
    return Status(Status::kOther, "failed to create dir");
  }

  assert(!node.id.empty());
  LOG_INFO << "dir " << d.name << " created with cloud id " << node.id;

  // Persist the mapping so subsequent calls short-circuit at the base case.
  db->SetDirCloudId(d.local_id, node.id);
  *cloud_id = node.id;
  return s;
}

// Our server will create two dirs with the same name if we send
// two create_dir requests simultaneously. Oops! Serialize all dir
// creation through this process-wide lock.
static dicom::Mutex g_dirLock;

// Creates (if needed) the cloud counterpart of local dir |dir_id| and all
// of its missing ancestors; *parent_cloud_id receives its cloud id.
// Thread-safe: the whole recursive creation runs under g_dirLock.
Status CloudUploader::CreateDirInCloud(uint64_t surdoc_key,
                                       const std::string& dir_id, DmvDBPtr& db,
                                       std::string* parent_cloud_id) {
  LockGuard g(g_dirLock);
  return CreateFolderRecursive(surdoc_key, dir_id, db, parent_cloud_id);
}
}
