//
//  hd_fs_operation.cc
//  Draco
//
//  Created by Liu Dafan on 12-2-20.
//  Copyright (c) 2012年 __MyCompanyName__. All rights reserved.
//

#include <errno.h>
#include <zlib.h>
#include "hd_fs_operation.h"
#include "data_operations.h"
#include "datastream.h"

using namespace std;

// Constructor: remembers the HBase table names for directory and file
// metadata and initializes the mutex guarding the per-user write set.
// Aborts the process if the mutex cannot be created.
HadoopFSOperation::HadoopFSOperation(const char* dirtable, const char* filetable)
{
    _dirTable.assign(dirtable);
    _fileTable.assign(filetable);
    _maxfilelength = 1024*1024*1024; // 1G cap per backing data file
    
    // pthread_mutex_init reports failure through its return value and
    // does NOT set errno, so decode the returned code, not errno.
    int rc = pthread_mutex_init(&_lock, NULL);
    if (rc != 0) {
        char errbuf[1024];
        strerror_r(rc, errbuf, sizeof(errbuf));
        DERROR("mutex init error: %s\n", errbuf);
        exit(-1);
    }
}

// Destructor: releases the mutex that guards the per-user write set.
HadoopFSOperation::~HadoopFSOperation()
{
    (void)pthread_mutex_destroy(&_lock);
}

// Looks up a single subdirectory row by (parent id, name).
// On success dirInfo holds the stored row; on failure it holds only the
// key fields filled in here. Soft-deleted rows are returned as-is (the
// caller checks _isDeleted — see CreateNewDir).
HResult HadoopFSOperation::GetSubdirInfo(DIR_ID parentID, const std::string& dirName, OUT DIR_INFO& dirInfo)
{
    HResult ret = ERROR_CODE_OK;
    
    dirInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array on
    // an over-long directory name.
    snprintf(dirInfo._name, sizeof(dirInfo._name), "%s", dirName.c_str());
    
    ret = DataOperations::ReadDirectoryInfo(_dirTable, dirInfo.GetKey(), dirInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get Directory Info Failed: parentid: %lx, dirName: %s, error: %d",
               parentID._id, dirName.c_str(), ret);
    }
    else
    {
        DINFO("Get Directory Info successed: parentid: %lx, dirName: %s",
          parentID._id, dirName.c_str());
    }
    /*
    if (dirInfo._isDeleted == 1)
    {
        ret = ERROR_CODE_EMPTY;
    }
    */
    return ret;
}

// Lists every directory row directly under parentID.
// The scan key is "<userID>_<dirSN>"; results (including soft-deleted
// rows — see the disabled filter below) are appended to dirInfos.
HResult HadoopFSOperation::GetSubdirInfos(DIR_ID parentID, OUT std::vector<DIR_INFO>& dirInfos)
{
    HResult ret = ERROR_CODE_OK;
    
    char rowkey[64];
    // snprintf instead of sprintf: bounded, cannot overrun rowkey.
    snprintf(rowkey, sizeof(rowkey), "%u_%u", parentID._parts._userID, parentID._parts._dirSN);
    
    ret = DataOperations::ListDirectoryInfo(_dirTable, rowkey, dirInfos);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("List Directory Info Failed: parentid: %lx, error: %d",
               parentID._id, ret);
    }
    else
    {
        DINFO("Get Directory Info successed: parentid: %lx, count: %d",
              parentID._id, (int)dirInfos.size());
    }
    /*
    vector<DIR_INFO>::iterator it = dirInfos.begin();
    while (it != dirInfos.end())
    {
        if (it->_isDeleted == 1)
        {
            it = dirInfos.erase(it);
        }
        else
        {
            it++;
        }
    }
    */
    return ret;
}

// Fetches the LATEST version of a file under (parent id, name).
// Returns ERROR_CODE_EMPTY when the listing succeeds but holds no rows —
// the old code dereferenced rbegin() on an empty vector (UB) in that case.
HResult HadoopFSOperation::GetSubfileInfo(DIR_ID parentID, const string& fileName, OUT FILE_INFO& fileInfo)
{
    HResult ret = ERROR_CODE_OK;
    vector<FILE_INFO> infos;
    
    fileInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(fileInfo._name, sizeof(fileInfo._name), "%s", fileName.c_str());
    
    ret = DataOperations::ListFileVersion(_fileTable, fileInfo.GetKey().c_str(), fileName, infos);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info Failed: parentid: %lx, Name: %s, error: %d",
               parentID._id, fileName.c_str(), ret);
    }
    else if (infos.empty())
    {
        // Success with zero rows means "no such file"; report EMPTY the
        // same way callers already expect it (see CreateNewDir's check).
        ret = ERROR_CODE_EMPTY;
    }
    else
    {
        DINFO("Get File Info successed: parentid: %lx, Name: %s, VersionCount: %d",
              parentID._id, fileName.c_str(), (int)infos.size());
        
        // get the latest version (last element of the version listing)
        fileInfo = infos.back();
    }
    
    return ret;
}

// Fetches one specific VERSION of a file under (parent id, name).
// The row key includes the version (GetKeyWithVersion), so this is a
// single-row read rather than a version listing.
HResult HadoopFSOperation::GetSubfileInfo(DIR_ID parentID, const std::string& fileName, uint32_t version, OUT FILE_INFO& fileInfo)
{
    HResult ret = ERROR_CODE_OK;
    fileInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(fileInfo._name, sizeof(fileInfo._name), "%s", fileName.c_str());
    fileInfo._fileVersion = version;
    
    ret = DataOperations::ReadFileInfo(_fileTable, fileInfo.GetKeyWithVersion().c_str(), fileInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info Failed: parentid: %lx, Name: %s, error: %d",
               parentID._id, fileName.c_str(), ret);
    }
    else
    {
        DINFO("Get File Info successed: parentid: %lx, Name: %s",
              parentID._id, fileName.c_str());
    }
    
    return ret;
}

// Lists ALL stored versions of a file under (parent id, name) into
// fileInfos. A scratch FILE_INFO is used only to build the scan key.
HResult HadoopFSOperation::GetSubfileInfo(DIR_ID parentID, const string& fileName, OUT std::vector<FILE_INFO>& fileInfos)
{
    HResult ret = ERROR_CODE_OK;
    FILE_INFO info;
    info._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(info._name, sizeof(info._name), "%s", fileName.c_str());
    
    ret = DataOperations::ListFileVersion(_fileTable, info.GetKey().c_str(), fileName, fileInfos);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info Failed: parentid: %lx, Name: %s, error: %d",
               parentID._id, fileName.c_str(), ret);
    }
    else
    {
        DINFO("Get File Info successed: parentid: %lx, Name: %s, VersionCount: %d",
              parentID._id, fileName.c_str(), (int)fileInfos.size());
    }
    
    return ret;
}

// Lists every file row directly under parentID, then collapses runs of
// rows sharing the same _name so only one entry per file survives.
HResult HadoopFSOperation::GetSubfileInfos(DIR_ID parentID, OUT std::vector<FILE_INFO>& fileInfos)
{
    HResult ret = ERROR_CODE_OK;
    
    // Empty name: GetKey() presumably yields the per-directory prefix
    // that scans all file rows below parentID — TODO confirm against
    // FILE_INFO::GetKey.
    FILE_INFO info;
    info._parentID = parentID;
    info._name[0] = '\0';
    
    ret = DataOperations::ListFileInfo(_fileTable, info.GetKey(), fileInfos);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("List File Info Failed: parentid: %lx, error: %d",
               parentID._id, ret);
    }
    else
    {
        DINFO("Get File Info succeed: parentid: %lx, count: %d",
              parentID._id, (int)fileInfos.size());
    }
    
    // skip versions with last version
    // Dedup pass: whenever two ADJACENT entries carry the same _name,
    // erase the earlier one. This assumes the listing returns all
    // versions of a file adjacently, in ascending version order, so the
    // surviving entry is the newest — TODO confirm ListFileInfo ordering.
    if (fileInfos.size() > 1)
    {
        
        vector<FILE_INFO>::iterator it = fileInfos.begin();
        vector<FILE_INFO>::iterator lastit = it;
        it = lastit + 1;
        while (it != fileInfos.end())
        {
            if (strcmp(it->_name, lastit->_name) == 0)
            {
                // Same file: drop the older row. erase() returns the
                // iterator to the element that followed the erased one.
                lastit = fileInfos.erase(lastit);
            }
            else
            {
                lastit++;
            }
            // Recompute the lookahead iterator: erase() invalidated it.
            it  = lastit + 1;
        }
    }
    return ret;
}

// Creates a new subdirectory under parentID.
// Allowed only when the name is absent (ERROR_CODE_EMPTY) or the existing
// row is soft-deleted; otherwise returns ERROR_CODE_ALREADY_EXISTS.
// On success newDirInfo holds the freshly written row.
HResult HadoopFSOperation::CreateNewDir(DIR_ID parentID, const string& newDirName, OUT DIR_INFO& newDirInfo)
{
    // check dir exist
    HResult ret = GetSubdirInfo(parentID, newDirName, newDirInfo);
    if (!(ret == ERROR_CODE_EMPTY || (ret == ERROR_CODE_OK && newDirInfo._isDeleted)))
    {
        DERROR("Dir %s is existed, err:%d", newDirName.c_str(), ret);
        return ERROR_CODE_ALREADY_EXISTS;
    }
    
    // allocate a fresh directory id for this user
    ret = GetNewDirId(parentID._parts._userID, newDirInfo._dirID);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("get new dir id failed, user: %u, error:%d",
               parentID._parts._userID, ret);
        return ret;
    }
    
    // One timestamp for both create and update (the old code called
    // time(NULL) twice and could straddle a second boundary).
    const time_t now = time(NULL);
    
    newDirInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newDirInfo._name, sizeof(newDirInfo._name), "%s", newDirName.c_str());
    newDirInfo._createTime = now;
    newDirInfo._updateTime = now;
    newDirInfo._deleteTime = 0;
    newDirInfo._isDeleted = false;
    
    // aggregate counters start at zero
    newDirInfo._fileCount = 0;
    newDirInfo._totalSize = 0;
    
    return DataOperations::WriteDirectoryInfo(_dirTable, newDirInfo);
}

// Creates a link directory: a new entry under parentID whose _dirID is
// the id of an EXISTING directory (sourceID), so both names resolve to
// the same contents. Same existence rules as CreateNewDir.
HResult HadoopFSOperation::CreateLinkDir(DIR_ID parentID, const string& newDirName, DIR_ID sourceID, OUT DIR_INFO& newDirInfo)
{
    // check dir exist
    HResult ret = GetSubdirInfo(parentID, newDirName, newDirInfo);
    if (!(ret == ERROR_CODE_EMPTY || (ret == ERROR_CODE_OK && newDirInfo._isDeleted)))
    {
        DERROR("Dir %s is existed, err:%d", newDirName.c_str(), ret);
        return ERROR_CODE_ALREADY_EXISTS;
    }
    
    // reuse the source's id instead of allocating a new one — that is
    // what makes this entry a link
    newDirInfo._dirID = sourceID;
    
    // One timestamp for both create and update.
    const time_t now = time(NULL);
    
    newDirInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newDirInfo._name, sizeof(newDirInfo._name), "%s", newDirName.c_str());
    newDirInfo._createTime = now;
    newDirInfo._updateTime = now;
    newDirInfo._deleteTime = 0;
    newDirInfo._isDeleted = false;
    
    // aggregate counters start at zero
    newDirInfo._fileCount = 0;
    newDirInfo._totalSize = 0;
    
    return DataOperations::WriteDirectoryInfo(_dirTable, newDirInfo);
}

// Renames a subdirectory: writes a copy of the row under the new name,
// then soft-deletes the original row.
// NOTE(review): the two writes are not atomic — a crash in between
// leaves both names live.
HResult HadoopFSOperation::RenameDir(DIR_ID parentID, const string& orgDirName, const string& newDirName, OUT DIR_INFO& newDirInfo)
{
    HResult ret = ERROR_CODE_OK;
    
    DIR_INFO orgDirInfo;
    ret = GetSubdirInfo(parentID, orgDirName, orgDirInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get Directory Info failed, parentid: %lx, name: %s",
               parentID._id, orgDirName.c_str());
        return ret;
    }
    
    // (removed unused local: string orgkey = orgDirInfo.GetKey();)
    newDirInfo = orgDirInfo;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newDirInfo._name, sizeof(newDirInfo._name), "%s", newDirName.c_str());
    
    // write new information
    ret = DataOperations::WriteDirectoryInfo(_dirTable, newDirInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write new Directory info failed, table: %s, name: %s",
               _dirTable.c_str(), newDirInfo._name);
        return ret;
    }
    
    // soft-delete the old row
    orgDirInfo._isDeleted = true;
    orgDirInfo._deleteTime = time(NULL);
    ret = DataOperations::WriteDirectoryInfo(_dirTable, orgDirInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write orginal Directory info failed, table: %s, name: %s",
               _dirTable.c_str(), orgDirInfo._name);
        return ret;
    }
    
    return ret;
}

// Recursively soft-deletes a directory: first all sub-files, then all
// sub-directories (recursing through this same entry point), and finally
// the directory's own row. Any failure aborts the walk and is returned.
HResult HadoopFSOperation::DeleteDir(DIR_ID parentID, const string& orgDirName)
{
    HResult ret = ERROR_CODE_OK;
    
    DIR_INFO dir;
    ret = GetSubdirInfo(parentID, orgDirName, dir);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get Directory Info failed, parentid: %lx, name: %s",
               parentID._id, orgDirName.c_str());
        return ret;
    }
    
    vector<DIR_INFO> subdirs;
    ret = GetSubdirInfos(dir._dirID, subdirs);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get Directory Info failed, id: %lx", dir._dirID._id);
        return ret;
    }
    
    vector<FILE_INFO> subfiles;
    ret = GetSubfileInfos(dir._dirID, subfiles);
    
    // FIX: this result was previously ignored — a failed listing would
    // silently delete the directory while leaving its files behind.
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Infos failed, id: %lx", dir._dirID._id);
        return ret;
    }
    
    // delete files
    for (size_t idx = 0; idx < subfiles.size(); idx ++)
    {
        ret = DeleteFile(subfiles[idx]);
        if (ret != ERROR_CODE_OK)
        {
            DERROR("Delete Sub File failed, parentid: %lx, name: %s",
                   dir._dirID._id, subfiles[idx]._name);
            return ret;
        }
    }
    
    // delete dirs (recursive call)
    for (size_t idx = 0; idx < subdirs.size(); idx ++)
    {
        ret = DeleteDir(subdirs[idx]);
        
        if (ret != ERROR_CODE_OK)
        {
            DERROR("Delete Sub Dir failed, parentid: %lx, name: %s",
                   dir._dirID._id, subdirs[idx]._name);
            return ret;
        }
    }
    
    // finally soft-delete the directory's own row
    ret = DeleteDir(dir);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Delete Dir failed, parentid: %lx, name: %s",
               dir._parentID._id, dir._name);
        return ret;
    }
    
    return ret;
}

// Undeletes a directory: fetches its row, then rewrites it with the
// deleted flag cleared (DeleteDir with recover == true).
HResult HadoopFSOperation::RecoverDir(DIR_ID parentID, const string& name)
{   
    DIR_INFO info;
    HResult ret = GetSubdirInfo(parentID, name, info);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get Dir Info failed, parentid: %lx, name: %s",
               parentID._id, name.c_str());
        return ret;
    }
    
    // rewrite the row with _isDeleted cleared
    ret = DeleteDir(info, true);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write orginal Dir info failed, table: %s, name: %s",
               _dirTable.c_str(), info._name);
    }
    return ret;
}

// Stores a new file (or a new version of an existing file) from an
// in-memory buffer:
//   1. admission control — one in-flight write per user;
//   2. version probe — existing file bumps the version number;
//   3. resolve the user's backing data file (cached in _filemap),
//      rolling to a new file once it would exceed _maxfilelength;
//   4. write serialized header + payload, then the metadata row.
// Always deregisters the user via the `done:` label, success or failure.
HResult HadoopFSOperation::CreateNewFile(
    DIR_ID parentID, const string& newFileName, const int8_t* buf, size_t bufLen, OUT FILE_INFO& newFileInfo)
{
    HResult ret = ERROR_CODE_OK;
    
    {
        AutoLocker locker(&_lock);
        
        if (_users.find(parentID._parts._userID) != _users.end())
        {
            // this user already has a write in flight
            DWARNING("current user is still writing now, user: %u", parentID._parts._userID);
            return ERROR_CODE_RETRY_LATER;
        }
        _users.insert(parentID._parts._userID);
    }
    
    // check file exist and increase the version
    ret = GetSubfileInfo(parentID, newFileName, newFileInfo);
    uint32_t version = 0;
    if (ret == ERROR_CODE_OK)
    {
        version = newFileInfo._fileVersion + 1;
    }
    
    // Declared before the first goto so no initialization is jumped over.
    FILE_DATA data;
    DataStream stream;

    // get target file name
    string filename;
    uint64_t curlength;
    map<USER_ID, string>::iterator itfile = _filemap.find(parentID._parts._userID);
    if (itfile == _filemap.end())
    {
        // no file in map cache
        ret = GetCurrentFileName(parentID._parts._userID, filename);
        if (ret != ERROR_CODE_OK)
        {
            DERROR("Get Current File Name failed, user: %u, err: %d",
                   parentID._parts._userID, ret);
            goto done;
        }
        //put it into map cache
        _filemap[parentID._parts._userID] = filename;
    }
    else
    {
        filename = itfile->second;
    }
    
    ret = DataOperations::GetFileLength(filename.c_str(), curlength);
    if (ret != ERROR_CODE_OK && ret != ERROR_CODE_EMPTY)
    {
        DERROR("Get File Length failed, name: %s, err: %d",
               filename.c_str(), ret);
        goto done;
    }
    
    if (ret == ERROR_CODE_EMPTY)
    {
        // backing file does not exist yet
        curlength = 0;
    }
    
    // roll over to a fresh data file once the cap would be exceeded
    if (curlength + bufLen > _maxfilelength)
    {
        filename = GetNewFileName(parentID._parts._userID);
        //update map cache
        _filemap[parentID._parts._userID] = filename;
    }
    
    // ---- fill in the metadata row ----
    newFileInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newFileInfo._name, sizeof(newFileInfo._name), "%s", newFileName.c_str());
    newFileInfo._fileVersion = version;
    newFileInfo._length = bufLen;
    newFileInfo._createTime = time(NULL);
    newFileInfo._deleteTime = 0;
    newFileInfo._compressType = 0;
    newFileInfo._securityType = 0;
    newFileInfo._isDeleted = false;
    newFileInfo._isSharing = false;
    // CRC over the raw payload
    newFileInfo._dataCRC = boost_crc32(buf, bufLen);
    
    // ---- serialize the on-disk header ----
    data._version = CURRENT_FILE_DATA_VERSION;
    data._fileType = 0;
    data._userID = parentID._parts._userID;
    data._length = bufLen;
    data._dataCRC = newFileInfo._dataCRC;
    data._data = (int8_t*)buf;
    
    stream.Serialize(data);
    
    // CRC over the serialized header bytes (payload not yet appended)
    newFileInfo._headCRC = boost_crc32(
                                &(stream.getdata()[0]),
                                stream.getdata().size());
    
    stream.Serialize(data._data, data._length);
    
    // write file data; _loc receives where the block landed
    ret = DataOperations::WriteDataBlock(filename.c_str(), stream.getdata(), newFileInfo._loc);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write File Data failed, filename: %s, err: %d",
               filename.c_str(), ret);
        goto done;
    }
    
    // write file info
    ret = DataOperations::WriteFileInfo(_fileTable, newFileInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write File Info failed, table: %s, name: %s, err: %d",
               _fileTable.c_str(), newFileName.c_str(), ret);
        goto done;
    }
done:
    // always deregister the user, success or failure
    {
        AutoLocker locker(&_lock);
        _users.erase(parentID._parts._userID);
    }
    return ret;
}

// Stores a new file by reading a local file fully into memory and
// delegating to the buffer-based CreateNewFile.
// Fixes vs. the old code: fseek had its offset/whence arguments swapped
// (it effectively read from byte offset 2 of the file), the read buffer
// was leaked, and the fread result was never checked.
HResult HadoopFSOperation::CreateNewFile(
    DIR_ID parentID, const string& newFileName, const string& localFileName, OUT FILE_INFO& newFileInfo)
{
    //TODO: change to stream model
    FILE *fp = fopen(localFileName.c_str(), "rb");
    if (fp == NULL)
    {
        DERROR("can't open local file: %s", localFileName.c_str());
        return ERROR_CODE_IOERROR;
    }
    
    // fseek signature is (stream, offset, whence)
    fseek(fp, 0, SEEK_END);
    long len = ftell(fp);
    if (len < 0)
    {
        DERROR("can't get length of local file: %s", localFileName.c_str());
        fclose(fp);
        return ERROR_CODE_IOERROR;
    }
    fseek(fp, 0, SEEK_SET);
    
    int8_t* buf = new int8_t[len];
    size_t readn = fread(buf, sizeof(int8_t), len, fp);
    fclose(fp);
    
    if (readn != (size_t)len)
    {
        DERROR("short read on local file: %s", localFileName.c_str());
        delete[] buf;
        return ERROR_CODE_IOERROR;
    }
    
    HResult ret = CreateNewFile(parentID, newFileName, buf, len, newFileInfo);
    delete[] buf; // previously leaked
    return ret;
}

// Creates a link file: a new metadata row under (parentID, newFileName)
// that points at the SAME data block (_loc) as sourceInfo — no payload is
// copied. If the target name already exists, the link becomes its next
// version.
HResult HadoopFSOperation::CreateLinkFile(
    DIR_ID parentID, const string& newFileName, const FILE_INFO& sourceInfo, OUT FILE_INFO& newFileInfo)
{
    FILE_INFO tarFileInfo;
    uint32_t version = 0;

    HResult ret = GetSubfileInfo(parentID, newFileName, tarFileInfo);

    if (ret == ERROR_CODE_OK)
    {
        DNOTE("Remove operation will replace the target file, parentid: %lx, name: %s",
              parentID._id, newFileName.c_str());
        
        version = tarFileInfo._fileVersion + 1;
    }

    newFileInfo._parentID = parentID;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newFileInfo._name, sizeof(newFileInfo._name), "%s", newFileName.c_str());
    newFileInfo._fileVersion = version;
    
    // copy the source's attributes; only the creation time is fresh
    newFileInfo._length = sourceInfo._length;
    newFileInfo._createTime = time(NULL);
    newFileInfo._deleteTime = sourceInfo._deleteTime;
    newFileInfo._compressType = sourceInfo._compressType;
    newFileInfo._securityType = sourceInfo._securityType;
    newFileInfo._isDeleted = sourceInfo._isDeleted;
    newFileInfo._isSharing = sourceInfo._isSharing;
    newFileInfo._dataCRC = sourceInfo._dataCRC;
    // shared data block location — this is what makes it a link
    newFileInfo._loc = sourceInfo._loc;
    
    return DataOperations::WriteFileInfo(_fileTable, newFileInfo);
}

// Renames a file: writes a copy of the latest version's row under the
// new name (as the target's next version if the target already exists),
// then soft-deletes the original row.
// NOTE(review): the two writes are not atomic — a crash in between
// leaves both names live.
HResult HadoopFSOperation::RenameFile(
    DIR_ID parentID, const string& orgFileName, const string& newFileName, OUT FILE_INFO& newFileInfo)
{
    HResult ret = ERROR_CODE_OK;
    
    FILE_INFO orgFileInfo;
    ret = GetSubfileInfo(parentID, orgFileName, orgFileInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info failed, parentid: %lx, name: %s",
               parentID._id, orgFileName.c_str());
        return ret;
    }
    
    // if the target name exists, this rename becomes its next version
    FILE_INFO tarFileInfo;
    ret = GetSubfileInfo(parentID, newFileName, tarFileInfo);
    uint32_t version = 0;
    if (ret == ERROR_CODE_OK)
    {
        DNOTE("Remove operation will replace the target file, parentid: %lx, name: %s",
              parentID._id, newFileName.c_str());
        
        version = tarFileInfo._fileVersion + 1;
    }
    
    newFileInfo = orgFileInfo;
    // Bounded copy: strcpy would overflow the fixed-size _name array.
    snprintf(newFileInfo._name, sizeof(newFileInfo._name), "%s", newFileName.c_str());
    newFileInfo._fileVersion = version;
    
    // write new information
    ret = DataOperations::WriteFileInfo(_fileTable, newFileInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write new File info failed, table: %s, name: %s",
               _fileTable.c_str(), newFileInfo._name);
        return ret;
    }
    
    // soft-delete the old row
    ret = DeleteFile(orgFileInfo);
    
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write orginal File info failed, table: %s, name: %s",
               _fileTable.c_str(), orgFileInfo._name);
        return ret;
    }
    
    return ret;
}

// Soft-deletes a file by name: fetches the latest version's row and
// rewrites it with the deleted flag set.
HResult HadoopFSOperation::DeleteFile(DIR_ID parentID, const string& orgFileName)
{   
    FILE_INFO orgFileInfo;
    HResult ret = GetSubfileInfo(parentID, orgFileName, orgFileInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info failed, parentid: %lx, name: %s",
               parentID._id, orgFileName.c_str());
        return ret;
    }
    
    // rewrite the row with _isDeleted set
    ret = DeleteFile(orgFileInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write orginal File info failed, table: %s, name: %s",
               _fileTable.c_str(), orgFileInfo._name);
    }
    return ret;
}

// Undeletes a file: fetches its row and rewrites it with the deleted
// flag cleared (DeleteFile with recover == true).
HResult HadoopFSOperation::RecoverFile(DIR_ID parentID, const string& orgFileName)
{   
    FILE_INFO orgFileInfo;
    HResult ret = GetSubfileInfo(parentID, orgFileName, orgFileInfo);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Get File Info failed, parentid: %lx, name: %s",
               parentID._id, orgFileName.c_str());
        return ret;
    }
    
    // rewrite the row with _isDeleted cleared
    ret = DeleteFile(orgFileInfo, true);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Write orginal File info failed, table: %s, name: %s",
               _fileTable.c_str(), orgFileInfo._name);
    }
    return ret;
}

// Reads up to readCount bytes of a file's payload starting at `offset`
// (relative to the payload, not the stored block: the block begins with
// a FILE_DATA header which is skipped here). The number of bytes read is
// returned through actualReadCount.
HResult HadoopFSOperation::ReadFile(const FILE_INFO& sourceInfo, uint64_t offset, OUT int8_t* buf, size_t readCount, OUT size_t& actualReadCount)
{
    //TODO: check file valid
    const uint64_t realoffset = sourceInfo._loc._offset + FILE_DATA::GetHeaderSize() + offset;
    
    HResult ret = DataOperations::ReadDataBlock(sourceInfo._loc._fileName, realoffset, buf, readCount, actualReadCount);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("Read File failed, name: %s, offset: %lu, realoffset: %lu, headeroffset: %lu",
               sourceInfo._name, offset, realoffset, sourceInfo._loc._offset);
    }
    
    /* Per-read CRC verification stays disabled, as in the original:
    uint32_t datacrc = boost_crc32(buf, actualReadCount);
    if (datacrc != sourceInfo._dataCRC)
    {
        DERROR("Data CRC mismatch: read: %u, given: %u, count:%u", datacrc, sourceInfo._dataCRC, actualReadCount);
        ret = ERROR_CODE_DATA_CORRUPT;
    }
    */
    return ret;
}

// Soft delete / undelete of a single directory row: only the deleted
// flag (and delete timestamp) change, then the row is rewritten in place.
HResult HadoopFSOperation::DeleteDir(DIR_INFO& dirinfo, bool recover)
{
    if (recover)
    {
        dirinfo._isDeleted = false;
    }
    else
    {
        dirinfo._isDeleted = true;
        dirinfo._deleteTime = time(NULL);
    }
    return DataOperations::WriteDirectoryInfo(_dirTable, dirinfo);
}

// Soft delete / undelete of a single file row: only the deleted flag
// (and delete timestamp) change, then the row is rewritten in place.
HResult HadoopFSOperation::DeleteFile(FILE_INFO& info, bool recover)
{
    if (recover)
    {
        info._isDeleted = false;
    }
    else
    {
        info._isDeleted = true;
        info._deleteTime = time(NULL);
    }
    return DataOperations::WriteFileInfo(_fileTable, info);
}

// Allocates a new directory id for `userid` via DataOperations.
// Serialized per user through the _users set: a user with an operation
// already in flight gets ERROR_CODE_RETRY_LATER.
HResult HadoopFSOperation::GetNewDirId(USER_ID userid, DIR_ID& dirid)
{
    {
        AutoLocker locker(&_lock);
        if (_users.count(userid) != 0)
        {
            // this user already has a write in flight
            DWARNING("current user is still writing now, user: %u", userid);
            return ERROR_CODE_RETRY_LATER;
        }
        _users.insert(userid);
    }
    
    const HResult result = DataOperations::GetNewDirId(userid, dirid);
    if (result != ERROR_CODE_OK)
    {
        DERROR("Generate new dir id failed, user: %u, error: %d", userid, result);
    }
    
    // deregister the user whether or not allocation succeeded
    {
        AutoLocker locker(&_lock);
        _users.erase(userid);
    }
    return result;
}

// Thin forwarder: backing-data-file name generation lives in
// DataOperations.
string HadoopFSOperation::GetNewFileName(USER_ID userid)
{
    return DataOperations::GetNewFileName(userid);
}

// Looks up the data file currently in use for `userid`; logs on failure
// and passes the result code straight through.
HResult HadoopFSOperation::GetCurrentFileName(USER_ID userid, string& filename)
{
    const HResult ret = DataOperations::GetCurrentFileName(userid, filename);
    if (ret != ERROR_CODE_OK)
    {
        DERROR("can't get current file name for user: %u, err: %d",
               userid, ret);
    }
    return ret;
}