#include "archive.h"
#include "../sleipner_config.h"

#include "zlib/zlib.h"

#ifdef _ENABLE_LUABIND
#include "script/scripthelper.h"
#include <luabind/luabind.hpp>
#endif//_ENABLE_LUABIND

#include "util/common_macros.h"
#include "util/log.h"
#include "util/mmgr.h"		    
#include <cassert>
using namespace vfs;
using namespace util;

// Releases any cached (decompressed) payload still owned by this entry.
Archive::FileEntry::~FileEntry()
{
	// delete[] on a null pointer is a no-op, so no guard is needed
	delete [] (char*)pData;
}

// Fresh archive: nothing cached yet; the backing file is opened on demand.
Archive::Archive()
	: m_AllCached(false)
{
}

// Ensure the backing archive file is released together with the object.
Archive::~Archive()
{
	m_File.Close();
}

// Worst-case output size for zlib-compressing 'Size' input bytes.
// zlib guarantees compress()/compress2() need at most sourceLen + 0.1% + 12
// bytes of destination space; we allow a full 1% + 13 for headroom.
// Pure integer math replaces the old (Size*1.01)+12 formula, which round-
// tripped through double and could lose precision for very large sizes.
// The new bound is always >= the old one, so existing buffers only grow.
unsigned long Archive::FindCompressBound(unsigned long	Size)
{
	return Size + (Size / 100) + 13;
}

// Shared state threaded through ExportEntryCB while SaveArchive streams
// every entry's (possibly compressed) payload out to disk.
struct	ExportData
{
	util::FileIO*		pFile;		// destination archive file, positioned after the header
	unsigned long		Index;		// running file offset of the next data block written
	unsigned long		TotalData;	// accumulated uncompressed byte count across entries
};


// Per-entry callback for SaveArchive: compresses the entry's cached data and
// appends it to the archive file. Entries that compress poorly -- or fail to
// compress at all -- are stored raw, so load-time decompression cost is only
// paid where it buys real space savings. Updates pInfo->Index/TotalData and
// the entry's CompressedIndex/CompressedSize/Flags as a side effect.
void	Archive::ExportEntryCB(FileEntry* pEntry, void* pData)
{	
	assert(pEntry && pData && pEntry->pData);
	ExportData*	pInfo = (ExportData*)pData;
	
	unsigned long			MaxSize	= FindCompressBound(pEntry->UncompressedSize);
	unsigned char*			pTemp = new unsigned char[MaxSize];
	
	int result = compress2(pTemp, &MaxSize, (unsigned char*)pEntry->pData, pEntry->UncompressedSize, 9);	// max compression

	// Store raw when compression failed (fixed: result was previously ignored,
	// so a failure wrote garbage), or when we gain only 5% or less -- for such
	// entries the decompression speed hit isn't worth the tiny saving.
	if (result != Z_OK || MaxSize >= pEntry->UncompressedSize*0.95)
	{
		// we'll save out the original data
		MaxSize = pEntry->UncompressedSize;
		pEntry->CompressedSize  = MaxSize;
		pEntry->Flags = RES_UNCOMPRESSED;
		pInfo->pFile->Write(pEntry->pData, MaxSize*sizeof(unsigned char));
	} else
	{
		// save out the compressed information
		pEntry->Flags = RES_COMPRESSED;
		pEntry->CompressedSize	= MaxSize;
		pInfo->pFile->Write(pTemp, MaxSize*sizeof(unsigned char));
	}

	// data saved
	pInfo->TotalData+=pEntry->UncompressedSize;
	// track location
	pEntry->CompressedIndex = pInfo->Index;
	// update the index to the space after this block
	pInfo->Index+=MaxSize;

	delete [] pTemp;
}

// Scratch buffer used by CollectEntriesCB to pack every FileEntry into one
// contiguous byte array (the table of contents) before it is compressed.
struct	CollectEntries
{
	unsigned char*		pData;	// destination buffer (FileCount * sizeof(FileEntry) bytes)
	unsigned long	DataSize;	// total capacity of pData in bytes
	unsigned long	DataIndex;	// write cursor: byte offset of the next entry slot
};

// Copies one FileEntry verbatim into the TOC staging buffer and advances
// the write cursor by one entry's worth of bytes.
void	Archive::CollectEntriesCB(FileEntry* pEntry, void* pData)
{
	assert(pEntry && pData);
	CollectEntries* pOut  = (CollectEntries*)pData;
	unsigned char*	pDest = pOut->pData + pOut->DataIndex;
	memcpy(pDest, pEntry, sizeof(FileEntry));
	pOut->DataIndex += sizeof(FileEntry);
}

// Serializes the whole archive to 'File': a FileHeader, each entry's
// (optionally compressed) data block, then the zlib-compressed table of
// contents. The header is written twice -- once up front to reserve space,
// then again at the end once the data/TOC sizes and offsets are known.
// Returns false (and logs) if the archive is empty, the file can't be
// created, or the TOC fails to compress.
bool	Archive::SaveArchive(const std::string& File)
{
	if (m_File.IsOpen()) m_File.Close();
	if (m_FileSet.GetElementC() == 0)
	{
		_LOG(MSG_ERROR, "Can't save an empty archive to file " << File);
		return false;	// nothing to write
	}

	if (!m_File.Open(File, FileIO::FILE_BINARY|FileIO::FILE_WRITE)) 
	{
		_LOG(MSG_ERROR, "Unable to save archive " << File);
		return false;	// unable to create file
	}

	FileHeader		Header;
	Header.VersionNum	= VERSION_NUMBER;
	Header.MagicNum		= MAGIC_NUMBER;
	Header.FileCount	= m_FileSet.GetElementC();
	Header.DataIndex	= sizeof(Header);				// data follows straight after header
	m_File.Write(&Header, sizeof(Header));

	// stream out every entry's payload (compressed where worthwhile)
	ExportData		ExportInfo;
	ExportInfo.Index		= Header.DataIndex;
	ExportInfo.pFile		= &m_File;
	ExportInfo.TotalData	= 0;	// fixed: was NULL -- this is a byte count, not a pointer
	m_FileSet.ForEach(ExportEntryCB, &ExportInfo);

	Header.DataSize = ExportInfo.TotalData;
	Header.DataSizeCompressed = ExportInfo.Index - Header.DataIndex;	// total size

	// aggregate all the entries, then compress and save out as a single operation
	CollectEntries	CollectInfo;
	CollectInfo.DataIndex = 0;
	CollectInfo.DataSize  = Header.FileCount*sizeof(FileEntry);
	CollectInfo.pData	  = new unsigned char[CollectInfo.DataSize];
	m_FileSet.ForEach(CollectEntriesCB, &CollectInfo);

	unsigned long			MaxSize	= FindCompressBound(CollectInfo.DataSize);
	unsigned char*			pTemp   = new unsigned char[MaxSize];
	
	int result = compress2(pTemp, &MaxSize, CollectInfo.pData, CollectInfo.DataSize, 9);	// max compression
	delete [] CollectInfo.pData;	// fixed: this buffer was leaked on every save

	if (result != Z_OK)
	{
		// fixed: a failed compress2 previously wrote garbage as the TOC.
		// LoadArchive always decompresses the TOC, so a raw fallback isn't an option.
		_LOG(MSG_ERROR, "Unable to compress TOC for archive " << File);
		delete [] pTemp;
		m_File.Close();
		return false;
	}

	Header.TOCSize  = CollectInfo.DataSize;
	Header.TOCSizeCompressed = MaxSize;
	Header.TOCIndex = Header.DataIndex + Header.DataSizeCompressed;

	// write out the compressed table of contents
	m_File.Write(pTemp, MaxSize*sizeof(unsigned char));
	delete [] pTemp;

	// rewrite the header now that all sizes/offsets are known
	m_File.SetPos(0);
	m_File.Write(&Header, sizeof(Header));
	m_File.Close();
	return true;
}

// Opens an existing archive, validates its header, and loads the table of
// contents into m_FileSet. Entry payloads are NOT loaded here -- each entry's
// pData starts NULL and is filled lazily by CacheEntry(). On success the
// file handle stays open (CacheEntry reads from it later); on any failure
// the handle is closed and false is returned.
bool	Archive::LoadArchive(const std::string& File)
{
	if (!m_File.Open(File, FileIO::FILE_BINARY|FileIO::FILE_READ)) 
	{
		_LOG(MSG_ERROR, "Unable to load archive " << File);
		return false;	// unable to load archive
	}
	FileHeader		Header;
	m_File.Read(&Header, sizeof(Header));
	if (Header.VersionNum != VERSION_NUMBER)
	{
		_LOG(MSG_ERROR, "Unable to load archive " << File << " : illegal version number");
		m_File.Close();		// fixed: don't keep a handle to a file we rejected
		return false;
	}
	if (Header.MagicNum != MAGIC_NUMBER)
	{
		_LOG(MSG_ERROR, "Unable to load archive " << File << " : illegal magic number");
		m_File.Close();		// fixed: don't keep a handle to a file we rejected
		return false;
	}

	// load the Table of contents, unpacking it as we go along
	unsigned char*	pCompressedTOC = new unsigned char[Header.TOCSizeCompressed];
	m_File.SetPos(Header.TOCIndex);
	m_File.Read(pCompressedTOC, Header.TOCSizeCompressed* sizeof(unsigned char));

	// create a temp array for loading all the file entries
	FileEntry*	FileA = new FileEntry[Header.FileCount];
	assert(Header.TOCSize == sizeof(FileEntry)*Header.FileCount && "Invalid TOC size");
	int result = uncompress((unsigned char*)FileA, &Header.TOCSize, pCompressedTOC, Header.TOCSizeCompressed);

	// should now be decompressed into FileA.. we can delete pCompressedTOC
	delete [] pCompressedTOC;

	if (result != Z_OK)
	{
		// fixed: a failed uncompress previously went undetected and garbage
		// entries were registered. Null the pData pointers (the raw bytes may
		// contain stale pointer values) so ~FileEntry doesn't free garbage.
		_LOG(MSG_ERROR, "Unable to load archive " << File << " : corrupt table of contents");
		for (unsigned int i=0; i<Header.FileCount; i++)
			FileA[i].pData = NULL;
		delete [] FileA;
		m_File.Close();
		return false;
	}

	// build file-list
	// we could actually reuse the array generated in FileA, but that makes it harder to do things like collapse archives
	for (unsigned int i=0; i<Header.FileCount; i++)
	{
		// The TOC bytes carry whatever pData pointer was live at save time;
		// null it so ~FileEntry (via delete [] FileA) doesn't free a garbage address.
		FileA[i].pData = NULL;

		// only load if not duplicated
		if (!FileExists(FileA[i].FileName))
		{
			FileEntry*	pEntry = new FileEntry;
			memcpy(pEntry, &FileA[i], sizeof(FileEntry));
			pEntry->pData = NULL;	// ensure that before it's loaded, the data points to NULL
			m_FileSet.AddElement(pEntry->FileName, pEntry);
		}
	}
	delete [] FileA;
	return true;
}

// Loads (and decompresses if flagged RES_COMPRESSED) a single entry's payload
// from the open archive file into pEntry->pData.
// Returns false if the data was already cached or decompression fails;
// returns true on a fresh successful load.
bool	Archive::CacheEntry(FileEntry* pEntry)
{
	assert(pEntry && "Invalid entry!");
	if (pEntry->pData)	
		return false;	// already loaded!
	pEntry->pData = (void*)(new unsigned char[pEntry->UncompressedSize]);

	// position for read
	m_File.SetPos(pEntry->CompressedIndex);

	if (pEntry->Flags & RES_COMPRESSED)
	{
		// load compressed data
		unsigned char*	pBuffer = new unsigned char[pEntry->CompressedSize];	// reuse this somehow to avoid fragmenting memory?
		m_File.Read(pBuffer, pEntry->CompressedSize*sizeof(unsigned char));

		// uncompress data
		unsigned long		MaxSize = pEntry->UncompressedSize;
		int result = uncompress((unsigned char*)pEntry->pData, &MaxSize, pBuffer, pEntry->CompressedSize);

		// delete temp buffer
		delete [] pBuffer;

		if (result != Z_OK)
		{
			// fixed: a failed decompress previously left garbage in pData
			// and still reported success to the caller
			_LOG(MSG_ERROR, "Failed to decompress archive entry " << pEntry->FileName);
			delete [] (unsigned char*)pEntry->pData;
			pEntry->pData = NULL;
			return false;
		}
		pEntry->UncompressedSize = MaxSize;
	} else
	{
		// stored raw, so run the easy path
		m_File.Read(pEntry->pData, pEntry->UncompressedSize*sizeof(unsigned char));
	}
	return true;
}

// ForEach adapter: pData carries the Archive instance, so simply forward
// the entry to its CacheEntry member function.
void	Archive::CacheEntryCB(FileEntry* pEntry, void* pData)
{
	assert(pEntry && pData && "Invalid data!");
	((Archive*)pData)->CacheEntry(pEntry);
}

// Intended to pre-load and decompress every entry in the archive.
//
// Candidate caching strategies:
//   1. Decompress everything on load      -- fastest access, biggest footprint
//   2. Load all, decompress on demand     -- smaller footprint, per-hit cost
//   3. Load TOC only, fetch on demand     -- minimal footprint, slowest; best
//      for sparse archives where most entries are never touched
//
// Strategy 1 is the intended behaviour here; ideally it would be selectable
// per archive (full archives like UI fully loaded, sparse ones TOC-only).
// The actual eager pass is currently disabled, so entries are still cached
// lazily via CacheEntry().
bool	Archive::CacheAll()
{
	//m_FileSet.ForEach(CacheEntryCB, this);
	return true;
}

// True if an entry with this (full path) name is already registered.
bool	Archive::FileExists(const std::string& File)
{
	return m_FileSet.ElementExists(File);
}

// Appends to 'List' the names of all entries whose directory part equals
// 'Path' (case-insensitive) and -- when 'Ext' is non-empty -- whose extension
// equals 'Ext' (case-insensitive). Returns the number of names appended.
int		Archive::BuildFileList(std::vector<std::string>& List, const std::string& Path, const std::string& Ext)
{
	int FileC = 0;
	int ElemC = (int)m_FileSet.GetElementC();

	for (int i = 0; i < ElemC; i++)
	{
		FileEntry* pFile = m_FileSet.GetElementByIndex(i); // this is probably called... once; need to optimize?
		std::string FileName = pFile->FileName;
		
		if (FileName.length() > Path.length())
		{
			std::string		BasePath, BaseExt;
			// fixed: find_last_of returns size_type; the old code stored it in
			// an int and tested > 0, relying on npos truncating to -1. Compare
			// against npos explicitly. A separator/dot at position 0 still
			// counts as "no path"/"no extension", matching the old behaviour.
			std::string::size_type	To = FileName.find_last_of("/\\");
			if (To != std::string::npos && To > 0)
			{
				BasePath = FileName.substr(0, To);
			}
			To = FileName.find_last_of(".");
			if (To != std::string::npos && To > 0)
			{
				BaseExt = FileName.substr(To+1);
			}
			if (BasePath.empty() || (0 != strcmpi(Path.c_str(), BasePath.c_str())))
				continue;

			if (!Ext.empty())
			{
				if (BaseExt.empty() || (0 != strcmpi(Ext.c_str(), BaseExt.c_str())))
					continue;
			}

			List.push_back(FileName);
			FileC++;
		}
	}

	return FileC;
}


bool	Archive::GetFile(const std::string& File, LoadedBlock& Block)
{
	return GetFile(m_FileSet.GetKey(File), Block);
}


// Copies the (decompressed) contents of entry 'FileID' into 'Block', loading
// it from disk first if it isn't cached. The caller owns Block.DataArray.
// Large entries (>16KB) are evicted from the internal cache after the copy
// to bound memory use. Returns false if the ID is unknown or loading fails.
bool	Archive::GetFile(unsigned int FileID, LoadedBlock& Block)
{
	FileEntry*	pEntry = m_FileSet.GetElement(FileID);
	if (pEntry)
	{
		// make sure anything that was there is gone
		Block.Reset();

		// is it loaded yet?
		if (!pEntry->pData)
			CacheEntry(pEntry);

		// fixed: if caching failed we previously memcpy'd from a NULL pointer
		if (!pEntry->pData)
			return false;

		// hand the caller its own copy of the data
		Block.Size = pEntry->UncompressedSize;
		Block.DataArray = new unsigned char[Block.Size];
		memcpy(Block.DataArray, pEntry->pData, Block.Size);

		// free data?
		if (pEntry->UncompressedSize > 16 * 1024) {		// we free larger blocks when we can
			unsigned char* pDataA = (unsigned char*)pEntry->pData;
			UTIL_SAFE_DELETEA(pDataA);
			pEntry->pData = NULL;
		}
		return true;
	}
	return false;
}

// Reads 'File' from disk and registers it as a new (not yet compressed)
// archive entry; the payload stays cached in memory until SaveArchive runs.
// Returns false if the name is already present, too long for the entry's
// FileName buffer, or the file can't be opened.
bool	Archive::AddFile(const std::string& File)
{
	if (FileExists(File)) return false;	// file exists

	// fixed: the old code only assert()ed the length, so a release build
	// could overflow FileEntry::FileName through the strcpy below
	if (File.size() >= FILE_LENGTH)
	{
		_LOG(MSG_ERROR, "Unable to add file (name too long) " << File);
		return false;
	}

	FileIO	InputFile;
	if (!InputFile.Open(File, FileIO::FILE_BINARY|FileIO::FILE_READ))
	{
		_LOG(MSG_ERROR, "Unable to load file " << File);
		return false;
	}
	FileEntry*	pEntry = new FileEntry;
	strcpy(pEntry->FileName, File.c_str());
	pEntry->Flags				= 0;
	pEntry->UncompressedSize	= InputFile.GetSize();
	pEntry->pData = new unsigned char[pEntry->UncompressedSize];
	InputFile.Read(pEntry->pData, pEntry->UncompressedSize* sizeof(unsigned char));
	InputFile.Close();	// fixed: handle was previously left open until scope exit

	// not compressed yet -- SaveArchive fills these in
	pEntry->CompressedIndex = 0;
	pEntry->CompressedSize  = 0;

	m_FileSet.AddElement(File, pEntry);
	return true;
}	

// Extracts every entry into directory 'Dest', recreating each entry's
// relative directory structure beneath it. Entries that fail to load are
// skipped; returns false only if an entry slot is unexpectedly missing.
bool		Archive::Unpack(const std::string& Dest)
{
	CacheAll();
	// fixed: result was stored in an unused 'Success' local; best effort --
	// the directory may already exist
	util::FileIO::MakeDirectory(Dest);
	for (unsigned int i=0; i<m_FileSet.GetElementC(); i++)
	{
		FileEntry*	pFile = m_FileSet.GetElementByIndex(i);
		if (!pFile) return false;

		// force it to cache and copy the payload out
		LoadedBlock Block;
		if (!GetFile(pFile->FileName, Block)) continue;

		std::string		Path = VFS::GetPathName(pFile->FileName);
		std::string		File = VFS::GetFilename(pFile->FileName);
	
		std::string		Dir = Dest+"/"+Path;
		FileIO::MakeDirectory(Dir);
		FileIO	OutFile;
		if (OutFile.Open(Dir+"/"+File, FileIO::FILE_WRITE|FileIO::FILE_BINARY))
		{
			OutFile.Write(Block.DataArray, Block.Size);
			OutFile.Close();
		}
	}
	return true;
}

// Factory used by the script bindings, which can't invoke the C++
// constructor directly. Caller takes ownership of the returned object.
Archive*		Archive::CreateArchive()
{
	return new Archive();
}



// Exposes the Archive API to Lua scripts. Compiles to a no-op unless
// _ENABLE_LUABIND is defined.
void				Archive::Register(LuaPlus::LuaState* _pScript)
{
#ifdef _ENABLE_LUABIND
	lua_State*	pLua = _pScript->GetCState();

	// member functions reachable from script
	luabind::module(pLua)
	[
		luabind::class_<Archive>( "Archive" )
			.def( "LoadArchive",	&Archive::LoadArchive)
			.def( "SaveArchive",	&Archive::SaveArchive)
			.def( "FileExists",		&Archive::FileExists)
			.def( "AddFile",		&Archive::AddFile)
	];

	// free-function factory so scripts can create archive instances
	luabind::module(pLua)
	[
		luabind::def( "CreateArchive",	&CreateArchive )
	];
#endif // _ENABLE_LUABIND
}
