%%%-------------------------------------------------------------------
%%% File    : htree.erl
%%% Author  :  <vjache>
%%% Description : Hash Tree Algorithms Library
%%%
%%% Created : 17 Jun 2009 by  <vjache>
%%%-------------------------------------------------------------------
-module(htree).

%% API
-export([config_std/4,
		 info/2,
		 pipe_uploader/2,
		 delete_by/3,
		 lookup_by/3,
		 all_iter/1,
		 truncate/1,
		 undo_all_packed/1,
		 redo_all_packed/1,
		 promote_overflown/1]).
-export([test/2,
		 test/1]).

-include("../include/iter.hrl").
-include("../include/pipe.hrl").
-include("../include/util.hrl").
-include("../include/htree.hrl").

-define(CHECKERR(Expr,ErrTag),checkerr(Expr,ErrTag)).

-define(PIPE_UPLOADER_BUFF_SIZE,10000).
-define(PIPE_BUFF_SIZE_PER_BUCKET,250).
-define(LOOKUP_SUSPEND_TIME,3*1000).

-define(LLOCK(ResourceId,Retries),llock(ResourceId,Retries)).
-define(LUNLOCK(ResourceId),lunlock(ResourceId)).

-define(RO_LOG(Filename),{ro,Filename}).
-define(RW_LOG(Filename),{rw,Filename}).

%%====================================================================
%% API
%%====================================================================

%%--------------------------------------------------------------------
%% Function: config_std/4
%% Description: Builds the standard #h3conf{} configuration with three
%%   hash-tree levels rooted at RootDir.
%%--------------------------------------------------------------------
config_std(RootDir,FileName,QueueName,DiskLogPoolName) ->
	%% Three levels: 256-way split at the root, then 64-way, then 16-way.
	%% The first level's max size (10 bytes) forces an almost immediate
	%% split of the root bucket.
	Levels=[#level{lg2_size=8, bucket_max_size=10,       priority=1},
			#level{lg2_size=6, bucket_max_size=?KB(330), priority=2},
			#level{lg2_size=4, bucket_max_size=?KB(330), priority=3}],
	#h3conf{root_dir=RootDir,
			file_name=FileName,
			packed_ext="pck",
			unique_stamp_ext="unq",
			queue=QueueName,
			dlp_name=DiskLogPoolName,
			levels=Levels}.
%%--------------------------------------------------------------------
%% Function: info/2
%% Description: Queries the tree: 'overflow' lists oversized buckets,
%%   'packed' lists leftover .pck files, {key_buckets,Key} lists a key's
%%   existing bucket chain, 'size' totals all bucket file sizes.
%%--------------------------------------------------------------------
info(Conf,overflow) ->
	%% Collect buckets whose on-disk size exceeds their level's limit.
	CollectFun=
		fun(_Filename,last_level,Acc) ->
				Acc; % the deepest level may grow without bound
		   (Filename,Level,Acc) ->
				MaxSize=Level#level.bucket_max_size,
				ActSize=filelib:file_size(Filename),
				case ActSize>MaxSize of
					true ->
						[{Filename,{ActSize,MaxSize}}|Acc];
					false ->
						Acc
				end
		end,
	fold_buckets(Conf, normal, CollectFun, []);
info(Conf,packed) ->
	%% Collect leftover packed (".pck") files on non-last levels.
	fold_buckets(
	  Conf,
	  packed,
	  fun(_Filename,last_level,Acc) ->
			  Acc;
		 (Filename,_Level,Acc) ->
			  [Filename|Acc]
	  end, []);
info(Conf,{key_buckets,Key}) ->
	%% Existing bucket files along Key's hash path.
	existing_buckets_by_key(Conf, Key);
info(Conf,size) ->
	%% Total byte size of all bucket files.
	fold_buckets(
	  Conf,
	  normal,
	  fun(Filename,_Level,Acc) ->
			  Acc+filelib:file_size(Filename)
	  end, 0).

%%--------------------------------------------------------------------
%% Function: delete_by/3
%% Description: Rewrites every bucket on Key's hash path (except the
%%   root bucket), dropping entries for which PredFun(Term) is true.
%%--------------------------------------------------------------------
delete_by(Conf,Key,PredFun) when is_function(PredFun, 1)->
	%% The head of the chain (root bucket) is skipped, matching lookup_by/3.
	[_Root|Buckets2Scan]=existing_buckets_by_key(Conf, Key),
	ZipFun=fun(Pipe) ->
				   pipe_zip:new(?PIPE_BUFF_SIZE_PER_BUCKET, Pipe)
		   end,
	%% Keep only entries the predicate does NOT match.
	FilterFun=fun(Iter) ->
					  iter_filter:new(
						iter_unzip:new(Iter),
						fun({Term,_Hash}) ->
								not PredFun(Term)
						end)
			  end,
	lists:foreach(
	  fun(Bucket)->
			  transform(Conf, Bucket, ZipFun, FilterFun)
	  end, Buckets2Scan),
	ok.
%%--------------------------------------------------------------------
%% Function: lookup_by/3
%% Description: Returns an iterator over the terms on Key's hash path
%%   (root bucket excluded) for which PredFun(Term) is true.
%%--------------------------------------------------------------------
lookup_by(Conf,Key,PredFun) when is_function(PredFun, 1)->
	DLPName=Conf#h3conf.dlp_name,
	%% Pause background maintenance so the read is not starved.
	suspend_for_lookup(Conf),
	%% Root bucket is skipped, matching delete_by/3.
	[_Root|Buckets2Scan]=existing_buckets_by_key(Conf, Key),
	OpenFun=fun(Log) -> disk_log_pool:open_log(DLPName, Log) end,
	CloseFun=fun(Log) -> disk_log_pool:close_log(DLPName, Log) end,
	BucketIters=
		[iter_unzip:new(
		   iter_disk_log:new(?RO_LOG(Bucket), OpenFun, CloseFun))
		 || Bucket<-Buckets2Scan],
	SelectFun=
		fun({Term,_Hash}) ->
				case PredFun(Term) of
					true ->
						Term;
					false ->
						?REJECT_REC % filtered out by iter_mapper
				end
		end,
	iter_mapper:new(iter_concat:new(BucketIters), SelectFun).
%%--------------------------------------------------------------------
%% Function: pipe_uploader/2
%% Description: Returns a pipe that stamps each item with the hash of
%%   KeyFun(Item) and appends {Item,Hash} pairs to the root bucket log.
%%--------------------------------------------------------------------
%% Returns a pipe that hashes each item's key and appends {Item,Hash}
%% pairs to the root bucket log. The work queue is suspended while the
%% log is open; on close the root bucket is checked for overflow and the
%% queue resumed.
pipe_uploader(Conf,KeyFun) when is_function(KeyFun,1),
								is_record(Conf, h3conf) ->
	RootFile=filename:join(Conf#h3conf.root_dir, Conf#h3conf.file_name),
	BuffSize=?PIPE_UPLOADER_BUFF_SIZE,
	%% BUG FIX: the queue name was hard-coded to 'h3'. config_std/4 takes
	%% the queue name as a parameter, so a config built with a different
	%% queue would suspend/resume the wrong one. Use the configured name.
	QName=Conf#h3conf.queue,
	DLPName=Conf#h3conf.dlp_name,
	RootLog=?RW_LOG(RootFile),
	WriterPipe=pipe_zip:new(
				 BuffSize,
				 pipe_disk_logger:new(
				   10,
				   fun() ->
						   svc_queue:suspend(QName),
						   disk_log_pool:open_log(DLPName, RootLog),
						   RootLog
				   end,
				   fun()->
						   disk_log_pool:close_log(DLPName, RootLog),
						   overflow_check(Conf, RootFile),
						   svc_queue:resume(QName)
				   end)),
	pipe_mapper:new(
	  fun(Item) ->
			  {Item,term_hash(KeyFun(Item))}
	  end, WriterPipe).
%%--------------------------------------------------------------------
%% Function: redo_all_packed/1
%% Description: Enqueues a job for every leftover packed (".pck") file
%%   at a configured (non-last) level, at that level's priority.
%%--------------------------------------------------------------------
%% Enqueues a job for every leftover packed (".pck") file that sits at a
%% configured (non-last) level, at that level's priority.
%% NOTE(review): this body is byte-identical to undo_all_packed/1 and it
%% also runs undo_packed_job/2 — there is no separate "redo" job in this
%% file. Possibly a copy-paste leftover; confirm the intended semantics.
redo_all_packed(Conf) ->
	PackedFiles=info(Conf,packed),
	QueueName=Conf#h3conf.queue,
	lists:foreach(
	  fun(PackedFile)->
			  %% Files not at a level depth ('false') or at the last
			  %% level are left alone.
			  case bucket_file_level(Conf,PackedFile) of
				  false ->
					  ok;
				  last_level ->
					  ok;
				  Level ->
					  Priority=Level#level.priority,
					  svc_queue:enqueue(
						QueueName, 
						fun()->
								undo_packed_job(Conf,PackedFile)
						end, Priority)
			  end
	  end, PackedFiles).
%%--------------------------------------------------------------------
%% Function: undo_all_packed/1
%% Description: Enqueues an undo job for every leftover packed (".pck")
%%   file at a configured (non-last) level, at that level's priority.
%%--------------------------------------------------------------------
undo_all_packed(Conf) ->
	%% Enqueue an undo job for each leftover packed file on a non-last
	%% level (e.g. after an interrupted promotion).
	QueueName=Conf#h3conf.queue,
	EnqueueFun=
		fun(PackedFile)->
				case bucket_file_level(Conf,PackedFile) of
					false ->
						ok; % not at a level depth
					last_level ->
						ok; % deepest level: nothing to undo
					Level ->
						Priority=Level#level.priority,
						Job=fun()->
									undo_packed_job(Conf,PackedFile)
							end,
						svc_queue:enqueue(QueueName, Job, Priority)
				end
		end,
	lists:foreach(EnqueueFun, info(Conf,packed)).

%% Schedules a duplicate-removal pass over BucketFile at low priority (9).
enqueue_remdups(Conf,BucketFile) ->
	Job=fun()->
				remdups(Conf,BucketFile)
		end,
	svc_queue:enqueue(Conf#h3conf.queue, Job, 9).
%% Rewrites BucketFile keeping only unique entries, but only when the
%% bucket changed since the last pass. The last pass is tracked via the
%% mtime of a ".unq" stamp file next to the bucket.
remdups(Conf,BucketFile) ->
	StampFile=BucketFile++[$.|Conf#h3conf.unique_stamp_ext],
	%% filelib:last_modified/1 returns 0 for a missing stamp, so the
	%% first pass always runs.
	Stale=filelib:last_modified(BucketFile) > filelib:last_modified(StampFile),
	case Stale of
		true ->
			ZipFun=fun(Pipe)->
						   pipe_zip:new(?PIPE_BUFF_SIZE_PER_BUCKET, Pipe)
				   end,
			UniqueFun=fun(Iter)->
							  iter_unique:new(iter_unzip:new(Iter))
					  end,
			transform(Conf, BucketFile, ZipFun, UniqueFun),
			touch_file(StampFile);
		false ->
			noop
	end,
	ok.
%% Returns an iterator over every stored term in every bucket, with the
%% stored hashes stripped off.
all_iter(Conf) when is_record(Conf, h3conf) ->
	DLPName=Conf#h3conf.dlp_name,
	OpenFun=fun(Log) -> disk_log_pool:open_log(DLPName, Log) end,
	CloseFun=fun(Log) -> disk_log_pool:close_log(DLPName, Log) end,
	BucketIters=
		fold_buckets(
		  Conf, normal,
		  fun(BucketFile,_Level,Acc) ->
				  Iter=iter_disk_log:new(?RO_LOG(BucketFile),
										 OpenFun, CloseFun),
				  [iter_unzip:new(Iter)|Acc]
		  end, []),
	StripHashFun=fun({Term,_Hash})-> Term end,
	iter_mapper:new(iter_concat:new(BucketIters), StripHashFun).
%% Truncates every bucket log in the tree to empty.
truncate(Conf) when is_record(Conf, h3conf) ->
	DLPName=Conf#h3conf.dlp_name,
	TruncFun=fun(Log) ->
					 ?CHECKERR(disk_log:truncate(Log),truncate_err)
			 end,
	fold_buckets(
	  Conf, normal,
	  fun(BucketFile,_Level,_Acc) ->
			  disk_log_pool:long_action(DLPName, ?RW_LOG(BucketFile), TruncFun)
	  end, []),
	ok.
%%====================================================================
%% Internal functions
%%====================================================================
%% Suspends the configured work queue (level 0, tag 'lookup') for the
%% lookup window so reads are not starved by maintenance jobs.
suspend_for_lookup(Conf) ->
	QueueName=Conf#h3conf.queue,
	svc_queue:suspend(QueueName, 0, lookup, ?LOOKUP_SUSPEND_TIME).

%% Appends the contents of a leftover packed file back onto its bucket
%% file and deletes the packed file, under the bucket's global lock.
undo_packed_job(Conf,PackedFile) when is_record(Conf, h3conf) ->
	%% BUG FIX: filename:basename/2 strips the directory part, so the
	%% append target used to resolve relative to the CWD instead of the
	%% bucket's own directory (PackedFile is a full path coming from
	%% info(Conf,packed)). filename:rootname/2 removes only the ".pck"
	%% extension and keeps the full path.
	BucketFile=filename:rootname(PackedFile, [$.|Conf#h3conf.packed_ext]),
	?LLOCK(BucketFile,infinity),
	append_log(Conf,BucketFile,PackedFile),
	file:delete(PackedFile),
	?LUNLOCK(BucketFile).
%% Appends FromFile's log records onto ToFile's log with no
%% transformation of either the iterator or the pipe.
append_log(Conf,ToFile,FromFile) ->
	Identity=fun(X)-> X end,
	append_log(Conf,ToFile,FromFile,Identity,Identity).
%% Pumps FromFile's records into ToFile's log. IterFun wraps the source
%% iterator and PipeFun wraps the sink pipe, letting callers filter or
%% transform the stream. Returns {ToLog,FromLog} disk_log descriptors.
append_log(Conf,ToFile,FromFile,PipeFun,IterFun) 
  when is_record(Conf, h3conf),
	   is_function(PipeFun, 1),
	   is_function(IterFun, 1) ->
	DLPName=Conf#h3conf.dlp_name,
	ToLog=?RW_LOG(ToFile),
	FromLog=?RO_LOG(FromFile),
	SourceIter=
		IterFun(iter_disk_log:new(
				  FromLog,
				  fun(Log)-> disk_log_pool:open_log(DLPName, Log) end,
				  fun(Log)-> disk_log_pool:close_log(DLPName, Log) end)),
	SinkPipe=
		PipeFun(pipe_disk_logger:new(
				  10,
				  fun()->
						  disk_log_pool:open_log(DLPName, ToLog),
						  ToLog
				  end,
				  fun()->
						  disk_log_pool:close_log(DLPName, ToLog)
				  end)),
	pump:init(pump:new(SourceIter, SinkPipe)),
	{ToLog,FromLog}.
%% Maps a bucket/packed file path to its #level{} descriptor, to
%% 'last_level' for files one directory below the deepest configured
%% level, or to 'false' when the path is not a bucket path under
%% root_dir.
bucket_file_level(Conf,Filename) ->
	RootDirSplit=filename:split(Conf#h3conf.root_dir),
	RootLen=length(RootDirSplit),
	FileSplit=filename:split(Filename),
	%% ROBUSTNESS FIX: lists:split/2 raises badarg on a path shorter than
	%% root_dir, and a path exactly equal in depth led to lists:nth(0,..)
	%% which also crashes. Such paths cannot be bucket files — answer
	%% 'false' instead of crashing.
	case length(FileSplit) > RootLen of
		false ->
			false;
		true ->
			case lists:split(RootLen, FileSplit) of
				{RootDirSplit,RelPathSplit} ->
					RelPathSplitSize=length(RelPathSplit),
					LevelsSize=length(Conf#h3conf.levels),
					if
						LevelsSize+1 == RelPathSplitSize ->
							last_level;
						LevelsSize >= RelPathSplitSize ->
							lists:nth(RelPathSplitSize, Conf#h3conf.levels);
						true ->
							false % deeper than any configured level
					end;
				_ ->
					false % not under root_dir
			end
	end.
%% Whether Filename's on-disk size exceeds its level's bucket_max_size.
is_overflow(Conf,Filename) ->
	case bucket_file_level(Conf, Filename) of
		last_level -> %% last level never overflows
			false;
		false ->
			%% ROBUSTNESS FIX: previously the atom 'false' fell into the
			%% Level clause and crashed on the record field access. A
			%% non-bucket file cannot overflow.
			false;
		Level ->
			MaxSize=Level#level.bucket_max_size,
			ActSize=filelib:file_size(Filename),
			ActSize > MaxSize
	end.
%% Checks BucketFile after a write: schedules a promotion when it
%% overflowed, otherwise a duplicate-removal pass. Returns the overflow
%% verdict.
overflow_check(Conf, BucketFile) ->
	Overflown=is_overflow(Conf,BucketFile),
	case Overflown of
		true ->
			enqueue_promote_bucket_job(Conf,BucketFile);
		false ->
			enqueue_remdups(Conf, BucketFile)
	end,
	Overflown.
%% Queues a promotion job for BucketFile at its level's priority; while
%% the job runs it suspends queue work at Priority+1 (see svc_queue).
enqueue_promote_bucket_job(Conf,BucketFile) ->
	Level0=bucket_file_level(Conf, BucketFile),
	Priority=Level0#level.priority,
	QueueName=Conf#h3conf.queue,
	Job=fun()->
				svc_queue:suspend(QueueName, Priority+1),
				promote_bucket_job(Conf,BucketFile),
				svc_queue:resume(QueueName, Priority+1)
		end,
	svc_queue:enqueue(QueueName, Job, Priority).
%% Queues a promotion job for every currently overflown bucket.
promote_overflown(Conf) ->
	Overflown=info(Conf,overflow),
	lists:foreach(
	  fun({BucketFile,_Sizes})->
			  enqueue_promote_bucket_job(Conf, BucketFile)
	  end, Overflown).

%% Runs a promotion for BucketFile under its global lock. The overflow
%% condition is re-checked after locking, since another job may already
%% have promoted the bucket. Throws {postpone,...} when the lock cannot
%% be taken within 3 retries.
promote_bucket_job(Conf,BucketFile) when is_record(Conf, h3conf)->
 	case ?LLOCK(BucketFile,3) of
		true ->
			case is_overflow(Conf,BucketFile) of
				true ->
					promote_bucket(Conf,BucketFile,
								   fun(SubBucketFile) ->
										   overflow_check(Conf,SubBucketFile)
								   end),
					?LUNLOCK(BucketFile),
					%% Re-check: promotion leaves a fresh (possibly again
					%% overflown) bucket file behind.
					overflow_check(Conf, BucketFile);
				false ->
					%% BUG FIX: the lock acquired above was never released
					%% on this path, leaving the bucket locked for as long
					%% as the calling process lives.
					?LUNLOCK(BucketFile),
					ok
			end;
		false ->
			throw({postpone,{bucket_busy,BucketFile}})
	end.
%% Resolves BucketFile's level and delegates to promote_bucket/5; a
%% last-level bucket is left alone, a non-bucket path is an error.
promote_bucket(Conf,BucketFile,CheckOverflowFun) ->
	case bucket_file_level(Conf, BucketFile) of
		last_level ->
			ok; % deepest level cannot be promoted further
		false ->
			throw({bad_bucket,BucketFile});
		Level ->
			promote_bucket(Conf#h3conf.dlp_name,
						   BucketFile,
						   Conf#h3conf.packed_ext,
						   Level#level.lg2_size,
						   CheckOverflowFun)
	end.
%% Splits an overflown bucket into sub-buckets one level down. The bucket
%% is first renamed to a ".pck" file (pack_bucket/3); its entries are
%% then streamed out, routed by the next Lg2Size hash bits into per-code
%% sub-bucket logs. The packed log is destroyed by the source iterator's
%% close callback once fully drained.
promote_bucket(DLPName,BucketFile,PackedExt,Lg2Size,CheckOverflowFun) 
  when is_function(CheckOverflowFun, 1)->
	BuffSize=?PIPE_BUFF_SIZE_PER_BUCKET,
	Dirname=filename:dirname(BucketFile),
	Filename=filename:basename(BucketFile),
	%% Switch pipe: dispatches each {BucketCode,Entry} to a per-code
	%% sub-pipe, creating the sub-bucket's log lazily on first use.
	Pipe=pipe_switch:new(
		   fun({BucketCode,_Entry})->
				   BucketCode
		   end,
		   fun(BucketCode)->
				   SubBucketFile=bucket_file(Dirname,[BucketCode],Filename),
				   RwLog=?RW_LOG(SubBucketFile),
				   ensure_dir(SubBucketFile),
				   ZipPipe=pipe_zip:new(
							 BuffSize, 
							 pipe_disk_logger:new(
							   1,
							   fun()->
									   disk_log_pool:open_log(DLPName, RwLog),
									   RwLog
							   end,
							   fun()->
									   disk_log_pool:close_log(DLPName, RwLog),
									   %% The sub-bucket may itself overflow.
									   CheckOverflowFun(SubBucketFile)
							   end)),
				   %% Drop the routing code before writing the entry.
				   _BcStripPipe=pipe_mapper:new(
								  fun({_BucketCode,Entry})->
										  Entry
								  end, ZipPipe)
		   %%,pipe_async:new(BcStripPipe)
		   end),
	PackedFile=BucketFile++[$.|PackedExt],
	pack_bucket(DLPName,BucketFile,PackedFile),
	PackedLog=?RO_LOG(PackedFile),
	%% Source: read packed entries, consume the next Lg2Size hash bits as
	%% the routing code and pass the remaining hash along with the term.
	Iter=iter_mapper:new(
		   iter_unzip:new(
			 iter_disk_log:new(
			   PackedLog,
			   fun(_) -> disk_log_pool:open_log(DLPName, PackedLog) end,
			   fun(_) -> disk_log_pool:close_log(DLPName, PackedLog),
						 disk_log_pool:destroy_log(DLPName, PackedLog) 
			   end)),
		   fun({Term,TermHash}) ->
				   {TermHash1,BucketCode}=next_bcode(TermHash,Lg2Size),
				   {BucketCode,{Term,TermHash1}}
		   end),
	Pump=pump:new(Iter, Pipe),
	pump:init(Pump),
	ok.

%% Takes a node-local global lock on ResourceId (keyed by caller pid),
%% retrying up to Retries times (or 'infinity'); true on success.
llock(ResourceId,Retries) ->
	LockId={ResourceId,self()},
	global:set_lock(LockId, [node()], Retries).
%% Releases the node-local global lock taken by llock/2 for this process.
lunlock(ResourceId) ->
	LockId={ResourceId,self()},
	global:del_lock(LockId, [node()]).
%% Creates all parent directories of File; throws the error reason on
%% failure instead of returning it.
ensure_dir(File) ->
	case filelib:ensure_dir(File) of
		ok ->
			ok;
		{error,Reason} ->
			throw(Reason)
	end.
%% Portable hash of an arbitrary term (erlang:phash2/1, default range
%% 0..2^27-1), used both for bucket routing and as the stored hash.
term_hash(Term) ->
	erlang:phash2(Term).
%% Rewrites BucketFile in place under its global lock: the bucket is
%% packed (renamed to a ".pck" file), then its contents are streamed
%% through IterFun/PipeFun back into a fresh bucket file, and the packed
%% log is destroyed.
transform(Conf,BucketFile,PipeFun,IterFun) 
  when is_function(PipeFun, 1),
	   is_function(IterFun, 1) ->
	DLPName=Conf#h3conf.dlp_name,
	PackedFile=BucketFile++[$.|Conf#h3conf.packed_ext],
	?LLOCK(BucketFile,infinity),
	pack_bucket(DLPName, BucketFile, PackedFile),
	{_BucketLog,PackedLog}=
		append_log(Conf, BucketFile, PackedFile, PipeFun, IterFun),
	disk_log_pool:destroy_log(DLPName, PackedLog),
	?LUNLOCK(BucketFile).

%% Folds FoldFun(Filename, Level | last_level, Acc) over every file under
%% root_dir whose basename matches the bucket ('normal') or packed
%% ('packed') file name; any other BuckName is matched literally.
fold_buckets(Conf,normal,FoldFun,Acc0) ->
	fold_buckets(Conf,Conf#h3conf.file_name,FoldFun,Acc0);
fold_buckets(Conf,packed,FoldFun,Acc0) ->
	PackedName=Conf#h3conf.file_name++[$.|Conf#h3conf.packed_ext],
	fold_buckets(Conf,PackedName,FoldFun,Acc0);
fold_buckets(Conf,BuckName,FoldFun,Acc0) ->
	RootDir=Conf#h3conf.root_dir,
	RootSplit=filename:split(RootDir),
	RootLen=length(RootSplit),
	%% One level descriptor per directory depth; the deepest depth maps
	%% to the atom 'last_level'.
	LevelByDepth=Conf#h3conf.levels++[last_level],
	filelib:fold_files(
	  RootDir, ".*", true,
	  fun(Filename,Acc)->
			  case lists:split(RootLen, filename:split(Filename)) of
				  {RootSplit, RelSplit} ->
					  case lists:last(RelSplit) =:= BuckName of
						  true ->
							  Depth=length(RelSplit),
							  FoldFun(Filename,
									  lists:nth(Depth, LevelByDepth),
									  Acc);
						  false ->
							  Acc
					  end;
				  _ ->
					  Acc
			  end
	  end, Acc0).

%% Moves TargetFile's log contents to PackedFile via disk_log:reopen,
%% leaving TargetFile's log empty. If a packed file already exists (a
%% previous pass was interrupted), it is reused instead.
pack_bucket(DLPName, TargetFile,PackedFile) ->
	case filelib:is_regular(PackedFile) of
		true ->
			?REP_WARN([{old_pack_detected,PackedFile},{for_bucket,TargetFile}]),
			reuse_packed;
		false ->
			Log=?RW_LOG(TargetFile),
			disk_log_pool:open_log(DLPName, Log),
			?CHECKERR(disk_log:reopen(Log, PackedFile),reopen_err),
			disk_log_pool:close_log(DLPName, Log),
			packed
	end.
%% Like buckets_by_key/2, but keeps only bucket files that exist on disk.
existing_buckets_by_key(Config, Key) when is_record(Config, h3conf) ->
	[Bucket || Bucket <- buckets_by_key(Config, Key),
			   filelib:is_file(Bucket)].
%% All candidate bucket files for Key, from the root bucket (empty code
%% prefix) down to the deepest level, in order.
buckets_by_key(Config, Key) when is_record(Config, h3conf) ->
	RootDir=Config#h3conf.root_dir,
	FileName=Config#h3conf.file_name,
	BCodes=bcodes(term_hash(Key),Config#h3conf.levels),
	Prefixes=[lists:sublist(BCodes, Len)
			  || Len<-lists:seq(0, length(BCodes))],
	lists:map(
	  fun(Prefix)->
			  bucket_file(RootDir, Prefix, FileName)
	  end, Prefixes).
%% Slices the term hash into one bucket code per level, each level
%% consuming its own lg2_size low-order bits of the remaining hash.
bcodes(TermHash,Levels) ->
	{Codes,_FinalHash}=
		lists:mapfoldl(
		  fun(Level,HashIn)->
				  {HashOut,BucketCode}=
					  next_bcode(HashIn,Level#level.lg2_size),
				  {BucketCode,HashOut}
		  end, TermHash, Levels),
	Codes.
%% Consumes the low Lg2Size bits of the (32-bit truncated) term hash as
%% the bucket code and returns {RemainingHighBits, BucketCode}.
next_bcode(TermHash,Lg2Size) when Lg2Size =<32,Lg2Size >0 ->
	%% Truncate to 32 bits exactly as the original binary match did.
	<<TermHash32:32/integer>> = <<TermHash:32/integer>>,
	BucketCode=TermHash32 band ((1 bsl Lg2Size)-1),
	TermHash1=TermHash32 bsr Lg2Size,
	{TermHash1,BucketCode}.
%% Builds RootDir/XXX/YYY/.../FileName where each bucket code becomes a
%% zero-padded 3-digit uppercase-hex directory name.
bucket_file(RootDir,BucketCodeList,FileName) ->
	DirNames=[lists:flatten(io_lib:format("~3.16.0B",[BucketCode]))
			  || BucketCode <- BucketCodeList],
	filename:join([RootDir] ++ DirNames ++ [FileName]).
%% Creates/overwrites Filename with a single zero byte, thereby updating
%% its mtime (used for the ".unq" stamp files); throws on any file error.
touch_file(Filename) ->
	{ok, Dev}=?CHECKERR(file:open(Filename, [write, raw, binary]),touch_file),
	?CHECKERR(file:write(Dev, <<0>>),touch_file),
	?CHECKERR(file:close(Dev),touch_file).

%% Passes Result through unchanged unless it is {error,Reason}, in which
%% case {ErrTag,Reason} is thrown.
checkerr(Result,ErrTag) ->
	case Result of
		{error,Reason} ->
			throw({ErrTag,Reason});
		_ ->
			Result
	end.

%%====================================================================
%% Test functions
%%====================================================================
%% Ad-hoc developer tests (note the hard-coded d:/ paths).
test(blf,Filename) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	bucket_file_level(C, Filename);
test(info,Opt) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	info(C, Opt);
test(bbk,Key) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	{buckets_by_key(C, Key),existing_buckets_by_key(C, Key)};
test(lookup,Key) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	%% Match triples whose subject equals Key.
	Pred=fun({S,_P,_O}) -> S==Key;
			(_) -> false
		 end,
	iter:print(lookup_by(C, Key, Pred));
test(lookup1,Key) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	Pred=fun({S,_P,_O}) -> S==Key;
			(_) -> false
		 end,
	Iter=lookup_by(C, Key, Pred),
	%% Drain the iterator into a terminator pipe (discards results).
	pump:init(pump:new(Iter, pipe:terminator()));
test(delete,Key) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	Pred=fun({S,_P,_O}) -> S==Key;
			(_) -> false
		 end,
	delete_by(C, Key, Pred);
test(bcodes,Key) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	bcodes(term_hash(Key), C#h3conf.levels).
%% More ad-hoc developer tests (hard-coded d:/ paths).
test(promote) ->
	NoCheckFun=fun(_)-> ok end,
	promote_bucket(disk_log_pool,
				   "d:/_buck_root/0DE/000/bck.log",
				   "pck", 8, NoCheckFun);
test(prep) -> 
	SrcIter=iter_term_reader:new_from_file(
			  "D:/_Development/Erlang/wsp/demetra/test/valg_bulk_SUITE_data/triple.txt"),
	DstPipe=pipe_disk_logger:new(
			  [{name,w},{file,"d:/_buck_root/test-data.log"}]),
	pump:init(pump:new(SrcIter, DstPipe));
test(mk) ->
	C=config_std("d:/_buck_root", "bck.log", h3, disk_log_pool),
	UploadPipe=pipe_uploader(C,
							 fun({S,_P,_O})->
									 S
							 end),
	SrcIter=iter_disk_log:new(
			  [{name,src},{file,"d:/_buck_root/test-data.log"}]),
	pump:init(pump:new(SrcIter, UploadPipe)).

