:- ensure_loaded([hyp_generation,
                  evaluation,
                  abduction_extension,
                  run,
                  user_score]). 

:- dynamic quick_score/2, failed_ic/1.

% NB:
% By default, a priority queue (smaller key first) is used for storing 
% the state candidates

% HOOK:
% new_state_candidate_store(-Store): create an empty candidate store.
% The store is a plain list of Score-State pairs kept in ascending key
% order (see add_state/3), so the empty store is [].
new_state_candidate_store([]).
% is_empty_store(+Store): succeeds iff the store holds no candidates.
is_empty_store([]).

% HOOK:
% this is called when a query computation is initiated.
% add any initialisation code for the heuristics function here.
heuristics_reset.

% HOOK:
% select_state(ExistingStateCandidates, SelectedState, RestStates)
%  ExistingStateCandidates: a list of Score-State pairs
%  SelectedState: Score-State pair selected from the candidates
%  RestStates: remaining pairs
select_state([H|T], H, T).

% HOOK:
% state_score(State, Score)
%  State = (Gs, As, Cs, Ns, Q, Info)
%  Score: a number

% NO SCORE
% state_score(_, 0). 


% init_learning_score(-(SolutionScore, HeuristicScore))
% Initialise the learning search with the empty solution: evaluate it,
% compute its user-defined heuristic score, and memoise that score so
% later states that reach the same (empty) rule form can reuse it via
% the quick_score/2 cache (see learning_score/4, clause 1).
init_learning_score((0, HS)) :-
        evaluate([], Evaluation),
        heuristic_score([], Evaluation, [], HS),        %USER DEFINED IN USER_SCORE.PL
        % assertz/1 instead of the deprecated/ambiguous assert/1:
        % the cache position of quick_score facts is irrelevant here,
        % but assertz makes the intent explicit.
        assertz(quick_score([], HS)).

% learning_score/4, clause 1 (memoised case): after a 'gpr' step, if the
% rule form derived from the current assumptions As has already been
% scored (quick_score/2 cache, populated by clause 2 below and by
% init_learning_score/1), reuse the cached heuristic score and skip the
% expensive evaluation.  The final cut commits to the cached result.
learning_score((b, gpr(_L, _Goal)), (_OldGs, _OldAs, _OldCs, _OldNs, _OldQ, _OldInfo), (_Gs, As, _NewCs, _Ns, _Q, _Info), (SScore, HS)) :-
        find_rules(As, RuleForm),
        quick_score(RuleForm, HS),
        solution_score(SScore), !.
        
        
% learning_score/4, clause 2 (full evaluation): the rule form has not
% been scored before (clause 1's quick_score/2 lookup failed).  Build
% the candidate solution from the assumptions, evaluate it, compute the
% user-defined heuristic score, cache it, and — when integrity
% constraint checking is enabled via the blackboard flag ic_check —
% only save solutions that pass check_ic/2; failing rule forms are
% recorded in failed_ic/1.
% NOTE(review): quick_score/2 is asserted BEFORE the IC check, so rule
% forms that fail the IC test are still cached and will hit clause 1 on
% re-encounter — presumably intentional (avoids re-evaluating them);
% verify against the callers of failed_ic/1.
learning_score((b, gpr(_L, _Goal)), (_OldGs, _OldAs, _OldCs, _OldNs, _OldQ, _OldInfo), (_Gs, As, _NewCs, _Ns, _Q, Info), (SScore, HS)) :-
       % ilp_solution_changes(L, Goal, As),
        !,
        % trace memory consumption (SICStus statistics/2: global stack)
        statistics(global_stack, [_, X]), print('memory: '),log(X),
        find_rules(As, RuleForm), 
        build_solution(As, Solution),
        evaluate(Solution, Evaluation),
        solution_score(SScore),
        % HS: the LOWER the better
        heuristic_score(Solution, Evaluation, Info,  HS),        %USER DEFINED IN USER_SCORE.PL
        % memoise the score for this rule form (reused by clause 1)
        assert(quick_score(RuleForm, HS)),
        (bb_get_value(ic_check, true) ->
                check_ic(Solution, OutComeIC),
                (OutComeIC = succeeded ->
                save_solution(Solution, Info, HS)
                ;
                write('\nFailed ic test, solution not considered:\n '), print(Solution), nl,
                assert(failed_ic(RuleForm)))
        ;
         save_solution(Solution, Info, HS)
        ).

% learning_score/4, catch-all clause: for every transition that is not
% a 'gpr' step the solution score is 0 and the heuristic score is just
% carried over from the score entry stored in the state's Info.
learning_score((_Type, _Goal), (_OldGs, _OldAs, _OldCs, _OldNs, _OldQ, _OldInfo), (_Gs, _As, _NewCs, _Ns, _Q, Info), (0, HS)) :-
        get_info(Info, scr, (_PreviousSolutionScore, HS)).



%When shall we prune? We compute the score of the best solution obtainable
%from the partially constructed rule. Since lower scores are better, if that
%optimistic score is NOT lower than the saved best solution's score, we prune.
%In the monotonic case we can only add conditions to the current SINGLE rule.

%If it succeeds no prune!
% prune/4, clause 1: decide whether to prune the current search branch
% after a 'gpr' step.  An optimistic bound is computed by adding a dummy
% condition to the current rule and assuming every entailed negative
% example can still be excluded (get_optimistic_evaluation/2).  Lower
% scores are better: if even the optimistic score is not lower than the
% best saved solution's score, this branch cannot improve on it and we
% prune by FAILING.  Succeeding means: do NOT prune.
prune((b, gpr(_L, _Goal)), (_OldGs, _OldAs, _OldCs, _OldNs, _OldQ, _OldInfo), (_Gs, As, _NewCs, _Ns, _Q, Info), (_SScore, _HS)) :- !,
        (bb_get_value(cover_loop, true) ->        
        build_solution(As, Solution),
        evaluate(Solution, Evaluation),
        % BUG FIX: pass the state's Info — previously the unbound
        % singleton InfoHere was passed while Info (bound in the head)
        % went unused; learning_score/4 clause 2 passes Info in the same
        % argument position of heuristic_score/4.
        heuristic_score(Solution, Evaluation, Info,  ScoreHere), print(Solution), nl, nl,
        add_dummy_condition(Solution, SolutionPlusDummy),
        get_optimistic_evaluation(Evaluation, EvaluationOptimistic),
        heuristic_score(SolutionPlusDummy, EvaluationOptimistic, Info,  ScoreOptimistic),
        (get_best_solution(_SolutionBest, _InfoBest, ScoreBest), !,
         wls(['The best score is ', ScoreBest, ' while the score here is ', ScoreHere, ' and the best we can obtain is ', ScoreOptimistic]),
         % lower is better: keep searching only if the optimistic bound
         % beats the best solution so far, otherwise fail (prune)
         (ScoreOptimistic < ScoreBest -> true ; log('Prune!'), fail)
        ;
         % no best solution saved yet: nothing to compare against
         true, !
        ),
        additional_prune(Solution,Evaluation)
        ;
        %cover loop not enabled: never prune
        true, !).                                     

prune(_, _, _, _).

% add_dummy_condition(+Rules, -RulesWithDummy)
% Append a dummy condition to the body of the FIRST rule only; the
% remaining rules pass through unchanged.  (In the monotonic case only
% a single rule is under construction — see the comment above prune/4.)
add_dummy_condition([(H :- B) | Tail], [(H :- Bdummy) | Tail]) :-
        dummy_to_body(B, Bdummy).

% dummy_to_body(+Body, -BodyWithDummy)
% Append the atom `dummy` as the final conjunct of a clause body:
% walk the conjunction to its last literal and conjoin `dummy` there.
dummy_to_body((First, Rest), (First, RestPlusDummy)) :- !,
        dummy_to_body(Rest, RestPlusDummy).

dummy_to_body(LastLiteral, (LastLiteral, dummy)).

% get_optimistic_evaluation(+Evaluation, -OptimisticEvaluation)
% Map each out_example(Example, Label, Entailed) entry to the best
% outcome still achievable by adding conditions to the current rule:
%   positive & entailed     -> stays entailed        (1)
%   positive & not entailed -> cannot be fixed       (-1)
%   negative & entailed     -> can still be excluded (-1)
%   negative & not entailed -> stays not entailed    (-1)
get_optimistic_evaluation([], []).
get_optimistic_evaluation([out_example(Example, Label, Entailed) | Rest],
                          [out_example(Example, Label, Best) | OptimisticRest]) :-
        optimistic_outcome(Label, Entailed, Best),
        get_optimistic_evaluation(Rest, OptimisticRest).

% optimistic_outcome(+Label, +Entailed, -BestEntailed)
optimistic_outcome(1, 1, 1).
optimistic_outcome(1, -1, -1).
optimistic_outcome(-1, _, -1).


% OLD VERSION
%% BREADTH FIRST
%state_score((_Gs, _As, _Cs, _Ns, _Q, Info), V) :-
%        get_info(Info, depth, V).




% HOOK:
% add_states(NewStates, ExistingStates, AllStates)
%  NewStates: a list of new Score-State pairs to add
%  ExistingStates: existing pairs
%  AllStates: all the pairs
% add_states(+NewStates, +ExistingStates, -AllStates)
% Fold each new Score-State pair into the ordered candidate list by
% repeated ordered insertion (add_state/3).
add_states([], Store, Store).
add_states([Pair | MorePairs], Store, FinalStore) :-
        add_state(Store, Pair, StoreWithPair),
        add_states(MorePairs, StoreWithPair, FinalStore), !.

% add_state(+SortedPairs, +NewKey-NewValue, -ResultPairs)
% Ordered insertion of a Key-Value pair, keeping keys in ascending
% standard order of terms (@=<).  A new pair whose key ties with an
% existing one is placed in front of it.
add_state([], NewPair, [NewPair]).
add_state([Key-Value | Rest], NewKey-NewValue, [NewKey-NewValue, Key-Value | Rest]) :-
        NewKey @=< Key, !.
add_state([Pair | Rest], NewKey-NewValue, [Pair | Inserted]) :-
        add_state(Rest, NewKey-NewValue, Inserted).
