/* -*- Mode:Prolog; coding:ISO-8859-1; -*- */

:- use_module(library(ordsets)).

:- use_module(library(lists)).


:- ensure_loaded([logger,
                  abduction_extension,
                  abduction,
                  stats,
                  evaluation,
                  sicstus_support,
                  blackboard,
                  hyp_generation,
                  tal,
                  aux_predicates,
                  post_learning,
                  validation,
                  tests,
                  files]). 


% Used to specify some of the options through the shell 
:- multifile option/2, saved_solution/3, retractme/1, quickscore/2.
:- dynamic command_option/2, examples/1, saved_solution/3, retractme/1, quickscore/2.



%@  @item run_tal(+File, -Solution)
%@  It runs the TALh algorithm on the specification file
%@  File and return the hypothesis in Solution.
%@  It always succeeds but it returns a string about the outcome of the learning
run_tal(File, Solution) :-
        % Run the learner; the disjunction with true makes run_tal total
        % (it succeeds even when run/2 fails, leaving Solution unbound).
        (run(File, Solution), !; true),
        log('Learning terminated'),
        show_statistics,
        % BUGFIX: the original template reused Solution, which is already
        % bound when run/2 succeeds, so findall only collected the saved
        % solutions unifying with it.  A fresh variable collects them all.
        findall((Sol, Score),
                saved_solution(Sol, _Info, Score),
                Solutions),
        finalise_learning(Solutions).


%@  @item run(+File, -Solution)
%@  It runs the TALh algorithm on the specification file
%@  @var{File} and return the hypothesis in @var{Solution}.
%@  If the blackboard option xvalidation_folds is a number > 1, a full
%@  cross-validation pass runs first, then one final pass over all the
%@  examples produces @var{Solution}.
run(File, Solution) :-
        initialise_statistics,                                                  log('Statistics initialised'), 
        process_learning_file(File, Examples, Background, Modedecs, ICs),       log('Learning file processed'),
        % Cut: commit to the single parse of the learning file.
        bb_get_value(xvalidation_folds, XVF), !,
        ((number(XVF), XVF > 1) ->
                cross_v_run(XVF, Examples, Background, Modedecs, ICs),
                % cross_v_run leaves the last fold's state behind:
                % clear it and restore the full example set.
                reset_all,
                assert(examples(Examples)),
                %Runs one last time with all the examples
                pick_strategy_and_run(Examples, Background, Modedecs, ICs, Solution)
        ;
                pick_strategy_and_run(Examples, Background, Modedecs, ICs, Solution)
        ).
        
%@  @item cross_v_run(+XVF, +Examples, +Background, +Modedecs, +ICs)
%@  XVF is the number of folds.
%@  It runs the learning XVF times: for each fold it trains on the other
%@  folds, evaluates the learned solution on the held-out fold and appends
%@  solution/testset/measures terms to the output file.
cross_v_run(XVF, Examples, Background, Modedecs, ICs) :- 
                                                                                log('Cross validation enabled'),
                                                                                print('Number of folds: '), log(XVF),
                                                                                log('-----------------------------------'),
                %Collects the Folds (list of lists of examples)
                get_folds(Examples, XVF, Folds),      
                % NOTE(review): the nl/print/nl calls below look like leftover
                % debug output of the fold count.
                length(Folds, LFo), nl,nl,print(LFo),nl,nl,
                delete_output_file,                                                                               
                forall(nth1(Index, Folds, ThisEx),
                       (
                        % subtract_c is project-defined; presumably it removes
                        % the held-out fold ThisEx from Folds -- TODO confirm
                        % it treats ThisEx as one element, not a list of them.
                        subtract_c(Folds, ThisEx, Training),
                        append_all(Training, TrainingAppended),                 %log('Training set: '), portray_list(TrainingAppended),
                        retractall(examples(_)), assert(examples(TrainingAppended)), 
                        pick_strategy_and_run(TrainingAppended, Background, Modedecs, ICs, SolutionX),
                        % Reset learner state, then evaluate on the held-out fold.
                        reset_all,
                        assert(examples(ThisEx)),                               open_output_file(OutputStream), 
                        evaluate(SolutionX, Evaluation),                        wof([solution(Index, SolutionX)], OutputStream),
                                                                                wof([testset(Index, Evaluation)], OutputStream),
                        measures(Evaluation, Performance),                      wof([measures(Index, Performance)], OutputStream),
                                                                                write(OutputStream, '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n'),
                                                                                close_output_file(OutputStream)
                       )
                      )
                .




%@  @item pick_strategy_and_run(+Examples, +Background, +Modedecs, +ICs, -Solution)
%@  Dispatches to the learning strategy selected on the blackboard:
%@  the cover loop takes precedence, then single-seed, otherwise the
%@  plain (single-shot) strategy.
pick_strategy_and_run(Examples, Background, Modedecs, ICs, Solution) :-
        bb_get_value(cover_loop, true), !,
                                                                                log('Running with cover loop'),
        run_cover_loop(Examples, Background, Modedecs, ICs, Solution).
pick_strategy_and_run(Examples, Background, Modedecs, ICs, Solution) :-
        bb_get_value(single_seed, true), !,
                                                                                log('Running with single seed'),
        run_ss_on(Examples, Background, Modedecs, ICs, Solution).
pick_strategy_and_run(Examples, Background, Modedecs, ICs, Solution) :-
        run_ss_off(Examples, Background, Modedecs, ICs, Solution).
                
%@  @item run_ss_off(+Examples, +Background, +Modedecs, +ICs, -Solution)
%@  Plain strategy: learns from all the examples, or -- when the subset
%@  option is a ratio rather than the atom all -- from a random sample
%@  of integer(|Examples| * Subset) of them.
run_ss_off(Examples, Background, Modedecs, ICs, Solution) :- 
        bb_get_value(subset, Subset),
        (Subset = all ->
                GoalExamples = Examples
        ;
                length(Examples, Total),
                Count is integer(Total * Subset),
                rnd_select(Examples, Count, GoalExamples),
                log('Selected for goal'),
                portray_list(GoalExamples)
        ),
        learn(GoalExamples, Background, Modedecs, ICs, Solution).
        


%@  @item run_ss_on(+Examples, +Background, +Modedecs, +ICs, -Return)
%@  Single-seed strategy: samples integer(NP / Ratio) + 1 of the positive
%@  examples and runs one learning pass per selected seed.  Each pass saves
%@  its partial solutions in saved_solution/3; combine_solutions/1 merges
%@  them into @var{Return}.
run_ss_on(Examples, Background, Modedecs, ICs, Return) :-
        %find out how many positive examples there are
        %and sample NToSelect of them as seeds
        bb_get_value(single_seed_ratio, Ratio),
        select_positives(Examples, PositiveExamples, NP),
        NToSelect is integer(NP / Ratio) + 1,
        rnd_select(PositiveExamples,NToSelect,Selected),
        log('Selected seeds'),
        portray_list(Selected),
        forall(member(CurrentExample, Selected),        %for each %run the learning
                               (
                                print('Executing learning on seed '), log(CurrentExample),
                                learn_dg([CurrentExample], Examples, Background, Modedecs, ICs, _Solution)
                                )
        ),
        %all the solutions are in saved_solution
        %combine solutions
        combine_solutions(Return).
 

%@  @item run_cover_loop(+Examples, +Background, +Modedecs, +ICs, -Solution)
%@  Cover-loop entry point: iterates run_cover_loop/8 starting from the
%@  full example set and an empty partial solution, then re-scores the
%@  final theory on all the examples and stores it as the only
%@  saved_solution/3 fact.
run_cover_loop(Examples, Background, Modedecs, ICs, Solution) :-
        %We start with the full set of examples
        run_cover_loop(Examples, Examples, Background, Modedecs, ICs, Solution, [], 1),
        % Drop the per-iteration solutions; only the combined one is kept.
        retractall(saved_solution(_, _, _)),
        theory_test_with_asserted_bg(Solution, Examples, Evaluation),
        heuristic_score(Solution, Evaluation, _Info, Score),
        % Second argument left unbound on purpose: the Info slot is unused here.
        assert(saved_solution(Solution, _, Score)),
        log('Final solutions:'),
        portray_list(Solution),
        log(Score).


%@  @item run_cover_loop(+ExamplesNow, +Examples, +Background, +Modedecs, +ICs,
%@                       -Solution, +CurrentSolution, +N)
%@  One iteration of the cover loop.  ExamplesNow are the examples still
%@  to cover, Examples the full set, CurrentSolution the rules accumulated
%@  so far and N the iteration counter (bounded by max_cl_iterations).
run_cover_loop(ExamplesNow, Examples, Background, Modedecs, ICs, Solution, CurrentSolution, N) :-        
        log('Executing learning cycle '),
        log('Current full solution '),
        portray_list(CurrentSolution),
        %ExamplesNow are the examples that are still to cover
        %We want to filter out the positive examples
        %That are used for model generation (in the goal)
        %And use all of them to calculate the score
        reset_all,
        assert(examples(ExamplesNow)),
        print('\nExamples selected n '),
        length(ExamplesNow, L),
        log(L),
        select_positives(ExamplesNow, PositiveExamples, NP),
        bb_get_value(max_cl_iterations, MAX),
        ( (NP > 0, N =< MAX)   ->
                % One random positive example seeds this iteration.
                rnd_select(PositiveExamples,1,Selected),
                log('Selected seeds'),
                portray_list(Selected),
                %The learning starts now
                learn_dg(Selected, Examples, Background, Modedecs, ICs, _Solution),
                best_solution(BestSolutionHere, OutTest, Score),
                print('Best solution at this iteration:\n'), log(BestSolutionHere),
                append(BestSolutionHere, CurrentSolution, SolutionNext),
                bb_get_value(loop_threshold, LT),
                (Score > LT ->
                        % NOTE(review): this message is logged when Score > LT;
                        % confirm whether the message or the comparison is
                        % inverted (depends on whether lower scores are better).
                        log('Under threshold'),
                        Solution = SolutionNext
                 ;
                        % Optionally fold the new rules into the background so
                        % later iterations can build on them.
                        (bb_get_value(assert_rule_in_cover_loop,true) ->
                                append(BestSolutionHere, Background, NewBackground)
                        ;
                                NewBackground = Background
                         ),
                        unentailedEx(OutTest, UnentailedExamples),
                        !,
                        NN is N + 1,
                        run_cover_loop(UnentailedExamples, Examples, NewBackground, Modedecs, ICs, Solution, SolutionNext, NN)
                )
        ;
                 log('No more positive examples or maximum iterations reached'),
                 Solution = CurrentSolution
         ).
                                                
                
        


%@  @item process_learning_file(+File, -Examples, -Background, -Modedecs, -ICs)
%@  Initialises the learning. Given a @var{File}, it derives the relevant entities for the learning.
%@  It also merges the default options with those declared in the file and prints a summary of the learning instance.
%@  Options are asserted using the blackboard.
process_learning_file(File, Examples, Background, Modedecs, ICs) :-
        % Load defaults first so file options can override them below.
        bb_defaults,
        compile(File),                          log('Learning file compiled'),
        load_ilp_file(File, Examples, Background, Modedecs, ICs, Options),     
        retractall(examples(_)), assert(examples(Examples)), 
        bb_options(Options),
        collect_options(FullOptions),
        print_ilp_instance(Examples, Background, Modedecs, ICs, FullOptions).
            



%@  @item bb_defaults
%@  Collects all the default options in the default.txt file and put them in the blackboard.
%@  NOTE(review): despite the example below, bb_options/1 matches
%@  default_option(Key, Value) terms -- confirm the format of default.txt.
bb_defaults :-
        file_to_list('default.txt', OptionList),         %e.g. OptionList = [option(max_body_literals, 10), option(max_num_rules, 10)] 
        bb_options(OptionList).


%@  @item bb_options(+Options)
%@  Stores every default_option(Key, Value) term of Options on the
%@  blackboard, then re-applies the option/2 and command_option/2
%@  overrides so they always win over defaults.
bb_options(Options) :-        
        forall(member(default_option(Key, Value), Options), bb_set_value(Key, Value)),
        override_options.

%@  @item override_options
%@  Re-asserts on the blackboard the options declared in the learning
%@  file (option/2, multifile) and those given on the shell command line
%@  (command_option/2); command-line options are applied last and win.
override_options :-
        findall((Option, Value), option(Option, Value), ListNew),
        findall((COption, CValue), command_option(COption, CValue), ListNew2),
        override_options(ListNew),
        override_options(ListNew2).


%@  @item override_options(+Overrides)
%@  Applies a list of (Key, Value) pairs to the blackboard, in order;
%@  later entries overwrite earlier ones for the same key.
override_options([]).
override_options([(Key, Value) | Remaining]) :-
        bb_set_value(Key, Value),
        override_options(Remaining).


%@  @item collect_options(-Options)
%@  Reads every known option key from the blackboard and returns them as
%@  a list of Key(Value) terms, in a fixed canonical order.  Fails if any
%@  key is missing from the blackboard (same as reading it directly).
collect_options(Options) :-
        Keys = [max_body_literals,
                max_num_rules,
                max_depth,
                debug,
                stats,
                close_output,
                timeout,
                number_of_solutions,
                timeout_for_test,
                strategy,
                single_seed,
                single_seed_ratio,
                solution_pool,
                xvalidation_folds,
                ic_check,
                loop_threshold,
                max_cl_iterations,
                score_before_condition,
                subset,
                ordering,
                assert_rule_in_cover_loop,
                allow_repetition_of_producers],
        collect_option_values(Keys, Options).

% collect_option_values(+Keys, -Options)
% For each blackboard Key, wrap its stored value as a Key(Value) term.
collect_option_values([], []).
collect_option_values([Key | Keys], [Option | Options]) :-
        bb_get_value(Key, Value),
        Option =.. [Key, Value],
        collect_option_values(Keys, Options).





%@  @item load_ilp_file(+File, -Examples, -Background, -Modedecs, -ICs, -Options)
%@      From a file File that specifies the learning task, it collects
%@      all the arguments (background knowledge, mode declarations, examples...).
%@      The background knowledge is what is left in the file after the rest (Modedecs, Examples, ICs, Options) is taken away.
%@      NOTE(review): list_to_ord_set removes duplicate clauses and sorts
%@      them by standard order, so the background loses its file order --
%@      confirm clause order is irrelevant downstream.

load_ilp_file(File, Examples, Background, Modedecs, ICs, Options) :-
                file_to_list(File, ILPFileList),
                list_to_ord_set(ILPFileList, ILPFile),
                % example/N terms are the (labelled) training examples.
                findall(
                        Ex, 
                        (member(Ex, ILPFile), functor(Ex, example, _)),
                        Examples
                ),
                findall(
                        Modeh, 
                        (member(Modeh, ILPFile), functor(Modeh, modeh, _)),
                        Modehs
                ),
                findall(
                        Modeb, 
                        (member(Modeb, ILPFile), functor(Modeb, modeb, _)),
                        Modebs
                ), 
                append(Modehs, Modebs, Modedecs), 
                % Integrity constraints are clauses of the form (ic :- Body).
                findall(
                        IC, 
                        (member(IC, ILPFile), IC = (ic :- _)),
                        ICs
                ),      
                findall(
                        Option, 
                        (member(Option, ILPFile), functor(Option, option, _)),
                        Options
                ), 
                % Background = file contents minus everything extracted above,
                % with a second filter dropping any remaining ic clauses
                % (ord_subtract may miss variants of the collected ICs).
                append([Examples, Modedecs, ICs, Options], TempList),
                list_to_ord_set(TempList, TempListOrd),
                ord_subtract(ILPFile, TempListOrd, BackgroundP),
                findall(
                        BGs, 
                        (member(BGs, BackgroundP), BGs \= (ic :- _)),
                        Background
                ).








%@  @item finalise_learning(+Solutions)
%@  Writes the collected (Solution, Score) pairs to the output file.
finalise_learning(Solutions) :-
                log('\nWriting solutions\n'), log('######################################'),
                make_output_file,
                print_solutions(Solutions).



%@  @item reset_all
%@  Retracts all per-run dynamic state (cached scores, saved solutions,
%@  command-line options, examples, bookkeeping facts) and delegates the
%@  rest of the cleanup to clean_up/0.
reset_all :- 
        % BUGFIX: this file declares quickscore/2 dynamic/multifile (not
        % quick_score/2), so the original retractall(quick_score(_,_))
        % never cleared the cached scores.  Retract the declared predicate.
        retractall(quickscore(_,_)),
        retractall(saved_solution(_,_,_)),
        retractall(command_option(_,_)),
        retractall(examples(_)),
        retractall(retractme(_)),
        retractall(saved_best_solution(_,_,_)),
        retractall(modename(_,_)),
        retractall(for_the_top(_)),
        retractall(terminate(_)),
        clean_up.

