/*
 * LearningAPIMain.java
 *
 * Yaoyong Li 22/03/2007
 *
 * $Id: LearningAPIMain.java, v 1.0 2007-03-22 12:58:16 +0000 yaoyong $
 */
package gate.learning;

import gate.Document;
import gate.Factory;
import gate.ProcessingResource;
import gate.creole.AbstractLanguageAnalyser;
import gate.creole.ExecutionException;
import gate.creole.ResourceInstantiationException;
import gate.util.Files;
import gate.util.GateException;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.OutputStreamWriter;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Date;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.io.output.FileWriterWithEncoding;
import org.supercsv.io.CsvListWriter;
import org.supercsv.io.ICsvListWriter;
import org.supercsv.prefs.CsvPreference;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;
import weka.core.Range;
import weka.core.converters.ArffSaver;
import weka.core.converters.ConverterUtils.DataSource;

/**
 * The main object of the ML API. It performs initialisation, reads parameter
 * values from the GUI, and runs the selected learning mode. It can also be
 * called from Java code, as an API (a GATE class), to use this learning API.
 */
@SuppressWarnings("serial")
public class LearningAPIMain2 extends AbstractLanguageAnalyser
        implements
        ProcessingResource {

    /** URL of the configuration file; the model(s) are saved in its directory. */
    private URL configFileURL;
    /**
     * Name of the AnnotationSet containing the annotations specified in the
     * DATASET element of the configuration file.
     */
    private String inputASName;
    /**
     * The annotationSet for the resulting annotations produced by applying models.
     */
    private String outputASName;
    /**
     * Run-time parameter learningMode, having three modes: training, application,
     * and evaluation.
     */
    private RunMode learningMode;
    /** Learning settings specified in the configuration file. */
    private LearningEngineSettings learningSettings;
    /**
     * The lightweight learning object for getting the features, training and
     * application.
     */
    LightWeightLearningApi lightWeightApi = null;
    /** The File for NLP learning Log. */
    private File logFile;
    /** Used by lightWeightApi, specifying training or application. */
    private boolean isTraining;
    /** Subdirectory for storing the data file produced by learning api. */
    private File workingDirectoryResults = null;
    /** Subdirectory used to store temporary files used by APPLICATION mode. */
    private File applicationTempDir;
    /** Doing evaluation. */
    private EvaluationBasedOnDocs evaluation;
    /** The MI learning information object. */
    MiLearningInformation miLearningInfor = null;
    /** The three counters for batch application (document window boundaries). */
    int startDocIdApp;
    int endDocIdApp;
    int docIntervalForClassification;
    /** Global position counter for instances; reset to 1 in init(). */
    public static int positionInstance;

    public void setConfigFileURL(URL workingDirectory) {
        this.configFileURL = workingDirectory;
    }

    public URL getConfigFileURL() {
        return this.configFileURL;
    }

    public void setInputASName(String iasn) {
        this.inputASName = iasn;
    }

    public String getInputASName() {
        return this.inputASName;
    }

    public void setOutputASName(String iasn) {
        this.outputASName = iasn;
    }

    public String getOutputASName() {
        return this.outputASName;
    }

    public RunMode getLearningMode() {
        return this.learningMode;
    }

    public void setLearningMode(RunMode learningM) {
        this.learningMode = learningM;
    }

    public EvaluationBasedOnDocs getEvaluation() {
        return evaluation;
    }

    public EvaluationBasedOnDocs setEvaluation(EvaluationBasedOnDocs eval) {
        return this.evaluation = eval;
    }

    /** Trivial constructor. */
    public LearningAPIMain2() {
        // do nothing
    }

    /**
     * Initialize this resource, and return it.
     *
     * Validates the configuration file URL, loads the learning settings,
     * creates the results subdirectory, and initialises the log service and
     * the lightweight learning API.
     *
     * @throws ResourceInstantiationException if the configuration file URL is
     *         null, is not a file: URL, does not exist, or if the settings
     *         cannot be loaded.
     */
    @Override
    public gate.Resource init() throws ResourceInstantiationException {
        fireStatusChanged("Checking and reading learning settings!");
        // here all parameters need to be checked
        // check for the model storage directory
        if (configFileURL == null) {
            throw new ResourceInstantiationException(
                    "WorkingDirectory is required to store the learned model and cannot be null");
        }
        // it is not null, check it is a file: URL
        if (!"file".equals(configFileURL.getProtocol())) {
            throw new ResourceInstantiationException(
                    "WorkingDirectory must be a file: URL");
        }
        // Get the working directory which the configuration
        // file resides in.
        File workingDirectory = null;
        try {
            workingDirectory = new File(configFileURL.toURI()).getParentFile();
        } catch (URISyntaxException use) {
            // fall back to GATE's URL-to-File conversion for URLs that are not
            // strictly valid URIs (e.g. containing spaces)
            workingDirectory = Files.fileFromURL(configFileURL).getParentFile();
        }
        // it must be a directory
        if (!workingDirectory.isDirectory()) {
            throw new ResourceInstantiationException(workingDirectory
                    + " must be a reference to directory");
        }
        if (LogService.maxVerbosityLevel > 0) {
            System.out.println("Configuration File=" + configFileURL.toString());
        }
        try {
            if (!new File(configFileURL.toURI()).exists()) {
                throw new ResourceInstantiationException(
                        "Error: the configuration file specified does not exist!!");
            }
        } catch (URISyntaxException e1) {
            e1.printStackTrace();
            throw new ResourceInstantiationException(e1);
        }
        try {
            // Load the learning setting file
            // by reading the configuration file
            learningSettings =
                    LearningEngineSettings.loadLearningSettingsFromFile(configFileURL);
        } catch (Exception e) {
            throw new ResourceInstantiationException(e);
        }
        try {
            // Create the sub-directory of the working directory where the data
            // files will be stored in
            if (LogService.maxVerbosityLevel > 0) {
                System.out.println("\n\n*************************");
                System.out.println("A new session for NLP learning is starting.\n");
            }
            workingDirectoryResults =
                    new File(workingDirectory, gate.learning.ConstantParameters.SUBDIRFORRESULTS);
            workingDirectoryResults.mkdir();
            logFile =
                    new File(new File(workingDirectory, ConstantParameters.SUBDIRFORRESULTS),
                    ConstantParameters.FILENAMEOFLOGFILE);
            LogService.init(logFile, true, learningSettings.verbosityLogService);
            StringBuffer logMessage = new StringBuffer();
            logMessage.append("\n\n*************************\n");
            logMessage.append("A new session for NLP learning is starting.\n");
            // adding WorkingDirectory parameter in the benchmarkingFeatures
            logMessage.append("The initiliased time of NLP learning: "
                    + new Date().toString() + "\n");
            logMessage.append("Working directory: " + workingDirectory.getAbsolutePath() + "\n");
            logMessage.append("The feature files and models are saved at: "
                    + workingDirectoryResults.getAbsolutePath() + "\n");
            // Call the lightWeightLearningApi
            lightWeightApi = new LightWeightLearningApi(workingDirectory);
            // more initialisation
            lightWeightApi.furtherInit(workingDirectoryResults, learningSettings);
            logMessage.append("Learner name: "
                    + learningSettings.learnerSettings.getLearnerName() + "\n");
            logMessage.append("Learner nick name: "
                    + learningSettings.learnerSettings.getLearnerNickName() + "\n");
            logMessage.append("Learner parameter settings: "
                    + learningSettings.learnerSettings.implementationName + "\n");
            logMessage.append("Surroud mode (or chunk learning): "
                    + learningSettings.surround);
            LogService.logMessage(logMessage.toString(), 1);
        } catch (Exception e) {
            throw new ResourceInstantiationException(e);
        }
        docIntervalForClassification = learningSettings.docIntervalForClassification;
        LearningAPIMain2.positionInstance = 1;
        fireProcessFinished();
        return this;
    } // init()

    /**
     * Run the resource over the current document of the corpus.
     *
     * This PR is executed once per document. On the first document it
     * initialises the per-run state; in CLASSIFICATION mode it applies the
     * stored model to each completed window of {@code docIntervalForClassification}
     * documents; on the last document it performs the batch action selected by
     * {@code learningMode} (feature extraction, training, evaluation or
     * whole-corpus classification).
     *
     * @throws ExecutionException if the corpus is null or empty, or if the
     *         learning mode is undefined.
     */
    @Override
    public void execute() throws ExecutionException {
        // See if the corpus is provided
        if (corpus == null) {
            throw new ExecutionException("Provided corpus is null!");
        }
        if (corpus.size() == 0) {
            throw new ExecutionException("No Document found in corpus!");
        }

        int positionDoc = corpus.indexOf(document);

        // if it's the first document in the corpus, init variables
        if (positionDoc == 0) {
            lightWeightApi.inputASName = inputASName;
            lightWeightApi.outputASName = outputASName;
            startDocIdApp = 0;
            endDocIdApp = 0;

            if (LogService.maxVerbosityLevel > 0) {
                System.out.println("Pre-processing the " + corpus.size()
                        + " documents...");
            }
            try {
                LogService.init(logFile, true, learningSettings.verbosityLogService);
                LogService.logMessage("\n*** A new run starts.", 1);
                LogService.logMessage(
                        "\nThe execution time (pre-processing the first document): "
                        + new Date().toString(), 1);
                if (LogService.maxVerbosityLevel > 0) {
                    System.out.println("Learning starts.");
                    System.out.println("For the information about this learning see the log file "
                            + workingDirectoryResults.getAbsolutePath()
                            + File.separator
                            + ConstantParameters.FILENAMEOFLOGFILE);
                    System.out.println("The number of threads used is "
                            + learningSettings.numThreadUsed);
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        // Classification: apply the model to the documents of the current window
        if (learningMode.equals(RunMode.CLASSIFICATION)) {
            ++endDocIdApp;
            if (endDocIdApp - startDocIdApp == docIntervalForClassification) {
                try {
                    // first checking if the model file is available or not
                    String modelFileName =
                            workingDirectoryResults.toString() + File.separator
                            + ConstantParameters.FILENAME_OF_MODEL;
                    if (!new File(modelFileName).exists()) {
                        System.out.println("Warning: the model is not available at the moment!!");
                        return;
                    }

                    // Load model
                    InputStream is = new FileInputStream(modelFileName);
                    ObjectInputStream objectInputStream = new ObjectInputStream(is);
                    Classifier classifierModel = (Classifier)objectInputStream.readObject();
                    objectInputStream.close();

                    // Check if the feature file exists
                    String arffFeatureFileName = workingDirectoryResults.toString() + File.separator
                            + ConstantParameters.FILENAME_FEATURES_ARFF;
                    if (!new File(arffFeatureFileName).exists()) {
                        System.out.println("Warning: the feature file is not available at the moment!");
                        LogService.logMessage("Warning: the feature file is not available at the moment!", 1);
                        return;
                    }

                    // Open the feature file and create an instance header from it.
                    // NOTE(review): the class attribute is assumed to be the first
                    // column (index 0), consistent with the TRAINING branch below.
                    DataSource arffSource = new DataSource(arffFeatureFileName);
                    Instances instancesHeader = arffSource.getStructure();
                    if(instancesHeader.classIndex() == -1){
                        instancesHeader.setClassIndex(0);
                    }

                    // Boolean variable to set whether to append or not, used by CSV and Sequence ARFF feature file
                    // If at the first document, don't append. Otherwise, append.
                    boolean isAppend = true;
                    if(positionDoc == 0){
                        isAppend = false;
                    }

                    // CSV file to print classifier predictions
                    ICsvListWriter outCsvPredictions = new CsvListWriter(
                            new FileWriterWithEncoding(
                            new File(
                                workingDirectoryResults,
                                ConstantParameters.FILENAME_PREDICTIONS_CSV),
                            Charset.defaultCharset(), isAppend),
                            CsvPreference.NO_COMMENT_PREFERENCE);

                    // File to print sequence ARFF sequence feature file
                    BufferedWriter arffSequenceFeatureFile =
                            new BufferedWriter(
                            new OutputStreamWriter(
                            new FileOutputStream(
                            new File(
                            workingDirectoryResults.toString(),
                            ConstantParameters.FILENAME_SEQUENCE_FEATURES_ARFF), isAppend),
                            Charset.defaultCharset()));

                    // If at the first document, print instances header to sequence ARFF sequence feature file
                    if(positionDoc == 0){
                        arffSequenceFeatureFile.write(instancesHeader.toString());
                    }

                    // Iterate over each document of the current window. For each one:
                    // - print instances to Sequence ARFF file
                    // - print predicted instances to CSV file
                    // - print prediction result to document as annotations
                    for (int i = startDocIdApp; i < endDocIdApp; ++i) {
                        Document toProcess = (Document) corpus.get(i);
                        // BUGFIX: process the i-th document of the window, not the
                        // current document over and over (matches the
                        // annotations2NLPFeatures loop in EXTRACT_FEATURE mode).
                        lightWeightApi.annotations2Instances(toProcess, i, outCsvPredictions, arffSequenceFeatureFile, isTraining, learningSettings, instancesHeader, classifierModel);

                        if (toProcess.getDataStore() != null && corpus.getDataStore() != null) {// (isDatastore)
                            corpus.getDataStore().sync(corpus);
                        }
                    }

                    // Close sequence ARFF sequence feature file
                    arffSequenceFeatureFile.flush();
                    arffSequenceFeatureFile.close();

                    // Close CSV file
                    outCsvPredictions.close();

                    // NOTE(review): leftover debug trace writing to "out.txt" in the
                    // current working directory; consider removing for production.
                    OutputStreamWriter fstream =
                            new OutputStreamWriter(
                            new FileOutputStream(
                                new File("out.txt"),
                                true),
                            Charset.defaultCharset());
                    BufferedWriter out = new BufferedWriter(fstream);
                    out.write("File: " + positionDoc + "; instanceHeader size: " + instancesHeader.numInstances() + "\n");
                    out.close();

                    // advance the window
                    startDocIdApp = endDocIdApp;
                } catch (ClassNotFoundException ex) {
                    Logger.getLogger(LearningAPIMain2.class.getName()).log(Level.SEVERE, null, ex);
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (GateException e) {
                    e.printStackTrace();
                } catch (Exception ex) {
                    Logger.getLogger(LearningAPIMain2.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }

        // If we've reached the last document, do batch process
        if (positionDoc == corpus.size() - 1) {
            // first select the training data and test data according to the
            // learning setting
            // set the inputASName in here, because it is a runtime parameter
            int numDoc = corpus.size();
            try {
                LogService.init(logFile, true, learningSettings.verbosityLogService);
                LogService.logMessage("The learning start at " + new Date().toString(),
                        1);
                LogService.logMessage("The number of documents in dataset: " + numDoc,
                        1);
                // CSV writer for the NLP feature vectors (EXTRACT_FEATURE mode)
                ICsvListWriter outCsvFeatures = null;

                switch (learningMode) {
                    // if only need the feature data
                    case EXTRACT_FEATURE:
                        EvaluationBasedOnDocs.emptyDatafile(workingDirectoryResults, true);
                        LogService.logMessage("** Producing the feature files only!", 1);

                        // create CSV feature file
                        outCsvFeatures = new CsvListWriter(
                                new FileWriterWithEncoding(
                                new File(
                                    workingDirectoryResults,
                                    ConstantParameters.FILENAME_FEATURES_CSV),
                                "UTF-8"),
                                CsvPreference.NO_COMMENT_PREFERENCE);

                        for (int i = 0; i < numDoc; ++i) {
                            Document toProcess = (Document) corpus.get(i);
                            lightWeightApi.annotations2NLPFeatures(toProcess, i,
                                    outCsvFeatures, isTraining, learningSettings);
                            if (toProcess.getDataStore() != null
                                    && corpus.getDataStore() != null) {
                                Factory.deleteResource(toProcess);
                            }
                        }
                        outCsvFeatures.close();
                        LogService.logMessage("CSV feature file created.", 1);

                        // create ARFF file from CSV file
                        DataSource csvSource = new DataSource(workingDirectoryResults.toString() + File.separator +
                                    ConstantParameters.FILENAME_FEATURES_CSV);
                        ArffSaver arffSaver = new ArffSaver();
                        arffSaver.setInstances(csvSource.getDataSet());
                        arffSaver.setFile(new File(workingDirectoryResults,ConstantParameters.FILENAME_FEATURES_ARFF));
                        arffSaver.writeBatch();
                        LogService.logMessage("ARFF feature file created.", 1);

                        break;

                    // Create training model
                    case TRAINING:
                        // using the java code for training
                        String learnerType = learningSettings.learnerSettings.learnerType;
                        if(learnerType.equalsIgnoreCase("weka")){
                            // Check if the feature file exists
                            String arffFeatureFileName = workingDirectoryResults.toString() + File.separator
                                    + ConstantParameters.FILENAME_FEATURES_ARFF;
                            if (!new File(arffFeatureFileName).exists()) {
                                System.out.println("Warning: the feature file is not available at the moment!");
                                LogService.logMessage("Warning: the feature file is not available at the moment!", 1);
                                return;
                            }

                            // Open the ARFF feature file as the training set; the
                            // class attribute defaults to the first column.
                            DataSource arffSource = new DataSource(arffFeatureFileName);
                            Instances trainData = arffSource.getDataSet();
                            if(trainData.classIndex() == -1){
                                trainData.setClassIndex(0);
                            }

                            lightWeightApi.trainingWeka(trainData, learningSettings);
                        }else{
                            System.out.println("Error! Wrong learner type.");
                            LogService.logMessage("Error! Wrong learner type.", 0);
                        }

                        break;

                    case EVALUATION:
                        if (LogService.maxVerbosityLevel > 0) {
                            System.out.println("** Evaluation mode:");
                        }
                        LogService.logMessage("** Evaluation mode:", 1);
                        evaluation =
                                new EvaluationBasedOnDocs(corpus, workingDirectoryResults, inputASName);
                        evaluation.evaluation(learningSettings, lightWeightApi);

                        break;

                    case CLASSIFICATION:
                        // first checking if the model file is available or not
                        String modelFileName =
                                workingDirectoryResults.toString() + File.separator
                                + ConstantParameters.FILENAME_OF_MODEL;
                        if (!new File(modelFileName).exists()) {
                            System.out.println("Warning: the model is not available at the moment!!");
                            return;
                        }

                        // Load model
                        InputStream is = new FileInputStream(modelFileName);
                        ObjectInputStream objectInputStream = new ObjectInputStream(is);
                        Classifier classifierModel = (Classifier)objectInputStream.readObject();
                        objectInputStream.close();

                        // File to print classifier output
                        BufferedWriter outClassifier  =
                                new BufferedWriter(
                                new OutputStreamWriter(
                                new FileOutputStream(
                                new File(
                                workingDirectoryResults.toString(),
                                ConstantParameters.FILENAME_CLASSIFIER_OUTPUT)),
                                Charset.defaultCharset()));
                        System.out.println("Def charset: " + Charset.defaultCharset());

                        // Load sequence data (class index 0, matching the header
                        // written during per-window classification above)
                        DataSource sequenceDataSource = new DataSource(workingDirectoryResults.toString() +
                            File.separator +
                            ConstantParameters.FILENAME_SEQUENCE_FEATURES_ARFF);
                        Instances sequenceData = sequenceDataSource.getDataSet(0);

                        // Evaluate the stored model on the sequence data
                        Evaluation eval = new Evaluation(sequenceData);
                        eval.evaluateModel(classifierModel, sequenceData);

                        outClassifier.write(eval.toSummaryString(true) + "\n");
                        outClassifier.write(eval.toMatrixString() + "\n");
                        outClassifier.write(eval.toClassDetailsString() + "\n");
                        StringBuffer predictionsStringBuffer = new StringBuffer();
                        Evaluation.printClassifications(classifierModel, sequenceData, sequenceDataSource, 1, new Range("first-last"), true, predictionsStringBuffer);
                        outClassifier.write("=== Predictions ===\n" + predictionsStringBuffer + "\n");

                        outClassifier.flush();
                        outClassifier.close();

                        break;

                    default:
                        throw new GateException("The learning mode is not defined!");
                }
                LogService.logMessage("This learning session finished!.", 1);
                // LogService.close();
            } catch (IOException e) {
                e.printStackTrace();
            } catch (GateException e) {
                e.printStackTrace();
            } catch (Exception ex) {
                Logger.getLogger(LearningAPIMain2.class.getName()).log(Level.SEVERE, null, ex);
            }
            if (LogService.maxVerbosityLevel > 0) {
                System.out.println("This learning session finished!");
            }
        } // end of learning (position=corpus.size()-1)
    }

    /** Print out the information for featureData only option. */
    private void displayDataFilesInformation() {
        StringBuffer logMessage = new StringBuffer();
        logMessage.append("NLP features for all the documents are in the file"
                + workingDirectoryResults.getAbsolutePath() + File.separator
                + ConstantParameters.FILENAME_FEATURES_CSV + "\n");
        logMessage.append("Feature vectors in sparse format are in the file"
                + workingDirectoryResults.getAbsolutePath() + File.separator
                + ConstantParameters.FILENAMEOFFeatureVectorData + "\n");
        logMessage.append("Label list is in the file" + workingDirectoryResults.getAbsolutePath()
                + File.separator + ConstantParameters.FILENAMEOFLabelList + "\n");
        logMessage.append("NLP features list is in the file"
                + workingDirectoryResults.getAbsolutePath() + File.separator
                + ConstantParameters.FILENAMEOFNLPFeatureList + "\n");
        logMessage.append("The statistics of entity length for each class is in the file"
                + workingDirectoryResults.getAbsolutePath()
                + File.separator
                + ConstantParameters.FILENAMEOFChunkLenStats + "\n");
        System.out.println(logMessage.toString());
        LogService.logMessage(logMessage.toString(), 1);
    }

    /**
     * Determine the directory used to store temporary files when running in
     * APPLICATION mode. Created lazily; falls back to the results directory
     * if the temp directory cannot be created.
     */
    protected File getApplicationTempDir() {
        if (applicationTempDir == null) {
            LogService.logMessage(
                    "Creating temp directory for application-mode files", 1);
            try {
                // createTempFile reserves a unique name; delete the file and
                // recreate it as a directory
                applicationTempDir = File.createTempFile("appl", ".tmp", workingDirectoryResults);
                applicationTempDir.delete();
                if (!applicationTempDir.mkdir()) {
                    throw new IOException(
                            "Error creating directory " + applicationTempDir);
                }
            } catch (IOException ioe) {
                LogService.logMessage("Could not create temporary directory for "
                        + "application-mode temp files, using " + workingDirectoryResults, 1);
                applicationTempDir = workingDirectoryResults;
            }
        }
        return applicationTempDir;
    }

    /**
     * Delete the temporary directory for application-mode temp files when this
     * resource is deleted.
     */
    @Override
    public void cleanup() {
        if (applicationTempDir != null && !applicationTempDir.equals(workingDirectoryResults)) {
            deleteRecursively(applicationTempDir);
        }
    }

    /**
     * Delete a file or directory. If the argument is a directory, delete its
     * contents first, then remove the directory itself.
     */
    private void deleteRecursively(File fileOrDir) {
        if (fileOrDir.isDirectory()) {
            for (File f : fileOrDir.listFiles()) {
                deleteRecursively(f);
            }
        }
        if (!fileOrDir.delete()) {
            LogService.logMessage("Couldn't delete "
                    + (fileOrDir.isDirectory() ? "directory " : "file ") + fileOrDir,
                    1);
        }
    }
}
