package biofilter.pipelines;

import biofilter.exceptions.InvalidRecordException;
import biofilter.filters.JoinSplitFilterBase;
import biofilter.records.FileNameRecord;
import biofilter.records.Record;
import biofilter.sources.RecordsFile;

import java.util.HashMap;
import java.util.Iterator;
import java.util.Set;

/**
 * This is an example of the use of a split join filter. There are two classes
 * in this illustration that we will focus on. The first is the concrete filter
 * that extends SplitJoinFilterBase. The second is the pipeline Class that sets
 * up the pipeline and runs it.
 * 
 * Let's take a look at what is going on here. The pipeline begins with a
 * concrete record source: the RecordsFile fnrf, which implements the
 * RecordsSource interface.
 *
 * Our pipeline will start with a file of file names, one file name per line.
 * We can use our RecordsFile source and FileNameRecord from previous examples.
 * Our pipeline will stream the list of filenames producing a stream of file
 * types. Since we don't want to see the same file type in the final stream,
 * we will have to get all the file names before returning file types. In
 * other words, we need a JoinSplitFilter (diagram of JoinSplit m <-> n
 * relationship).
 *
 * The first task is to write a new Record that represents a file type. We call
 * this FileTypeRecord. It is kept very simple, all it needs to do is implement
 * the Record interface and store the file type as a string. The next task is to
 * write the FileNamesToFileTypesFilter filter that will extend the
 * JoinSplitFilterBase. Lastly, using the already implemented RecordsFile and
 * FileNameRecord, we implement the pipeline.
 *
 * So, let's get started.
 *
 */
/**
 * A Record that represents a file type (extension), together with the number
 * of times that type occurred in the input stream.
 */
class FileTypeRecord implements Record {

    // The file type string, e.g. "txt"; null until set() is called.
    private String type = null;

    // Occurrence count for this file type; null until setOccurrances() is
    // called. Made private (was package-private) -- all callers in this file
    // go through the accessors. The accessor names keep the historical
    // "occurrances" spelling so existing callers continue to compile.
    private Integer occurrances = null;

    /** @return the file type string, or null if never set. */
    public String get() {
        return type;
    }

    /**
     * Stores the file type string.
     * @param r the file type, e.g. "txt".
     */
    public void set(String r) throws IllegalArgumentException {
        this.type = r;
    }

    /* These are added for additional functionality */

    /** @param o the occurrence count for this file type. */
    public void setOccurrances(Integer o) {
        this.occurrances = o;
    }

    /** @return the occurrence count, or null if never set. */
    public Integer getOccurrances () {
        return this.occurrances;
    }
}

/**
 * The main task in creating a JoinSplitFilter is writing the transformRecord()
 * method. Most of the rest of the logic for this type of filter is contained
 * in the JoinSplitFilterBase class.
 * @author Thomas S. Brettin
 */
class FileNamesToFileTypesFilter extends JoinSplitFilterBase {

    /**
     * Collapses the queued stream of file names into a stream of distinct
     * file types (extensions), each annotated with its occurrence count.
     *
     * transformRecord is called from this object's getRecord method, which is
     * implemented in JoinSplitFilterBase. (BTW, transformRecord is always
     * called from the object's getRecord method.) When someone calls this
     * object's getRecord for the first time, getRecord calls getRecord on its
     * source in a while loop (hence the join part), filling the internal
     * inputQueue with Records from this filter's source.
     *
     * This method then drains the inputQueue, tallying how often each file
     * type appears, fills the outputQueue with one FileTypeRecord per
     * distinct type (hence the split part), and returns the first queued
     * Record to the caller, getRecord().
     *
     * @param r always null, as specified in the call to transformRecord from
     *          the getRecord implementation in the super class; the real
     *          input comes from the inputQueue.
     * @return a transformed record (the first entry of the outputQueue).
     * @throws InvalidRecordException if a queued file name has no extension
     *         (no "." followed by at least one character).
     */
    @Override
    protected Record transformRecord(Record r) throws InvalidRecordException {

        HashMap<String, Integer> typeCounts = new HashMap<String, Integer>();
        while (!this.inputQueue.isEmpty()) {

            FileNameRecord fnr = (FileNameRecord) this.inputQueue.removeFirst();
            String fileName = fnr.get();

            // The file type is everything after the LAST dot, so that
            // "archive.tar.gz" yields "gz". (The previous split()/tokens[1]
            // code returned the segment after the FIRST dot, and relied on an
            // ArrayIndexOutOfBoundsException -- caught as Throwable -- to
            // detect dotless names.)
            int dot = fileName.lastIndexOf('.');
            if (dot < 0 || dot == fileName.length() - 1) {
                throw new InvalidRecordException(
                        "no file type in filename: " + fileName, true);
            }
            String type = fileName.substring(dot + 1);

            Integer freq = typeCounts.get(type);
            typeCounts.put(type, (freq == null) ? 1 : freq + 1);
        }

        for (String type : typeCounts.keySet()) {
            FileTypeRecord ftr = new FileTypeRecord();
            ftr.set(type);
            ftr.setOccurrances(typeCounts.get(type));
            this.outputQueue.add(ftr);
        }

        // NOTE(review): assumes the base class only invokes transformRecord
        // when at least one input record was queued; an empty outputQueue
        // here would throw NoSuchElementException -- confirm against
        // JoinSplitFilterBase.
        return (Record) this.outputQueue.removeFirst();
    }

    /**
     * Peeking is not meaningful for this filter and is not implemented.
     * @throws UnsupportedOperationException always.
     */
    public Record peek() throws InvalidRecordException {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}


/**
 * This pipeline is a continuation of the previous example Pipeline1. It adds
 * the join split filter to the pipeline and chains it to the records file
 * source.
 * @author Thomas S. Brettin
 */
class Pipeline3 {

    // The concrete source: a file containing one file name per line.
    private RecordsFile fnrf = null;
    // The join/split filter that turns file names into distinct file types.
    private FileNamesToFileTypesFilter fn2ft = null;

    /**
     * Builds the two-stage pipeline: a RecordsFile source of FileNameRecords
     * chained into the file-names-to-file-types filter.
     *
     * @param fileNameRecordsFile path to the file of file names.
     * @throws Exception if the source or filter cannot be constructed.
     */
    public Pipeline3(String fileNameRecordsFile) throws Exception {
        fnrf = new RecordsFile(fileNameRecordsFile,
                "biofilter.records.FileNameRecord");
        fn2ft = new FileNamesToFileTypesFilter();
        fn2ft.setSource(fnrf);
    }

    /**
     * Drains the pipeline, printing each distinct file type and how many
     * times it occurred.
     *
     * @throws Exception if the filter fails while producing records.
     */
    public void execPipeline() throws Exception {
        while (!fn2ft.eof()) {
            FileTypeRecord ftr = (FileTypeRecord) fn2ft.getRecord();
            System.out.println(ftr.get() + " found " + ftr.getOccurrances() + " times.");
        }
    }

    /**
     * Entry point. Expects one argument: the path to a file listing one
     * file name per line.
     */
    public static void main(String[] args) {
        // Guard against a missing argument -- previously args[0] threw an
        // ArrayIndexOutOfBoundsException whose getMessage() was unhelpful.
        if (args.length < 1) {
            System.err.println("Usage: Pipeline3 <file-of-filenames>");
            return;
        }
        try {
            Pipeline3 p = new Pipeline3(args[0]);
            p.execPipeline();
        } catch (Exception e) {
            // getMessage() alone can be null (e.g. NPE) and hides the origin;
            // print the full trace to stderr for diagnosis.
            e.printStackTrace();
        }
    }
}
/**


 */
 
