/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.pipes.docs;

import java.io.File;
import java.util.BitSet;
import java.util.Collections;
import java.util.List;
import java.util.logging.Logger;

import edu.byu.nlp.cluster.Dataset;
import edu.byu.nlp.data.SparseFeatureVector;
import edu.byu.nlp.pipes.DataSource;
import edu.byu.nlp.pipes.DataSources;
import edu.byu.nlp.pipes.DirectoryReader;
import edu.byu.nlp.pipes.Downcase;
import edu.byu.nlp.pipes.FieldIndexer;
import edu.byu.nlp.pipes.FilenameToContents;
import edu.byu.nlp.pipes.HeaderStripper;
import edu.byu.nlp.pipes.IndexFileToLabeledFileList;
import edu.byu.nlp.pipes.Instance;
import edu.byu.nlp.pipes.Instances;
import edu.byu.nlp.pipes.LabelIndexer;
import edu.byu.nlp.pipes.Pipe;
import edu.byu.nlp.pipes.Pipes;
import edu.byu.nlp.pipes.RegexpTokenizer;
import edu.byu.nlp.pipes.SerialPipeBuilder;
import edu.byu.nlp.pipes.StopWordRemover;
import edu.byu.nlp.pipes.WordIndexer;
import edu.byu.nlp.util.Indexer;

/**
 * @author rah67
 *
 */
/**
 * Loads a split of the 20 Newsgroups corpus from disk, runs it through a
 * tokenization pipeline, and converts it into an indexed, feature-selected
 * {@link Dataset} of sparse count vectors.
 *
 * <p>Index files listing (label, filename) pairs are expected under
 * {@code basedir/indices/dataset/split}; document files are resolved
 * relative to {@code basedir}.
 *
 * @author rah67
 */
public class TwentyNewsgroups {

	private static final Logger logger = Logger.getLogger(TwentyNewsgroups.class.getName());
	
	// Root directory of the corpus; documents and the "indices" tree live here.
	private final String basedir;
	// Name of the dataset subdirectory under basedir/indices.
	private final String dataset;
	// Name of the split subdirectory (e.g. "train" or "test") under the dataset.
	private final String split;

	// Strategy for choosing which vocabulary features to keep after counting.
	private final FeatureSelectorFactory<String> featureSelectorFactory;
	
	/**
	 * @param basedir root directory of the corpus; index files are read from
	 *     {@code basedir/indices/dataset/split} and document paths are resolved
	 *     against {@code basedir}
	 * @param dataset dataset subdirectory name under {@code basedir/indices}
	 * @param split split subdirectory name (e.g. train/test)
	 * @param featureSelectorFactory factory for the feature selector used to
	 *     prune the vocabulary after count vectors are built
	 */
	public TwentyNewsgroups(String basedir, String dataset, String split, FeatureSelectorFactory<String> featureSelectorFactory) {
		this.basedir = basedir;
		this.dataset = dataset;
		this.split = split;
		this.featureSelectorFactory = featureSelectorFactory;
	}
	
	/**
	 * Builds the final {@link Dataset} from tokenized documents: indexes the
	 * vocabulary and labels, performs feature selection, and re-vectorizes the
	 * documents against the pruned vocabulary.
	 */
	private Dataset dataset(DataSource<String, List<String>> rawData) {
		// Index the data
		Indexer<String> wordIndex = new WordIndexer<String, String>().process(rawData.getData());
		int numFeatures = wordIndex.size();
		
		// Create count vectors
		Iterable<Instance<String, SparseFeatureVector>> countVectors =
				Instances.transformData(rawData.getData(), new CountVectorizer<String>(wordIndex));
		
		// Feature selection: keep only the features flagged by the selector.
		BitSet features = featureSelectorFactory.newFeatureSelector(numFeatures).process(countVectors);
		logger.info("Number of features before selection = " + numFeatures);
		wordIndex = wordIndex.retain(features);
		logger.info("Number of features after selection = " + wordIndex.size());

		// Index labels
		Indexer<String> labelIndex = new LabelIndexer<String, List<String>>().process(rawData.getData());
	
		// Re-index after feature selection (requires re-creating count vectors)
		Pipe<String, List<String>, Integer, SparseFeatureVector> vectorizer = new SerialPipeBuilder<String, List<String>, String, List<String>>()
			.addLabelTransform(new FieldIndexer<String>(labelIndex))
			.addDataTransform(new CountVectorizer<String>(wordIndex))
			.build();
		
		DataSource<Integer, SparseFeatureVector> vectors = DataSources.connect(rawData, vectorizer);
		
		// NOTE(review): the test partition is left empty here; callers appear to
		// load train/test splits as separate TwentyNewsgroups instances.
		return new Dataset(DataSources.cache(vectors),
				Collections.<Instance<Integer, SparseFeatureVector>>emptyList(), wordIndex, labelIndex);
	}

	/**
	 * Reads the configured split from disk, tokenizes it, and returns the
	 * resulting indexed {@link Dataset}.
	 *
	 * <p>Pipeline: index file → (label, filename) pairs → file contents →
	 * header stripping → downcasing → alphabetic tokenization → stop-word
	 * removal → vocabulary/label indexing with feature selection.
	 */
	public Dataset dataset() {
		
		// This pipe leaves data in the form it is expected to be in at test time
		Pipe<String, String, String, String> indexToDocPipe =
				new SerialPipeBuilder<String, String, String, String>()
			.add(Pipes.oneToMany(new IndexFileToLabeledFileList(indexDirectory())))
			.add(Pipes.transformingPipe(new FilenameToContents(new File(basedir))))
			.addDataTransform(new HeaderStripper())
			.build();
			
		Pipe<String, String, String, List<String>> tokenizerPipe =
				new SerialPipeBuilder<String, String, String, String>()
			.addDataTransform(new Downcase())
			.addDataTransform(new RegexpTokenizer("[a-zA-Z]+"))
			.addDataTransform(StopWordRemover.malletStopWords())
			.build();

		Pipe<String, String, String, List<String>> combinedPipe =
				new SerialPipeBuilder<String, String, String, String>()
			.add(indexToDocPipe)
			.add(tokenizerPipe)
			.build();
		
		DataSource<String, List<String>> docSource =
				DataSources.connect(new DirectoryReader(indexDirectory()), combinedPipe);
		
		// Cache the data to avoid multiple disk reads
		List<Instance<String, List<String>>> cachedData = DataSources.cache(docSource);
		
		//
		// Cross-fold validation would create a new pipe factory for each fold.
		// If we have a static test set, we would only do this on the training data
		//
		return dataset(DataSources.from(cachedData));
	}
	
	/** Returns {@code basedir/indices/dataset/split}, where the index files live. */
	private File indexDirectory() {
		return new File(new File(new File(basedir, "indices"), dataset), split);
	}

}
