package TAM;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;

/**
 * Preprocessing utilities for the bitterlemons-style corpus: flattens raw
 * documents into a single labeled file ("Doc") and extracts Stanford typed
 * dependencies per document (into "dependencies/&lt;i&gt;").
 *
 * Paths ("meta", "docs", "dependencies", "Doc", "grammar/englishPCFG.ser.gz")
 * are resolved relative to the working directory.
 */
public class PreprocessDocs {
	// Accepts only extension-less files (names containing no '.'),
	// skipping things like ".txt" metadata or hidden files.
	FilenameFilter filter = new FilenameFilter() {
		public boolean accept(File directory, String fileName) {
			return fileName.indexOf(".") == -1;
		}
	};

	/**
	 * Reads the metadata file "meta/&lt;filename&gt;" and returns the
	 * document's viewpoint label.
	 *
	 * @param filename name of the metadata file inside the "meta" directory
	 * @return 1 if a line mentions both "viewpoint" and "Palestinian";
	 *         0 if it mentions "viewpoint" and "Israeli", if no labeled line
	 *         is found, or if the "meta" directory does not exist
	 * @throws IOException if the metadata file cannot be read
	 */
	private int getViewpoint(String filename) throws IOException {
		if (!new File("meta").isDirectory())
			return 0;

		// try-with-resources closes the reader even when readLine() throws
		// (the original leaked it on exception). new File("meta", filename)
		// is portable, unlike the hard-coded Windows "\\" separator.
		try (BufferedReader br = new BufferedReader(new FileReader(new File(
				"meta", filename)))) {
			String s;
			while ((s = br.readLine()) != null) {
				if (s.indexOf("viewpoint") != -1 && s.indexOf("Israeli") != -1)
					return 0;
				if (s.indexOf("viewpoint") != -1
						&& s.indexOf("Palestinian") != -1)
					return 1;
			}
		}
		return 0;
	}

	/**
	 * Flattens every extension-less file under {@code filepath} into a single
	 * output file named "Doc": one line per input document, each line prefixed
	 * with its viewpoint label ("0 " or "1 ") and reduced to letters and
	 * single spaces.
	 *
	 * @param filepath directory containing the input documents
	 * @param isViewpoint when true, look up each document's label via
	 *            {@link #getViewpoint(String)}; otherwise label everything 0
	 * @return true on success; false if {@code filepath} is not a readable
	 *         directory
	 * @throws IOException on any read/write failure
	 */
	public boolean GenerateDocs(String filepath, boolean isViewpoint)
			throws IOException {
		File pathName = new File(filepath);
		if (!pathName.isDirectory())
			return false;

		File[] files = pathName.listFiles(filter);
		if (files == null)
			return false;

		System.out.println(files.length);
		// Closing the BufferedWriter also closes the underlying FileWriter;
		// try-with-resources guarantees it happens on every exit path.
		try (BufferedWriter bw = new BufferedWriter(new FileWriter("Doc"))) {
			for (int i = 0; i < files.length; i++) {
				int viewpoint = isViewpoint ? getViewpoint(files[i].getName())
						: 0;
				bw.write(viewpoint + " ");

				try (BufferedReader br = new BufferedReader(new FileReader(
						files[i]))) {
					String s;
					while ((s = br.readLine()) != null) {
						// Keep only letters and spaces, then collapse runs of
						// spaces so the document becomes one token stream.
						s = s.replaceAll("[^a-zA-Z ]", "");
						s = s.replaceAll("[ ]+", " ");
						bw.write(s + " ");
					}
				}
				bw.newLine();
			}
		}
		return true;
	}

	/**
	 * Parses every document selected by the extension-less files under
	 * {@code filepath} with the Stanford lexicalized parser and writes the
	 * CC-processed typed dependencies of document i to "dependencies/i", one
	 * dependency per line with a blank line between sentences. The text itself
	 * is read from "docs/&lt;filename&gt;".
	 *
	 * NOTE(review): the misspelled public name ("Fature") is kept for
	 * backward compatibility with existing callers.
	 *
	 * @param filepath directory whose file listing selects which documents in
	 *            "docs" to parse
	 * @return true on success; false if {@code filepath} is not a readable
	 *         directory
	 * @throws IOException on any write failure
	 */
	public boolean GenerateFature(String filepath) throws IOException {
		File pathName = new File(filepath);
		if (!pathName.isDirectory())
			return false;

		File[] files = pathName.listFiles(this.filter);
		if (files == null)
			return false;

		LexicalizedParser parser = new LexicalizedParser(
				"grammar/englishPCFG.ser.gz");
		TreebankLanguagePack tlp = new PennTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();

		for (int i = 0; i < files.length; i++) {
			String filename = files[i].getName();
			// try-with-resources: the original leaked the writer whenever
			// parsing threw. File(parent, child) replaces the Windows-only
			// "\\" separator.
			try (BufferedWriter bw = new BufferedWriter(new FileWriter(
					new File("dependencies", String.valueOf(i))))) {
				for (List<HasWord> sentence : new DocumentPreprocessor(
						new File("docs", filename).getPath())) {
					Tree parse = parser.apply(sentence);
					GrammaticalStructure gs = gsf
							.newGrammaticalStructure(parse);
					Collection tdl = gs.typedDependenciesCCprocessed(true);

					for (Iterator it = tdl.iterator(); it.hasNext();) {
						bw.write(it.next().toString() + "\n");
					}
					bw.write("\n");
				}
			}
		}
		return true;
	}

}
