package edu.uic.readfile;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import java.util.List;

import org.tartarus.snowball.SnowballStemmer;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.Sentence;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;

public class TrainingData
{
	/** Vocabulary shared across all processed files; maps each stem to a vector index. */
	public AllWord wordset = new AllWord();
	/** Path of the pretrained POS-tagger model consumed by {@code MaxentTagger}. */
	public String modelpath = "bidirectional-wsj-0-18.tagger";
	/** Stop-word list and whitelist of POS tags used to filter tokens. */
	public StopWordList stopwordlist = new StopWordList();
	/** Accumulates the per-file vectors built by {@link #transferFile}/{@link #constructVector}. */
	public TrainingFileSet fileset = new TrainingFileSet();
	/** Class label flag: files processed while true are labeled +1, otherwise -1. */
	public boolean isC = false;

	/**
	 * Builds a feature vector for every file in {@code dirpath}, adds each to
	 * {@link #fileset}, then writes the collected vectors to
	 * {@code dirpath + "vector.txt"}.
	 *
	 * @param dirpath directory containing the training files
	 */
	public void transferFile(String dirpath)
	{
		File dir = new File(dirpath);
		String[] children = dir.list();
		if (children == null)
		{
			// list() returns null for a non-directory or on an I/O error;
			// the original code would have thrown an NPE here.
			System.err.println("Cannot list directory: " + dirpath);
			return;
		}
		try
		{
			for (String trainingfile : children)
			{
				BufferedReader br = new BufferedReader(
						new FileReader(dir.getAbsolutePath() + "/" + trainingfile));
				try
				{
					constructVector(br, trainingfile);
				} finally
				{
					br.close(); // original leaked one reader per training file
				}
			}

			// NOTE(review): no path separator before "vector.txt" is kept for
			// compatibility — "articles/l" yields "articles/lvector.txt".
			// Confirm whether dirpath is expected to end with a separator.
			BufferedWriter bw = new BufferedWriter(new FileWriter(dirpath + "vector.txt"));
			try
			{
				fileset.output(bw);
			} finally
			{
				bw.close(); // original never closed/flushed, risking an empty vector.txt
			}
		} catch (IOException e)
		{
			// FileNotFoundException is an IOException, so one catch covers both
			// of the original's two catch blocks.
			e.printStackTrace();
		}
	}

	/**
	 * Tokenizes, POS-filters and stems the text from {@code reader}, producing
	 * the word-count vector shared by {@link #constructVector} and
	 * {@link #getVector}.
	 *
	 * @param reader      source text (caller owns and closes the reader)
	 * @param filename    name recorded on the resulting vector
	 * @param assignLabel when true, sets the vector's label from {@link #isC}
	 *                    (+1 for the C class, -1 otherwise)
	 * @return the populated vector, or null if tagging/stemming failed
	 */
	private SingleFile buildVector(Reader reader, String filename, boolean assignLabel)
	{
		try
		{
			SingleFile file = new SingleFile(wordset.wordvector.size());
			file.fileName = filename;
			if (assignLabel)
				file.value = isC ? 1 : -1;

			// Load the Snowball English stemmer reflectively, as the original
			// did — this keeps the concrete stemmer class out of the imports.
			Class stemClass = Class
					.forName("org.tartarus.snowball.ext.englishStemmer");
			SnowballStemmer stemmer = (SnowballStemmer) stemClass.newInstance();

			// NOTE(review): this instance is never referenced, but with this
			// old Stanford API the constructor appears to load the model the
			// static tokenizeText/tagSentence calls below rely on — confirm
			// before removing.
			MaxentTagger tagger = new MaxentTagger(modelpath);
			List<Sentence<? extends HasWord>> sentences = MaxentTagger
					.tokenizeText(reader);

			for (Sentence<? extends HasWord> sentence : sentences)
			{
				Sentence<TaggedWord> tSentence = MaxentTagger
						.tagSentence(sentence);
				for (Iterator<TaggedWord> wordIterator = tSentence.iterator(); wordIterator
						.hasNext();)
				{
					TaggedWord tagged = wordIterator.next();
					String word = tagged.word().toLowerCase();
					// Drop stop words and tokens whose POS tag is not whitelisted.
					if (stopwordlist.stopwords.contains(word))
						continue;
					if (!stopwordlist.postags.contains(tagged.tag()))
						continue;
					stemmer.setCurrent(word);
					stemmer.stem();
					// Count the stemmed form at its vocabulary index.
					file.updateVector(wordset.wordIndex(stemmer.getCurrent()));
				}
			}
			return file;
		} catch (Exception e)
		{
			e.printStackTrace();
			return null;
		}
	}

	/**
	 * Builds a labeled vector from {@code reader} (label taken from
	 * {@link #isC}) and adds it to {@link #fileset}.
	 *
	 * @param reader   source text
	 * @param filename name recorded on the resulting vector
	 */
	public void constructVector(Reader reader, String filename)
	{
		SingleFile file = buildVector(reader, filename, true);
		if (file != null)
			fileset.addFile(file);
	}

	/**
	 * Builds and returns an unlabeled vector from {@code reader} without
	 * touching {@link #fileset}.
	 *
	 * @param reader   source text
	 * @param filename name recorded on the resulting vector
	 * @return the vector, or null on failure (matching the original contract)
	 */
	public SingleFile getVector(Reader reader, String filename)
	{
		return buildVector(reader, filename, false);
	}

	/** TODO: unimplemented in the original; kept for interface compatibility. */
	public void printId()
	{
	}

	/**
	 * Entry point: builds the word set from the "economy" corpus via
	 * {@code WordSetConstructor}.
	 */
	public static void main(String[] args)
	{
		WordSetConstructor rf = new WordSetConstructor();
		System.out.println("begin");
		rf.readFile("economy");
	}
}
