import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;


/**
 * Controls all functions to do with training data sets
 * 
 * @author tomalteman
 *
 */
public class dataset {
	
	// 1 enables verbose logging in several methods; 0 silences it
	private static int debug=0;
	// Running counter used by getName() to build unique filenames for new articles
	private static int writeNum;
	
	//Data structs for doing conversion from text to tf-idf
	// vocab: selected feature words; corpus: word/document counts used for idf and info gain
	private static wordcount vocab;
	private static wordcount corpus;
	
	//Temp array for training set
	// Shared scratch list used by wcToString/addToTemp/returnTemp and sortByInfoGain
	private static ArrayList<entry> temp;
	
	//Array for assigning labels
	// Topic names; a topic's class label is its index in this array + 1 (see getLabel)
	private static String[] lbl;
	
	
	//Globals to calc info gain
	private static double total_docs;
	private static double total_topic;
	private static double topic_num;
	
	
	///////////////////////////
	//// Important globals
	///////////////////////////
	
	//Min frequency for feature selection
	private static int min_size = 1;
	private static int local_min_size = 1;
	
	//Checks to see if local training is being done
	private static boolean local_train = false;
	
	//Local info gain limit
	private static double local_ig = -0.8;
	private static double topic_ig = -0.8;
	
	///////////////////////////
	
	// When true, textToBog writes bags of words to "newbog/" instead of "bog/"
	private static boolean newbog = false;
	
	/**
	 * Stores titles already in training set
	 * Integer indicates whether they are already stored or have just been discovered
	 */
	private static HashMap<String,Integer> titles;
	
	/**
	 * Loads the stored article-title index for a topic into the titles map
	 * and resets writeNum to the number of titles already on disk.
	 * 
	 * @param topic topic whose title list is read from training/&lt;topic&gt;/&lt;topic&gt;
	 */
	private static void loadTopic(String topic)
	{
		//Start from an empty index of article titles
		titles = new HashMap<String,Integer>();
		
		//Read the persisted title list and fold it into the map
		toHash(TFile.read(new File("training/" + topic + "/" + topic)));
		
		//Filenames for newly stored articles continue from the current set size
		writeNum = titles.size();
		
		System.out.println("Size of set is "+ titles.size());
	}
	
	/**
	 * Stores any articles from the given list that are not already in the
	 * training set for this topic, then rewrites the topic's title index.
	 * 
	 * @param topic topic being updated
	 * @param arts  candidate articles from a feed
	 */
	public static void updateTopic(String topic,ArrayList<article> arts)
	{
		int added = 0;
		
		for(article a : arts)
		{
			if(titles.containsKey(a.getTitle()))
			{
				//Duplicate title - nothing to store
				if(debug==1) System.out.println("Already have " + a.getTitle());
			}
			else
			{
				//New article: remember the title and persist the text
				titles.put(a.getTitle(), 1);
				TFile.writeArt(getName(topic),a);
				writeNum++;
				added++;
			}
		}
		
		System.out.println("Added "+ added);
		
		//Write titles list to file
		TFile.write(new File("training/"+topic+"/"+topic), toList());
	}
	
	/**
	 * Builds a unique filename for the next stored article of a topic,
	 * based on the current write counter.
	 * 
	 * @param topic topic folder the file belongs to
	 * @return file handle training/&lt;topic&gt;/text/&lt;topic&gt;&lt;writeNum&gt;.txt
	 */
	private static File getName(String topic)
	{
		String path = "training/" + topic + "/text/" + topic + writeNum + ".txt";
		return new File(path);
	}
	
	/**
	 * Merges a list of article titles into the titles map, marking each
	 * as already stored.
	 * 
	 * @param arts titles read from the on-disk index
	 */
	private static void toHash(ArrayList<String> arts)
	{
		for(String title : arts)
		{
			titles.put(title, 1);
		}
	}
	
	/**
	 * Returns all known article titles as a list, ready to be written back
	 * to the topic's on-disk index.
	 * 
	 * @return list of the titles map's keys
	 */
	private static ArrayList<String> toList()
	{
		//Copy the key set directly; replaces the original raw Iterator walk
		//over entrySet() with its unchecked Map.Entry cast
		return new ArrayList<String>(titles.keySet());
	}
	
	/**
	 * Read all rss feeds in guardian file and update training set.
	 * File is formatted: "&lt;link&gt; &lt;topic&gt;", one feed per line.
	 * Blank or malformed lines are skipped instead of crashing.
	 */
	public static void updateGuardian()
	{
		RSSReader reader = RSSReader.getInstance();
		String[] split;
		
		//Get links and topics to be acquired
		ArrayList<String> input = TFile.read(new File("training/guardian"));
		
		for(int i=0;i<input.size();i++)
		{
			//Split input on space to get link and topic
			split = input.get(i).split(" ");
			
			//Guard: the old code indexed split[1] unconditionally, throwing
			//ArrayIndexOutOfBoundsException on a blank or malformed line
			if(split.length < 2)
			{
				System.err.println("Skipping malformed feed line: " + input.get(i));
				continue;
			}
			
			System.out.println("Update "+ split[1] + " with link " + split[0]);
			
			loadTopic(split[1]);
			updateTopic(split[1],reader.guardianFeed(split[0],split[1]));
		}
		
	}
	
	/**
	 * Read all rss feeds in the reuters file and update the training set.
	 * File is formatted: "&lt;link&gt; &lt;topic&gt;", one feed per line.
	 * Blank or malformed lines are skipped instead of crashing.
	 */
	public static void updateReuters()
	{
		RSSReader reader = RSSReader.getInstance();
		String[] split;
		
		//Get links and topics to be acquired
		ArrayList<String> input = TFile.read(new File("training/reuters"));
		
		for(int i=0;i<input.size();i++)
		{
			//Split input on space to get link and topic
			split = input.get(i).split(" ");
			
			//Guard: the old code indexed split[1] unconditionally, throwing
			//ArrayIndexOutOfBoundsException on a blank or malformed line
			if(split.length < 2)
			{
				System.err.println("Skipping malformed feed line: " + input.get(i));
				continue;
			}
			
			System.out.println("Update "+ split[1] + " with link " + split[0]);
			
			loadTopic(split[1]);
			updateTopic(split[1],reader.reutersFeed(split[0],split[1]));
		}
		
	}
	
	/**
	 * Lists the files contained in a directory.
	 * 
	 * @param s directory path
	 * @return array of files, or null if the path is not a readable directory
	 */
	public static File[] getFiles(String s)
	{
		return new File(s).listFiles();
	}
	
	/**
	 * Extracts the filename from the path.
	 * Uses File.getName(), which respects the platform separator; the old
	 * lastIndexOf("/") approach returned the whole path on Windows where the
	 * separator is '\'.
	 * 
	 * @param f file whose bare name is wanted
	 * @return filename without any directory components
	 */
	private static String extractFileName(File f)
	{
		return f.getName();
	}
	
	/**
	 * Reads every article in a topic's text folder and converts each into a
	 * bag-of-words file, written under bog/ (or newbog/ when the newbog
	 * flag is set).
	 * 
	 * @param topic topic whose text files are processed
	 */
	public static void textToBog(String topic)
	{
		RSSReader reader = RSSReader.getInstance();
		File[] dir = getFiles("training/"+topic+"/text/");
		
		for(int i=0;i<dir.length;i++)
		{
			//Get filename without path
			String file = extractFileName(dir[i]);
			if(debug==1) System.out.println("FILENAME: "+file);
			
			//Get text from file; "-" signals an unreadable/invalid article
			String text = reader.trainTextRead(dir[i]);
			if(text.equals("-")) continue;
			
			//Get bag of words for file and write it out
			wordcount bog = getBog(text);
			String folder = newbog ? "newbog" : "bog";
			TFile.write(new File("training/"+topic+"/"+folder+"/"+file), bog.getList(false));
		}
	}
	
	
	/**
	 * Gets String of text, removes stop words and stems before outputting a
	 * bag of words. The bag is seeded with extracted nouns, then the text is
	 * tokenised on non-alphanumeric characters (hyphens join the surrounding
	 * parts); each token is stop-word filtered and stemmed via verifyWord.
	 * 
	 * @param text article text
	 * @return wordcount of verified, stemmed tokens
	 */
	public static wordcount getBog(String text)
	{
		//Seed with nouns; the old code first built a throwaway wordcount
		//that this call immediately overwrote (dead store)
		wordcount bog = Classifier.nounExtract(text, false);
		
		//StringBuilder avoids quadratic String concatenation on long articles
		StringBuilder word = new StringBuilder();
		
		for(char c : text.toCharArray())
		{
			//Append to word if letter or digit
			if(Character.isLetterOrDigit(c))
			{
				word.append(c);
			}
			else if(c == '-')
			{
				//Hyphens are dropped, joining the surrounding parts
			}
			else if(word.length()>0)
			{
				//Token boundary: keep the word only if it survives verification
				String w = verifyWord(word.toString());
				if(w.length()>0) bog.add(w);
				word.setLength(0);
			}
		}
		
		//Make sure last word is included
		String w = verifyWord(word.toString());
		if(w.length()>0) bog.add(w);
		
		return bog;
	}
	
	/**
	 * Checks for stop word and performs stemming.
	 * Returns empty string if word is not to be added.
	 * 
	 * @param word candidate token
	 * @return stemmed lower-case word, or "" for a stop word
	 */
	public static String verifyWord(String word)
	{
		//Stop words contribute nothing to classification
		if(Stopwords.isStopword(word)) return "";
		
		//Normalise case, then reduce to the stem form
		return Stemmer.stemWord(word.toLowerCase());
	}
	
	//ADD IN GET VOCAB
	
	/**
	 * Create training set for libSVM in tf-idf format given array of topics.
	 * LIMIT is the maximum amount of training articles read for each topic,
	 * keeping the document count per class balanced; 0 means use every file
	 * in the topic's bog directory.
	 * 
	 * @param topics topic names (index+1 becomes the class label)
	 * @param limit  max documents per topic (0 = all)
	 */
	public static void trainSetTFIDF(String[] topics, int limit)
	{
		lbl = topics;
		
		//Initialise vars for info gain
		total_topic = (double) limit;
		total_docs = (double) (topics.length*limit);
		topic_num = (double) topics.length;
		
		//Initialise objects
		vocab = new wordcount();
		corpus = new wordcount();
		ArrayList<wordcount> bogs = new ArrayList<wordcount>();
		File[] dir;
		
		//Load bogs
		for(int i=0;i<topics.length;i++)
		{
			dir = getFiles("training/"+topics[i]+"/bog/");
			
			//Recompute the per-topic document count every iteration; the old
			//code initialised l once, so a small directory's length leaked
			//into later topics. 0 means use the whole directory.
			int l = limit;
			if(limit==0 || limit>dir.length) l = dir.length;
			
			//Read each file into bogs
			for(int j=0;j<l;j++) bogs.add(getBog(topics[i],dir[j],true,i));
		}
		
		//Perform feature selection
		pruneCorpus();
		
		//Gets lines of training data
		ArrayList<String> lines = new ArrayList<String>();
		for(int i=0;i<bogs.size();i++) lines.add(wcToString(bogs.get(i)));
		
		//Write vocab to file
		TFile.write(new File("training/class/new/vocab"), vocab.getList(false));
		
		//Write corpus to file
		TFile.write(new File("training/class/new/corpus"), corpus.getList(true));
		
		//Print to train file
		TFile.write(new File("training/class/new/train"), lines);
	}
	
	/**
	 * Create training sets while sweeping the minimum-frequency parameter:
	 * runs "size" passes with min_size = 1..size, writing vocab/corpus/train
	 * files suffixed with the pass number under training/param/new/.
	 * 
	 * @param topics topic names
	 * @param limit  max documents per topic (0 = all)
	 * @param size   number of parameter settings to try
	 */
	public static void trainMulti(String[] topics, int limit, int size)
	{
		for(int z=0;z<size;z++)
		{
			min_size = 1+ z;
			topic_ig = -0.8;
			System.out.print(topic_ig + " : ");
			
			lbl = topics;
			
			//Initialise vars for info gain
			total_topic = (double) limit;
			total_docs = (double) (topics.length*limit);
			topic_num = (double) topics.length;
			
			//Initialise objects
			vocab = new wordcount();
			corpus = new wordcount();
			ArrayList<wordcount> bogs = new ArrayList<wordcount>();
			File[] dir;
			
			//Load bogs
			for(int i=0;i<topics.length;i++)
			{
				dir = getFiles("training/"+topics[i]+"/bog/");
				
				//Reset per topic; previously a short directory's length could
				//carry over to later topics. 0 means use the whole directory.
				int l = limit;
				if(limit==0 || limit>dir.length) l = dir.length;
				
				//Read each file into bogs
				for(int j=0;j<l;j++) bogs.add(getBog(topics[i],dir[j],true,i));
			}
			
			//Perform feature selection
			pruneCorpus();
			
			//Gets lines of training data
			ArrayList<String> lines = new ArrayList<String>();
			for(int i=0;i<bogs.size();i++) lines.add(wcToString(bogs.get(i)));
			
			System.out.println(vocab.getTotal());
			
			//Write vocab to file
			TFile.write(new File("training/param/new/vocab"+z), vocab.getList(false));
			
			//Write corpus to file
			TFile.write(new File("training/param/new/corpus"+z), corpus.getList(true));
			
			//Print to train file
			TFile.write(new File("training/param/new/train"+z), lines);
		}
	}
	
	/**
	 * Local-news variant of trainMulti: runs "size" passes over the local
	 * topic set and writes numbered vocab/corpus/localtrain files per pass.
	 * (The swept-parameter assignments are left commented as in the
	 * original experiments.)
	 * 
	 * @param local local topic names
	 * @param limit max documents per topic (0 = all)
	 * @param size  number of passes
	 */
	private static void localMulti(String[] local,int limit, int size)
	{
		for (int z=0;z<size;z++)
		{
			//local_min_size = z;
			//local_ig = 0.5 - 0.1 * (double) z;
			System.out.print(local_ig+ " : ");
			
			local_train = true;
			lbl = local;
			
			//Initialise vars for info gain
			total_topic = (double) limit;
			total_docs = (double) (local.length*limit);
			topic_num = (double) local.length;
			
			//Initialise objects
			vocab = new wordcount();
			corpus = new wordcount();
			ArrayList<wordcount> bogs = new ArrayList<wordcount>();
			File[] dir;
			
			//Load bogs
			for(int i=0;i<local.length;i++)
			{
				dir = getFiles("training/"+local[i]+"/bog/");
				
				//Reset per topic (0 means whole directory); the old code let
				//a previous topic's directory size leak forward
				int l = limit;
				if(limit==0 || limit>dir.length) l = dir.length;
				
				//Read each file into bogs
				for(int j=0;j<l;j++) bogs.add(getBog(local[i],dir[j],true,i));
			}
			
			//Perform feature selection
			pruneCorpus();
			
			//Gets lines of training data
			ArrayList<String> lines = new ArrayList<String>();
			for(int i=0;i<bogs.size();i++) lines.add(wcToString(bogs.get(i)));
			
			System.out.println(vocab.getTotal());
			
			//Write vocab to file
			TFile.write(new File("training/param/new/vocab"+z), vocab.getList(false));
			
			//Write corpus to file
			TFile.write(new File("training/param/new/corpus"+z), corpus.getList(true));
			
			//Print to train file
			TFile.write(new File("training/param/new/localtrain"+z), lines);
		}
	}
	
	/**
	 * Counts and prints how many local training files exist per location,
	 * matching each location's name against the filename.
	 */
	private static void localTrainSize()
	{
		File[] dir = getFiles("training/test/local/text");
		int bri=0,ab=0,manc=0,lon=0,dur=0;
		
		for(File f : dir)
		{
			String s = f.toString();
			if(s.contains("bristol")) bri++;
			if(s.contains("manchester")) manc++;
			if(s.contains("abergavenny")) ab++;
			if(s.contains("durham")) dur++;
			if(s.contains("london")) lon++;
		}
		
		System.out.println("Bristol: "+bri);
		System.out.println("Manc: "+manc);
		System.out.println("Aber: "+ab);
		System.out.println("Durham: "+dur);
		System.out.println("London: "+lon);
	}
	
	/**
	 * Builds the local-news training set: loads bogs for each local topic,
	 * runs feature selection with the local thresholds, and writes
	 * localvocab/localcorpus/localtrain under training/class/.
	 * 
	 * @param local local topic names
	 * @param limit max documents per topic (0 = all)
	 */
	private static void localTrain(String[] local,int limit)
	{
		local_train = true;
		lbl = local;
		
		//Initialise vars for info gain
		total_topic = (double) limit;
		total_docs = (double) (local.length*limit);
		topic_num = (double) local.length;
		
		//Initialise objects
		vocab = new wordcount();
		corpus = new wordcount();
		ArrayList<wordcount> bogs = new ArrayList<wordcount>();
		File[] dir;
		
		//Load bogs
		for(int i=0;i<local.length;i++)
		{
			dir = getFiles("training/"+local[i]+"/bog/");
			
			//Reset per topic (0 means whole directory); previously a short
			//directory's length could leak into later topics
			int l = limit;
			if(limit==0 || limit>dir.length) l = dir.length;
			
			//Read each file into bogs
			for(int j=0;j<l;j++) bogs.add(getBog(local[i],dir[j],true,i));
		}
		
		//Perform feature selection
		pruneCorpus();
		
		//Gets lines of training data
		ArrayList<String> lines = new ArrayList<String>();
		for(int i=0;i<bogs.size();i++) lines.add(wcToString(bogs.get(i)));
		
		//Write vocab to file
		TFile.write(new File("training/class/localvocab"), vocab.getList(false));
		
		//Write corpus to file
		TFile.write(new File("training/class/localcorpus"), corpus.getList(true));
		
		//Print to train file
		TFile.write(new File("training/class/localtrain"), lines);
	}
	
	/**
	 * Gets a test set from the training data for location classification.
	 * Reads up to "length" bog files per topic starting at offset "start",
	 * converts each to a tf-idf line and writes them to localpredict.
	 * 
	 * @param topics topic names
	 * @param start  first file index to read per topic
	 * @param length number of files per topic
	 */
	public static void localTestTrain(String[] topics, int start, int length)
	{
		lbl = topics;
		
		//Load vocab and corpus from file
		vocab = new wordcount();
		corpus = new wordcount();
		vocab.loadFromFile(TFile.read(new File("classifier/localvocab")), false);
		corpus.loadFromFile(TFile.read(new File("classifier/localcorpus")), true);
		
		ArrayList<String> test = new ArrayList<String>();
		
		//Load bogs
		for(int i=0;i<topics.length;i++)
		{
			File[] dir = getFiles("training/"+topics[i]+"/bog/");
			if(dir == null) continue;
			
			System.out.println(topics[i]);
			System.out.println(dir.length);
			
			for(int j=start; (j<(start+length)) && (j<dir.length); j++)
			{
				test.add(wcToString(getBog(topics[i],dir[j],false,0)));
			}
		}
		
		//Write test to file
		TFile.write(new File("training/class/localpredict"), test);
	}
	
	/**
	 * Given a list of test data, converts each string into a tf-idf line
	 * using the stored vocab/corpus and writes the result to the predict file.
	 * 
	 * @param list raw test texts
	 */
	public static void testToTFIDF(ArrayList<String> list)
	{
		//Load vocab and corpus from file
		vocab = new wordcount();
		corpus = new wordcount();
		vocab.loadFromFile(TFile.read(new File("training/vocab")), false);
		corpus.loadFromFile(TFile.read(new File("training/corpus")), true);
		
		//Convert each text into a bag of words, then into a test-file line
		ArrayList<String> test = new ArrayList<String>();
		for(String text : list)
		{
			test.add(wcToString(getBog(text)));
		}
		
		//Write test to file
		TFile.write(new File("training/class/predict"), test);
	}
	
	/**
	 * Builds a prediction file from the training bogs: reads up to "length"
	 * files per topic starting at "start". num selects which parameter-sweep
	 * vocab/corpus to load (0 = the default pair); note num is scaled by 10
	 * before use.
	 * 
	 * @param topics topic names
	 * @param start  first file index per topic
	 * @param length files per topic
	 * @param num    parameter-set selector (0 for the default files)
	 */
	public static void testFromTrain(String[] topics, int start, int length, int num)
	{
		lbl = topics;
		
		//Load vocab and corpus from file
		vocab = new wordcount();
		corpus = new wordcount();
		
		if(num == 0)
		{
			vocab.loadFromFile(TFile.read(new File("training/vocab")), false);
			corpus.loadFromFile(TFile.read(new File("training/corpus")), true);
		}
		else
		{
			num = num *10;
			vocab.loadFromFile(TFile.read(new File("training/class/multi/vocab"+num)), false);
			corpus.loadFromFile(TFile.read(new File("training/class/multi/corpus"+num)), true);
		}
		
		ArrayList<String> test = new ArrayList<String>();
		
		//Load bogs
		for(int i=0;i<topics.length;i++)
		{
			File[] dir = getFiles("training/"+topics[i]+"/bog/");
			if(dir == null) continue;
			
			System.out.println(topics[i]);
			System.out.println(dir.length);
			
			for(int j=start; (j<(start+length)) && (j<dir.length); j++)
			{
				test.add(wcToString(getBog(topics[i],dir[j],false,0)));
			}
		}
		
		//Write test to file
		if (num>0) TFile.write(new File("training/class/multi/predict"+num), test);
		else TFile.write(new File("training/class/predict"), test);
	}
	
	/**
	 * Converts articles into tf-idf lines for classification and writes them
	 * to classifier/predict. The local flag selects the local-news vocab and
	 * corpus; a positive test id overrides both and loads the numbered pair.
	 * 
	 * @param arts  articles to classify
	 * @param local use the local-news vocab/corpus
	 * @param test  if &gt; 0, load the numbered test vocab/corpus instead
	 */
	public static void getPredict(ArrayList<article> arts,boolean local, int test )
	{
		//Load vocab and corpus from file
		vocab = new wordcount();
		corpus = new wordcount();
		
		String voc, cor;
		if(local)
		{
			voc = "classifier/localvocab";
			cor = "classifier/localcorpus";
		}
		else
		{
			voc = "classifier/vocab";
			cor = "classifier/corpus";
		}
		
		//A positive test id overrides both choices above
		if(test>0)
		{
			voc = "training/class/vocab"+test;
			cor = "training/class/corpus"+test;
		}
		
		vocab.loadFromFile(TFile.read(new File(voc)), false);
		corpus.loadFromFile(TFile.read(new File(cor)), true);
		
		//Title and body together form the classified text
		ArrayList<String> predict = new ArrayList<String>();
		for(article art : arts)
		{
			predict.add(wcToString(getBog(art.getTitle()+art.getText())));
		}
		
		TFile.write(new File("classifier/predict"),predict);
	}
	
	/**
	 * Converts a wordcount to a line of training data.
	 * ASSUMING EXISTENCE OF VOCAB AND CORPUS.
	 * Output format: "&lt;label&gt; &lt;index&gt;:&lt;tfidf&gt; ..." with indexes ascending.
	 * 
	 * @param wc bag of words for one document
	 * @return libSVM-style training line
	 */
	public static String wcToString(wordcount wc)
	{
		temp = new ArrayList<entry>();
		
		//Remove unwanted features
		wc = pruneWc(wc);
		
		//Get label of class, followed by whitespace before the features
		String line = getLabel(wc.getTopic()) + " ";
		
		//Get list of entries
		ArrayList<entry> ent = wc.getEntries();
		double total = (double) wc.getTotal();
		entry e;
		
		//Get index and calc tfidf for word
		for(int i=0;i<ent.size();i++)
		{	
			e = ent.get(i);
			
			//Look the word up once; the old code queried vocab twice per entry
			int idx = vocab.getWord(e.word);
			if(idx != -1)
			{
				//Set index
				e.index = idx;
				
				//Set tfidf
				e.tfidf = getTFIDF((double) e.count, total, (double) corpus.getWord(e.word), (double) corpus.getDoc());
				
				//Add in ordered way
				addToTemp(e,true);
			}
		}
		
		//Add temp to line
		return line + returnTemp();
	}
	
	/**
	 * Removes unwanted features from wordcount: any word not present in the
	 * current vocab is dropped from the document's bag.
	 * 
	 * NOTE(review): this iterates wc.keys by index while calling wc.remove()
	 * on the same wordcount. If remove() shrinks the keys list in place, the
	 * element following each removed word is skipped on this pass — confirm
	 * against wordcount.remove before relying on a complete sweep.
	 * 
	 * @param wc document bag to filter (modified in place)
	 * @return the same wordcount, filtered
	 */
	private static wordcount pruneWc(wordcount wc)
	{
		String word;
		
		for(int i=0;i<wc.keys.size();i++)
		{
			word = wc.keys.get(i);
			if(!vocab.contains(word)) wc.remove(word);
		}
		
		return wc;
	}
	
	/**
	 * Feature-selection predicate: decides whether a corpus word is kept.
	 * A word survives only if its frequency and information gain clear the
	 * thresholds for the current mode (local_* thresholds when local_train
	 * is set, otherwise min_size/topic_ig).
	 * 
	 * @param word candidate feature
	 * @return true if the word should be kept in the vocabulary
	 */
	private static boolean includeWord(String word)
	{
		if(local_train)
		{
			//Local mode checks info gain first, then frequency
			return getInfoGain(word) >= local_ig
				&& corpus.getCount(word) >= local_min_size;
		}
		
		//Topic mode checks frequency first, then info gain
		return corpus.getCount(word) >= min_size
			&& getInfoGain(word) >= topic_ig;
	}
	
	/**
	 * Computes the information gain of a word over the topic set, using the
	 * per-topic occurrence counts from the corpus with add-one smoothing.
	 * 
	 * @param word word to score
	 * @return information gain value (higher = more discriminative)
	 */
	private static double getInfoGain(String word)
	{
		int[] t = corpus.getTcount(word);
		double total_word = (double) corpus.getCount(word);
		double denom = total_word + topic_num;
		
		if(debug==1) corpus.printLine(word,0);
		if(debug==1) System.out.println(total_word);
		
		//Entropy of the topic distribution (topics are uniformly sized)
		double p1 = -topic_num * ((total_topic/total_docs)*Math.log(total_topic/total_docs));
		if(debug==1) System.out.println("p1 is" +p1);
		
		//Conditional term: doc belongs to topic i given the word (smoothed)
		double p2=0;
		for(int i =0;i<t.length;i++)
		{
			double p = ((double) (t[i]+1)) / denom;
			p2 = p2 + p * Math.log(p);
		}
		if(debug==1) System.out.println("p2 is" +p2);
		
		//Conditional term: doc does NOT belong to topic i given the word
		double p3=0;
		for(int i =0;i<t.length;i++)
		{
			double q = (total_word+1 -(double) (t[i])) / denom;
			p3 = p3 + q * Math.log(q);
		}
		if(debug==1) System.out.println("p3 is" +p3);
		
		return p1 + p2 + p3;
	}
	
	/**
	 * Goes through corpus and selects preferred features.
	 * Words rejected by includeWord (too infrequent / low info gain) are
	 * removed from the corpus; accepted words are registered in the vocab.
	 * 
	 * NOTE(review): iterates corpus.keys by index while remove() may mutate
	 * that same list — if it does, the key following each removed word is
	 * skipped in this pass. Verify against wordcount.remove before relying
	 * on a complete sweep.
	 */
	private static void pruneCorpus()
	{
		
		//Remove from corpus any words too infrequent else add them to vocab
		for(int i=0;i<corpus.keys.size();i++)
		{
			if(!includeWord(corpus.keys.get(i))) corpus.remove(corpus.keys.get(i));
			else vocab.addWord(corpus.keys.get(i));
		}
	}
	
	/**
	 * Renders the temp entry list as " index:tfidf" pairs for a training line.
	 * 
	 * @return concatenated feature string (a leading space before each pair)
	 */
	private static String returnTemp()
	{
		StringBuilder text = new StringBuilder();
		
		for(entry e : temp)
		{
			text.append(" ").append(e.index).append(":").append(e.tfidf);
		}
		
		return text.toString();
	}
	
	/**
	 * Adds entry to temp ensuring order.
	 * When index is true, temp is kept in ascending order of entry index
	 * (for training lines); otherwise in descending order of info gain.
	 * 
	 * @param e     entry to insert
	 * @param index true = order by index, false = order by info gain
	 */
	private static void addToTemp(entry e, boolean index)
	{
		//(an unused local "double t" was removed from the original)
		for(int i=0;i<temp.size();i++)
		{
			if(index && (e.index < temp.get(i).index))
			{
				temp.add(i, e);
				return;
			}
			else if(!index && (e.ig > temp.get(i).ig))
			{
				temp.add(i, e);
				return;
			}
		}
		
		//If nothing bigger is already in array then add to end
		temp.add(e);
	}
	
	/**
	 * Ranks every vocab word in the corpus by information gain (descending,
	 * via addToTemp) and writes the ranked listing to the "infogain" file.
	 */
	private static void sortByInfoGain()
	{
		temp = new ArrayList<entry>();
		
		for(String s : corpus.keys)
		{
			entry e = new entry();
			e.word = s;
			e.ig = getInfoGain(s);
			
			//Only rank words that survived feature selection
			if(vocab.contains(e.word))
			{
				System.out.println("Vocab contains "+e.word);
				addToTemp(e,false);
			}
		}
		
		System.out.println(temp.size());
		
		ArrayList<String> pr  = new ArrayList<String>();
		for(entry e : temp)
		{
			pr.add(corpus.printLine(e.word, e.ig));
		}
		
		TFile.write(new File("infogain"), pr);
	}
	
	/**
	 * Calculates a tf-idf value: tf = wordocc/totalwords,
	 * idf = ln(doctotal/dococc).
	 * 
	 * @param wordocc    occurrences of the word in the document
	 * @param totalwords total words in the document
	 * @param dococc     number of documents containing the word
	 * @param doctotal   total number of documents
	 * @return tf * idf
	 */
	public static double getTFIDF(double wordocc, double totalwords, double dococc, double doctotal)
	{
		double termFrequency = wordocc / totalwords;
		double inverseDocFrequency = Math.log(doctotal / dococc);
		return termFrequency * inverseDocFrequency;
	}
	
	/**
	 * Gets a label for the training set: looks the topic up in the label
	 * array and returns its 1-based index as a string. Empty or unknown
	 * topics map to "-1" (used for test data).
	 * 
	 * @param topic topic name to label
	 * @return 1-based index as a string, or "-1"
	 */
	public static String getLabel(String topic)
	{
		if(topic.length() < 1) return "-1";
		
		for(int i=0;i<lbl.length;i++)
		{
			if(topic.equals(lbl[i])) return Integer.toString(i+1);
		}
		
		System.err.println("Not found in topic array: "+topic);
		
		return "-1";
	}
	
	/**
	 * Loads a bag of words from a bog file. Each line is "&lt;word&gt; &lt;count&gt;".
	 * When train is true the document and its words are also registered in
	 * the corpus for the given topic (the vocab is filled later by
	 * pruneCorpus).
	 * 
	 * @param word  topic name stored on the returned wordcount
	 * @param f     bog file to read
	 * @param train whether to feed the corpus as well
	 * @param topic topic index used for per-topic corpus counts
	 * @return wordcount for the document
	 */
	private static wordcount getBog(String word, File f, boolean train,int topic)
	{
		wordcount doc = new wordcount(word);
		
		//Increment document counter in corpus
		if(train) corpus.addDoc();
		
		//For each line in the bog file, store in doc and feed the corpus
		for(String line : TFile.read(f))
		{
			String[] split = line.split(" ");
			
			//1st is word / 2nd is occ number
			doc.add(split[0],Integer.parseInt(split[1]));
			
			//Add to corpus
			if(train) corpus.addToCorpus(split[0],topic,lbl.length);
		}
		
		return doc;
	}
	
	/**
	 * Converts the text folders of every listed topic into bag-of-words files.
	 * 
	 * @param topics topics to process
	 */
	public static void getBogs(String[] topics)
	{
		for(String topic : topics)
		{
			textToBog(topic);
		}
	}
	
	/**
	 * Moves files from the unsorted folder into category folders based on
	 * the article's category code (1=news, 2=biz, 3=ent, 4=sport); other
	 * codes leave the file where it is.
	 */
	public static void sortTest()
	{
		File[] files = getFiles("training/test/unsorted");
		RSSReader rss = RSSReader.getInstance();
		
		for(int i=0;i<files.length;i++)
		{
			//Read the article directly; the old code allocated a throwaway
			//article() that was immediately overwritten (dead store)
			article art = rss.getArticle(files[i]);
			
			switch (art.category)
			{
				case 1: TFile.writeArt(new File("training/test/news/"+extractFileName(files[i])), art); break;
				case 2: TFile.writeArt(new File("training/test/biz/"+extractFileName(files[i])), art); break;
				case 3: TFile.writeArt(new File("training/test/ent/"+extractFileName(files[i])), art); break;
				case 4: TFile.writeArt(new File("training/test/sport/"+extractFileName(files[i])), art); break;
				default: break;
			}
		}
	}
	
	/**
	 * Scans the sorted test category folders and appends any new, non-empty
	 * article titles to the "found" list file.
	 */
	public static void addToFound()
	{
		ArrayList<String> found = TFile.read(new File("training/test/found"));
		RSSReader rss = RSSReader.getInstance();
		String[] names = {"news","sport","ent","biz"};
		
		for(String name : names)
		{
			System.out.println(name);
			
			for(File f : getFiles("training/test/"+name))
			{
				String title = rss.getArticle(f).getTitle();
				System.out.println(title);
				
				//Record only non-empty titles we have not seen before
				if((title.length()>0) && !found.contains(title)) found.add(title);
			}
		}
		
		TFile.write(new File("training/test/found"), found);
	}
	
	/**
	 * Ad-hoc driver: each stage of the pipeline (feed acquisition ->
	 * bag-of-words conversion -> training-set build -> prediction-set build)
	 * is run by uncommenting the relevant call. As checked in, it builds the
	 * tf-idf training set for the four sorted test categories.
	 */
	public static void main(String[] args)
	{
		//Get more training data
		//updateGuardian();
		//updateReuters();
		
		//String[] s = {"stage","education","politics"};
		//String[] s = {"news","business","entertainment","sports"};
		//String[] s = {"television","football"};
		//String[] s = {"politics","world","travel"};
		
		//Convert text to bag of words
		//getBogs(s);
		
		
		//Convert bog to training data format
		
		//int[] size = {1,2};
		//trainMulti(s,100,22);
		
		//System.out.println(TFile.read(new File("training/vocab")).size());
		//String[] s = {"news","business","entertainment","sports"};System.out.println(TFile.read(new File("training/corpus")).size());
		
		//Testing test load
		/*ArrayList<String> test = new ArrayList<String>();
		test.add("sienna directli vivaldi");
		testToTFIDF(test);*/
		
		//String[] t = {"business","entertainment","football"};
		//String[] t = {"politics","business","television","football"};
		
		//Test training data
		//String[] s = {"business","entertainment","sports"};
		//String[] s = {"news","business","entertainment","sports"};
		//trainSetTFIDF(s,100);
		//testFromTrain(s,50,50,0);
		
		/*for(int i=5;i<15;i++)
		{
			int x = i*10;
			testFromTrain(s,150,50,i);
		}*/
		
		//sortTest();
		//addToFound();
		
		//Testing remove remove func
		/*ArrayList<String> p = TFile.read(new File("training/corpus"));
		for(int i=0;i<p.size();i++) System.out.println(p.get(i));
		wordcount wc = new wordcount();
		wc.loadFromFile(p, true);
		//wc.print();
		wc.remove("mandolin");
		TFile.write(new File("training/c"), wc.getList(true));*/
		
		//Create local news training data
		// Active run: build the tf-idf training set (77 docs per category)
		String[] s = {"test/news","test/biz","test/ent","test/sport"};
		//getBogs(s);
		trainSetTFIDF(s,77);
		//testFromTrain(s,46,100);
		
		//Testing info gain
		/*System.out.println(getInfoGain("momentum"));
		System.out.println(getInfoGain("make"));
		System.out.println(getInfoGain("sportmad"));*/
		
		//sortByInfoGain();
		//localTrain();
		
		//Get local training data
		//String[] l = {"test/local/sorted/manchester","test/local/sorted/bristol","test/local/sorted/durham","test/local/sorted/london"};
		//String[] li = {"test/local/sorted/abergavenny","test/local/sorted/bristol"};
		//String[] li = {"test/local/sorted/manchester","test/local/sorted/abergavenny"};
		//getBogs(l);
		//localTrain(l,50);
		//in(li,0,17);
		//localMulti(l,50,5);
		//sortByInfoGain();
		//localTestTrain(li,0,17);
	}

}
