package TAIC.text;

import java.io.FileInputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Scanner;

import TAIC.Classifier.Classifier;

public class TextBayes extends Classifier {
	/** Number of distinct class labels expected in the training data. */
	int totalClasses = 2;
	/**
	 * Fraction of the highest-IG words considered "top".
	 * NOTE(review): stored by setTopPortion but never read — printTopWords
	 * uses a fixed cutoff of 40; presumably this field was meant to drive it. TODO confirm.
	 */
	double topPortion = 0.1;

	public static void main(String[] args) {
		TextBayes tb = new TextBayes();
		tb.train("auxilary");
		tb.printTopWords();
	}

	/**
	 * Evaluates the classifier on the given test file.
	 *
	 * @param testFile path to the test data
	 * @return always 0 — NOTE(review): not implemented yet
	 */
	public double test(String testFile) {
		return 0;
	}

	/**
	 * Sets the fraction of top words to keep, clamped into [0, 1].
	 *
	 * @param p requested portion; values outside [0, 1] are clamped
	 */
	public void setTopPortion(double p) {
		if (p > 1) p = 1;
		if (p < 0) p = 0; // FIX: negative values were previously accepted unchecked
		topPortion = p;
	}

	/**
	 * Trains the model from a file whose lines are shaped
	 * {@code <classLabel> <wordId>:<count> <wordId>:<count> ...}.
	 * Accumulates per-class counts into {@link #wordCount}, computes
	 * add-one-smoothed per-class probabilities, and fills {@link #topWords}
	 * sorted by information gain.
	 *
	 * @param trainFile path to the training data
	 */
	public void train(String trainFile) {
		// FIX: Scanners are now closed via try-with-resources (both leaked before).
		try (Scanner scanner = new Scanner(new FileInputStream(trainFile))) {
			double[] totalWords = new double[totalClasses]; // total words in each class
			while (scanner.hasNextInt()) {
				int classname = scanner.nextInt();
				// FIX: guard out-of-range labels that used to throw an
				// ArrayIndexOutOfBoundsException swallowed by the broad catch.
				if (classname < 0 || classname >= totalClasses) {
					System.err.println("train: skipping line with invalid class label " + classname);
					scanner.nextLine();
					continue;
				}
				String buffer = scanner.nextLine();
				try (Scanner strScanner = new Scanner(buffer)) {
					while (strScanner.hasNext()) {
						String str = strScanner.next();
						int pos = str.indexOf(':');
						// parseInt avoids the needless boxing of Integer.valueOf
						int fname = Integer.parseInt(str.substring(0, pos));
						int count = Integer.parseInt(str.substring(pos + 1));
						WordInClass word = wordCount.get(fname);
						if (word == null) {
							word = new WordInClass(totalClasses);
							word.ID = fname;
							wordCount.put(fname, word);
						}
						word.count[classname] += count;
						totalWords[classname] += count;
					}
				}
			}

			double allWordsInDoc = 0;
			for (int i = 0; i < totalClasses; i++) allWordsInDoc += totalWords[i];

			topWords = new WordInClass[wordCount.size()];
			int len = 0;
			for (WordInClass iter : wordCount.values()) {
				double innerCount = 0;
				topWords[len++] = iter;
				for (int i = 0; i < totalClasses; i++) {
					// Laplace (add-one) smoothing over the vocabulary size
					iter.prob[i] = (1 + iter.count[i]) / (totalWords[i] + wordCount.size());
					innerCount += iter.count[i];
				}
				iter.Pt = innerCount / allWordsInDoc; // overall probability of the term
				for (int i = 0; i < totalClasses; i++) {
					iter.C[i] = iter.prob[i] / iter.Pt;
					iter.NC[i] = (1 - iter.prob[i]) / (1 - iter.Pt);
				}
				caleIG(iter);
			}
			Arrays.sort(topWords, 0, len);

		} catch (Exception e) {
			// NOTE(review): broad catch kept for interface compatibility — callers
			// do not expect train() to throw. Consider propagating IOException.
			e.printStackTrace();
		}
	}

	/**
	 * Computes the information-gain-style score for one word:
	 * {@code IG = Pt * sum_j C[j]*ln(C[j]) + (1-Pt) * sum_j NC[j]*ln(NC[j])}.
	 *
	 * @param word entry whose C, NC and Pt were filled by train()
	 */
	void caleIG(WordInClass word) {
		double t1 = 0;
		double t2 = 0;
		for (int j = 0; j < totalClasses; j++) {
			t1 += word.C[j] * Math.log(word.C[j]);
			t2 += word.NC[j] * Math.log(word.NC[j]);
		}
		t1 *= word.Pt;
		t2 *= (1 - word.Pt);
		word.IG = t1 + t2;
	}

	/**
	 * Prints up to 40 of the highest-IG words with their scores.
	 * Must be called after {@link #train(String)}.
	 */
	public void printTopWords() {
		if (topWords == null) {
			// FIX: previously threw NullPointerException if train() had not run
			System.out.println("printTopWords: no model — call train() first");
			return;
		}
		System.out.println(wordCount.size());
		HashMap<Integer, String> inverse = new HashMap<Integer, String>();
		lang.getID("zoos"); // NOTE(review): looks like leftover debug/warm-up — confirm it is needed
		for (Map.Entry<String, Integer> iter : lang.vec.entrySet()) {
			inverse.put(iter.getValue(), iter.getKey());
		}
		// FIX: bound the loop by the vocabulary size; the fixed "40" used to
		// throw ArrayIndexOutOfBoundsException on vocabularies under 40 words.
		int limit = Math.min(40, topWords.length);
		for (int i = 0; i < limit; i++) {
			System.out.println(inverse.get(topWords[i].ID) + "\t" + topWords[i].IG);
		}
	}

	/** Per-word-id statistics accumulated by train(). */
	public HashMap<Integer, WordInClass> wordCount = new HashMap<Integer, WordInClass>();
	/** Words sorted by information gain after train(); null before training. */
	public WordInClass topWords[] = null;
	/** Vocabulary mapping words to integer ids, loaded from english.txt. */
	public LanguageVec lang = new LanguageVec("english.txt");
}
