package langnstats.project.languagemodel.loglinear;

///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2001 Chieu Hai Leong and Jason Baldridge
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
//////////////////////////////////////////////////////////////////////////////   

import java.io.File;

import langnstats.project.ParserTools.StanfordParserHandel;
import langnstats.project.lib.WordType;
import opennlp.maxent.BasicContextGenerator;
import opennlp.maxent.ContextGenerator;
import opennlp.maxent.GISModel;
import opennlp.maxent.MaxentModel;
import opennlp.maxent.io.SuffixSensitiveGISModelReader;

/**
 * Test the model on some input.
 *
 * @author  Jason Baldridge
 * @version $Revision: 1.2 $, $Date: 2001/11/20 17:07:17 $
 */
/**
 * Loads a GIS maxent model from disk and scores a {@link WordType} against it,
 * returning the model's outcome distribution re-indexed by {@code WordType} index.
 *
 * <p>NOTE(review): the model and context generator are {@code static}, so every
 * call to the constructor replaces the class-wide model. Kept as-is for
 * compatibility with existing callers that read {@code Predict._model}.
 *
 * @author  Jason Baldridge
 * @version $Revision: 1.2 $, $Date: 2001/11/20 17:07:17 $
 */
public class Predict {
    /** Class-wide model; overwritten by every constructor call. */
    public static MaxentModel _model;
    /** Splits a space-separated predicate string into context features. */
    public static ContextGenerator _cg = new BasicContextGenerator();

    /**
     * Installs {@code m} as the class-wide model used by {@link #predict}.
     *
     * @param m the maxent model to install
     */
    public Predict(MaxentModel m) {
        _model = m;
    }

    /**
     * Evaluates the model on a space-separated predicate string.
     * Debug helper: the resulting outcome probabilities are computed but not
     * printed (the original console output is retained below, disabled).
     *
     * @param predicates space-separated context features
     */
    private void eval(String predicates) {
        double[] ocs = _model.eval(_cg.getContext(predicates));
        // System.out.println("For context: " + predicates
        //                    + "\n" + _model.getAllOutcomes(ocs) + "\n");
    }

    /**
     * Scores {@code wordType} against the model stored in the hard-coded file
     * {@code "ModelFile"}. The context is built from parser-derived features
     * plus the word's name (features are space-separated), and the resulting
     * probabilities are re-ordered so that slot {@code i} of the returned array
     * corresponds to the {@code WordType} whose {@code getIndex()} is {@code i}.
     *
     * <p>If the model file cannot be read, the stack trace is printed and the
     * JVM exits with status 1 (was 0 — a failure must not signal success).
     *
     * @param wordType the word whose name seeds the feature context
     * @return outcome probabilities indexed by {@code WordType} index
     */
    public static double[] predict(WordType wordType) {
        // NOTE(review): parser initialization happens on every call — presumably
        // expensive; consider hoisting to a static initializer. Left as-is to
        // preserve behavior.
        StanfordParserHandel parserHandel = new StanfordParserHandel();
        parserHandel.initializeParser();

        String modelFileName = "ModelFile"; // hard-coded model path
        try {
            GISModel m =
                new SuffixSensitiveGISModelReader(new File(modelFileName)).getModel();
            // Constructor side effect installs the model into the static _model.
            new Predict(m);
        } catch (Exception e) {
            e.printStackTrace();
            System.exit(1); // fixed: was exit(0), which reported success on failure
        }

        String s = wordType.getName();

        //////////////////////////////////////////////////////////////////
        ///// This is where you should throw in features          /////////
        ///// Please remember to add spaces between features      /////////
        //////////////////////////////////////////////////////////////////
        String featureStr = parserHandel.getParseFeature(s)
            + " " + parserHandel.getHistoryLengthFeature()
            + " " + s;

        double[] ocs = _model.eval(_cg.getContext(featureStr));

        // Re-order: the model reports outcomes in its own order; map each
        // outcome label back to its WordType and place the probability at
        // that WordType's canonical index.
        double[] sorted = new double[WordType.size()];
        for (int i = 0; i < WordType.size(); i++) {
            String oc = _model.getOutcome(i);
            WordType wt = WordType.get(oc);
            sorted[wt.getIndex()] = ocs[i];
        }
        return sorted;
    }
}

/*                			if(oc.equals(s)){
						ind=i;
					}
        			}
        			ppl+=Math.log(ocs1[ind]);
			} 
			for(int i=0;i<34;i++){
				ocs1[i]=ocs2[i];
			}
		}
		System.out.println("ppl: "+Math.exp(-ppl/count));
		System.out.println("bits: "+(-ppl/count)/Math.log(2));
            } catch (Exception e) {
        	e.printStackTrace();
		System.exit(0);
	    }*/
//	}
/*	else {
	    try {
		DataStream ds =
		    new PlainTextByLineDataStream(
			new FileReader(new File(args[0])));

		while (ds.hasNext()) {
		    String s = (String)ds.nextToken();
		    predictor.eval(s.substring(0, s.lastIndexOf(' ')));
		}
		return;
	    }
	    catch (Exception e) {
		System.out.println("Unable to read from specified file: "
				   + args[0]);
		System.out.println();
	
	    }
	}
    }
    
}*/
