package sis.ra.evaluation;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;

import sis.ra.partofspeech.wordindex;
import sis.ra.utility.Utils;

public class tokenizedSentence {

	// Root of the LinguistX installation; every tool path below is derived from it.
	private static final String LINGUISTX_PATH =
			"c:\\Program Files\\INXIGHT\\LinguistxPlatform3.7\\lx-3\\";
	// Platform-specific binary directory, also used for the scratch input file.
	private static final String PLATFORM_DIR =
			LINGUISTX_PATH + "windows-2003-i686-msvc-7.1-32\\";

	/**
	 * Runs the external LinguistX tokenizer over {@code text} and returns a map from the
	 * tokenizer's reported "start=" character offset to the {@link wordindex} describing
	 * the token found there. Part-of-speech tags from the
	 * "segment tokenization -- tags:" section of the tool output are applied to the
	 * tokens in the order they were seen.
	 *
	 * @param text     raw text to tokenize; written to a scratch file the tool reads
	 * @param htformat input-format flag passed to the tool (e.g. "text")
	 * @return map of token start offset to token info; possibly empty if the tool fails
	 *         or produces no parseable output (failure is logged, not thrown)
	 */
	public static HashMap<Integer, wordindex> tokenize(String text, String htformat) {
		HashMap<Integer, wordindex> sentenceindex = new HashMap<Integer, wordindex>();
		ArrayList<wordindex> tokens = new ArrayList<wordindex>();

		// The external tool reads its input from a scratch file.
		// NOTE: the original wrote to "test.txt " (trailing space) while the command
		// referenced "test.txt"; that only worked because Win32 strips trailing spaces
		// from file names. Use the canonical name for both.
		Utils.writeFile(PLATFORM_DIR + "test.txt", text);

		// Build the argument vector explicitly. The original used Runtime.exec(String)
		// with hand-inserted quotes, but exec(String) splits purely on whitespace and
		// does not honor quoting, which is fragile for paths containing spaces
		// ("Program Files"). ProcessBuilder passes each argument intact.
		ProcessBuilder pb = new ProcessBuilder(
				PLATFORM_DIR + "test_platform.exe",
				"-k", "735442535d524e",
				"-tokenize",
				"-d", LINGUISTX_PATH + "lang",
				"-" + htformat,
				PLATFORM_DIR + "test.txt");
		// Merge stderr into stdout so the child cannot deadlock on a full stderr pipe;
		// the marker-based parser below simply skips any non-matching lines.
		pb.redirectErrorStream(true);

		try {
			Process p = pb.start();
			// try-with-resources: the original leaked both readers.
			try (BufferedReader stdout =
					new BufferedReader(new InputStreamReader(p.getInputStream()))) {
				parseToolOutput(stdout, sentenceindex, tokens);
			}
		} catch (IOException e) {
			// Best effort, matching the original contract: log and return whatever
			// was parsed so far (possibly an empty map).
			e.printStackTrace();
		}
		return sentenceindex;
	}

	/**
	 * Parses the tokenizer's stdout: fills {@code sentenceindex} and {@code tokens}
	 * from the "tokens:" section, then attaches POS tags from the "tags:" section to
	 * the collected tokens in order.
	 *
	 * @throws IOException if reading the stream fails
	 */
	private static void parseToolOutput(BufferedReader reader,
			HashMap<Integer, wordindex> sentenceindex,
			ArrayList<wordindex> tokens) throws IOException {
		int tagCount = 0; // index of the next token awaiting a POS tag (spans sections)
		String line;
		while ((line = reader.readLine()) != null) {
			if (line.contains("segment tokenization -- tokens:")) {
				// Token lines look like "  <word>\t...\tstart=<n>\tend=<n>"; the
				// section ends at the "normalized tokens:" header. The null check
				// fixes an NPE the original hit when EOF occurred mid-section.
				while ((line = reader.readLine()) != null
						&& !line.contains("segment tokenization -- normalized tokens:")) {
					if (line.startsWith("  ") && !line.startsWith("     ")
							&& line.contains("start=") && line.contains("end=")) {
						String[] t = line.split("\t");
						if (t.length < 4) {
							continue; // malformed token line
						}
						// t[2] is "start=<n>"; substring(6) drops the "start=" prefix.
						int start = Integer.parseInt(t[2].substring(6).trim());
						// wordindex(pos, position, token, type, word)
						wordindex wi = new wordindex("", start, "", "", t[0].trim());
						sentenceindex.put(start, wi);
						tokens.add(wi);
					}
				}
				if (line == null) {
					break; // EOF inside the section
				}
			} else if (line.contains("segment tokenization -- tags:")) {
				while ((line = reader.readLine()) != null
						&& !line.contains("The character encoding of this segment is")) {
					if (line.startsWith("  ") && line.trim().length() > 2
							&& !line.startsWith("      ")) {
						String[] t = line.split("\t");
						// A tag line has exactly two fields: token \t tag. The extra
						// guards fix crashes the original allowed: t.length == 1 threw
						// ArrayIndexOutOfBounds on t[1], and an unmatched tag count
						// threw IndexOutOfBounds on the token list.
						if (t.length != 2 || tagCount >= tokens.size()) {
							continue;
						}
						tokens.get(tagCount).setPos(t[1].trim());
						tagCount++;
					}
				}
				if (line == null) {
					break; // EOF inside the section
				}
			}
		}
	}

	/**
	 * Batch driver: for every "*_wiki.segsent" file in the dataset directory, POS-tags
	 * each tab-separated {@code <id>\t<sentence>} line and appends both the original
	 * line and the tagged result to "sentence.pos".
	 */
	public static void main(String[] args) {
		String filepathstr =
				"C:\\Documents and Settings\\I820753\\Desktop\\dataset\\testing company\\";
		File[] files = new File(filepathstr).listFiles();
		// listFiles() returns null when the directory is missing or unreadable;
		// the original dereferenced it unchecked and would NPE.
		if (files == null) {
			System.err.println("Cannot list directory: " + filepathstr);
			return;
		}
		for (File file : files) {
			if (!file.toString().contains("_wiki.segsent")) {
				continue;
			}
			String content = Utils.readFile(file.toString());
			for (String line : content.split("\n")) {
				String[] t = line.split("\t");
				if (t.length < 2) {
					continue; // skip lines without an "<id>\t<sentence>" pair
				}
				String sent = t[1]; // t[0] is the sentence id
				// Append the source line, then its POS-tagged form, to sentence.pos.
				Utils.writeToFile(filepathstr + "sentence.pos", line, true, true);
				String seg = seperateWikipediaArticleSentence.toString(tokenize(sent, "text"));
				System.out.println(seg);
				Utils.writeToFile(filepathstr + "sentence.pos", seg, true, true);
			}
		}
	}

}
