package sis.ra.evaluation;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.TreeSet;

import sis.ra.partofspeech.wordindex;
import sis.ra.utility.Utils;


/**
 * Sentence/token segmentation of article dumps (Wikipedia, Yahoo, CNET) by
 * shelling out to the Inxight LinguistX Platform command-line tool
 * ("test_platform.exe") and parsing its plain-text report.
 *
 * NOTE(review): the misspelled class/method names ("seperate...") are kept
 * unchanged because external callers may reference them.
 */
public class seperateWikipediaArticleSentence {

	/** LinguistX install root (hard-coded Windows path, as in the original). */
	private static final String LINGUISTX_PATH =
			"c:\\Program Files\\INXIGHT\\LinguistxPlatform3.7\\lx-3\\";
	/** Directory holding the tool binary and the staging file it reads. */
	private static final String BIN_DIR =
			LINGUISTX_PATH + "windows-2003-i686-msvc-7.1-32\\";
	/** File the input text is staged to before invoking the tool. */
	private static final String STAGE_FILE = BIN_DIR + "test.txt";
	/** License key passed to the tool via -k. */
	private static final String LICENSE_KEY = "735442535d524e";

	/**
	 * Renders a token analysis as one tab-separated line per token, in
	 * ascending character-offset order. Columns: offset, word, POS tag,
	 * then (when an entity is present) entity and entity type, then (when
	 * a reference is present) the reference.
	 *
	 * @param posresult token analyses keyed by character offset
	 * @return the tab-separated dump, one token per line
	 */
	public static String toString(HashMap<Integer, wordindex> posresult)
	{
		StringBuilder out = new StringBuilder();
		// A TreeSet over the key set yields offsets in ascending order.
		for (Integer offset : new TreeSet<Integer>(posresult.keySet()))
		{
			wordindex wi = posresult.get(offset);
			out.append(offset).append('\t')
			   .append(wi.getWord()).append('\t')
			   .append(wi.getPos());
			if (wi.getEntity() != null)
			{
				out.append('\t').append(wi.getEntity())
				   .append('\t').append(wi.getEntitytype());
				if (wi.getReference() != null)
					out.append('\t').append(wi.getReference());
			}
			out.append('\n');
		}
		return out.toString();
	}

	/**
	 * Builds the tool command line as an argument array so that paths
	 * containing spaces need no shell-style quoting. (The original passed a
	 * single hand-quoted string to Runtime.exec(String), which tokenizes on
	 * whitespace and only worked by accident on Windows.)
	 *
	 * @param mode     tool mode flag, e.g. "-tokenize" or "-segment"
	 *                 (the tool also supports "-tag" and "-stem")
	 * @param htformat input format name, passed to the tool as "-<htformat>"
	 */
	private static String[] buildCommand(String mode, String htformat)
	{
		return new String[] {
				BIN_DIR + "test_platform.exe",
				"-k", LICENSE_KEY,
				mode,
				"-d", LINGUISTX_PATH + "lang",
				"-" + htformat,
				STAGE_FILE
		};
	}

	/** Closes a reader, ignoring close-time errors (best-effort cleanup). */
	private static void closeQuietly(BufferedReader r)
	{
		if (r != null)
		{
			try {
				r.close();
			} catch (IOException ignored) {
				// nothing sensible to do if close itself fails
			}
		}
	}

	/**
	 * Tokenizes {@code text} with the external LinguistX tool ("-tokenize"
	 * mode) and returns a token index keyed by character offset. POS tags
	 * are filled in from the tool's "tags" section when present.
	 *
	 * @param text     raw text to tokenize
	 * @param htformat input format flag for the tool (e.g. "text", "html")
	 * @return map from character offset to the token starting there
	 */
	public static HashMap<Integer, wordindex> pos (String text, String htformat)
	{
		// The tool reads its input from a file, so stage the text first.
		// (The original wrote "test.txt " with a trailing space; Windows
		// strips it, but we write the exact name the command references.)
		Utils.writeFile(STAGE_FILE, text);

		HashMap<Integer, wordindex> byOffset = new HashMap<Integer, wordindex>();
		ArrayList<wordindex> tokens = new ArrayList<wordindex>();
		int tagged = 0; // number of tokens that have received a POS tag so far

		BufferedReader stdInput = null;
		BufferedReader stdError = null;
		try {
			Process p = Runtime.getRuntime().exec(buildCommand("-tokenize", htformat));
			stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
			stdError = new BufferedReader(new InputStreamReader(p.getErrorStream()));
			String s;
			while ((s = stdInput.readLine()) != null)
			{
				// --- token section: "  word\t(type)\tstart=N\tend=M" rows ---
				if (s.contains("segment tokenization -- tokens:"))
				{
					while ((s = stdInput.readLine()) != null
							&& !s.contains("segment tokenization -- normalized tokens:"))
					{
						// A two-space indent marks a token row; deeper indents
						// (5+ spaces) are sub-annotations and are skipped.
						if (s.startsWith("  ") && !s.startsWith("     ")
								&& s.contains("start=") && s.contains("end="))
						{
							String[] t = s.split("\t");
							if (t.length < 4) continue;
							// t[2] is "start=N": the token's character offset.
							int offset = Integer.parseInt(t[2].substring(6).trim());
							wordindex wi = new wordindex("", offset, "", "", t[0].trim());
							byOffset.put(offset, wi);
							tokens.add(wi);
						}
					}
					// Output ended mid-section (the original NPE'd here).
					if (s == null) break;
				}

				// --- tag section: "  word\tPOS" rows, one per token above ---
				if (s.contains("segment tokenization -- tags:"))
				{
					while ((s = stdInput.readLine()) != null
							&& !s.contains("The character encoding of this segment is"))
					{
						if (s.startsWith("  ") && !s.startsWith("      ")
								&& s.trim().length() > 2)
						{
							String[] t = s.split("\t");
							// Tag rows have exactly two fields: token and tag.
							// (The original only skipped length>2 and would
							// throw on a one-field row.)
							if (t.length != 2) continue;
							// More tags than tokens means malformed output.
							if (tagged >= tokens.size()) break;
							tokens.get(tagged).setPos(t[1].trim());
							tagged++;
						}
					}
					if (s == null) break;
				}
			}
		}
		catch (IOException e) {
			e.printStackTrace();
		}
		finally {
			// Release the child process's pipes (the original leaked both).
			closeQuietly(stdInput);
			closeQuietly(stdError);
		}
		return byOffset;
	}

	/**
	 * Splits {@code text} into sentences using the LinguistX "-segment" mode
	 * and reassembles each sentence from its token rows, preserving
	 * inter-token spacing via the reported character offsets.
	 *
	 * @param text     raw article text
	 * @param htformat input format flag for the tool (e.g. "text")
	 * @return map from each sentence's start offset to its reassembled text
	 */
	public static HashMap<Integer, String> seperateSentence (String text, String htformat)
	{
		HashMap<Integer, String> sents = new HashMap<Integer, String>();
		Utils.writeFile(STAGE_FILE, text);

		int id = -1;           // start offset of the sentence being built (-1: none yet)
		int last = 0;          // end offset of the previous token
		StringBuilder sentence = new StringBuilder();

		BufferedReader stdInput = null;
		BufferedReader stdError = null;
		try {
			Process p = Runtime.getRuntime().exec(buildCommand("-segment", htformat));
			stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
			stdError = new BufferedReader(new InputStreamReader(p.getErrorStream()));
			String s;
			while ((s = stdInput.readLine()) != null)
			{
				if (s.contains("sentence:\tstart=") || s.contains("heading:\tstart="))
				{
					// Deeply indented headers are nested annotations: skip.
					// (4-space check subsumes the original's 6-space check.)
					if (s.startsWith("    ")) continue;
					String[] tags = s.split("\t");
					int start = Integer.parseInt(tags[1].replace("start=", "").trim());
					// A new sentence begins: flush the one collected so far.
					if (id != -1) sents.put(id, sentence.toString());
					id = start;
					sentence.setLength(0);
				}
				else
				{
					String[] ss = s.split("\t");
					// Token rows look like "  word\t(type)\tstart=N\tend=M";
					// skip anything else (wrong field count, tab/deep indent).
					if (ss.length != 4 || s.startsWith("\t") || s.startsWith("      ")) continue;
					int start = Integer.parseInt(ss[2].replace("start=", "").trim());
					int end = Integer.parseInt(ss[3].replace("end=", "").trim());
					// Restore whitespace the tokenizer consumed between tokens.
					for (int c = 0; c < start - last; c++)
						sentence.append(' ');
					sentence.append(ss[0].trim());
					last = end;
				}
			}
			// Flush the final sentence. When the tool produced no sentence
			// header at all, id is still -1 and nothing is stored (the
			// original stored an empty sentence under key -1 here).
			if (id != -1) sents.put(id, sentence.toString());
		}
		catch (IOException e) {
			e.printStackTrace();
		}
		finally {
			closeQuietly(stdInput);
			closeQuietly(stdError);
		}
		return sents;
	}

	/**
	 * Batch driver: sentence-segments every *.txt file under a hard-coded
	 * dataset directory and appends "&lt;offset&gt;\t&lt;sentence&gt;" lines to a
	 * sibling ".segsent" file for each input.
	 */
	public static void main(String[] args) {
		/*
		 * Alternative datasets used previously:
		 *   wikipedia: ...\dataset\testing company\  (filter "_wiki.txt")
		 *   yahoo:     ...\dataset\yahoo doc\        (filter ".yah")
		 */
		String filepathstr = "C:\\Documents and Settings\\I820753\\Desktop\\dataset\\cnet\\";
		File filepath = new File(filepathstr);
		File[] files = filepath.listFiles();
		// listFiles() returns null for a missing/unreadable directory;
		// the original would NPE here.
		if (files == null)
		{
			System.err.println("Not a readable directory: " + filepathstr);
			return;
		}
		for (File file : files)
		{
			String name = file.toString();
			if (!name.contains(".txt")) continue;
			String content = Utils.readFile(name);
			System.out.println(name + "\t" + content.length());

			// Strip Wikipedia-style citation markers [1]..[199].
			for (int b = 1; b < 200; b++)
				content = content.replace("[" + b + "]", "");

			HashMap<Integer, String> sentences = seperateSentence(content, "text");
			// Emit sentences in document order (sorted by start offset).
			for (Integer id : new TreeSet<Integer>(sentences.keySet()))
			{
				String sentence = sentences.get(id);
				System.out.println(id + "\t" + sentence.trim());
				Utils.writeToFile(name.replace(".txt", ".segsent"),
						id + "\t" + sentence.trim(), true, true);
			}
		}
	}

}
