 /* 
  Copyright Software Engineering Research laboratory <serl@cs.wichita.edu>

 This program is free software; you can redistribute it and/or
 modify it under the terms of the GNU Library General Public
 License as published by the Free Software Foundation; either
 version 2 of the License, or (at your option) any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 Library General Public License for more details.

 You should have received a copy of the GNU Library General Public
 License along with this program; if not, write to the Free
 Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 
 */
package wichita.edu.utility;


import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import org.xml.sax.SAXException;
/*
 * @author Sara Bahrami <mxbahramizanjani@wichita.edu>
 *
 * Purpose: parses all of the XML files (the output of SrcML.java).
 * Output:  a corpus that holds one document of extracted terms per XML file.
 */

/**
 * Builds a text corpus from srcML XML files. Every XML file in {@link #dirPath}
 * contributes one document: the text of its {@code <name>} elements (split on
 * camel-case boundaries), its non-copyright {@code <comment>} elements and its
 * textual {@code <expr>} elements. {@link #print} writes the corpus out with
 * English/Java stop words removed and each term Porter-stemmed.
 */
public class XmlParser{
	/** Corpus: source-file name (with '-' restored to '/') -> extracted terms. */
	public static HashMap<String,ArrayList<String>> Corpus=new HashMap<String,ArrayList<String>>();
	/** Directory the resulting corpus file is written to. */
	public static String dirPath1 = "/home/sara/research/codereview/";
	//directory which includes all the XML files
	public static String dirPath ="/home/sara/research/codereview/Mylyn/Repository/Srcml/";

	/** Java keywords/literals used as extra stop words; built once instead of per call. */
	private static final List<String> JAVA_STOP_WORDS = Arrays.asList(
			"abstract","assert","boolean","break","byte","case","catch","char","class","const","continue",
			"default","do","double","else","enum","extends","final","finally","float","for","goto","if",
			"implements","import","instanceof","int","interface","long","native","new","package","private",
			"protected","public","return","short","static","strictfp","super","switch","synchronized",
			"this","throw","throws","transient","try","void","volatile","while","false","null","true");

	public static void main(String[] args) {
		try {
			File folder = new File(dirPath);
			File[] listOfFiles = folder.listFiles();
			// listFiles() returns null when dirPath does not exist or is not a directory;
			// the original code would NPE here.
			if (listOfFiles == null) {
				System.err.println("Cannot read directory: " + dirPath);
				return;
			}
			for (File file : listOfFiles) {
				if (file.length() != 0) {
					// File names encode the original source path with '-' in place of '/'.
					String filename = file.getName().replace('-', '/');
					System.out.println(filename);
					ListMatching(filename, CreatCorpus(file, dirPath));
				}
			}
			print("Corpus.txt", dirPath1);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/********************************************************************/
	/**
	 * Splits an identifier into words: non-word characters and underscores
	 * become spaces, then a space is inserted at camel-case boundaries,
	 * e.g. {@code "getHTTPResponse" -> "get HTTP Response"}.
	 *
	 * @param Str the raw identifier text
	 * @return the space-separated words
	 */
	public static String CamelCase(String Str) {
		// Punctuation and underscores -> spaces ('|' is redundant inside the
		// class but harmless: it is itself a non-word character).
		Str = Str.replaceAll("[\\W|\\_]", " ");
		// Boundaries: lower->Upper ("fooBar") and ACRONYM->Word ("XMLParser").
		Str = Str.replaceAll("((?<=[a-z])(?=[A-Z]))|((?<=[A-Z])(?=[A-Z][a-z]))", " ");
		return Str;
	}

/*********************************************************************************************************************/
	/**
	 * Extracts the terms of one srcML document: all {@code <name>} elements
	 * (camel-case split), all {@code <comment>} elements that are not license
	 * headers (markup and {@code @author} tags stripped) and all {@code <expr>}
	 * elements whose first child is plain text. Duplicates are dropped while
	 * the first-seen order is preserved.
	 *
	 * @param file    the XML file to parse (only its name is used)
	 * @param dirPath the directory containing {@code file}
	 * @return the de-duplicated term list for this document
	 * @throws ParserConfigurationException if the DOM builder cannot be created
	 * @throws SAXException                 if the XML is malformed
	 * @throws IOException                  if the file cannot be read
	 */
	public static ArrayList<String> CreatCorpus(File file, String dirPath)
			throws ParserConfigurationException, SAXException, IOException {
		ArrayList<String> myList_name = new ArrayList<String>();
		// Shadow set for O(1) duplicate checks; myList_name keeps insertion order.
		// (The original used ArrayList.contains, which is O(n) per lookup.)
		HashSet<String> seen = new HashSet<String>();

		File fXmlFile = new File(dirPath + file.getName());
		DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
		DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
		Document doc = dBuilder.parse(fXmlFile);
		doc.getDocumentElement().normalize();

		// Identifiers: split camel-case names into individual words.
		NodeList nList_name = doc.getElementsByTagName("name");
		for (int temp = 0; temp < nList_name.getLength(); temp++) {
			Node nNode = nList_name.item(temp);
			if (nNode.getFirstChild() == null) {
				continue; // empty element such as <name/>; original code NPE'd here
			}
			String value = nNode.getFirstChild().getTextContent();
			for (String word : CamelCase(value).split("\\s+")) {
				if (seen.add(word)) {
					myList_name.add(word);
				}
			}
		}

		// Comments: skip copyright/license blocks, strip comment markup and @author.
		NodeList nList_comment = doc.getElementsByTagName("comment");
		for (int temp = 0; temp < nList_comment.getLength(); temp++) {
			Node nNode = nList_comment.item(temp);
			if (nNode.getFirstChild() == null) {
				continue;
			}
			String value1 = nNode.getFirstChild().getTextContent().trim();
			if (!(value1.contains("Copyright"))) {
				value1 = value1.replaceAll("[\\r|\\n]", " ");
				value1 = value1.replaceAll("[\\*|\\/]", "");
				value1 = value1.replaceAll("@author", "");
				value1 = value1.trim();
				for (String word : value1.split("\\s+")) {
					if (seen.add(word)) {
						myList_name.add(word);
					}
				}
			}
		}

		// Expressions: only nodes whose first child is raw text contribute terms.
		NodeList nlListexp = doc.getElementsByTagName("expr");
		for (int k = 0; k < nlListexp.getLength(); k++) {
			Node node = nlListexp.item(k);
			if (node.hasChildNodes()
					&& node.getFirstChild().getNodeType() == Node.TEXT_NODE) {
				String exprvalue = node.getFirstChild().getTextContent().trim();
				exprvalue = exprvalue.replaceAll("[\\r|\\n]", " ");
				for (String word : exprvalue.split("\\s+")) {
					if (seen.add(word)) {
						myList_name.add(word);
					}
				}
			}
		}
		return myList_name;
	}

/***********************************************************************************/
	/**
	 * Merges a term list into the global {@link #Corpus}. If {@code fname} is
	 * not yet a key (case-sensitive check), the list is stored as-is; otherwise
	 * the terms are appended to every existing entry whose key matches
	 * case-insensitively.
	 *
	 * @param fname the document name (restored source path)
	 * @param Str   the terms extracted for that document
	 */
	public static void ListMatching(String fname, ArrayList<String> Str) {
		if (!Corpus.containsKey(fname)) {
			Corpus.put(fname, Str);
		} else {
			// NOTE(review): lookup is case-sensitive but the merge below is
			// case-insensitive; preserved as-is — confirm this asymmetry is intended.
			for (Entry<String, ArrayList<String>> entry : Corpus.entrySet()) {
				if (entry.getKey().equalsIgnoreCase(fname)) {
					entry.getValue().addAll(Str);
				}
			}
		}
	}

/******************************************************************************************/
	/**
	 * Writes the corpus to {@code dirPath1 + fname}, one line per document:
	 * the document name, a tab, then its stop-word-filtered and stemmed terms
	 * separated by spaces (no newline after the last document).
	 *
	 * @param fname    output file name
	 * @param dirPath1 output directory
	 * @throws Exception if writing or stemming fails
	 */
	public static void print(String fname, String dirPath1) throws Exception {
		File Corpusfile = new File(dirPath1 + fname);
		BufferedWriter writer = new BufferedWriter(new FileWriter(Corpusfile));
		try {
			int j = 0;
			for (Entry<String, ArrayList<String>> entry : Corpus.entrySet()) {
				writer.write(entry.getKey());
				writer.write("\t");
				for (int i = 0; i < entry.getValue().size(); i++) {
					writer.write(removeStopWordsAndStem(entry.getValue().get(i)).trim());
					writer.write(" ");
				}
				j = j + 1;
				if (j < Corpus.size()) {
					writer.newLine();
				}
			}
			writer.flush();
		} finally {
			// Release the file handle even when stemming throws; the original
			// leaked the writer on any exception.
			writer.close();
		}
	}

/********************************************************************************************/
	/**
	 * Tokenizes {@code input}, removes English stop words and Java keywords,
	 * applies the Porter stemmer and re-joins the surviving tokens with single
	 * spaces.
	 *
	 * @param input the raw term text
	 * @return the filtered, stemmed text (may be empty)
	 * @throws IOException if the token stream fails
	 */
	public static String removeStopWordsAndStem(String input) throws IOException {
		TokenStream tokenStream = new StandardTokenizer(
				Version.LUCENE_40, new StringReader(input));
		// Use one consistent match version throughout; the original mixed
		// LUCENE_36 and LUCENE_40 in the same analysis chain.
		tokenStream = new StopFilter(Version.LUCENE_40, tokenStream, StandardAnalyzer.STOP_WORDS_SET);
		tokenStream = new StopFilter(Version.LUCENE_40, tokenStream,
				StopFilter.makeStopSet(Version.LUCENE_40, JAVA_STOP_WORDS));
		tokenStream = new PorterStemFilter(tokenStream);
		StringBuilder sb = new StringBuilder();
		CharTermAttribute token = tokenStream.getAttribute(CharTermAttribute.class);
		tokenStream.reset();
		try {
			while (tokenStream.incrementToken()) {
				if (sb.length() > 0) {
					sb.append(" ");
				}
				sb.append(token.toString());
			}
			tokenStream.end();
		} finally {
			tokenStream.close(); // close even if incrementToken throws
		}
		return sb.toString();
	}

}
