package infastructure;

import info.bliki.wiki.filter.PlainTextConverter;
import info.bliki.wiki.model.WikiModel;
import edu.stanford.nlp.process.*;
import java.util.List;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Properties;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import opennlp.tools.sentdetect.SentenceDetectorME;
import opennlp.tools.sentdetect.SentenceModel;
import opennlp.tools.util.InvalidFormatException;

import org.apache.lucene.benchmark.byTask.feeds.ContentSource;
import org.apache.lucene.benchmark.byTask.feeds.DocData;
import org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.commons.compress.*;
import org.apache.commons.compress.compressors.CompressorException;
import org.apache.commons.compress.compressors.CompressorStreamFactory;
import org.jsoup.Jsoup;

import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.util.StringUtils;

//********* Based on David's implementation *********

/**
 * Reads an enwiki XML dump, keeps articles in a "films" category, extracts the
 * "reception" section, splits it into sentences with OpenNLP, and writes each
 * article out as a TREC-style &lt;DOC&gt; text file.
 *
 * NOTE(review): the class name should be UpperCamelCase ("WikiReader"); it is
 * kept as-is so existing callers keep compiling.
 */
public class wikiReader {

    /** Maximum number of dump documents to examine before stopping. */
    public static final int numDocsToIndex = 4000;
    /** Count of indexed documents exposed via getIndexed(). NOTE(review): never incremented here. */
    private static int nIndexed = 0;
    /** Output directory for the extracted per-movie text files and the run log. */
    public static final String TEXTPATH = "C:\\study\\technion\\MSc\\semester1\\Y!\\datasets\\dumpTest\\";
    /** Installation directory of Apache OpenNLP (location of en-sent.bin). */
    public static final String PWDNLP = "C:\\softwares\\apache-opennlp-1.5.3\\";

    /** Level-2 wiki headings that mark the start of a movie "reception" section. */
    private static final List<String> movieReceptionPattern = Arrays.asList(
            "==Reception==",
            "==Critical reception==",
            "==Release==",
            "==Reviews==",
            "==Responses==",
            "==Release and reaction==",
            "==Critical response==",
            "==Accolades==",
            "== Reception ==",
            "== Critical reception ==",
            "== Release ==",
            "== Reviews ==",
            "== Release and reaction ==",
            "== Responses ==",
            "== Critical response ==",
            "== Accolades =="
    );

    /** Level-3 sub-headings stripped from a reception section before sentence parsing. */
    private static final List<String> movieReceptionPatternToRemove = Arrays.asList(
            "===Reception===",
            "===Critical reception===",
            "==Release==",
            "===Reviews===",
            "===Responses===",
            "===Release and reaction===",
            "===Critical response===",
            "===Accolades===",
            "=== Reception ===",
            "=== Critical reception ===",
            "=== Release ===",
            "=== Reviews ===",
            "=== Release and reaction ===",
            "=== Responses ===",
            // Bug fix: was "=== Critical response ==" (missing '='), which could never match.
            "=== Critical response ===",
            "=== Accolades ==="
    );

    /*args[0]= path to source
      args[1]= category to look for
      args[2]= section to look for
     */
    public static void main(String[] args) throws FileNotFoundException {
        PrintWriter writerLog = new PrintWriter(TEXTPATH + "log.txt");
        StringBuilder loger = new StringBuilder();
        try {
            ContentSource wikiSource = creatContentSource(args[0]);

            DocData dd = new DocData();
            ArrayList<String> catList = new ArrayList<>();
            int indxCnt = 0;   // documents successfully written out
            int cnt = 0;       // documents examined so far
            int nSkipped = 0;  // documents skipped (no body / no category / no section)
            String title;
            String rawBody;
            String htmlBody;
            String plainBody;
            String sectionBody;
            String[] sentences;
            boolean isFilm;

            while (cnt < numDocsToIndex) {
                dd.clear();
                catList.clear();
                isFilm = false;

                try {
                    wikiSource.getNextDocData(dd);
                } catch (Exception e1) {
                    // NoMoreDataException / IOException: dump exhausted or unreadable.
                    break;
                }
                rawBody = dd.getBody();
                if (rawBody == null) {
                    nSkipped++;
                    continue;
                }
                title = dd.getTitle();
                // NOTE(review): hard-coded debug filter — only this single article is
                // processed; remove the check to run over the whole dump.
                if (title.equals("Crouching Tiger, Hidden Dragon")) {
                    catList = extractCategories(rawBody);
                    for (String category : catList) {
                        if (category.contains("films")) {
                            isFilm = true;
                            break;
                        }
                    }
                    if (isFilm) {
                        // Wiki markup -> HTML-ish plain text -> clean text.
                        htmlBody = wikiTextToPlainText(cleanTextForIndexing(rawBody));
                        plainBody = html2text(htmlBody);
                        try {
                            sectionBody = extractSection(plainBody, "reception");
                        } catch (Exception e) {
                            loger.append("error in : ").append(title).append(e.getMessage());
                            loger.append('\n');
                            cnt++;
                            nSkipped++;
                            continue;
                        }

                        if (sectionBody != null && sectionBody.length() > 0) {
                            sentences = parseSentences(sectionBody);
                            saveToFile(sentences, title, "wiki" + (indxCnt + 1));
                            indxCnt++;
                        } else {
                            loger.append("no reception in: ").append(title);
                            loger.append('\n');
                            nSkipped++;
                        }
                    } else { // no films category
                        nSkipped++;
                    }
                }
                cnt++;
            }
            loger.append("finished indexing : ").append(indxCnt)
                 .append(" files and skipped ").append(nSkipped).append(" files");
            loger.append('\n');
        } catch (Exception e) {
            loger.append(e.getMessage());
            loger.append('\n');
        } finally {
            // Bug fix: the original never closed writerLog on the error path, so
            // buffered log output could be lost. Flush and close exactly once.
            writerLog.write(loger.toString());
            writerLog.close();
        }
    }

    /**
     * Builds a non-looping Lucene benchmark content source over the given dump file.
     * NOTE(review): method name keeps its original typo ("creat") so callers compile.
     *
     * @param inputFile path to the enwiki dump file
     * @throws IOException if the source cannot reset its inputs
     */
    public static ContentSource creatContentSource(String inputFile) throws IOException {
        ContentSource wikiSource = new EnwikiContentSource();
        Properties prop = new Properties();
        prop.put("docs.file", inputFile);
        prop.put("content.source.forever", "false");
        prop.put("keep.image.only.docs", "false");
        wikiSource.setConfig(new Config(prop));
        wikiSource.resetInputs();
        return wikiSource;
    }

    /**
     * Matches "[[Category:...]]" links (optional leading ':' / spaces, either case).
     * Compiled once instead of per call. Bug fix: the old class [C|c] also matched
     * a literal '|', so "[[|ategory:...]]" was accepted.
     */
    private static final Pattern CATEGORY_PATTERN =
            Pattern.compile("\\[\\[[:\\ ]*[Cc]ategory:[^\\]]*\\]\\]");

    /**
     * Extracts the category names from the raw wiki markup of an article.
     *
     * @param body raw wiki text
     * @return category names (text after "Category:" up to "]]", trimmed)
     */
    public static ArrayList<String> extractCategories(String body) {
        Matcher matcher = CATEGORY_PATTERN.matcher(body);
        ArrayList<String> out = new ArrayList<String>();
        while (matcher.find()) {
            String cat = matcher.group();
            if (cat != null && cat.length() > 0) {
                // Keep only the name between "Category:" and the closing "]]".
                out.add(cat.substring(cat.indexOf("gory:") + 5, cat.indexOf("]]")).trim());
            }
        }
        return out;
    }

    /**
     * Normalizes raw wiki text so the sentence detector finds sentence boundaries:
     * adds a space after ".\n", turns paragraph breaks and table rows into EOS
     * markers, and truncates everything after the last category link (dropping
     * the trailing inter-language link block).
     */
    public static String cleanTextForIndexing(String text) {
        // assure that ".\n" will be detected as EOS
        String out = text.replaceAll("\\.\n", "\\. \n");
        // assure that any EOP (\n\n, possibly with whitespace between) follows an EOS
        out = out.replaceAll("\n[\\s]*\n", " \\. \n\n");
        // assure that ".<ref", ".<math" (and similar tags) will be detected as EOS
        out = out.replaceAll("\\.<", "\\. <");
        // assure that each table row will be identified as a separate sentence
        out = out.replaceAll("\\|\\-", "\\. \\|\\-");
        // assure that any header will be marked as a separate sentence
        out = out.replaceAll("==\n", "==\\. \n");
        // find the last category in the page and cut all text from there
        // (ignoring the last paragraph of links to other languages)
        int lastCatIndex = out.lastIndexOf("Category:");
        if (lastCatIndex > 0) {
            int marker = out.indexOf("]]", lastCatIndex);
            if (marker >= 0) {
                out = out.substring(0, marker + 2);
            }
        }
        return out;
    }

    /** Strips all HTML tags, returning the visible text only. */
    public static String html2text(String html) {
        return Jsoup.parse(html).text();
    }

    /** @return the number of indexed documents (static counter). */
    public int getIndexed() {
        return nIndexed;
    }

    /** Shared Bliki model used for plain-text extraction from wiki markup. */
    static WikiModel wikiModel = new WikiModel(
            "http://www.mywiki.com/wiki/${image}",
            "http://www.mywiki.com/wiki/${title}");

    /**
     * Extract plain text from the WikiText. Based on the Bliki engine (Java
     * Wikipedia API) <link>http://code.google.com/p/gwtwiki/</link> under
     * Eclipse Public license V 1.0
     * <link>http://www.eclipse.org/legal/epl-v10.html</link>
     *
     * @return the plain-text
     */
    public static String wikiTextToPlainText(String input) {
        return wikiModel.render(new PlainTextConverter(), input);
    }

    /**
     * Splits the input into sentences with the OpenNLP pre-trained English model.
     * Bug fix: the model stream was never closed (file-handle leak on every call);
     * try-with-resources now guarantees closure.
     *
     * @throws InvalidFormatException if en-sent.bin is not a valid model
     * @throws IOException            if the model file cannot be read
     */
    public static String[] parseSentences(String input) throws InvalidFormatException, IOException {
        try (InputStream modelIn = new FileInputStream(PWDNLP + "en-sent.bin")) {
            SentenceModel model = new SentenceModel(modelIn);
            SentenceDetectorME sentenceDetector = new SentenceDetectorME(model);
            return sentenceDetector.sentDetect(input);
        }
    }

    /**
     * Writes the sentences as a TREC-style document:
     * &lt;DOC&gt;&lt;DOCNO&gt;…&lt;TEXT&gt;&lt;title&gt;…&lt;senti&gt;neu &lt;s&gt;…&lt;/s&gt;…
     * Bug fix: the writer leaked when cleanSentences threw; the document is now
     * built first and the writer is closed via try-with-resources.
     *
     * @param body   detected sentences
     * @param title  article title (also used as the output file name)
     * @param docNum document id written into &lt;DOCNO&gt;
     */
    public static void saveToFile(String[] body, String title, String docNum) throws Exception {
        StringBuilder sb = new StringBuilder();
        sb.append("<DOC>");
        sb.append('\n');
        sb.append("<DOCNO>").append(docNum).append("</DOCNO>");
        sb.append('\n');
        sb.append("<TEXT>");
        sb.append('\n');
        sb.append("<title>").append(title);
        sb.append('\n');
        sb.append("<senti>").append("neu");
        sb.append('\n');
        for (String rawSentence : body) {
            // Strip {{...}} template markup before emitting each sentence.
            for (String s : cleanSentences(rawSentence)) {
                sb.append("<s>").append(s).append("</s>");
            }
            sb.append('\n');
        }
        sb.append("</senti>");
        sb.append('\n');
        sb.append("</title>");
        sb.append('\n');
        sb.append("</TEXT>");
        sb.append('\n');
        sb.append("</DOC>");
        sb.append('\n');
        try (PrintWriter writer = new PrintWriter(TEXTPATH + title + ".txt")) {
            writer.write(sb.toString());
        }
    }

    /**
     * Returns the text between a known section heading and the next level-2
     * heading, or "" if no known heading is present.
     *
     * @param body    plain article text
     * @param section currently only "reception" is supported
     */
    public static String extractSection(String body, String section) throws Exception {
        List<String> patternList = new ArrayList<>();
        String sectionText = "";

        switch (section) {
            case "reception":
                patternList = movieReceptionPattern;
                break;
        }

        for (String heading : patternList) {
            if (!body.contains(heading)) {
                continue;
            }
            // Text after the heading, cut at the next "==" heading (not "===").
            sectionText = body.split(heading)[1].split("[^=]==[^=]")[0];
            if (sectionText.length() > 0) {
                break;
            }
        }
        return sectionText;
    }

    /**
     * Removes {{...}} template markup (e.g. {{cite web}}) and known level-3
     * sub-headings, returning the cleaned sentence fragments.
     *
     * Bug fixes vs. the original:
     *  - strBody was never assigned from the input and String.concat()'s result
     *    was discarded, so the matcher always ran on an empty string;
     *  - fragments containing "}}" were added to the result twice.
     */
    public static ArrayList<String> cleanSentences(String body) throws Exception {
        // Strip known sub-headings first (only possible when "===" is present).
        String strBody = body;
        if (body.contains("===")) {
            for (String pat : movieReceptionPatternToRemove) {
                strBody = strBody.replace(pat, "");
            }
        }
        // Split around {{...}} template spans (braces are kept in the groups).
        String regPattern = "(\\}\\}[^\\{^\\}]*\\{\\{|[^\\}]*\\{\\{|\\}\\}[^\\{]*|)";
        Pattern pattern = Pattern.compile(regPattern);

        Matcher matcher = pattern.matcher(strBody);
        ArrayList<String> clearSentence = new ArrayList<String>();
        while (matcher.find()) {
            String sentence = matcher.group();
            if (sentence != null && sentence.length() > 0) {
                // Remove the residual {{ and }} markers and add the fragment once.
                String cleaned = sentence.replaceAll("\\{\\{", "").replaceAll("\\}\\}", "");
                if (cleaned.length() > 0) {
                    clearSentence.add(cleaned);
                }
            }
        }
        return clearSentence;
    }

}



