
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.WebURL;


/**
 * Crawler for http://tintuc.timnhanh.com.vn/ news articles.
 *
 * <p>For every visited page it (1) archives the raw HTML under
 * {@code E://DataNew1122012//html//<forder>//}, then (2) extracts title, date,
 * author and article body via regex + Jsoup and writes a tagged plain-text
 * file under {@code E://DataNew1122012//plainText//<forder>//}.
 */
public class MainCrawler extends WebCrawler {
	// Output sub-folder name shared by visit() and main().
	// (Name "forder" kept as-is — it is a public field callers may reference.)
	public static String forder = "TinTucTimNhanh";

	// URLs ending in these binary/media extensions are never visited.
	private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g" + "|png|tiff?|mid|mp2|mp3|mp4"
			+ "|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

	// Site-specific scraping patterns. Compiled once (compilation is expensive
	// and the patterns are stateless) instead of on every visit() call.
	private static final Pattern TITLE_PARSE = Pattern.compile("<p\\sclass=\"title\">([^>]*)</p>");
	private static final Pattern BODY_PARSE = Pattern
			.compile("<div\\sclass=\"newsDetail\"\\sid=\"content_news\">([^*]*)<div\\sclass=\"newsCredit\">");
	private static final Pattern AUTHOR_PARSE = Pattern.compile("<div\\sstyle=\"float:right\">([^>]*)</div>");
	private static final Pattern DATE_PARSE = Pattern.compile("<li\\sclass=\"date\">.+?<span>([^>]*)</span></li>");

	/**
	 * Only follow links on the target news domain that are not binary/media files.
	 */
	@Override
	public boolean shouldVisit(WebURL url) {
		String href = url.getURL().toLowerCase();
		return !FILTERS.matcher(href).matches() && href.startsWith("http://tintuc.timnhanh.com.vn/");
	}

	/**
	 * Archives the raw HTML of the page, then scrapes the article fields and,
	 * when a non-empty body was found, saves a tagged plain-text version.
	 */
	@Override
	public void visit(Page page) {
		int docid = page.getWebURL().getDocid();
		String url = page.getWebURL().getURL();
		System.out.println("URL: " + url);

		if (page.getParseData() instanceof HtmlParseData) {
			HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
			String html = htmlParseData.getHtml();

			// Archive the raw page before any parsing, so a parse failure
			// never loses the fetched HTML.
			writeFile(html, "E://DataNew1122012//html//" + forder + "//index" + docid);

			String date = collectGroups(DATE_PARSE.matcher(html));
			String titleURL = collectGroups(TITLE_PARSE.matcher(html));
			String author = collectGroups(AUTHOR_PARSE.matcher(html));

			// Pattern.quote: the scraped title is plain text, not a regex.
			// (Passing it raw to replaceAll threw PatternSyntaxException for
			// titles containing metacharacters like '(', '?' or '+'.)
			String titleLiteral = Pattern.quote(titleURL);
			StringBuilder body = new StringBuilder();
			Matcher regexBody = BODY_PARSE.matcher(html);
			while (regexBody.find()) {
				body.append(regexBody.group(1).replaceAll(titleLiteral, ""));
			}

			String bodyHtml = body.toString();
			if (!bodyHtml.isEmpty()) {
				// Jsoup strips the remaining markup down to readable text.
				Document docBody = Jsoup.parse(bodyHtml);
				String bodyContent = docBody.body().text();

				saveFile("<url>" + url + "</url>",
						"<date>" + date.trim().replace("&nbsp;", "") + "</date>",
						"<title>" + titleURL.trim() + "</title>",
						"<body>" + bodyContent.trim() + "</body>",
						"<author>" + author.trim() + "</author>",
						"E://DataNew1122012//plainText//" + forder + "//index" + docid);
			}
		}
	}

	// Concatenates capture group 1 of every match found by the given matcher.
	private static String collectGroups(Matcher matcher) {
		StringBuilder sb = new StringBuilder();
		while (matcher.find()) {
			sb.append(matcher.group(1));
		}
		return sb.toString();
	}

	/**
	 * Writes {@code content} as UTF-8 to {@code fileName + ".html"}.
	 * Errors are reported to stderr; the crawl continues.
	 */
	public void writeFile(String content, String fileName) {
		// try-with-resources guarantees the stream is closed even when the
		// write throws (the previous version leaked the FileOutputStream).
		try (DataOutputStream outStream = new DataOutputStream(
				new FileOutputStream(fileName + ".html"))) {
			outStream.write(content.getBytes(StandardCharsets.UTF_8));
		} catch (Exception e) { // Catch exception if any
			System.err.println("Loi: " + e.getMessage());
		}
	}

	/**
	 * Writes the tagged article fields, one per line, as UTF-8 to
	 * {@code file + ".txt"}. Errors are reported to stderr; the crawl continues.
	 */
	public void saveFile(String url, String date, String title, String body, String author, String file) {
		try (DataOutputStream bw = new DataOutputStream(
				new FileOutputStream(file + ".txt"))) {
			// One field per line, in a fixed order consumers rely on.
			for (String field : new String[] { url, date, title, body, author }) {
				bw.write(field.getBytes(StandardCharsets.UTF_8));
				bw.writeBytes("\n");
			}
		} catch (Exception e) { // Catch exception if any
			System.err.println("Loi: " + e.getMessage());
		}
	}

	/**
	 * Configures and starts the crawl from a single seed article.
	 * Blocks until all crawler threads finish.
	 */
	public static void main(String[] args) throws Exception {
		String crawlStorageFolder = "E://DataNew1122012//html//" + forder;
		int numberOfCrawlers = 3;

		CrawlConfig config = new CrawlConfig();
		config.setUserAgentString("Mozilla/6.0 (Macintosh; I; Intel Mac OS X 11_7_9; de-LI; rv:1.9b4) Gecko/2012010317 Firefox/10.0a4");
		config.setCrawlStorageFolder(crawlStorageFolder);
		config.setPolitenessDelay(0);
		config.setMaxDepthOfCrawling(-1); // -1 = unlimited depth
		config.setMaxPagesToFetch(20136);
		config.setResumableCrawling(false);

		PageFetcher pageFetcher = new PageFetcher(config);
		RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
		RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
		CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

		controller.addSeed("http://tintuc.timnhanh.com.vn/doi-song/suc-khoe/20110304/35ab42a9/an-sang-the-nao-tot-cho-da-day.htm");
		// start() blocks until the crawl completes.
		controller.start(MainCrawler.class, numberOfCrawlers);
		System.out.println("Done!");
	}
}
