/**
 * crawler
 *
 * outlook@China software studio
 * Copyright (c) 2008-2010. All Rights Reserved.
 * http://www.outlook.com
 */

package org.outlook.crawler.util;

import java.io.File;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.outlook.common.log.LogUtil;
import org.outlook.common.util.URLUtil;
import org.outlook.crawler.client.domain.Source;
import org.outlook.crawler.client.pojo.HttpStatus;
import org.outlook.crawler.client.pojo.Webinfo;
import org.outlook.crawler.util.parser.LinkDim;
import org.outlook.crawler.util.parser.LinkParser;

/**
 * Processes a fetched web page: mirrors it to the local target directory,
 * rewrites ignorable in-site links inside the saved HTML, and queues newly
 * discovered in-site URLs for crawling.
 *
 * @author Watson email: watson.wu@hotmail.com
 * @date 2011
 */
public class WebinfoProcessor {

	// Per-class logger: static final, since it does not vary per instance.
	private static final Log logger = LogFactory.getLog(WebinfoProcessor.class);

	// Timestamp snapshot taken once at class load, used for all freshness checks.
	// NOTE(review): in a long-running crawler this goes stale — confirm whether a
	// per-check System.currentTimeMillis() was intended.
	private final static long currentTime = new Date().getTime();

	// Root directory under which downloaded pages are mirrored.
	private File target;
	// Extracts link occurrences (URL + character span) from downloaded HTML.
	private LinkParser linkParser;
	// Site domain; resolved links that do not contain it are treated as off-site.
	private String domain;
	// Keys are extensions flagged for dynamic-to-static conversion; the values
	// are never read (only keys are inserted), so the map acts as a set.
	private Map<String, Object> convers = new HashMap<String, Object>();

	/**
	 * Builds a processor wired to the crawl configuration held by
	 * {@code WebsiteCrawlerTest} (target directory, domain, conversion
	 * extensions, and link filters).
	 */
	public WebinfoProcessor() {
		super();
		this.target = WebsiteCrawlerTest.target;
		this.domain = WebsiteCrawlerTest.domain;
		if (null != WebsiteCrawlerTest.CONVERS) {
			for (String conver : WebsiteCrawlerTest.CONVERS)
				convers.put(conver, null);
		}

		linkParser = new LinkParser(WebsiteCrawlerTest.FILTERS);
	}

	/**
	 * Persists a successfully fetched page to disk, rewrites ignorable in-site
	 * links inside the saved HTML to their absolute form, and queues newly
	 * discovered in-site URLs for crawling.
	 *
	 * <p>Pages whose local copy is newer than the source's update cycle are
	 * considered fresh: they are not rewritten on disk, but their links are
	 * still scanned so new URLs can be queued.
	 *
	 * @param webinfo fetched page (URL, HTTP status, HTML source); ignored
	 *                unless the status is {@link HttpStatus#SC_OK}
	 * @param source  crawl source supplying charset and update cycle
	 * @throws Exception on malformed URLs, I/O failures while reading the
	 *                   mirrored file, or link-resolution errors
	 */
	public void parseAndSave(Webinfo webinfo, Source source) throws Exception {
		if (webinfo == null || webinfo.getStatus() != HttpStatus.SC_OK) {
			// TODO decide how to record/retry failed fetches.
			return;
		}

		Reader reader = null;
		try {
			String url = webinfo.getUrl();
			URL currentURL = new URL(url);
			// Mirror the remote path layout under the local target directory.
			StringBuilder filePath = new StringBuilder(target.getAbsolutePath())
					.append(currentURL.getPath());

			boolean isFresh = false;
			File file = new File(filePath.toString());
			if (!file.exists()) {
				file.getParentFile().mkdirs();
				file.createNewFile();
				if (file.getName().indexOf(".") == -1)
					LogUtil.error(logger, "no file subfix?");
			} else if (currentTime - file.lastModified() < source.getUpdateCycle()) {
				// Local copy is within the update cycle; skip rewriting it on disk.
				isFresh = true;
			}

			// html must stay a StringBuffer: linkParser.parseAll takes it by
			// that type, and in-place replace() below edits the same buffer.
			StringBuffer html = new StringBuffer(webinfo.getHtmlSource());
			try {
				if (!isFresh)
					FileUtils.writeStringToFile(file, html.toString(), source.getCharset());
			} catch (Exception e) {
				// Best-effort write: parsing continues from the in-memory page.
				LogUtil.debug(logger, e.getMessage());
			}

			reader = new InputStreamReader(FileUtils.openInputStream(file), source.getCharset());
			List<LinkDim> list = linkParser.parseAll(reader, html);
			if (null == list || list.isEmpty())
				return;

			int offset = 0; // cumulative index shift caused by in-place replacements
			boolean changed = false;
			int i = 1;
			for (LinkDim linkDim : list) {
				String link = linkDim.getUrl();
				String address = formatLink(link, url);
				if (!address.contains(domain)) // off-site link: skip entirely
					continue;

				if (!isFresh && linkParser.isIgnore(link)) {
					LogUtil.trace(logger, ">>>> ", i++, link);
					// Splice the absolute address over the raw link's span,
					// adjusting by the drift accumulated from earlier splices.
					final int start = linkDim.getStart() + offset;
					final int end = linkDim.getEnd() + offset;
					String overlay = html.substring(start, end).replace(link, address);
					html = html.replace(start, end, overlay);
					offset += address.length() - link.length();
					changed = true;
					continue;
				}
				// TODO handle link rewriting when a dynamic page is converted
				// to a static one (extension lookup against 'convers').

				if (!CrawlTaskMonitor.isURlProccessed(address)) {
					final CrawlWebsiteTask task = new CrawlWebsiteTask(address);
					task.setSource(source);
					WebsiteCrawlerTest.tpm.add(task);
					if (CrawlTaskMonitor.addPage(address)) {
						LogUtil.debug(logger, "add ", address, "to queue");
					} else {
						LogUtil.debug(logger, address, "add to queue get false");
					}
				}
			}

			// Persist the link-rewritten HTML back to the mirrored file.
			if (!isFresh && changed) {
				FileUtils.writeStringToFile(file, html.toString(), source.getCharset());
				LogUtil.trace(logger, html);
			}
		} finally {
			if (reader != null)
				reader.close();
		}
	}

	/**
	 * Returns {@code link} unchanged when it is already an absolute URL,
	 * otherwise resolves it against {@code currentURL}.
	 *
	 * @param link       raw link text taken from the page
	 * @param currentURL URL of the page the link was found on
	 * @return an absolute URL string
	 * @throws Exception if resolution via {@link URLUtil#complete} fails
	 */
	private String formatLink(String link, String currentURL) throws Exception {
		try {
			new URL(link); // validity probe only; instance is discarded
			return link;
		} catch (MalformedURLException e) {
			return URLUtil.complete(currentURL, link);
		}
	}

	/** @return root directory pages are mirrored under */
	public File getTarget() {
		return target;
	}

	public void setTarget(File target) {
		this.target = target;
	}

	/** @return parser used to extract and filter links */
	public LinkParser getLinkParser() {
		return linkParser;
	}

	public void setLinkParser(LinkParser linkParser) {
		this.linkParser = linkParser;
	}

	/** @return site domain used to detect off-site links */
	public String getDomain() {
		return domain;
	}

	public void setDomain(String domain) {
		this.domain = domain;
	}

	/** @return extension set (map keys) flagged for static conversion */
	public Map<String, Object> getConvers() {
		return convers;
	}

	public void setConvers(Map<String, Object> convers) {
		this.convers = convers;
	}

}
