package com.flute.icrawler.app.processor.fetch;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.HttpVersion;
import org.apache.http.NameValuePair;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpProtocolParams;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.cyberneko.html.parsers.DOMParser;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;

import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.processor.AbstractProcessor;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.config.CrawlConfigUtil;
import com.flute.icrawler.config.Item;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.processor.result.FailProcessorResult;
import com.flute.icrawler.framework.processor.result.ResultParameter;

/**
 * Processor that performs a form-based HTTP login (POST) against configured
 * registration URLs before crawling, storing the resulting page content on
 * the crawl result.
 */
public class FetchHttpReg extends AbstractProcessor {

	/** Fallback charset used when a response does not declare one. */
	private static final String DEFAULTCHARSET = "gbk";
	// Connection-establishment timeout. NOTE(review): despite the "SECONDS"
	// suffix, this value is passed to HttpConnectionParams.setConnectionTimeout,
	// which expects milliseconds.
	// (Deprecated `new Integer(...)` boxing replaced with plain int constants.)
	private static final int DEFAULT_TIMEOUT_SECONDS = 12000;
	// Socket (read) timeout, in milliseconds.
	private static final int DEFAULT_SOTIMEOUT_MS = 20000;

	/**
	 * Looks up the configured registration entry matching the crawl URL and,
	 * for entries that require no captcha (logintype "0"), performs a
	 * form-based POST login with the configured credentials.
	 *
	 * @param crawlUrl the URL being processed; compared against each
	 *            configured registration address
	 */
	@Override
	public void process(CrawlUrl crawlUrl) {

		List<?> items = CrawlConfigUtil.getBaseConfig().getRegurl().getItem();
		for (Object element : items) {
			Item item = (Item) element;
			String regurl = item.getAddress();
			String logintype = item.getLogintype();
			if (regurl.equals(crawlUrl.getUrl())) {
				// BUG FIX: the original tested logintype.equals(0) — comparing
				// a String against the Integer 0 — which is always false, so
				// the login branch could never run. Compare against the string
				// "0" instead (also null-safe with the constant first).
				if ("0".equals(logintype)) {
					HttpClient httpClient = new DefaultHttpClient(
							configureHttpParams());
					HashMap<String, String> regparam = new HashMap<String, String>();
					regparam.put(item.getUsernameparam(), item.getUsername());
					regparam.put(item.getPasswordparam(), item.getPassword());
					handleHttp(httpClient, crawlUrl, regparam);
				} else {
					// Captcha-protected login flow (not yet implemented):
					// 1. evaluate the page's JS to obtain the captcha image URL
					// 2. fetch the image and decode the captcha text
					// 3. submit the decoded string with the credentials
				}
			}

		}

	}

	/**
	 * Builds the base HTTP parameter set: protocol version, content charset,
	 * user-agent, connect/socket timeouts and redirect handling.
	 *
	 * @return the configured {@link HttpParams}
	 */
	private HttpParams configureHttpParams() {

		// The crawler identifies itself with a generic client UA.
		// BUG FIX: the original built a heritrix-style UA string and then
		// immediately overwrote it; the dead assignment has been removed.
		String userAgent = "HttpComponents/1.1";

		// prepare parameters
		HttpParams params = new BasicHttpParams();
		HttpProtocolParams.setVersion(params, HttpVersion.HTTP_1_1);
		HttpProtocolParams.setContentCharset(params, "UTF-8");
		HttpProtocolParams.setUserAgent(params, userAgent);

		// Both timeout values are interpreted as milliseconds.
		HttpConnectionParams.setConnectionTimeout(params, getConnectTimeout());
		HttpConnectionParams.setSoTimeout(params, getSoTimeout());

		// Follow 3xx redirects automatically.
		HttpClientParams.setRedirecting(params, true);

		return params;
	}

	// Connection-establishment timeout, in milliseconds.
	// NOTE(review): the backing constant is named DEFAULT_TIMEOUT_SECONDS but
	// its value (12000) is consumed as milliseconds by
	// HttpConnectionParams.setConnectionTimeout — the name is misleading.
	private int getConnectTimeout() {
		return DEFAULT_TIMEOUT_SECONDS;
	}

	// Socket (read) timeout, in milliseconds.
	private int getSoTimeout() {
		return DEFAULT_SOTIMEOUT_MS;
	}

	/**
	 * POSTs the login form parameters to the given URL and returns the
	 * response body as a stream of UTF-8 bytes. When the response is an
	 * intermediate page carrying a META refresh directive, the redirect
	 * target is POSTed to as well (recursively).
	 *
	 * @param curiString target URL
	 * @param httpClient client used to execute the request
	 * @param regparam form field name/value pairs
	 * @return the final page content as a UTF-8 encoded byte stream
	 * @throws ClientProtocolException on HTTP protocol errors
	 * @throws IOException on connection or read errors
	 * @throws RuntimeException when the server answers with a non-200 status
	 */
	private InputStream configureHttpPost(String curiString,
			HttpClient httpClient, HashMap<String, String> regparam)
			throws ClientProtocolException, IOException {

		List<NameValuePair> nvps = new ArrayList<NameValuePair>();
		for (Map.Entry<String, String> entry : regparam.entrySet()) {
			nvps.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
		}

		HttpPost httppost = new HttpPost(curiString);
		httppost.setEntity(new UrlEncodedFormEntity(nvps, HTTP.UTF_8));

		HttpResponse response = httpClient.execute(httppost);

		int iStatusCode = response.getStatusLine().getStatusCode();

		if (iStatusCode == HttpStatus.SC_OK) {
			// Fall back to the crawler-wide default when the response does
			// not declare a charset.
			String charset = EntityUtils
					.getContentCharSet(response.getEntity());
			if (charset == null) {
				charset = DEFAULTCHARSET;
			}
			String content = EntityUtils
					.toString(response.getEntity(), charset);
			// An intermediate page with a META refresh requires one more
			// request to reach the real target.
			String uri = this.getRedirectUrl(content);
			if (uri != null) {
				// BUG FIX: the original caught and swallowed exceptions from
				// this recursive call (printStackTrace only) and then fell
				// through to return the intermediate page as if it were the
				// final content; errors now propagate to the caller, which
				// the declared throws clause already covers.
				return this.configureHttpPost(uri, httpClient, regparam);
			}
			return new ByteArrayInputStream(content.getBytes("utf-8"));
		} else {
			throw new RuntimeException("http status error!" + iStatusCode);
		}
	}

	/**
	 * Extracts the redirect target from a META refresh tag, e.g.
	 * {@code <meta http-equiv="refresh" content="0;url=http://...">}.
	 *
	 * @param content the HTML page to inspect
	 * @return the text after the first '=' in the refresh directive, or
	 *         {@code null} when the page contains no refresh directive (or a
	 *         pure-delay one without a URL)
	 * @throws RuntimeException if parsing or XPath evaluation fails
	 */
	private String getRedirectUrl(String content) {
		try {
			DOMParser parser = new DOMParser();
			parser.setFeature("http://xml.org/sax/features/namespaces", false);
			parser.setProperty(
					"http://cyberneko.org/html/properties/names/elems",
					"upper");
			parser.setProperty(
					"http://cyberneko.org/html/properties/names/attrs",
					"lower");
			// BUG FIX: the original re-encoded the already-decoded String
			// with the platform default charset (content.getBytes()) before
			// wrapping it back into a reader; feeding the characters directly
			// avoids the lossy charset round-trip.
			parser.parse(new InputSource(new StringReader(content)));
			Document doc = parser.getDocument();
			NodeList nodes = (NodeList) XPathFactory
					.newInstance()
					.newXPath()
					.evaluate(
							"(//META[@http-equiv='REFRESH']|//META[@http-equiv='refresh'])/@content",
							doc, XPathConstants.NODESET);
			if (nodes == null || nodes.getLength() == 0) {
				return null;
			}
			// The attribute value is typically "<delay>;url=<target>".
			String nodevalue = nodes.item(0).getNodeValue();
			int eq = nodevalue.indexOf("=");
			if (eq < 0) {
				// BUG FIX: a pure-delay refresh (content="5") has no '=';
				// the original returned the whole value as if it were a URL.
				return null;
			}
			return nodevalue.substring(eq + 1);
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Executes the login POST, flattens the returned page to filtered HTML
	 * text and stores it on the crawl result.
	 *
	 * @param httpClient client used for the request
	 * @param crawlUrl the URL being processed; receives the result
	 * @param regparam login form parameters (name → value)
	 */
	private void handleHttp(HttpClient httpClient, CrawlUrl crawlUrl,
			HashMap<String, String> regparam) {
		try {

			Document document = this.nekoParser(configureHttpPost(crawlUrl
					.getUrl(), httpClient, regparam));
			StringBuffer buffer = new StringBuffer();
			this.transform(document, buffer);
			// Collapse runs of blank lines left behind by filtered nodes.
			String content = buffer.toString().replaceAll("(\r\n|\n\r){2,}",
					"\r\n");
			// (Removed System.out.println debug output of the full page.)

			CrawlResult result = (CrawlResult) crawlUrl.getResultParameter(
					ParameterKey.RESULT_CRAWL).getValue();

			// BUG FIX: use an explicit charset; the parameterless getBytes()
			// depends on the platform default encoding. The document was
			// decoded as UTF-8 upstream, so UTF-8 is used here.
			result.setContent(content.getBytes("UTF-8"));
			addResultParameter(ParameterKey.RESULT_CRAWL, result, crawlUrl);
		} catch (HtmlParserException e) {
			// BUG FIX: failures were only printed; also mark the URL as
			// failed so the framework does not treat it as fetched.
			e.printStackTrace();
			crawlUrl.registerProcessorResult(new FailProcessorResult());
		} catch (IOException ioe) {
			ioe.printStackTrace();
			crawlUrl.registerProcessorResult(new FailProcessorResult());
		}

	}

	/**
	 * Parses an HTML stream into a W3C DOM using the NekoHTML parser.
	 * Element names are normalized to upper case and attribute names to
	 * lower case; the input stream is always closed before returning.
	 *
	 * @param in UTF-8 encoded HTML stream (closed by this method)
	 * @return the parsed document
	 * @throws HtmlParserException if the HTML cannot be parsed or read
	 */
	private Document nekoParser(InputStream in) throws HtmlParserException {
		try {
			DOMParser parser = new DOMParser();

			parser.setFeature("http://xml.org/sax/features/namespaces", false);
			parser.setProperty(
					"http://cyberneko.org/html/properties/names/elems",
					"upper");
			parser.setProperty(
					"http://cyberneko.org/html/properties/names/attrs",
					"lower");

			parser.parse(new InputSource(new InputStreamReader(in, "utf-8")));
			return parser.getDocument();
		} catch (SAXException e) {
			// BUG FIX: keep the original exception as the cause instead of
			// flattening it to getMessage(); the (String, Throwable)
			// constructor exists but was not used, losing the stack trace.
			throw new HtmlParserException("parser html error-SAXException:"
					+ e.getMessage(), e);
		} catch (IOException e) {
			throw new HtmlParserException("read html source error:"
					+ e.getMessage(), e);
		} finally {
			try {
				in.close();
			} catch (IOException ignored) {
				// Best-effort close; nothing useful can be done here.
			}
		}
	}

	/**
	 * Thrown when NekoHTML fails to parse or read an HTML document.
	 *
	 * NOTE(review): this is a non-static inner class, so every instance
	 * carries a hidden reference to its enclosing FetchHttpReg (a leak and
	 * serialization hazard for an exception type) — consider declaring it
	 * {@code static} or moving it to its own file.
	 */
	public class HtmlParserException extends Exception {

		private static final long serialVersionUID = 1L;

		public HtmlParserException() {
		}

		public HtmlParserException(String message) {
			super(message);
		}

		public HtmlParserException(Throwable cause) {
			super(cause);
		}

		public HtmlParserException(String message, Throwable cause) {
			super(message, cause);
		}

	}

	/**
	 * Serializes a DOM subtree back to HTML text, dropping elements that
	 * commonly carry ads or non-content payloads (IFRAME, FRAME, SCRIPT,
	 * STYLE, LINK, NOSCRIPT, EMBED).
	 *
	 * @param node subtree root (document, element, text or attribute node)
	 * @param buffer receives the serialized markup
	 */
	private void transform(Node node, StringBuffer buffer) {
		if (node.getNodeType() == Node.TEXT_NODE) {
			String text = node.getNodeValue();
			if (text != null && !text.equals("")) {
				buffer.append(text);
			}
		} else if (node.getNodeType() == Node.ELEMENT_NODE) {
			String tag = node.getNodeName().toUpperCase();
			// Skip unwanted nodes (ad content usually lives in iframe/embed).
			// BUG FIX: the original compared against the misspelled tag
			// "FREAME", which can never match; "FRAME" was clearly intended.
			if (tag.equals("IFRAME") || tag.equals("FRAME")
					|| tag.equals("SCRIPT") || tag.equals("STYLE")
					|| tag.equals("LINK") || tag.equals("NOSCRIPT")
					|| tag.equals("EMBED")) {
				return;
			}
			buffer.append("<").append(tag);
			NamedNodeMap attrs = node.getAttributes();
			for (int i = 0; i < attrs.getLength(); i++) {
				Node attr = attrs.item(i);
				// NOTE(review): attribute values are not escaped — a value
				// containing a single quote breaks the generated markup.
				buffer.append(" ").append(attr.getNodeName().toLowerCase())
						.append("='").append(attr.getNodeValue()).append("'");
			}
			buffer.append(">");
			if (node.hasChildNodes()) {
				NodeList nodes = node.getChildNodes();
				for (int j = 0; j < nodes.getLength(); j++) {
					this.transform(nodes.item(j), buffer);
				}
			}
			buffer.append("</").append(tag).append(">");
		} else if (node.getNodeType() == Node.DOCUMENT_NODE) {
			// Recurse from the document element for whole-document input.
			this.transform(((Document) node).getDocumentElement(), buffer);
		} else if (node.getNodeType() == Node.ATTRIBUTE_NODE) {
			buffer.append(node.getNodeValue());
		}
	}

	/**
	 * Final handling for a 200 response: decodes the entity body and stores
	 * it on the crawl result. Registers a failure result when the body is
	 * empty or cannot be read.
	 *
	 * @param response the HTTP 200 response
	 * @param crawlUrl the URL being processed; receives the result
	 */
	private void handleOK(HttpResponse response, CrawlUrl crawlUrl) {

		CrawlResult result = (CrawlResult) crawlUrl.getResultParameter(
				ParameterKey.RESULT_CRAWL).getValue();

		HttpEntity httpEntity = response.getEntity();
		try {
			// (Removed an unused contentLength local and stale commented-out
			// logging left from an earlier revision.)

			// Decode with the crawler-wide fallback charset when the entity
			// does not declare one.
			String content = EntityUtils.toString(httpEntity, DEFAULTCHARSET);

			if (null == content || "".equals(content.trim())) {
				crawlUrl.registerProcessorResult(new FailProcessorResult());
				return;
			}
			// NOTE(review): getBytes() uses the platform default charset —
			// confirm what encoding downstream consumers of CrawlResult
			// expect before pinning one explicitly.
			result.setContent(content.getBytes());

			addResultParameter(ParameterKey.RESULT_CRAWL, result, crawlUrl);
		} catch (Exception e) {
			// BUG FIX: a decode failure previously left the URL looking
			// successful; record it as failed in addition to logging it.
			e.printStackTrace();
			crawlUrl.registerProcessorResult(new FailProcessorResult());
		}
	}

	/**
	 * Wraps the given value in a {@link ResultParameter} and registers it on
	 * the crawl URL so later pipeline processors can retrieve it by name.
	 *
	 * (The previous Javadoc was copied from Heritrix and documented
	 * nonexistent {@code curi}/{@code method} parameters.)
	 *
	 * @param parameterName key under which the value is registered
	 * @param parameterObject the value to register
	 * @param crawlUrl the URL whose result set receives the parameter
	 */
	private void addResultParameter(String parameterName,
			Serializable parameterObject, CrawlUrl crawlUrl) {

		ResultParameter resultParameter = new ResultParameter();
		resultParameter.setName(parameterName);
		resultParameter.setValue(parameterObject);

		crawlUrl.registerResultParameter(resultParameter);

	}

}
