package com.eshop.common.util.io;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

// Web crawler for feiniu.com: walks a category's paginated product listing,
// visits each product detail page, and downloads its 800x800 image into a
// directory tree mirroring the site's breadcrumb path, named
// "<name>#<spec>#<price>.jpg".
public class FeiNiu implements Runnable {
	// Matches any HTML tag; used by filterHtml to strip markup.
	private final static String regxpForHtml = "<([^>]*)>";
	public static Pattern pattern = Pattern.compile(regxpForHtml);
	String startUrl = "http://www.feiniu.com/category/";// crawl entry point
	boolean caseSensitive = false;// case-sensitivity flag (currently unused)
	String type;// category id appended to startUrl, e.g. "C19059"
	public static final String RESTORE_PATH = "F:/goods/";

	public FeiNiu(String type) {
		this.type = type;
	}

	/** Thread entry point: crawl the configured category. */
	public void run() {
		crawl(type);
	}

	/**
	 * Strips all HTML tags from {@code str} and removes CR, LF and ':'
	 * characters (':' is illegal in the Windows file names later built from
	 * this text).
	 *
	 * @param str raw inner-HTML text, must not be null
	 * @return the tag-free, single-line, colon-free text
	 */
	public static String filterHtml(String str) {
		Matcher matcher = pattern.matcher(str);
		StringBuffer sb = new StringBuffer();
		while (matcher.find()) {
			matcher.appendReplacement(sb, "");
		}
		matcher.appendTail(sb);
		// Removing "\r" and "\n" individually already covers "\r\n", so the
		// original's third replaceAll("\r\n", "") pass was dead code.
		return sb.toString().replace("\r", "").replace("\n", "").replace(":", "");
	}

	/**
	 * Fetches {@code url} and returns its body concatenated onto one line
	 * (readLine drops the terminators). Errors are logged and yield whatever
	 * was read so far (possibly the empty string). The page is assumed UTF-8.
	 */
	private static String getHtmlData(String url) {
		StringBuilder sb = new StringBuilder();
		// try-with-resources: the original leaked the reader when readLine threw.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(getStream(url), StandardCharsets.UTF_8))) {
			String line;
			while ((line = br.readLine()) != null) {
				sb.append(line);
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
		return sb.toString();
	}

	/**
	 * Opens an HTTP connection to {@code url} with a browser User-Agent
	 * (presumably the site rejects the default Java agent) and returns the
	 * response stream. Timeouts keep a dead server from hanging the crawl.
	 */
	private static InputStream getStream(String url) throws IOException {
		System.out.println(url);
		HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
		conn.setConnectTimeout(10000);
		conn.setReadTimeout(10000);
		conn.setRequestProperty("User-agent", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 2.0.50727; Maxthon 2.0)");
		return conn.getInputStream();
	}

	/**
	 * Crawls every listing page of the given category. The total page count is
	 * read from "span.page_count"; each "div.listPic" entry is one product.
	 *
	 * @param type category id, e.g. "C19059"
	 */
	public void crawl(String type) {
		int page = 1;
		int pageCount = 1;
		while (page <= pageCount) {
			Document doc = Jsoup.parse(getHtmlData(startUrl + type + "?page=" + page));
			Elements counters = doc.select("span.page_count");
			if (counters.size() > 0) {
				try {
					// trim(): stray whitespace in the counter would otherwise
					// throw and abort the whole crawl.
					pageCount = Integer.parseInt(counters.get(0).html().trim());
				} catch (NumberFormatException e) {
					// Malformed counter: keep the current pageCount, keep going.
					e.printStackTrace();
				}
			}
			for (Element element : doc.select("div.listPic")) {
				getProductByUrl(element);
			}
			page++;
		}
		System.out.println(startUrl + " ===== is end……");
	}

	/**
	 * Fetches one product's detail page, extracts name / price / spec and the
	 * breadcrumb category path, then downloads the 800x800 product image.
	 * '/' and '*' in name/spec are rewritten because they are illegal in
	 * Windows paths. Products whose page lacks the expected elements are
	 * skipped instead of killing the crawler thread with an NPE.
	 *
	 * @param element a "div.listPic" element whose first child is the product
	 *                link and whose grandchild is the 200x200 thumbnail
	 */
	void getProductByUrl(Element element) {
		String url = element.child(0).attr("href");
		// The listing thumbnail is 200x200; the full-size image is 800x800.
		String src = element.child(0).child(0).attr("src").replace("200x200", "800x800");
		Document interDoc = Jsoup.parse(getHtmlData(url));
		Element detail = interDoc.getElementById("detailBox");
		if (detail == null) {
			// Fetch failed or the page layout changed: skip this product.
			System.out.println("skip (no detailBox): " + url);
			return;
		}
		String name = detail.children().first().html().replaceAll("/", "-").replaceAll("\\*", "_");
		Element priceFn = detail.getElementById("priceFn");
		if (priceFn == null) {
			System.out.println("skip (no priceFn): " + url);
			return;
		}
		String price = filterHtml(priceFn.getElementsByAttributeValue("itemprop", "highPrice").html());
		String spec = "";
		Elements specs = interDoc.getElementsByClass("spec");
		if (specs.size() > 0) {
			spec = filterHtml(specs.get(0).html()).replaceAll("/", "-").replaceAll("\\*", "_");
		}
		// Breadcrumb anchors (skipping the leading home link at index 0)
		// become the directory path under RESTORE_PATH.
		Elements crumbs = interDoc.getElementsByClass("topic-path").get(0).getElementsByTag("a");
		StringBuilder dirPath = new StringBuilder(RESTORE_PATH);
		for (int i = 1; i < crumbs.size(); i++) {
			dirPath.append(crumbs.get(i).html()).append("/");
		}
		String path = dirPath.toString();
		File dir = new File(path);
		if (!dir.exists()) {
			dir.mkdirs();
		}
		download(src, path + name + "#" + spec + "#" + price + ".jpg");
	}

	/**
	 * Downloads {@code urlString} to local file {@code filename}, streaming in
	 * 1 KiB chunks. I/O errors are logged and the method returns normally.
	 */
	public static void download(String urlString, String filename) {
		// try-with-resources closes both streams even when a read/write throws;
		// the original's finally block could leak the input stream if opening
		// the FileOutputStream failed after the connection was established.
		try (InputStream is = new URL(urlString).openConnection().getInputStream();
				OutputStream os = new FileOutputStream(filename)) {
			byte[] buffer = new byte[1024];
			int len;
			while ((len = is.read(buffer)) != -1) {
				os.write(buffer, 0, len);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/** Launches one crawler thread per category id. */
	public static void main(String[] args) {
		String[] types = { "C19059" };
		for (String type : types) {
			FeiNiu crawler = new FeiNiu(type);
			Thread search = new Thread(crawler);
			System.out.println(type + " Start searching...");
			search.start();
		}
	}
}
