package com.sentiment.crawler;

import java.io.File;
import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.log4j.Logger;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.nodes.Node;
import org.jsoup.nodes.TextNode;
import org.jsoup.select.Elements;
import org.jsoup.select.NodeTraversor;
import org.jsoup.select.NodeVisitor;

import com.sentiment.config.Config;
import com.sentiment.db.CrawlingPageDB;
import com.sentiment.db.TextDB;
import com.sentiment.obj.CrawlingPage;
import com.sentiment.obj.Text;
import com.sentiment.webcollector.util.CharsetDetector;
import com.sentiment.webcollector.util.FileUtils;

/**
 * Crawls CSDN blog-search results for a keyword: fetches the search page,
 * follows each result link, saves every linked article's HTML to disk, and
 * stores the extracted title/content plus page metadata into the database.
 *
 * @author Ma
 */
public class CSDNCrawler {

	// Pages collected during a crawl, persisted by getSearch() at the end.
	// NOTE(review): nothing in this class ever adds to this list, so the
	// insert loop in getSearch() is currently a no-op — confirm whether a
	// CrawlingPage should be created for each saved page.
	private List<CrawlingPage> li = new ArrayList<CrawlingPage>();

	// Use the class literal; the original instantiated the crawler solely to
	// obtain its Class object.
	private static final Logger logger = Config.getLogger(CSDNCrawler.class);

	// Monotonic counter used to generate unique HTML file names.
	static final AtomicInteger id = new AtomicInteger(0);

	/**
	 * Entry point: fetches the CSDN search page for a fixed keyword and visits
	 * each result link. Pages are not saved here; see {@link #getSearch(String)}
	 * for the full crawl-and-persist flow.
	 *
	 * @throws Exception on any connection or parse failure
	 */
	public static void main(String[] args) throws Exception {
		logger.info("CSDNCrawlerConnection");

		String keyword = "爬虫";
		String url = "http://so.csdn.net/so/search/s.do?" + "q="
				+ URLEncoder.encode(keyword, "utf-8") + "&t=blog&o=&s=";
		Document doc = Connect(url);

		// Collect the links on the search-result page.
		Elements links = doc.select(".search-link");
		for (Element e : links) {
			if (e.getAllElements().size() == 2) {
				Element ae = e.select("a[href]").first();
				String href = ae.attr("href");
				Connect(href); // fetch the article page; result intentionally unused here
			}
		}

		logger.info("CSDNCrawlerConnection finished");
	}

	/**
	 * Opens a connection to {@code url} and returns the parsed HTML document.
	 *
	 * @param url page address to fetch
	 * @return the parsed document
	 * @throws IOException if the connection or parse fails
	 */
	public static Document Connect(String url) throws IOException {
		logger.info("Connect url " + url);
		Connection connection = Jsoup.connect(url);
		connection.timeout(10000); // connection timeout in milliseconds
		// Send a browser User-Agent: CSDN rejects requests identifying as Java.
		connection.header("User-Agent", "Mozilla/4.0 (compatible; MSIE 5.0; Windows 7; DigExt)");
		return connection.get();
	}

	/**
	 * Searches CSDN blogs for {@code keyword}, saves each result page under
	 * {@code Config.htmlSavingPath}, extracts its content into the database,
	 * and finally persists the collected page metadata.
	 *
	 * @param keyword search term; URL-encoded as UTF-8
	 */
	public void getSearch(String keyword) {
		logger.info("CSDNCrawlerConnection");
		try {
			String url = "http://so.csdn.net/so/search/s.do?" + "q="
					+ URLEncoder.encode(keyword, "utf-8") + "&t=blog&o=&s=";
			Document doc = Connect(url);

			// Collect the links on the search-result page.
			Elements links = doc.select(".search-link");
			for (Element e : links) {
				if (e.getAllElements().size() == 2) {
					Element link = e.select("a[href]").first();
					String href = link.attr("href");
					Document article = Connect(href);

					// Serialize with an explicit charset; the bare getBytes()
					// used the platform default, which could disagree with the
					// encoding guessEncoding() detects below.
					byte[] content = article.html().getBytes(StandardCharsets.UTF_8);
					String encode = CharsetDetector.guessEncoding(content);

					String html = "page-" + id.getAndIncrement() + ".html";
					try {
						FileUtils.writeFileWithParent(Config.htmlSavingPath + html, content);
						logger.info("Save Page: " + html);
					} catch (IOException ex) {
						// Preserve the cause instead of logging only its text.
						logger.error("Failed to save page " + html, ex);
					}
					GetPageInfo(html, encode, href);
				}
			}
		} catch (Exception e) {
			// Log through the class logger rather than printStackTrace().
			logger.error("Crawling failed for keyword " + keyword, e);
		}
		CrawlingPageDB cpdb = new CrawlingPageDB();
		try {
			cpdb.cleanAll();
			for (CrawlingPage cp : li) {
				// Persist page encoding, URL and file name.
				cpdb.insertData(cp);
			}
		} finally {
			cpdb.DBClose(); // always release the DB handle
		}
		logger.info("CSDNCrawlerConnection  finished");
	}

	/**
	 * Parses a previously saved HTML file, extracts the article title, body
	 * text and post date, and stores title/content into the database.
	 *
	 * @param FileName file name under {@code Config.htmlSavingPath}
	 * @param encode   charset used to read the file
	 * @param url      original URL of the page (currently unused here)
	 */
	public void GetPageInfo(String FileName, String encode, String url) {
		String articleTitle = "";
		String articleContent = "";
		String articleTime = "";
		File fl = new File(Config.htmlSavingPath + FileName);
		try {
			Element article = Jsoup.parse(fl, encode);

			// BUG FIX: the title element was selected but never assigned, so
			// every article was stored with an empty title. Also guard against
			// a missing element instead of risking an NPE.
			Element title = article.select(".link_title").first();
			if (title != null) {
				articleTitle = title.text();
			}

			// Locate the article body; skip extraction if absent.
			Element contentTxt = article.select(".article_content").first();
			if (contentTxt != null) {
				// Prefer the <p> paragraphs when the body has any.
				Elements paragraphs = contentTxt.getElementsByTag("p");
				if (!paragraphs.isEmpty()) {
					StringBuilder buffer = new StringBuilder();
					for (Element p : paragraphs) {
						buffer.append(p.text()).append("\n");
					}
					articleContent = buffer.toString();
				} else {
					// Strip code blocks, then flatten the remaining nodes to text.
					contentTxt.select("pre").remove();
					articleContent = convertNodeToText(contentTxt);
				}
			}

			// Store the article into the database.
			SavePageDB(articleTitle, articleContent);

			// Extract the post date; some templates may omit it.
			Element time = article.select(".link_postdate").first();
			if (time != null) {
				articleTime = time.text();
			}
			logger.info("article " + articleTitle + "    articleTime:" + articleTime);
		} catch (Exception e) {
			// Log through the class logger rather than printStackTrace().
			logger.error("Failed to extract page info from " + FileName, e);
		}
	}

	/**
	 * Stores one article (title + content) into the text database, always
	 * closing the database handle.
	 */
	private void SavePageDB(String articleTitle, String articleContent) {
		TextDB tdb = new TextDB();
		try {
			Text tx = new Text(articleTitle, articleContent);
			tdb.insertData(tx);
			logger.info("-----将爬取页面内容存进数据库-----完成");
		} finally {
			tdb.DBClose(); // always release the DB handle
		}
	}

	/**
	 * Converts an element's subtree to plain text, inserting a newline when a
	 * block-level element or {@code <br>} follows visible text.
	 *
	 * @param element root of the subtree to flatten
	 * @return plain text with line breaks preserved
	 */
	public static String convertNodeToText(Element element) {
		final StringBuilder buffer = new StringBuilder();
		new NodeTraversor(new NodeVisitor() {
			boolean isNewline = true; // true while the buffer ends at a line start

			@Override
			public void head(Node node, int depth) {
				if (node instanceof TextNode) {
					TextNode textNode = (TextNode) node;
					// Normalize non-breaking spaces before trimming.
					String text = textNode.text().replace('\u00A0', ' ').trim();
					if (!text.isEmpty()) {
						buffer.append(text);
						isNewline = false;
					}
				} else if (node instanceof Element) {
					Element el = (Element) node;
					// Only emit a newline after visible text, never at line start.
					if (!isNewline && (el.isBlock() || el.tagName().equals("br"))) {
						buffer.append("\n");
						isNewline = true;
					}
				}
			}

			@Override
			public void tail(Node node, int depth) {
			}
		}).traverse(element);
		return buffer.toString();
	}

}
