package util.urlSpider;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import util.HtmlUnit.SpiderContentTxt;

/* Processes crawled page content: uses regular expressions to extract the URLs that need to be collected. */

public class SpiderRobotURL {

	// Truncation pattern for overlong URLs: matches the URL only up to a known
	// page suffix (.html, .shtml, ...). Compiled once instead of per URL.
	private static final Pattern LONG_URL_PATTERN = Pattern.compile(
			"http://.*\\.shtml|http://.*\\.html|http://.*\\.htm|http://.*\\.asp|http://.*\\.aspx|http://.*\\.httm|http://.*\\.bottom|http://.*\\.xhtml|http://.*\\.shtm|http://.*\\.php|http://.*\\.jsp|http://.*\\.jhtml|http://.*\\.dhtml");

	/**
	 * Processes every pending keyword row from the {@code keywordnofilter}
	 * table: downloads the search-result page for the keyword, extracts result
	 * URLs with {@link #urlPattern}, and inserts new (non-duplicate) URLs into
	 * the per-engine tag-url table named {@code "sec" + sourceNet}. Rows whose
	 * download fails, or whose page yields no URL, are recorded in the
	 * {@code badurl} table. All checked failures are logged; the method does
	 * not throw them.
	 */
	public void run() {
		System.setProperty("LOG_DIR", "./");
		Log logger = LogFactory.getLog(SpiderRobotURL.class);

		Connection conn = null;
		// plain statement for SQL built only from int values (badurl inserts, flag update)
		Statement stmt = null;
		// statement driving the outer keyword cursor
		Statement stmtGetSite = null;
		// parameterized statements for values that contain crawled text (SQL-injection safe)
		PreparedStatement lookupStmt = null;
		PreparedStatement insertStmt = null;
		ResultSet rsGetSite = null;
		ResultSet rs = null;
		SpiderContentTxt sct = new SpiderContentTxt();
		// id of the keyword row currently being processed
		int workingID = 0;
		// "search" is both the done-flag column in keywordnofilter and the name
		// of the per-engine tag-url table. sourceNet is an int, so embedding it
		// in SQL identifiers is injection-safe.
		String search = "sec" + sourceNet;

		try {
			Class.forName("com.mysql.jdbc.Driver");
			// NOTE(review): credentials are hard-coded in the JDBC URL; they
			// should be moved to external configuration.
			conn = DriverManager.getConnection("jdbc:mysql://" + host + "/" + dBName
					+ "?user=root&password=777888999a!&useUnicode=true&characterEncoding=utf8");
			stmt = conn.createStatement();
			stmtGetSite = conn.createStatement();
			lookupStmt = conn.prepareStatement("select id from " + search + " where look = ?");
			insertStmt = conn.prepareStatement("insert into " + search
					+ "(id,tag,url,look,sourceNet,subId,spider,pub,deltag,sequence) values (0,?,?,?,"
					+ sourceNet + ",?,0,0,0,?)");

			// Rows whose flag column is still 0 have not been processed yet.
			rsGetSite = stmtGetSite.executeQuery("select * from keywordnofilter where " + search + " = 0");
			while (rsGetSite.next()) {
				workingID = rsGetSite.getInt("id");
				// start of the crawl for this keyword, for timing the log line below
				long btime = System.currentTimeMillis();

				sct.setProxyString(proxyString);
				sct.setCookiesString(cookiesString);
				sct.setPagingRex(pagingRex);
				sct.setPagingRexHead(pagingRexHead);

				// Download with up to 5 attempts; a payload longer than 10
				// characters counts as success and ends the retry loop.
				String sitesString = null;
				int attemptsLeft = 5;
				while (attemptsLeft > 0) {
					sitesString = sct.SpiderPageContent(first
							+ URLEncoder.encode(rsGetSite.getString("keyword"), "utf-8") + end);
					if (sitesString != null && sitesString.length() > 10) {
						break;
					}
					attemptsLeft--;
				}

				if (sitesString == null || sitesString.length() < 2) {
					// Download failed (only a default/empty payload came back):
					// record the row in badurl for later inspection.
					stmt.execute("insert into badurl (id,badid,search) values (0," + workingID + ",'" + search + "')");
				} else {
					Pattern urlPat = Pattern.compile(urlPattern);
					// lookRex is loop-invariant; compile it once per page.
					Pattern lookPat = Pattern.compile(lookRex);
					Matcher m = urlPat.matcher(sitesString);
					int tagURLController = 0;
					while (m.find()) {
						String url = m.group(2).replaceAll("&amp;", "&");
						// "0" is the default because some tables declare the
						// look column as int; null would fail to insert there.
						String look = "0";
						Matcher lookMatcher = lookPat.matcher(url);
						logger.debug(url);
						while (lookMatcher.find()) {
							// keep the last match as the dedup key
							look = lookMatcher.group(2);
						}
						if (head.length() > 2) {
							url = head + url;
						}
						// Overlong URL: keep only the part up to a known page
						// suffix; if none matches, drop this URL entirely.
						if (url.length() > 1000) {
							Matcher longMatcher = LONG_URL_PATTERN.matcher(url);
							if (longMatcher.find()) {
								url = longMatcher.group();
							} else {
								continue;
							}
						}
						// Duplicate check: keeps a restarted crawl from
						// inserting the same URL twice.
						lookupStmt.setString(1, look);
						rs = lookupStmt.executeQuery();
						boolean alreadyStored = rs.next();
						rs.close();
						rs = null;
						if (!alreadyStored) {
							insertStmt.setString(1, rsGetSite.getString("keyword"));
							insertStmt.setString(2, url);
							insertStmt.setString(3, look);
							insertStmt.setInt(4, rsGetSite.getInt("subId"));
							insertStmt.setInt(5, tagURLController);
							insertStmt.execute();
						}
						tagURLController++;
						logger.info(tagURLController);
					}
					// tagURLController == 0 means not a single link was
					// extracted from this page: record it in badurl.
					if (tagURLController == 0) {
						stmt.execute("insert into badurl (id,badid) values (0," + workingID + ")");
					}
				}

				long etime = System.currentTimeMillis();
				// log how long this keyword took to process
				logger.info(dBName + "----" + search + "----" + workingID + "----使用了" + (etime - btime) + "毫秒----" + new Date());

				// Mark the keyword as processed so a restart skips it even if
				// this row produced errors (trades one row for continuity).
				stmt.execute("update keywordnofilter set " + search + " = 1 where id = " + workingID);
			}
		}
		// All exceptions are logged (with stack trace) and swallowed; the
		// caller is expected to restart the job after a pause.
		catch (ClassNotFoundException e) {
			logger.warn("-----ClassNotFoundException------", e);
		} catch (UnsupportedEncodingException e) {
			logger.warn("-----UnsupportedEncodingException------", e);
		} catch (SQLException e) {
			logger.warn("-----SQLException------", e);
		} finally {
			// conn is the single real connection; close dependents first.
			closeQuietly(rs);
			closeQuietly(rsGetSite);
			closeQuietly(stmt);
			closeQuietly(stmtGetSite);
			closeQuietly(lookupStmt);
			closeQuietly(insertStmt);
			closeQuietly(conn);
		}
	}

	/** Closes a result set if non-null, swallowing cleanup failures. */
	private static void closeQuietly(ResultSet resource) {
		if (resource != null) {
			try {
				resource.close();
			} catch (SQLException ignored) {
				// best effort: nothing useful can be done during cleanup
			}
		}
	}

	/** Closes a statement (plain or prepared) if non-null, swallowing cleanup failures. */
	private static void closeQuietly(Statement resource) {
		if (resource != null) {
			try {
				resource.close();
			} catch (SQLException ignored) {
				// best effort: nothing useful can be done during cleanup
			}
		}
	}

	/** Closes a connection if non-null, swallowing cleanup failures. */
	private static void closeQuietly(Connection resource) {
		if (resource != null) {
			try {
				resource.close();
			} catch (SQLException ignored) {
				// best effort: nothing useful can be done during cleanup
			}
		}
	}

	// --- configuration, injected via setters before run() is called ---

	String urlPattern = null;     // regex extracting result URLs; group(2) is the URL
	String host = null;           // MySQL host
	String dBName = null;         // MySQL database name
	String first = null;          // search-URL prefix, placed before the encoded keyword
	String end = null;            // search-URL suffix, placed after the encoded keyword
	String codeNo = null;         // not referenced by run(); presumably kept for config compatibility
	String head = null;           // optional prefix prepended to extracted URLs
	int sourceNet = 0;            // search-engine id; also names the "sec<N>" tag-url table
	String lookRex = null;        // regex extracting the dedup key; group(2) is the key
	String cookiesString = null;  // forwarded to SpiderContentTxt
	String proxyString = null;    // forwarded to SpiderContentTxt
	String pagingRex = null;      // forwarded to SpiderContentTxt
	String pagingRexHead = null;  // forwarded to SpiderContentTxt

	public void setLookRex(String lookRex) {
		this.lookRex = lookRex;
	}

	public void setSourceNet(int sourceNet) {
		this.sourceNet = sourceNet;
	}

	public void setHead(String head) {
		this.head = head;
	}

	public void setCodeNo(String codeNo) {
		this.codeNo = codeNo;
	}

	public void setFirst(String first) {
		this.first = first;
	}

	public void setEnd(String end) {
		this.end = end;
	}

	public void setHost(String host) {
		this.host = host;
	}

	public void setdBName(String dBName) {
		this.dBName = dBName;
	}

	public void setUrlPattern(String urlPattern) {
		this.urlPattern = urlPattern;
	}

	public void setCookiesString(String cookiesString) {
		this.cookiesString = cookiesString;
	}

	public void setProxyString(String proxyString) {
		this.proxyString = proxyString;
	}

	public void setPagingRex(String pagingRex) {
		this.pagingRex = pagingRex;
	}

	public void setPagingRexHead(String pagingRexHead) {
		this.pagingRexHead = pagingRexHead;
	}
}
