package com.zhiletu.collect;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;

import com.zhiletu.collect.config.CollectConfig;
import com.zhiletu.collect.config.RuleConfig;
import com.zhiletu.collect.entry.ArticleEntry;
import com.zhiletu.collect.entry.CollectInfoEntry;
import com.zhiletu.collect.entry.CollectRuleEntry;
import com.zhiletu.collect.entry.CollectWordEntry;
import com.zhiletu.collect.entry.CollecterEntry;
import com.zhiletu.collect.splitword.SplitWord;
import com.zhiletu.collect.splitword.WordFilter;
import com.zhiletu.collect.util.DBUtil;
import com.zhiletu.collect.util.HtmlUtil;
import com.zhiletu.collect.util.ImgConverter;
import com.zhiletu.collect.util.InvokeProxy;
import com.zhiletu.collect.util.Jpegoptim;
import com.zhiletu.collect.util.ParseString;
import com.zhiletu.collect.util.Random;
import com.zhiletu.collect.util.RegExMatcher;
import org.ansj.domain.Term;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.HttpProtocolParams;
import org.apache.http.protocol.HTTP;
import org.apache.log4j.Logger;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

import org.springframework.scheduling.quartz.QuartzJobBean;

/**
 *
 * @Title: 采集器主类
 * @Package com.zhiletu.collect
 * @Description: 注意，是单线程分发！注意数据库和Java不一样，配置表中的正则表达式只能加一个\转义，否则匹配失败。比如：(<h1\s+id="artibodyTitle"\s+[^>]+\s*>)(.+)(</h1>) 是在数据库的配置
 * @author zhiletu.com
 * @date 2021年3月11日
 * @version V1.0
 */
public class Collecter extends QuartzJobBean {
	/** Absolute filesystem path of the web root. */
	public static String web_Root = null;

	/** Web attachment directory (fixed attachment directory, relative to the web root). */
	public static String web_upload_path = null;

	/** Upload sub-path pattern relative to the web root (must be a date/time format pattern). */
	public static String upload_path_format = null;

	/** Whether debug-level tracing is enabled. */
	public static boolean debug_Mode = false;

	/** Browser User-Agent to impersonate, to avoid HTTP 403 rejections. */
	public static String user_agent = null;

	private static Logger log = Logger.getLogger(Collecter.class);

	/**
	 * @desc 执行所有有效的规则 (遍历)
	 * @return
	 */
	/**
	 * Runs every rule flagged valid in collect_rule, one after another
	 * (collection is single-threaded by design). A failure in one rule is
	 * logged and does not stop the remaining rules.
	 *
	 * @throws Exception never in practice; kept for interface compatibility
	 */
	public static void execRule() throws Exception {
		log.info("开始采集数据……");
		List<Map<String, Object>> rules = RuleConfig.querySql("select * from collect_rule where is_valid='true'");
		for (Map<String, Object> rule : rules) {
			String name = null;
			try {
				name = ParseString.nvl(rule.get(CollectRuleEntry.ruleName));
				log.info("开始执行规则：" + name);
				Collecter.execRule2DB(ParseString.nvl(rule.get(CollectRuleEntry.ruleId)));
				log.info("结束执行规则：" + name);
			}
			catch (Throwable e) {
				// Deliberately broad: isolate each rule so one broken rule
				// cannot abort the whole batch.
				log.error("异常规则：" + name, e);
			}
		}
		log.info("采集数据完成");
	}

	/**
	 * @desc 指定规则编码，然后执行规则定义的采集任务并返回文章列表
	 * @param ruleId
	 * @return
	 */
	/**
	 * Executes the collection task defined by the given rule id and returns
	 * the collected article list (nothing is persisted here).
	 *
	 * @param ruleId id of the collect_rule row to execute
	 * @return list of collected articles, one map per article
	 * @throws RuntimeException when ruleId is null or empty
	 */
	public static List<Map<String, String>> execRule(String ruleId) {
		// Same empty-check helper as execRule2DB, for consistency.
		if (ParseString.isNull(ruleId)) {
			throw new RuntimeException("规则id为空……退出");
		}
		// Initialize the collector-wide configuration bound to this rule.
		initCollectConfig(ruleId);

		Map<String, Object> rule = RuleConfig.queryRuleConfigById(ruleId);

		return Collecter.execRule(rule);
	}

	/**
	 * @desc 指定规则编码，然后执行规则定义的采集任务并入库
	 * @param ruleId
	 * @return
	 */
	/**
	 * Executes the collection task defined by the given rule id and saves the
	 * resulting articles to the database.
	 *
	 * @param ruleId id of the collect_rule row to execute
	 * @return number of articles saved (0 when nothing was collected or no category is configured)
	 * @throws RuntimeException when ruleId is empty or the rule cannot be found
	 */
	public static int execRule2DB(String ruleId) throws Exception {
		if (ParseString.isNull(ruleId)) {
			throw new RuntimeException("规则id为空……退出");
		}
		// Initialize the collector-wide configuration bound to this rule.
		Map<String, Object> collecter = initCollectConfig(ruleId);

		Map<String, Object> rule = RuleConfig.queryRuleConfigById(ruleId);
		// Validate the rule BEFORE reading from it — the original code called
		// rule.get(...) first, which NPEs when the rule does not exist.
		if (rule == null || rule.isEmpty()) {
			throw new RuntimeException("规则为空：ruleId=" + ruleId);
		}
		String catId = ParseString.nvl(rule.get(CollectRuleEntry.catId));
		if ("".equals(catId)) {
			log.error("没有配置文章分类！终止采集");
			return 0;
		}
		List<Map<String, String>> artlist = Collecter.execRule(rule);
		if (artlist.isEmpty()) {
			return 0;
		}

		return CollectSave.save2DB(artlist, collecter, catId);
	}

	/* 必先调用,初始化规则对应的全局配置*/
	/**
	 * Loads the collector-wide configuration bound to a rule and publishes it
	 * into this class's static fields. Must be called before any collection run.
	 *
	 * @param ruleId rule id whose collector configuration should be loaded
	 * @return the raw configuration map, for callers that need extra fields
	 */
	private static Map<String, Object> initCollectConfig(String ruleId) {
		Map<String, Object> config = RuleConfig.queryCollectConfigByRuleId(ruleId);
		Collecter.web_Root = ParseString.nvl(config.get(CollecterEntry.webRoot));
		// "cond ? true : false" simplified to the condition itself.
		Collecter.debug_Mode = CollecterEntry.value_debugMode_on.equals(config.get(CollecterEntry.debugMode));
		Collecter.web_upload_path = ParseString.nvl(config.get(CollecterEntry.webUploadPath));
		Collecter.upload_path_format = ParseString.nvl(config.get(CollecterEntry.uploadPathFormat));
		Collecter.user_agent = CollectConfig.getProperty("USER_AGENT");
		if (ParseString.isNull(user_agent)) {
			// Fall back to a fixed Firefox UA so requests are not rejected with 403.
			user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:48.0) Gecko/20100101 Firefox/48.0";
		}

		return config;
	}

	/**
	 * @description 通过规则参数调用规则执行
	 * @param rule
	 * @return
	 */
	protected static List<Map<String, String>> execRule(Map<String, Object> rule) {
		if (rule.isEmpty()) {
			throw new RuntimeException("规则为空……退出");
		}
		int ruleId = Integer.parseInt(ParseString.nvl(rule.get(CollectRuleEntry.ruleId))); // 要执行的采集规则id，用于记录采集记录的索引，在采集百万级网页时优化索引
		String urlIndex = ParseString.nvl(rule.get(CollectRuleEntry.urlIndex)); // 采集索引页（主题列表）网址
		String charset = ParseString.nvl(rule.get(CollectRuleEntry.charset));  // 采集页面编码
		String preFix = ParseString.nvl(rule.get(CollectRuleEntry.preFix));  // 采集页面url前缀
		String picUrlReplacement = ParseString.nvl(rule.get(CollectRuleEntry.picUrlReplace));    // 图片url前缀替换字符串（如果是直接加url前缀则替换字符串是‘/’）
		String regEx = ParseString.nvl(rule.get(CollectRuleEntry.urlRegex));    //  待采集网址正则
		boolean caseInsensitive = str2bool(CollectRuleEntry.value_insensitive_yes, rule.get(CollectRuleEntry.insensitive));  //  正则匹配，true 表示忽略大小写
		String stratageRepeat = ParseString.nvl(rule.get(CollectRuleEntry.strategyRepeat)); // 防重复采集策略
		String titleRegx = ParseString.nvl(rule.get(CollectRuleEntry.titleRegex));    //  标题正则
		String contentRegx = ParseString.nvl(rule.get(CollectRuleEntry.contentRegex));  //  正文正则
		boolean needHandle = false; // 带采集网址是否需要二次处理
		boolean collectImg = str2bool(CollectRuleEntry.value_collectImg_yes, rule.get(CollectRuleEntry.collectImg)); // 是否采集图片
		boolean png2jpg = str2bool(CollectRuleEntry.value_png2jpg_yes, rule.get(CollectRuleEntry.png2jpg));    // png图片转格式jpg, 壓縮圖片必須轉格式
		String targetDomin = ParseString.nvl(rule.get(CollectRuleEntry.imgSrcDomain));  // 发布后图片src目标域名(eg. pic.tomrrow.com)
		int collectNum = Integer.parseInt(ParseString.nvl(rule.get(CollectRuleEntry.collectNum)));    // 采集条数
		String ruleName = ParseString.nvl(rule.get(CollectRuleEntry.ruleName));    //  规则名称
		boolean isReplaceSameWord = str2bool(CollectRuleEntry.value_sameWordR_yes, rule.get(CollectRuleEntry.sameWordR)); //是否同义词替换  
		boolean comeFrom = str2bool(CollectRuleEntry.value_comeFrom_yes, rule.get(CollectRuleEntry.comeFrom)); //  是否添加转载自  
		String modleHandleClassName = ParseString.nvl(rule.get(CollectRuleEntry.modleHandleClassName)); // 模型处理器实现类的名称 
		String modleHandleScript = ParseString.nvl(rule.get(CollectRuleEntry.modleHandleScript)); // 模型处理器执行的js
		String[] modleHandleConf = { modleHandleClassName, modleHandleScript };

		List<Map<String, String>> list = Collecter.info2file(ruleId, urlIndex, charset, preFix, regEx, caseInsensitive, stratageRepeat, titleRegx, contentRegx, needHandle, collectImg, picUrlReplacement, png2jpg, targetDomin, collectNum, modleHandleConf, isReplaceSameWord, comeFrom);
		log.info("规则采集完成：" + ruleName + "\n采集条数：" + list.size());

		urlIndex = charset = preFix = picUrlReplacement = regEx = titleRegx = contentRegx = targetDomin = ruleName = null;

		return list;
	}

	/** Returns true when the raw rule value equals the configured "enabled" marker string. */
	private static boolean str2bool(String strTrue, Object value) {
		boolean enabled = strTrue.equals(value);
		return enabled;
	}

	/**
	 * @description 返回抓取的所有文章
	 * @ruleId 执行的采集规则id
	 * @param urlIndex
	 * @param charset
	 * @param regEx
	 * @param caseInsensitive true表示不区分大小写
	 * @param stratageRepeat 防重复采集策略
	 * @param titleBef 正则表达式表示的标题 
	 * @param contentBef 正则表达式表示的正文 
	 * @param needHandle 需要二次处理
	 * @param collectImg 是否采集图片
	 * @param collectNum 采集条数
	 * @param png2jpg png图片转格式jpg
	 * @param modleHandleConf 模型处理器相关配置
	 * @param wordReplace 同义词开关
	 * @param comeFrom 来源
	 * @return List<Map < String, String>> 抓取的文章列表
	 */
	/**
	 * Collects the articles for a rule and, when enabled, downloads and rewrites
	 * the images referenced by each article body.
	 *
	 * @param ruleId id of the executing rule (indexes the collect log)
	 * @param urlIndex index page URL
	 * @param charset page charset
	 * @param preFix URL prefix for relative article/image links
	 * @param regEx article URL regex
	 * @param caseInsensitive true = case-insensitive regex matching
	 * @param stratageRepeat duplicate-collection strategy
	 * @param titleBef title regex
	 * @param contentBef body regex
	 * @param needHandle whether article URLs need a second extraction pass
	 * @param collectImg whether to download and rewrite images
	 * @param picUrlReplacement image URL part to replace ("/" means plain prefixing)
	 * @param png2jpg convert png/gif to jpg before compression
	 * @param targetDomin published image src domain (e.g. pic.tomrrow.com)
	 * @param collectNum number of articles to collect
	 * @param modleHandleConf model handler configuration (class name, script)
	 * @param wordReplace synonym replacement switch
	 * @param comeFrom append "reposted from" footer?
	 * @return collected articles (possibly empty)
	 */
	protected static List<Map<String, String>> info2file(int ruleId, String urlIndex, String charset, String preFix, String regEx, boolean caseInsensitive, String stratageRepeat, String titleBef, String contentBef, boolean needHandle,
			boolean collectImg, String picUrlReplacement, boolean png2jpg, String targetDomin, int collectNum, String[] modleHandleConf, boolean wordReplace, boolean comeFrom) {

		List<Map<String, String>> artlist = Collecter.info2file(ruleId, urlIndex, charset, preFix, regEx, caseInsensitive, stratageRepeat, titleBef, contentBef, needHandle, collectNum, modleHandleConf, wordReplace, comeFrom);
		if (artlist.isEmpty()) {
			return artlist;
		}

		if (collectImg) {
			String uploadPath = web_upload_path + subfilepath() + "/";
			// Local directory the images are saved into.
			String savePath = Collecter.web_Root + uploadPath;
			// Public src prefix for published images (e.g. http://pic.tomrrow.com/2015/08/).
			String srcpre = "https://" + targetDomin + "/" + uploadPath;
			// Default compression target: ~30 KB. Uppercase L suffix — the lowercase
			// "l" in the original is easily misread as the digit 1.
			long targetSize = 30000L;
			List<Object[]> pics = new ArrayList<Object[]>();
			for (Map<String, String> art : artlist) {
				String content = Collecter.picHandler(art, picUrlReplacement, preFix, savePath, png2jpg, pics, srcpre, targetSize);
				art.put(ArticleEntry.articleContent, content);
			}

			try {
				if (!pics.isEmpty())
					Collecter.downloadFail(pics);
			}
			catch (Exception e) {
				log.error("记录下载失败图片异常：" + e);
			}
		}

		return artlist;
	}

	/*
	 * @Title: picHandler
	 * @Description: 处理一篇文章源码内的图片
	 * @param art 文章实体
	 * @param picUrlReplacement 图片URL待替换部分
	 * @param preFix 图片所在网站的URL前缀，用于追加到图片相对路径上组成完整的URL，便于下载图片到本地
	 * @param savePath 保存路径
	 * @param png2jpg 是否转jpg压缩
	 * @param pics 记录下载失败的图片
	 * @param srcpre web发布图片src前缀（eg.http://pic.tomrrow.com/2015/08/）
	 * @param targetSize 图片默认压缩目标30k
	 * @return String 处理后的文章源码
	 */
	/*
	 * @Title: picHandler
	 * @Description: Processes every image inside one article's HTML source:
	 * downloads each image locally, optionally converts/compresses it, and
	 * rewrites the img src to the published location.
	 * @param art article entity
	 * @param picUrlReplacement image URL part to replace ("/" means: just prepend preFix)
	 * @param preFix URL prefix of the source site, prepended to relative image paths for download
	 * @param savePath local directory to save images into
	 * @param png2jpg whether to convert to jpg for compression
	 * @param pics out-parameter collecting images that failed to download
	 * @param srcpre published image src prefix (e.g. http://pic.tomrrow.com/2015/08/)
	 * @param targetSize compression target in bytes (default ~30k)
	 * @return String the rewritten article source
	 */
	private static String picHandler(Map<String, String> art, String picUrlReplacement, String preFix, String savePath, boolean png2jpg, List<Object[]> pics, String srcpre, long targetSize) {
		// Regex capturing whole <img ...> tags.
		String imgReg = "<\\s*(img|IMG)\\s+([^>]+)\\s*([/]?)>";

		String content = art.get(ArticleEntry.articleContent);

		List<String> imglist = RegExMatcher.findMatchGroup(imgReg, content, false);

		for (String img : imglist) {
			try {
				String imgpre = "";
				String subpre = "src=\"";
				int start = 0;
				// Normalize single-quoted attributes so the src=" probe below works.
				String img1 = img.replaceAll("'", "\"");
				// imgpre = everything up to and including src="; subimg = the rest.
				imgpre = img1.substring(0, (start = img1.indexOf(subpre) + subpre.length()));
				String subimg = img1.substring(start);
				// imgurl = the src attribute value; imgfix = the tag tail after the closing quote.
				String imgurl = subimg.substring(0, (start = subimg.indexOf("\"")));
				if (ParseString.notNull(picUrlReplacement)) {// rewrite the image URL prefix
					// NOTE(review): replaceAll treats picUrlReplacement as a regex —
					// confirm configured values never contain metacharacters.
					if (!"/".equals(picUrlReplacement))
						imgurl = imgurl.replaceAll(picUrlReplacement, preFix);
					else
						imgurl = preFix + imgurl;
				}
				String imgfix = subimg.substring(start);
				String imgname = imgurl.substring(imgurl.lastIndexOf("/") + 1).toLowerCase();

				try {
					// Fix names like "photo.jpg?x=1": truncate after a known extension.
					String[] fix = { ".jpg", ".png", ".gif", ".jpeg", ".bmp" };
					for (String f : fix) {
						if (imgname.endsWith(f))
							break;
						int temp = 0;
						if ((temp = imgname.indexOf(f)) > 0) {
							imgname = imgname.substring(0, temp + f.length());
							log.info("修正一个图片扩展名：" + imgname);
							imgurl = imgurl.substring(0, imgurl.indexOf(imgname) + imgname.length());
							f = null;
							break;
						}
						f = null;
					}
					fix = null;
				}
				catch (Exception e) {
					log.error(e);
				}
				// Download the original first; convert afterwards and delete the source.
				Collecter.download(imgurl, imgname, savePath);
				// Throttle: 1s pause between image downloads.
				Thread.sleep(1000);
				try {
					if (!new File(savePath + imgname).canRead()) {// record failures: pic_url,img_name,post_title,from_url
						pics.add(new Object[] { imgurl, imgname, art.get(ArticleEntry.articleTitle), art.get(ArticleEntry.fromUrl) });
						log.info("下载失败：" + imgurl);
					}
				}
				catch (Exception e) {
					e.printStackTrace();
				}
				String newimg = imgpre + srcpre + imgname + imgfix;
				// Physical format conversion.
				boolean convert = false;
				if (png2jpg && imgname != null) {
					if (!imgname.endsWith(".jpg") && !imgname.endsWith(".jpeg")) {
						// Convert to jpg because jpg compresses to very small sizes.
						String source = savePath + imgname;
						imgname = imgname.replaceAll("png", "jpg");
						imgname = imgname.replaceAll("gif", "jpg");

						String result = savePath + imgname;
						String resultrelease = savePath + "release-" + imgname;

						try {
							File sou = new File(source);
							// Skip pngs already near the target size (~40k tolerance).
							if (sou.canRead() && sou.length() > targetSize + 10)
								convert = ImgConverter.convert(source, "jpg", result, resultrelease, true);
							result = null;
							resultrelease = null;
						}
						catch (Exception e) {
							log.error(e);
						}
						if (convert) {// only rename in the HTML when conversion succeeded
							newimg = imgpre + srcpre + imgname + imgfix;

							// Compress the converted jpg towards the (configured) 30k byte target.
							String realfilepath = savePath + imgname;
							jpegotim(realfilepath, targetSize);
							realfilepath = null;
						}
					}
				}

				// Fill empty alt attributes with a site marker.
				newimg = newimg.replaceAll("alt=\"\\s*\"", "alt=\"zhiletu\"");

				// NOTE(review): replaceAll treats the matched tag as a regex — an img
				// tag containing '?', '(' or '+' will not be replaced (or may throw,
				// caught below). Pattern.quote/Matcher.quoteReplacement would be safer.
				content = content.replaceAll(img, newimg);

				if (debug_Mode)
					log.info("锁定图片：" + imgurl + "\n新图片：" + newimg);

				imgpre = imgname = imgfix = null;
			}
			catch (Throwable e) {
				log.error("处理图片异常：", e);
			}
		}

		// No images found: prepend a random default picture so a featured image can be generated.
		if (imglist.size() == 0) {
			try {
				content = "<img width=\"400\" height=\"300\" src=\"https://pic.zhiletu.com/" + randPicWithRPath() + "\"/><br>" + content;
			}
			catch (Exception e) {
				e.printStackTrace();
			}
		}
		// Optionally append the official-account promo image (disabled).
		//content += "<p><img class=\"alignnone size-full wp-image-7133\" src=\"https://pic.zhiletu.com/2020/09/zltdcode.jpg\" alt=\"智乐兔科技\" width=\"100\" height=\"100\" /></p>";

		imglist = null;

		return content;
	}

	/**
	 * 随机获取一张带相对路径的图片名称
	 * @return
	 * @throws Exception
	 */
	/**
	 * Picks a random default picture (with its relative path) from the local
	 * travel upload directory; used when an article has no image of its own.
	 *
	 * @return picture name prefixed with its relative path, e.g. "travel/x.jpg"
	 * @throws Exception when the directory cannot be listed or is empty
	 */
	private static String randPicWithRPath() throws Exception {
		// NOTE(review): hard-coded deployment path — consider moving to configuration.
		File file = new File("/mnt/webroot/blog/wp-content/uploads/travel/");
		File[] ms = file.listFiles();
		// listFiles() returns null when the path is missing or not a directory;
		// fail with a clear message instead of an opaque NullPointerException.
		if (ms == null || ms.length == 0) {
			throw new Exception("no default pictures available under " + file);
		}
		int c = (int) Random.getRandCustom(ms.length - 1);

		return "travel/" + ms[c].getName();
	}

	/** 执行图片压缩 
	 * @param currfilepath 绝对路径
	 * @param targetSize 目标字节数,单位byte
	 * */
	/** Compresses a JPEG in place down to roughly the requested size.
	 * @param currfilepath absolute path of the image file
	 * @param targetSize target size in bytes
	 * */
	public static void jpegotim(String currfilepath, long targetSize) {
		Jpegoptim.jpegotim(currfilepath, targetSize);
	}

	/** 子文件夹路径（按年/月/日 组织目录）*/
	/** Builds the date-based upload sub-directory path (e.g. year/month/day) from the configured pattern. */
	private static String subfilepath() {
		return new java.text.SimpleDateFormat(Collecter.upload_path_format).format(new Date());
	}

	/**
	 * @desc 返回抓取的所有文章
	 * @ruleId 执行的采集规则id
	 * @param urlIndex 网址索引页URL
	 * @param charset
	 * @param regEx
	 * @param caseInsensitive true表示不区分大小写
	 * @param stratageRepeat  防重复采集策略
	 * @param titleBef 正则表达式表示的标题 
	 * @param contentBef 正则表达式表示的正文 
	 * @param needHandle 需要二次处理
	 * @param collectNum 采集条数
	 * @param modleHandleConf 模型处理器相关配置，对文章的加工处理，支持从前端脚本干预
	 * @param wordReplace 同义词开关
	 * @return
	 */
	/**
	 * Fetches the article pages matched by a rule and returns the parsed articles.
	 *
	 * @param ruleId id of the executing rule (indexes the collect log)
	 * @param urlIndex index page URL
	 * @param charset configured page charset (the response header may override it)
	 * @param preFix URL prefix for relative article links
	 * @param regEx article URL regex
	 * @param caseInsensitive true = case-insensitive regex matching
	 * @param stratageRepeat duplicate-collection strategy
	 * @param titleBef title regex
	 * @param contentBef body regex
	 * @param needHandle whether article URLs need a second extraction pass
	 * @param collectNum number of articles to collect
	 * @param modleHandleConf model handler configuration (may postprocess the source)
	 * @param wordReplace synonym replacement switch
	 * @param comeFrom append "reposted from" footer?
	 * @return parsed articles (possibly empty); per-article failures are logged and skipped
	 */
	public static List<Map<String, String>> info2file(int ruleId, String urlIndex, String charset, String preFix, String regEx, boolean caseInsensitive, String stratageRepeat, String titleBef, String contentBef, boolean needHandle, int collectNum, String[] modleHandleConf, boolean wordReplace, boolean comeFrom) {
		// Collect candidate article URLs from the index page.
		List<String> hflist = Collecter.findHerfList(urlIndex, regEx, caseInsensitive, charset);
		if (hflist.isEmpty()) {
			log.info("url匹配为空");
			return new ArrayList<Map<String, String>>();
		}
		// Drop URLs that were already collected (duplicate protection).
		hflist = Collecter.sitefilter(hflist, preFix, stratageRepeat);
		List<Map<String, String>> artlist = new ArrayList<Map<String, String>>();
		if (hflist.isEmpty()) {
			log.info("未匹配url……");
			return artlist;
		}
		for (int x = 0; x < hflist.size() && x < collectNum; x++) {
			try {
				String arthrf = hflist.get(x);
				if (needHandle)
					arthrf = handleHrf(arthrf);
				if (ParseString.isNull(arthrf))
					continue;
				String realHref = ParseString.nvl(preFix) + arthrf;

				// The page may declare a charset different from the configured one;
				// probe the response headers first.
				DefaultHttpClient httpclient = new DefaultHttpClient();
				String relcharset = null;
				try {
					HttpProtocolParams.setUserAgent(httpclient.getParams(), Collecter.user_agent);
					HttpGet httpget = new HttpGet();
					httpget.setURI(new java.net.URI(realHref));
					HttpResponse response = httpclient.execute(httpget);
					HttpEntity entity = response.getEntity();
					relcharset = HtmlUtil.getContentCharSet(entity);
				}
				finally {
					// Release the connection held by the probe request; the original
					// code leaked one client per article.
					httpclient.getConnectionManager().shutdown();
				}
				// Fall back: configured charset, then the HTTP default.
				if (relcharset == null) {
					relcharset = charset;
				}
				if (relcharset == null) {
					relcharset = HTTP.DEFAULT_CONTENT_CHARSET;
				}
				StringBuffer source = Collecter.readStream(realHref, false, relcharset);
				if (debug_Mode)
					log.info("采集数据为：\n" + source);
				Map<String, String> art = Collecter.makeArtcle(realHref, titleBef, contentBef, source, modleHandleConf, wordReplace, comeFrom);
				// Check BEFORE putting srcCache: the original added srcCache first,
				// which made the map non-empty and the check dead (and its
				// "isEmpty() || == null" order would NPE on a null map).
				if (art == null || art.isEmpty()) {
					throw new RuntimeException("未采集到网页，请检查配置。");
				}
				art.put(ArticleEntry.srcCache, source.toString());
				if (debug_Mode)
					log.info(art);
				artlist.add(art);
				// Record the success so the URL is not collected again.
				recordCollect(arthrf, ruleId, art);
				// Throttle: 5s pause between article fetches.
				Thread.sleep(5000);
			}
			catch (Exception e) {
				log.error("处理文章异常", e);
			}
		}

		return artlist;
	}

	/* 过滤已采集网址*/
	/** Removes URLs that were already collected, according to the duplicate-collection strategy. */
	private static List<String> sitefilter(List<String> hflist, String preFix, String strategyRepeat) {
		List<String> restlist = new ArrayList<String>();
		for (String arthrf : hflist) {
			// The rule_id index matters once collect_info grows to millions of rows.
			StringBuilder sql = new StringBuilder("select id from collect_info where is_collect= ? and site_url = ? ");
			if (CollectRuleEntry.value_strategy_repeat_day.equals(strategyRepeat)) {
				// Per-day strategy: the same URL may be collected again on a new day
				// (typically data feeds/reports whose URL never changes).
				sql.append(" and DATE_FORMAT(date_time, '%Y-%c-%d')=DATE_FORMAT(now(), '%Y-%c-%d')");
			}
			List<Map<String, Object>> hits = RuleConfig.querySql(sql.toString(), new Object[] { CollectInfoEntry.value_isCollect_yes, arthrf });
			if (hits.isEmpty()) {
				restlist.add(arthrf);
			}
		}
		if (debug_Mode)
			log.info("防重复采集过滤完毕……");
		if (restlist.isEmpty())
			log.info("未找到可采集url，请明天再采");
		return restlist;
	}

	/* 记录采集成功网址*/
	/** Inserts a collect_info row marking this URL as successfully collected. */
	private static void recordCollect(String arthrf, int ruleId, Map<String, String> art) {
		final String sql = "insert into collect_info(id, site_url, is_collect, rule_id, src_cache, pre_cache, hot_word) values(null, ?, ?, ?, ?, ?, ?)";

		RuleConfig.update(sql, new Object[] {
				arthrf,
				CollectInfoEntry.value_isCollect_yes,
				ruleId,
				ParseString.nvl(art.get(ArticleEntry.srcCache)),
				ParseString.nvl(art.get(ArticleEntry.preCache)),
				ParseString.nvl(art.get(ArticleEntry.articleTag)) });
	}

	/*
	 * @description 下载失败记录
	 * @desc 適配wordpress
	 */
	/*
	 * @description Records images that failed to download (wordpress-compatible table).
	 * Inserts the whole batch in a single transaction and rolls back on any failure.
	 * @param pics rows of {pic_url, img_name, post_title, from_url}
	 * @return number of rows inserted
	 */
	private static int downloadFail(List<Object[]> pics) throws Exception {
		if (pics.isEmpty()) {
			return 0;
		}

		String insert = "INSERT INTO downrecord (pic_url,img_name,post_title,from_url) VALUES (?, ?, ?, ?)";

		String url = CollectConfig.getDBname();
		String username = CollectConfig.getUsername();
		String passwd = CollectConfig.getPasswd();

		Connection conn = DBUtil.getConnection(url, username, passwd);
		PreparedStatement stmt = null;
		int num = 0;
		try {
			conn.setAutoCommit(false); // one transaction for the whole batch
			stmt = conn.prepareStatement(insert);
			num = DBUtil.batchUpdate(pics, stmt);
			conn.commit();
		}
		catch (Exception e) {
			conn.rollback();
			throw e;
		}
		finally {
			// stmt stays null if prepareStatement() itself failed; the original
			// unguarded close() threw NPE here, masking the real exception.
			if (stmt != null) {
				stmt.close();
			}
			conn.close();
		}

		return num;
	}

	/**
	 * 特殊处理
	 * @param arthrf
	 * @return
	 */
	/**
	 * Extracts the href value from an anchor fragment such as {@code <a href="...">...}.
	 *
	 * @param arthrf raw matched anchor text
	 * @return substring between the first double quote and the closing {@code ">},
	 *         or null when the markers are absent or out of order
	 */
	private static String handleHrf(String arthrf) {
		// <a href="">...</a>
		// NOTE(review): substring(start, end) starts AT the first quote, so the
		// returned value keeps a leading '"' character — looks like an off-by-one
		// (start + 1 expected); confirm against the rules that set needHandle
		// before changing.
		int start = arthrf.indexOf("\"");
		int end = arthrf.indexOf("\">");
		if (end > start && start > -1)
			return arthrf.substring(start, end);
		else
			return null;
	}

	/**
	 * @desc 在指定的页面上查找一组符合规则的超链接列表
	 * @param urlIndex 带采集网址索引页
	 * @param regEx 带采集网址匹配规则
	 * @param caseInsensitive true表示不区分大小写
	 * @param charset
	 * @return
	 */
	/**
	 * Finds the hyperlinks on the index page that match the rule's URL regex.
	 *
	 * @param urlIndex index page URL to scan
	 * @param regEx URL regex; the special "empty" marker means only the index page itself is collected
	 * @param caseInsensitive true = case-insensitive matching
	 * @param charset charset used to read the index page
	 * @return matched URLs (possibly empty)
	 */
	public static List<String> findHerfList(String urlIndex, String regEx, boolean caseInsensitive, String charset) {
		// No URL regex configured: collect the current index page only.
		if (CollectRuleEntry.value_url_regex.equals(regEx)) {
			List<String> single = new ArrayList<String>();
			single.add(urlIndex);
			return single;
		}

		String source = Collecter.readStream(urlIndex, true, charset).toString();
		List<String> hrefList = RegExMatcher.findMatchGroup(regEx, source, caseInsensitive);
		if (debug_Mode) {
			for (String u : hrefList) {
				log.info("成功匹配：" + u);
			}
		}
		return hrefList;
	}

	/**
	 *
	 * @Title: genArticle
	 * @Description: wordpress已使用主题functions.php加入自动特色图片生成功能
	 * @return void
	 */
	/**
	 *
	 * @Title: genArticle
	 * @Description: Placeholder — WordPress already generates featured images via
	 * the theme's functions.php (reference snippet kept below).
	 * @return void
	 */
	public static void genArticle() {
		// Parse and build the article entity: title, body, category, author, etc.
		// title reg:    (<h>)(.+)(</h>)
		// body reg:     (<b>)(.+)(</b>)
		// category reg: (<c>)(.+)(</c>)
		// Post-process images on the entity: default picture, official-account QR code, etc.

		// Persist the article; verify the PHP-theme featured-image scheme works —
		// otherwise featured images must be created in the backend.
		
		/* Add the code below to the current theme's functions.php to auto-create the
		   featured image from an article's image attachment. Requires the article to
		   have an image attachment (insert an extra post of type image whose
		   post_parent is the current article id).
		function wpforce_featured() {
		    global $post;
		    $already_has_thumb = has_post_thumbnail($post->ID);
		    if (!$already_has_thumb)  {
		        $attached_image = get_children( "post_parent=$post->ID&post_type=attachment&post_mime_type=image&numberposts=1" );
		        if ($attached_image) {
		                foreach ($attached_image as $attachment_id => $attachment) {
		                set_post_thumbnail($post->ID, $attachment_id);
		            }
		        } else {
		            set_post_thumbnail($post->ID, mt_rand(10241, 10552));// id of a media-library image
		        }		        
		    }
		}  //end function
		add_action('the_post', 'wpforce_featured');
		add_action('save_post', 'wpforce_featured');
		add_action('draft_to_publish', 'wpforce_featured');
		add_action('new_to_publish', 'wpforce_featured');
		add_action('pending_to_publish', 'wpforce_featured');
		add_action('future_to_publish', 'wpforce_featured');
		*/
	}


	/**
	 * @Title: makeArtcle
	 * @Description: 按指定标记截取title、content
	 * @param arthrf 文章引用来源
	 * @param titleBef 正则表达式表示的标题前标志
	 * @param contentBef 正则表达式表示的正文前标志
	 * @param source 文章源码（一般是采集的结果）
	 * @param modleHandleConf 模型处理器相关配置，如果不为空，负责按处理器职能加工内容，比如正文格式化输出
	 * @param wordReplace 同义词替换开关
	 * @param comeFrom 是否标记转载自
	 * @return Map<String, String>
	 */
	/**
	 * @Title: makeArtcle
	 * @Description: Extracts title and content from the page source using the configured regexes.
	 * Both regexes are expected to have at least two groups, with group(2) capturing the target text.
	 * @param arthrf article source URL (stored as from-URL and used for the repost footer)
	 * @param titleBef regex locating the title
	 * @param contentBef regex locating the body
	 * @param source page source (usually the collected page)
	 * @param modleHandleConf model handler configuration; when present, preprocesses the source
	 *        via a dynamically invoked handler (e.g. body formatting)
	 * @param wordReplace synonym replacement switch
	 * @param comeFrom whether to append a "from: &lt;url&gt;" footer
	 * @return Map<String, String> article fields; returned partially filled (possibly empty)
	 *         when the title or content regex does not match
	 */
	public static Map<String, String> makeArtcle(String arthrf, String titleBef, String contentBef, StringBuffer source, String[] modleHandleConf, boolean wordReplace, boolean comeFrom) {
		if (source == null || source.length() == 0)
			throw new RuntimeException("get nothing ^^");
		String src = source.toString();
		// 0 title, 1 content
		Map<String, String> art = new HashMap<String, String>();

		// Title/content preprocessing via the configured model handler.
		if (null != modleHandleConf && modleHandleConf.length > 1) {
			try {// dynamic-proxy bypass; default handler: "com.zhiletu.collect.collect.ModleHandleArticleFormat"
				src = (String) new InvokeProxy("com.zhiletu.collect.collect.ModleHandleArticleFormat",
						"handle", new Object[] { src, modleHandleConf[1], modleHandleConf[0] }, new Class[] { String.class, String.class, String.class })
						.invoke();
				art.put(ArticleEntry.preCache, src);
			}
			catch (Exception e) {
				throw new RuntimeException(e);
			}
		}

		Matcher mt = RegExMatcher.getMatcher(titleBef, src, false);
		if (mt.find()) { // single regex match; group(2) carries the title text
			String title = mt.group(2);

			if (!ParseString.isNull(title)) {
				if (wordReplace)
					title = Collecter.sameWordReplace(wordReplace, title, true);
				art.put(ArticleEntry.articleTitle, title);
			}
			else {
				throw new RuntimeException("do not get the title……！");
			}
			title = null;
		}
		else {
			// Title not found: return the (empty) map instead of failing hard.
			log.info("未找到标题，返回空集合");
			src = null;
			return art;
		}

		mt = RegExMatcher.getMatcher(contentBef, src, false);
		if (mt.find()) { // single regex match; group(2) carries the body text
			String content = mt.group(2);
			if (!"".equals(ParseString.nvl(content))) {
				// Strip html/script/style, then tokenize into words longer than 2 chars (English kept).
				List<Term> terms = SplitWord.getTermsFiltEn(ParseString.clearHtmlMark(content), false);
				if (terms != null && !terms.isEmpty()) {
					// Synonym replacement — avoids duplicate-content penalties by rewriting keywords.
					if (wordReplace) {
						content = Collecter.sameWordReplace(wordReplace, terms, content, false);
					}
					else { // no replacement, but still sample the dictionary
						SplitWord.saveWordDict(terms);
						log.info("高频词汇采样保存词典完成");
					}

					// Tag the article from its high-frequency words; top 5 by default.
					List<String> wdtags = SplitWord.getWordTagByTop(terms, 5, true);
					String tags = "";
					if (wdtags.size() > 0) {
						// Strip the surrounding brackets from List.toString().
						tags = wdtags.toString().trim();
						tags = tags.substring(1, tags.length() - 1);
					}
					art.put(ArticleEntry.articleTag, tags);
				}

				if (comeFrom)
					content += "<p>from:" + arthrf + "</p>";

				art.put(ArticleEntry.articleContent, content);
				art.put(ArticleEntry.fromUrl, arthrf);
			}
			else {
				throw new RuntimeException("do not get the content……！");
			}
			content = null;
		}
		else {
			// Content not found: return the partial map (title only).
			log.info("未找到内容，返回空集合");
			src = null;
			return art;
		}

		src = null;

		return art;
	}

	/**
	 * 批量替换关键词
	 * @param isReplaceSameWord 开关
	 * @param terms 词汇列表
	 * @param content 文章内容
	 * @return
	 */
	/**
	 * Replaces high-frequency words in the content with configured synonyms.
	 *
	 * @param isReplaceSameWord replacement switch (callers gate on it; kept for interface compatibility)
	 * @param terms tokenized word list of the content
	 * @param content text to rewrite
	 * @param istitle true when rewriting a title (samples 100% of the words)
	 * @return content with synonyms substituted
	 */
	public static String sameWordReplace(boolean isReplaceSameWord, List<Term> terms, String content, boolean istitle) {
		log.info("开始进行同义词替换");
		int percent;
		if (istitle) {
			// Titles are short: sample every word.
			percent = 100;
		}
		else {
			String per = ParseString.nvl(CollectConfig.getProperty("REPLACE_PER")); // word sampling percentage
			try {
				percent = Integer.parseInt(per);
			}
			catch (Exception e) {
				log.error("replace_per参数应该是整数, 使用默认值20%");
				percent = 20;
			}
		}

		Map<String, WordFilter> map = SplitWord.getMapByPer(terms, percent, true);
		log.info("高频词汇采样完成");
		if (map == null || map.isEmpty()) {
			log.info("未找到长度大于2的高频词汇……");
			return content;
		}

		// Look up the synonym table with a parameterized IN clause. The previous
		// string-concatenated SQL broke on words containing quotes and exposed an
		// SQL injection vector, since the words come from scraped web content.
		List<Object> names = new ArrayList<Object>();
		StringBuilder placeholders = new StringBuilder();
		for (Map.Entry<String, WordFilter> entry : map.entrySet()) {
			names.add(ParseString.nvl(entry.getValue().getWordName()));
			if (placeholders.length() > 0)
				placeholders.append(",");
			placeholders.append("?");
		}
		String sql = "select * from collect_word where word_name in (" + placeholders + ") ";
		List<Map<String, Object>> wordlist = RuleConfig.querySql(sql, names.toArray());

		// Apply the synonym rules with regex replacement.
		List<String> seen = new ArrayList<String>();
		for (Map<String, Object> word : wordlist) {
			String replace = ParseString.nvl(word.get(CollectWordEntry.wordName));
			// Skip chained rules (a->b, a->c, ...) and back-and-forth rules (a->b, b->a).
			if (seen.contains(replace))
				continue;
			String replacement = ParseString.nvl(word.get(CollectWordEntry.wordReplace));
			seen.add(replace);
			seen.add(replacement);
			content = RegExMatcher.regexReplace(content, replace, replacement);
		}
		log.info("同义词替换完成，替换词目条数：" + wordlist.size());

		return content;
	}

	/**
	 * 批量替换关键词
	 * @param isReplaceSameWord 开关
	 * @param content
	 * @return
	 */
	/**
	 * Replaces high-frequency words in the content with configured synonyms
	 * (variant that tokenizes the raw content itself).
	 *
	 * @param isReplaceSameWord replacement switch (callers gate on it; kept for interface compatibility)
	 * @param content text to rewrite
	 * @param istitle true when rewriting a title (samples 100% of the words)
	 * @return content with synonyms substituted
	 */
	public static String sameWordReplace(boolean isReplaceSameWord, String content, boolean istitle) {
		log.info("开始进行同义词替换");
		int percent;
		if (istitle) {
			// Titles are short: sample every word.
			percent = 100;
		}
		else {
			String per = ParseString.nvl(CollectConfig.getProperty("REPLACE_PER")); // word sampling percentage
			try {
				percent = Integer.parseInt(per);
			}
			catch (Exception e) {
				log.error("replace_per参数应该是整数, 使用默认值20%");
				percent = 20;
			}
		}

		Map<String, WordFilter> map = SplitWord.getMapByPer(content, percent, true);
		log.info("高频词汇采样完成");
		if (map == null || map.isEmpty()) {
			log.info("未找到长度大于2的高频词汇……");
			return content;
		}

		// Look up the synonym table with a parameterized IN clause. The previous
		// string-concatenated SQL broke on words containing quotes and exposed an
		// SQL injection vector, since the words come from scraped web content.
		List<Object> names = new ArrayList<Object>();
		StringBuilder placeholders = new StringBuilder();
		for (Map.Entry<String, WordFilter> entry : map.entrySet()) {
			names.add(ParseString.nvl(entry.getValue().getWordName()));
			if (placeholders.length() > 0)
				placeholders.append(",");
			placeholders.append("?");
		}
		String sql = "select * from collect_word where word_name in (" + placeholders + ") ";
		List<Map<String, Object>> wordlist = RuleConfig.querySql(sql, names.toArray());

		// Apply the synonym rules with regex replacement.
		List<String> seen = new ArrayList<String>();
		for (Map<String, Object> word : wordlist) {
			String replace = ParseString.nvl(word.get(CollectWordEntry.wordName));
			// Skip chained rules (a->b, a->c, ...) and back-and-forth rules (a->b, b->a).
			if (seen.contains(replace))
				continue;
			String replacement = ParseString.nvl(word.get(CollectWordEntry.wordReplace));
			seen.add(replace);
			seen.add(replacement);
			content = RegExMatcher.regexReplace(content, replace, replacement);
		}
		log.info("同义词替换完成，替换词目条数：" + wordlist.size());

		return content;
	}

	/**
	 * 处理行上超链接
	 * @param line
	 * @return
	 */
	/**
	 * Strips whole anchor elements ({@code <a ...>...</a>}) from a line of HTML.
	 *
	 * @param line raw HTML line
	 * @return the line with anchors removed
	 */
	private static String handleLine(String line) {

		return RegExMatcher.regexReplace(line, "<(a|A).*</(a|A)>", "");
	}

	/**
	 * @desc 读取网页到内存
	 * @param url 获取url源
	 * @param isurlIndex 是否处理索引页的超链接
	 * @param charset
	 * Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15
	 * @return
	 */
	/**
	 * Reads a web page into memory.
	 *
	 * @param url page to fetch
	 * @param isurlIndex true for index pages (hyperlinks kept); false for article
	 *        pages (anchor elements are stripped line by line)
	 * @param charset charset used to decode the response body
	 * @return page source; empty buffer on failure (errors are logged, not thrown)
	 */
	public static StringBuffer readStream(String url, boolean isurlIndex, String charset) {
		InputStreamReader inputStreamReader = null;
		BufferedReader bufferedReader = null;
		StringBuffer sb = new StringBuffer();
		try {
			URL arturl = new URL(url);

			HttpURLConnection httpConn = (HttpURLConnection) arturl.openConnection();
			httpConn.setRequestMethod("GET");
			if (ParseString.isNull(user_agent)) {
				// Fall back to a fixed Firefox UA so servers do not reject us with 403.
				user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:48.0) Gecko/20100101 Firefox/48.0";
			}

			httpConn.setRequestProperty("User-Agent", user_agent);
			httpConn.setConnectTimeout(50 * 1000);
			// Also bound the read: without a read timeout, a stalled server would
			// hang the collection job indefinitely.
			httpConn.setReadTimeout(50 * 1000);
			inputStreamReader = new InputStreamReader(httpConn.getInputStream(), charset);
			bufferedReader = new BufferedReader(inputStreamReader);

			String line;
			while ((line = bufferedReader.readLine()) != null) {
				// Index pages keep their hyperlinks; article pages have anchors stripped.
				sb.append(isurlIndex ? line : handleLine(line));
			}
		}
		catch (Exception e) {
			log.error(e);
		}
		finally {
			try {
				if (bufferedReader != null) {
					bufferedReader.close();
				}
				if (inputStreamReader != null) {
					inputStreamReader.close();
				}
			}
			catch (IOException e) {
				log.error(e);
			}
		}

		return sb;
	}

	/**
	 * @desc 下载图片到本地
	 * @param urlString
	 * @param filename
	 * @param savePath
	 * @throws Exception
	 */
	/**
	 * Downloads an image to the local save path.
	 *
	 * @param urlString image URL
	 * @param filename target file name
	 * @param savePath target directory (created when missing)
	 * @return true when the file was downloaded and is readable
	 */
	public static boolean download(String urlString, String filename, String savePath) {
		boolean flag = false;
		InputStream is = null;
		OutputStream os = null;
		String imgpath = savePath + filename;
		// Scratch file used by the re-read trick below to release the previous file's lock.
		String releaseimgpath = savePath + "_release" + filename;
		File releaseFile = null;
		try {
			URL url = new URL(urlString);

			HttpURLConnection httpConn = (HttpURLConnection) url.openConnection();
			httpConn.setRequestMethod("GET");
			httpConn.setConnectTimeout(60000);
			httpConn.setRequestProperty("User-Agent", user_agent);
			is = httpConn.getInputStream();

			// 1 KB copy buffer.
			byte[] bs = new byte[1024];
			int len;
			File sf = new File(savePath);
			if (!sf.exists()) {
				sf.mkdirs();
			}
			os = new FileOutputStream(imgpath);
			while ((len = is.read(bs)) != -1) {
				os.write(bs, 0, len);
			}
			os.flush();
			os.close();
			is.close();
			// Partial second read into a scratch file — NOTE(review): appears to be a
			// workaround to release an OS-level file lock; confirm it is still needed.
			is = httpConn.getInputStream();
			releaseFile = new File(releaseimgpath);
			os = new FileOutputStream(releaseFile);
			if ((len = is.read(bs)) != -1) {
				os.write(bs, 0, len);
			}
			os.flush();
			flag = new File(imgpath).canRead();
		}
		catch (Exception e) {
			log.error(e);
		}
		finally {
			try {
				if (os != null)
					os.close();
				if (is != null)
					is.close();
				// Guard: releaseFile stays null when the download fails early; the
				// original unguarded delete() threw a NullPointerException out of
				// this finally block (NPE is not caught by the IOException catch).
				if (releaseFile != null)
					releaseFile.delete();
			}
			catch (IOException e) {
				log.error(e);
			}
			if (flag)
				log.info("成功下载图片：" + imgpath);
			else
				log.info("未能下载图片：" + imgpath);
		}

		return flag;
	}

	/** Manual entry point for local testing; production runs go through the Quartz job. */
	public static void main(String[] args) {
		// Example: collect the Sina Tech "discovery" channel.
		//Collecter.execRule(Collecter.ruleSinaTech);
		// Collecter.execRule("1");
	}

	/**
	 * Quartz entry point: runs all valid collection rules. Errors are logged and
	 * swallowed so a failed run does not unschedule the job.
	 */
	@Override
	protected void executeInternal(JobExecutionContext arg0)
			throws JobExecutionException {
		try {
			Collecter.execRule();
		}
		catch (Exception e) {
			// Use the class logger instead of printStackTrace() so failures
			// actually reach the log files.
			log.error("采集任务执行异常", e);
		}
	}
	
	/*  
	 * public static final Object[] ruleSinaTech = {
		"http://tech.sina.com.cn/discovery/", 
		"utf-8", 
		"http://tech.sina.com.cn/d/.?/?\\d{4}-\\d{1,2}-\\d{1,2}/doc-\\w+\\d+\\.shtml", 
		 true, 
		"(<h1\\s+id=\"artibodyTitle\"\\s+[^>]+\\s*>)(.+)(</h1>)", // 必须是3段，中间是目标内容
		"(<!-- 秒拍视频处理 -->)(.+)(<!-- 概念股公司简介显示 -->)",  // 必须是3段，中间是目标内容
		false,
		true, // 是否采集图片
		true, // png图片转格式jpg
		"pic.tomrrow.com", // 绑定域名
		1, // 采集条数
		"新浪科技探索",
		true
		};	
	
	// 规则-新浪科技互联网
	public static final Object[] ruleSinaInet = {		
		"http://tech.sina.com.cn/internet/", 
		"utf-8", 
		"http://tech.sina.com.cn/.?/?\\d{4}-\\d{1,2}-\\d{1,2}/doc-\\w+\\d+\\.shtml", 
		 true, 
		"(<h1\\s+id=\"artibodyTitle\"\\s+[^>]+\\s*>)(.+)(</h1>)", // 必须是3段，中间是目标内容
		"(<!-- 秒拍视频处理 -->)(.+)(<!-- 概念股公司简介显示 -->)",  // 必须是3段，中间是目标内容
		false,
		true, // 是否采集图片
		true, // png图片转格式jpg
		"pic.tomrrow.com", // 图片src绑定域名
		5, // 采集条数
		"新浪科技互联网",
		true
		};
	
	// 规则-新浪科技探索
	
	 */
}