package com.vs.crawl.news;

import java.io.File;
import java.sql.Timestamp;
import java.text.ParseException;
import java.util.HashSet;
import java.util.Properties;
import java.util.regex.Pattern;

import javax.xml.bind.JAXBException;

import org.jsoup.nodes.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.html.ParseHtml;
import com.logDataBase.LogMonitor;
import com.model.LogDateBaseSingletion;
import com.model.rule.FileRule;
import com.model.xmlElement.DataColumns;
import com.model.xmlElement.DataFilter;
import com.model.xmlElement.input.Target;
import com.model.xmlElement.output.DataBase;
import com.model.xmlElement.output.Output;
import com.model.xmlElement.output.OutputFile;
import com.model.xmlElement.output.WriteFileRule;
import com.tool.DateUtils;
import com.tool.JaxbReadXmlTool;
import com.tool.LogDataBaseSqlTool;
import com.tool.LogDataBaseTool;
import com.tool.PropertiesUtil;

import constant.XmlElementCoonstant;
import cn.edu.hfut.dmic.webcollector.crawler.DeepCrawler;
import cn.edu.hfut.dmic.webcollector.model.Links;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.util.JDBCHelper;



/**
 * *********************************************
 * @author Administrator
 * @FileName SoupLangCrawler.java
 * @Description Production crawler (正式爬虫): crawls the seeded URLs, extracts
 *              the configured data columns from each page, and writes the
 *              results out via WriteCrawler.
 **********************************************
 */
public class SoupLangCrawler extends DeepCrawler{
	public static final Logger LOG = LoggerFactory.getLogger(SoupLangCrawler.class);
	
	private File file;												//crawl-config XML file
	private String separator;										//output column separator
	private WriteCrawler writeCrawler;								//output file writer
	private FileRule fileRule;										//optional file-write rule; null when not configured
	private Target target;											//crawl target configuration

	/**
	 * Builds the crawler from the XML configuration.
	 *
	 * @param crawlPath     working directory for the crawler's state
	 * @param xmlFileName   path of the crawl-config XML file (also handed to the superclass)
	 * @param jarFilePath   path of the hadoop upload-tool jar
	 * @param hadoopHomeDir hadoop install dir (required for 2.6.x, empty string for 0.20.2)
	 * @throws Exception when the superclass fails to load the configuration
	 */
	public SoupLangCrawler(String crawlPath,String xmlFileName,String jarFilePath,String hadoopHomeDir) throws Exception {
		super(crawlPath,xmlFileName);
		file = new File(xmlFileName);
		Output output = this.getDataColumns().getOutput();
		separator = output.getOutputFile().getOutputFileSeparator();
		writeCrawler = new WriteCrawler(this);
		writeCrawler.setJarFilePath(jarFilePath);
		writeCrawler.setHadoopHomeDir(hadoopHomeDir);
		target = this.getDataColumns().getInput().getTarget();
		this.setThreads(target.getThreadsNumber());
	}
	
	/**
	 * Entry point.
	 * args[0] = crawl-config XML path,
	 * args[1] = hadoop upload-tool jar path,
	 * args[2] = (optional) hadoop home dir, needed for hadoop 2.6.x only.
	 */
	public static void main(String[] args) throws Exception{
		if(args.length < 2){
			//fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException
			LOG.error("usage: SoupLangCrawler <xmlFilePath> <jarFilePath> [hadoopHomeDir]");
			return;
		}
		
		DataBase logDataBase = LogDateBaseSingletion.getSingle();
		JDBCHelper.createMysqlTemplate(
				logDataBase.getDataBaseName(),
				logDataBase.getDataBaseUrl(),
				logDataBase.getDataBaseUserName(),
				logDataBase.getDataBasePassword(),
				20,
				100);
		
		String xmlFilePath = args[0];
		String xmlFileName = xmlFilePath.substring(xmlFilePath.lastIndexOf("/") + 1);
		String jarFilePath = args[1];
		String hadoopHomeDir = args.length >= 3 ? args[2] : "";
		
		LOG.info(xmlFilePath);
		LOG.info(xmlFileName);
		LOG.info(jarFilePath);
		LOG.info(hadoopHomeDir);
		
		//mark the task as "running" in the log database
		LOG.info("执行中");
		LogDataBaseTool.update(LogDateBaseSingletion.getSingle().getDataBaseName(), 
				LogDataBaseSqlTool.getLogDataBase_startCrawler(), 
				new Object[]{"2",new Timestamp(System.currentTimeMillis()),xmlFileName});
		
		//phase 1: collect the seed URLs
		SeedsCrawler seedsCrawler = new SeedsCrawler("/home/hu/data/souplang-seedsCrawler",xmlFilePath,false);
		seedsCrawler.createFileRule();
		seedsCrawler.createCustomSeeds();
		seedsCrawler.start(seedsCrawler.getTarget().getLevel());
		LOG.info("seeds->size->" + seedsCrawler.getSeeds().size());
		
		LOG.info("种子抓取完成");
		LogDataBaseTool.update(LogDateBaseSingletion.getSingle().getDataBaseName(), 
				LogDataBaseSqlTool.getLogDataBase_finishSeedCrawler(), 
				new Object[]{seedsCrawler.getSeeds().size(),xmlFileName});
		
		//phase 2: crawl the seeds and extract the configured data
		SoupLangCrawler soupLangCrawler = new SoupLangCrawler("/home/hu/data/souplang-crawler",xmlFilePath,jarFilePath,hadoopHomeDir);
		
		LOG.info("更新本次爬虫task的文件名");
		OutputFile outputFile = soupLangCrawler.getDataColumns().getOutput().getOutputFile();
		LogDataBaseTool.update(LogDateBaseSingletion.getSingle().getDataBaseName(), 
				LogDataBaseSqlTool.getLogDataBase_updateCrawlerTask(), 
				new Object[]{outputFile.getOutputFileLocal().getOutputLocalFilePath(soupLangCrawler.getTarget().getTaskId(),outputFile.getUser(),soupLangCrawler.getTarget().getTaskTime(),soupLangCrawler.getTarget().getName()),
							xmlFileName});
		
		soupLangCrawler.createFileRule();
		//NOTE(review): createCustomSeeds() below replaces the seed set just
		//installed by setSeeds() with a single hard-coded URL — looks like a
		//debug leftover; confirm against DeepCrawler's seed handling.
		soupLangCrawler.setSeeds(seedsCrawler.getSeeds());
		soupLangCrawler.createCustomSeeds();
		LogMonitor logMonitor = new LogMonitor(xmlFileName,30,soupLangCrawler);
		new Thread(logMonitor).start();
		soupLangCrawler.start(1);
		soupLangCrawler.getWriteCrawler().writeData();
		
		//mark the task as finished
		LOG.info("已完成");
		LogDataBaseTool.update(LogDateBaseSingletion.getSingle().getDataBaseName(), 
				LogDataBaseSqlTool.getLogDataBase_finishCrawler(), 
				new Object[]{"3",new Timestamp(System.currentTimeMillis()),xmlFileName});
		
		logMonitor.shutdown();
	}
	
	/**
	 * Instantiates the configured {@link FileRule}: the rule name from the XML
	 * config is mapped to a class name via the FILERULE properties file, the
	 * class is instantiated reflectively and wired to this crawler's writer.
	 * Leaves {@code fileRule} null when no rule is configured or loading fails.
	 */
	@Override
	public void createFileRule(){
		WriteFileRule writeFileRule = this.getDataColumns().getOutput().getWriteFileRule();
		if(writeFileRule != null && writeFileRule.getRuleName() != null && !"".equals(writeFileRule.getRuleName())){
			String ruleName = writeFileRule.getRuleName();
			Properties prop = PropertiesUtil.readPropertiesAsStream(XmlElementCoonstant.FILERULE);
			String className = PropertiesUtil.getValue(prop, ruleName);
			//log via SLF4J instead of System.out so rule resolution appears in the crawl log
			LOG.info("file rule {} -> {}", ruleName, className);
			try {
				//getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance(),
				//which silently rethrows checked constructor exceptions
				fileRule = (FileRule) Class.forName(className).getDeclaredConstructor().newInstance();
				fileRule.setWriteCrawler(writeCrawler);
				fileRule.setValue(writeFileRule.getRuleValue());
			} catch (ReflectiveOperationException e) {
				LOG.error("failed to instantiate file rule " + className, e);
			}
		}
	}
	
	/**
	 * Installs the crawl seeds.
	 * NOTE(review): this resets the seed set and adds a single hard-coded JD
	 * item URL — presumably a debug leftover; verify before relying on it.
	 */
	@Override
	public void createCustomSeeds() {
		this.setSeeds(new HashSet<String>());
		seeds.add("http://item.jd.com/918498.html");
	}

	/**
	 * Visits one fetched page: when the URL matches the configured pattern(s),
	 * extracts the configured columns, applies the optional date filter, and
	 * appends the resulting row to the shared output buffers.
	 *
	 * @param page the fetched page (URL + parsed document)
	 * @return the links discovered in the document, for recursive crawling
	 */
	@Override
	public Links visitAndGetNextLinks(Page page) {
		Links nextLinks = new Links();
		Document document = page.getDoc();
		String url = page.getUrl();
		boolean needFetch = false;
		if(url != null && !"".equals(url)){
			if(target.getPatten() != null && !"".equals(target.getPatten())){
				//the URL must match at least one of the '|'-separated regex patterns
				String[] patterns = target.getPatten().split("\\|");
				for(int i = 0,len = patterns.length;i < len;i++){
					if(Pattern.matches(patterns[i], url)){
						needFetch = true;
						break;
					}
				}
			}else{
				//no pattern configured: every URL qualifies
				needFetch = true;
			}
		}
		if(needFetch){
			//a fresh DataColumns is unmarshalled per page because ParseHtml
			//fills the instance with this page's values
			DataColumns dataColumns = null;
			try {
				dataColumns = JaxbReadXmlTool.readString(DataColumns.class, file.getAbsolutePath());
			} catch (JAXBException e) {
				LOG.error("failed to unmarshal crawl config " + file.getAbsolutePath(), e);
			}
			ParseHtml parseHtml = new ParseHtml(url,document,dataColumns);
			dataColumns = parseHtml.parseHtml();
			
			boolean needAppend = true;
			StringBuilder dataFromUrl = new StringBuilder();
			if(dataColumns != null){
				if(target.isTarget_save_seed1()){
					dataFromUrl.append(url).append(separator);
				}
				DataFilter dataFilter = dataColumns.getDataFilter();
				dataColumns.sortDataColumnOutputs();
				for(int i = 0,size = dataColumns.size();i < size;i++){
					//drop the whole row when a dataoutput_value_not_null=true column is empty
					if(dataColumns.get(i).getDataColumnOutput().isOutputNotNull()
							&& dataColumns.get(i).getDataColumnOutput().valueIsNull()){
						needAppend = false;
						break;
					}
					appendColumn(dataFromUrl, dataColumns.get(i).getDataColumnOutput().getValue(), i == size - 1);
					//when a date filter is configured, keep only rows whose date column
					//falls inside the wanted window
					if(dataFilter != null && dataFilter.getDataInputName().equals(dataColumns.get(i).getDataColumnInput().getName())){
						try {
							if(!DateUtils.isToday(DateUtils.getSdf(dataFilter.getDateInputFormater()).parse(dataColumns.get(i).getDataColumnOutput().getValue().toString()),Integer.valueOf(dataFilter.getDate()))){
								LOG.info("时间不符合,剔除" + dataColumns.get(i).getDataColumnOutput().getValue());
								needAppend = false;
								break;
							}
						} catch (ParseException e) {
							LOG.error("unparseable date value: " + dataColumns.get(i).getDataColumnOutput().getValue(), e);
						}
					}
				}
			}
			if(needAppend && dataFromUrl.length() > 0){
				//the buffers and counters are shared across worker threads
				synchronized (this) {
					this.getData().append(dataFromUrl);
					if(target.isTarget_save_seed2()){
						this.getSeedsData().append(url).append("\r\n");
					}
					//record count +1
					this.getTotalSize().getAndAdd(1);
					//record bytes (NOTE: getBytes() uses the platform default
					//charset — confirm that matches the output file's encoding)
					this.getTotalBytesLength().getAndAdd(dataFromUrl.toString().getBytes().length);
					//apply the optional file-write rule
					if(fileRule != null){
						fileRule.executeFileRule();
					}
				}
			}
		}
		nextLinks.addAllFromDocument(document);
		return nextLinks;
	}
	
	/**
	 * Appends one column value to the row buffer, terminated by the column
	 * separator, or by CRLF when it is the last column of the row.
	 */
	private void appendColumn(StringBuilder row, Object value, boolean last) {
		row.append(value).append(last ? "\r\n" : separator);
	}
	
	public File getFile() {
		return file;
	}

	public void setFile(File file) {
		this.file = file;
	}

	public WriteCrawler getWriteCrawler() {
		return writeCrawler;
	}

	public void setWriteCrawler(WriteCrawler writeCrawler) {
		this.writeCrawler = writeCrawler;
	}

	public Target getTarget() {
		return target;
	}

	public void setTarget(Target target) {
		this.target = target;
	}
	
}
