package com.sentiment.crawler;

import java.io.File;
import java.io.IOException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.log4j.Logger;
import org.jsoup.nodes.Document;

import com.sentiment.config.Config;
import com.sentiment.contentextractor.ContentExtractor;
import com.sentiment.contentextractor.News;
import com.sentiment.db.CrawlingPageDB;
import com.sentiment.db.RalationMapDB;
import com.sentiment.db.TextDB;
import com.sentiment.obj.CrawlingPage;
import com.sentiment.obj.RalationMap;
import com.sentiment.obj.Text;
import com.sentiment.strategy.IdGenerator;
import com.sentiment.strategy.TitleFilter;
import com.sentiment.tools.StringTools;
import com.sentiment.webcollector.crawler.DeepCrawler;
import com.sentiment.webcollector.model.Links;
import com.sentiment.webcollector.model.Page;

/**
 * Crawler for Sohu keyword-search results.
 *
 * <p>Workflow (driven by callers): {@link #getSerach(String)} seeds the Sohu
 * search page and collects result pages into the DB, {@link #createRalationMap()}
 * records the src-to-target id mapping, and {@link #parserAllhtml()} extracts
 * article text for every collected page.
 */
public class SohuCrawler extends DeepCrawler{

	/** Matches the Sohu search-result listing page (vs. an article page). Compiled once. */
	private static final Pattern SEARCH_PAGE = Pattern.compile("http://www.sohu.com/s\\?wd=.*");

	/** Pages collected during the crawl; flushed to the DB by getSerach(). */
	private final List<CrawlingPage> li = new ArrayList<CrawlingPage>();
	private final Logger logger = Config.getLogger(this.getClass());
	/** Id of the source (search task) all collected pages are attributed to. */
	private final String srcId;

	/**
	 * @param crawlPath local path handed to the underlying DeepCrawler
	 * @param srcId     source id used to tag every page found by this crawl
	 */
	public SohuCrawler(String crawlPath, String srcId) {
		super(crawlPath);
		this.srcId = srcId;
	}

	/**
	 * Visit callback: on the search-result listing page, follow the result
	 * links; on any other (article) page, record it as a CrawlingPage.
	 *
	 * @return the next links to crawl when on the listing page, otherwise null
	 */
	@Override
	public Links visitAndGetNextLinks(Page page) {
		String url = page.getUrl();
		if (SEARCH_PAGE.matcher(url).matches()) {
			Links nextLinks = new Links();
			// Sohu search results render each hit as <h3><a data-click=...>.
			nextLinks.addAllFromDocument(page.getDoc(), "h3>a[data-click]");
			return nextLinks;
		} else {
			try {
				String title = getTitle(page);
				String tarId = IdGenerator.generateTarId();
				li.add(new CrawlingPage(srcId, tarId, url, title));
			} catch (Exception e) {
				// Keep the cause so the stack trace is not lost.
				logger.error("failed to record crawled page: " + url, e);
			}
		}
		return null;
	}

	/**
	 * Runs a Sohu search for {@code keyword}, crawls the results (depth 2),
	 * and persists every collected page.
	 *
	 * <p>Note: method name keeps the historical spelling ("Serach") so
	 * existing callers continue to work.
	 */
	public void getSerach(String keyword) {
		CrawlingPageDB cpdb = new CrawlingPageDB();
		try {
			addSeed("http://www.sohu.com/s?wd=" + URLEncoder.encode(keyword, "utf-8"));
			start(2);
			for (CrawlingPage cp : li) {
				cpdb.insertData(cp);
			}
		} catch (Exception e) {
			logger.error("search crawl failed for keyword: " + keyword, e);
		} finally {
			// Always release the DB handle, even when the crawl or an insert fails.
			cpdb.DBClose();
		}
	}

	/** Extracts the page title and strips &nbsp;/whitespace noise from it. */
	private String getTitle(Page page) {
		Document doc = page.getDoc();
		return StringTools.deleteNbspAndBlank(doc.title());
	}

	/**
	 * Builds and stores the src-to-targets relation map: all page ids that
	 * were collected for this crawler's srcId.
	 */
	public void createRalationMap() {
		ArrayList<String> tarIdAll = new ArrayList<String>();
		List<CrawlingPage> cpli;
		CrawlingPageDB cpdb = new CrawlingPageDB();
		try {
			cpli = cpdb.findAllDataByKeyAndVal("bySrc", srcId);
		} finally {
			cpdb.DBClose();
		}
		for (CrawlingPage cp : cpli) {
			tarIdAll.add(cp.getId());
		}

		RalationMap rm = new RalationMap(srcId, tarIdAll, cpli.size());
		RalationMapDB rmdb = new RalationMapDB();
		try {
			rmdb.insertData(rm);
		} finally {
			rmdb.DBClose();
		}
	}

	/**
	 * For every target page recorded in the relation map of this srcId,
	 * extracts the article content and stores it as a Text row. Pages that
	 * cannot be found or whose title is filtered are skipped with a log entry.
	 */
	public void parserAllhtml() {
		RalationMap rm;
		RalationMapDB rmdb = new RalationMapDB();
		try {
			rm = rmdb.findDataByKeyAndVal("srcId", srcId);
		} finally {
			rmdb.DBClose();
		}
		if (rm == null) {
			// No relation map stored for this srcId yet — nothing to parse.
			logger.error("-----未找到该对应页面 放弃处理-----");
			return;
		}
		List<String> tarIdAll = rm.getTarIdAll();
		CrawlingPageDB cpdb = new CrawlingPageDB();
		TextDB tdb = new TextDB();
		try {
			for (int i = 0; i < rm.getTarAmount(); i++) {
				CrawlingPage cp = cpdb.findDataByKeyAndVal("id", tarIdAll.get(i));
				if (cp == null) {
					logger.error("-----未找到该对应页面 放弃处理-----");
					continue;
				} else if (TitleFilter.filter(cp.getTitle())) {
					logger.error("-----在配置文件中有该标题信息-----请检查titleFilter.xml配置文件-----");
					logger.error("-----说明该标题是处于过滤状态  即还没对该标题采取合适的正文提取方案-----");
					continue;
				}
				try {
					News news = ContentExtractor.getNewsByUrl(cp.getUrl());
					Text tx = new Text(cp.getTitle(), news.getContent(), cp.getId(), getWriter(), news.getTime());
					tdb.insertData(tx);
				} catch (Exception e) {
					// Covers IOException too; one page failing must not abort the batch.
					logger.error("content extraction failed for url: " + cp.getUrl(), e);
				}
			}
		} finally {
			// Release both DB handles even if the loop aborts unexpectedly.
			tdb.DBClose();
			cpdb.DBClose();
		}
	}

	/**
	 * Recursively deletes {@code file}. For a directory, children are removed
	 * first (File.delete() only removes empty directories).
	 *
	 * @return true if the file/directory was deleted, false if it did not
	 *         exist or could not be removed
	 */
	private boolean deleteFile(File file) {
		if (!file.exists()) {
			return false;
		}
		if (file.isDirectory()) {
			File[] children = file.listFiles();
			if (children != null) {
				for (File child : children) {
					deleteFile(child);
				}
			}
		}
		return file.delete();
	}

	/** Placeholder author field; stored as the literal string "null" by design. */
	private String getWriter() {
		return "null";
	}
}


