package com.gxljc.bear.crawler.itaogao.cnfol;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.mongodb.BasicDBObject;
import com.mongodb.Bytes;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.gxljc.commons.mongo.cli.BaseMongoRepository;
import com.gxljc.commons.util.Pair;
import com.gxljc.bear.crawler.base.BaseMongoTools;
import com.gxljc.bear.crawler.base.DolphinCrawlerConsts;
import com.gxljc.bear.crawler.base.DolphinFetchData;
import com.gxljc.bear.crawler.image.ImageFresh;
import com.gxljc.bear.crawler.itaogao.ItaogaoConst;
import com.gxljc.bear.crawler.itaogao.NewsPageTable;
import com.gxljc.bear.crawler.itaogao.util.HttpUtil;
import com.gxljc.bear.crawler.itaogao.util.MongodbUtil;
import com.gxljc.bear.crawler.util.DateUtil;
import com.gxljc.bear.crawler.util.HtmlUtil;
import com.gxljc.bear.crawler.util.SparkUtil;
import com.gxljc.bear.crawler.weixin.WeixinConst;

/**
 * 中金在线Page爬虫
 * 
 * @author tanghaitao
 * 
 * @since 2021年11月23日
 */
public class CnfolPageCrawler implements Serializable {

	private static final long serialVersionUID = 1L;
	// Cnfol article bodies separate paragraphs with a space followed by full-width spaces.
	private static final String CNFOL_LINE_BREAK_WORD = " 　　";
	// Below this seed count a Spark job is not worth its startup overhead; crawl single-threaded.
	private static final int MAX_SINGLE_CORE = 100;
	public static Logger LOG = Logger.getLogger(CnfolPageCrawler.class);
	// Whether to fetch pages through a proxy (passed down to DolphinFetchData).
	private Boolean proxy = false;

	public CnfolPageCrawler(Boolean proxy) {
		this.proxy = proxy;
	}

	/**
	 * Entry point: loads pending seeds from MongoDB and crawls them, choosing
	 * single-threaded or Spark execution based on the seed count.
	 *
	 * @throws Exception on seed-loading failure
	 */
	public void crawl() throws Exception {
		List<NewsPageTable> seeds = getSeed();
		if (CollectionUtils.isEmpty(seeds)) {
			LOG.error("page is empty");
			return;
		}
		if (seeds.size() < MAX_SINGLE_CORE) { // few seeds: single-threaded crawl avoids Spark overhead
			crawlNoSpark(seeds);
		} else {
			crawl(seeds);
		}
	}

	/**
	 * Crawls the given seeds sequentially in the current thread.
	 * Failures on individual seeds are logged and do not abort the batch.
	 *
	 * @param seeds pages to crawl; no-op if empty
	 */
	public void crawlNoSpark(List<NewsPageTable> seeds) {
		if (CollectionUtils.isEmpty(seeds)) {
			LOG.error("page is empty");
			return;
		}
		for (NewsPageTable table : seeds) {
			try {
				crawl(table);
			} catch (Exception ex) {
				LOG.error("crawlNoSpark error!", ex);
			}
		}
	}

	/**
	 * Crawls the given seeds in parallel via Spark. Each partition processes its
	 * seeds sequentially; per-seed failures are logged and skipped.
	 *
	 * @param seeds pages to crawl
	 */
	public void crawl(List<NewsPageTable> seeds) {
		int coreMax = 20;
		JavaSparkContext jsc = SparkUtil.createCommonsSparkContext("bear-中金在线 page详情爬取-" + seeds.size(),
				coreMax, coreMax * 2, CnfolPageCrawler.class);
		JavaRDD<NewsPageTable> seedsRDD = jsc.parallelize(new ArrayList<NewsPageTable>(seeds));
		long count = seedsRDD.mapPartitions(new FlatMapFunction<Iterator<NewsPageTable>, Integer>() {
			private static final long serialVersionUID = 1L;

			@Override
			public Iterable<Integer> call(Iterator<NewsPageTable> seeds) throws Exception {
				int successCnt = 0;
				while (seeds.hasNext()) {
					try {
						NewsPageTable seed = seeds.next();
						int ret = crawl(seed);
						if (ret == DolphinCrawlerConsts.RESULT_YES) {
							successCnt++;
							// Milestone log only on actual successes; the original logged
							// "successCnt=0" on every iteration before the first success.
							if (successCnt % 100 == 0) {
								LOG.info("successCnt=" + successCnt);
							}
						}
					} catch (Exception er) {
						LOG.error("crawl partition element error!", er);
					}
				}
				return Arrays.asList(successCnt);
			}
		}).count();
		LOG.info("all count=" + count);
		jsc.stop();
	}

	/**
	 * Crawls a single page: fetches the HTML, extracts its fields, and persists
	 * the result to MongoDB.
	 *
	 * @param table seed holding the URL to crawl; populated in place
	 * @return {@link DolphinCrawlerConsts#RESULT_YES} on success
	 * @throws Exception if the fetch returns an empty page or extraction/persistence fails
	 */
	public int crawl(NewsPageTable table) throws Exception {
		LOG.info("crawl = " + table.getCrawlUrl());
		String url = table.getCrawlUrl();
		DolphinFetchData fetchData = DolphinFetchData.getInstance(DolphinCrawlerConsts.CrawlerChannelType.bear
				.getName());
		String html = fetchData.getEncodeHtml(url, proxy, ItaogaoConst.CRAWL_TIMEOUT);
		if (StringUtils.isEmpty(html)) {
			throw new Exception("page result is empty url = " + url);
		}
		extract(table, html);
		MongodbUtil.saveMongodb(table, ItaogaoConst.MONGODB_TABLE.CNFOL.getValue());
		return DolphinCrawlerConsts.RESULT_YES;
	}

	// Body-text extraction: splits #Content on the site's paragraph separator
	// and rejoins with the crawler's canonical line break.
	private void extractContent(NewsPageTable table, Document doc) {
		Elements list = doc.select("#Content");
		if (CollectionUtils.isEmpty(list))
			return;
		StringBuilder sb = new StringBuilder();
		String text = list.text();
		String[] contents = text.split(CNFOL_LINE_BREAK_WORD);
		for (String content : contents) {
			if (StringUtils.isBlank(content)) {
				continue;
			}
			if (sb.length() > 0)
				sb.append(DolphinCrawlerConsts.DEFAULT_LINE_BREAK_WORD);
			content = content.trim();
			// Strip leading full-width spaces that trim() does not remove.
			while (content.startsWith("　")) {
				content = content.substring(1);
			}
			sb.append(content);
		}
		table.setContent(sb.toString());
	}

	// Publish-date extraction from the Baidu SEO marker element.
	private void extractPublishDate(NewsPageTable table, Document doc) {
		Elements list = doc.select("#pubtime_baidu");
		if (CollectionUtils.isEmpty(list))
			return;
		String publishDate = list.get(0).text();
		if (StringUtils.isEmpty(publishDate))
			return;
		Date date = DateUtil.parseFormat(publishDate, "yyyy-MM-dd HH:mm:ss");
		table.setPublishDate(date);
	}

	// Title extraction.
	private void extractTitle(NewsPageTable table, Document doc) {
		Elements h1List = doc.select("#Title");
		if (CollectionUtils.isEmpty(h1List))
			return;
		table.setTitle(h1List.get(0).text());
	}

	// Source (来源) extraction. NOTE: this handles the article's source/publisher,
	// not the author — the original comment and log message said "author".
	private void extractResource(NewsPageTable table, Document doc) {
		try {
			Elements elements = doc.select("#source_baidu");
			if (CollectionUtils.isEmpty(elements))
				return;
			String source = elements.get(0).text();
			if (StringUtils.isEmpty(source))
				return;
			source = source.trim().replace("来源：", "").replace(" ", "").replace("　", "");
			table.setSource(source);
		} catch (Exception er) {
			LOG.error("extract source error, url=" + table.getCrawlUrl(), er);
		}
	}

	// Author extraction.
	private void extractAuthor(NewsPageTable table, Document doc) {
		try {
			Elements elements = doc.select("#author_baidu");
			if (CollectionUtils.isEmpty(elements))
				return;
			String author = elements.get(0).text();
			if (StringUtils.isEmpty(author))
				return;
			author = author.trim().replace("作者：", "");
			table.setAuthor(author);
		} catch (Exception er) {
			LOG.error("extract author error, url=" + table.getCrawlUrl(), er);
		}
	}

	// Image extraction: refreshes each in-content image through ImageFresh and
	// uses the first refreshed image as the thumbnail (logoUrl).
	private void extractImage(NewsPageTable table, Document doc) throws Exception {
		Elements elements = doc.select("#Content");
		if (CollectionUtils.isEmpty(elements))
			return;
		Elements es = elements.select("img");
		if (CollectionUtils.isEmpty(es))
			return;
		List<String> saveImages = new LinkedList<>();
		ImageFresh fresh = ImageFresh.getInstance();
		for (Element e : es) {
			String image = e.attr("src");
			if (StringUtils.isEmpty(image))
				continue;
			// Resolve relative image URLs against the page URL before refreshing.
			String newUrl = HtmlUtil.pictureUrlPref(image, table.getCrawlUrl());
			Pair<Integer, String> newPicPair = fresh.freshOne(newUrl);
			String newPic = newPicPair.second;
			if (StringUtils.isEmpty(table.logoUrl)) { // keep the first image as thumbnail; never overwrite
				table.setLogoUrl(newPic);
			}
			saveImages.add(newPic);
		}
		if (!CollectionUtils.isEmpty(saveImages)) {
			table.setImages(saveImages);
		}
	}

	// Full-page extraction: runs every field extractor, inlines images into the
	// content, and marks the record as successfully crawled.
	private void extract(NewsPageTable table, String html) throws Exception {
		Document doc = Jsoup.parse(html);
		extractContent(table, doc);
		extractPublishDate(table, doc);
		extractImage(table, doc);
		extractAuthor(table, doc);
		extractResource(table, doc);
		extractTitle(table, doc);
		HttpUtil.insertImage2Content(table);
		table.setStatus(0);
		table.setCrawlFlag(1);
	}

	/**
	 * Loads seeds (id + crawl URL) from MongoDB where crawlFlag == 0.
	 * Per-document mapping failures are logged and skipped.
	 *
	 * @return seeds to crawl; possibly empty, never null
	 * @throws Exception on MongoDB access failure
	 */
	public List<NewsPageTable> getSeed() throws Exception {
		BaseMongoRepository repo = BaseMongoTools
				.getInstance(DolphinCrawlerConsts.MongoDBName.ITAOGAO.getName());
		BasicDBObject keys = new BasicDBObject();
		keys.put(DolphinCrawlerConsts.MONGODB_DEFAULT_ID, 1);
		keys.put(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_URL.getValue(), 1);
		keys.put(WeixinConst.SCHEMA_COLUMN_NAME.FAIL_TIME.getValue(), 1);
		DBObject query = new BasicDBObject();
		query.put(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_FLAG.getValue(), new BasicDBObject("$eq", 0));
		DBCursor cursor = repo.getCollection(ItaogaoConst.MONGODB_TABLE.CNFOL.getValue()).find(query, keys)
				.addOption(Bytes.QUERYOPTION_NOTIMEOUT);
		List<NewsPageTable> seeds = new LinkedList<>();
		try {
			for (DBObject data : cursor.toArray()) {
				try {
					Object id = data.get(DolphinCrawlerConsts.MONGODB_DEFAULT_ID);
					Object url = data.get(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_URL.getValue());
					NewsPageTable table = new NewsPageTable();
					table.setId(id.toString());
					table.setCrawlUrl(url.toString());
					seeds.add(table);
				} catch (Exception er) {
					LOG.error("getSeed error!", er);
				}
			}
		} finally {
			cursor.close(); // original leaked the no-timeout cursor
		}
		return seeds;
	}

}