package com.gxljc.bear.crawler.itaogao.gzdaily;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import com.mongodb.BasicDBObject;
import com.mongodb.Bytes;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.gxljc.commons.mongo.cli.BaseMongoRepository;
import com.gxljc.commons.util.Pair;
import com.gxljc.bear.crawler.base.BaseMongoTools;
import com.gxljc.bear.crawler.base.DolphinCrawlerConsts;
import com.gxljc.bear.crawler.base.DolphinFetchData;
import com.gxljc.bear.crawler.image.ImageFresh;
import com.gxljc.bear.crawler.itaogao.ItaogaoConst;
import com.gxljc.bear.crawler.itaogao.NewsPageTable;
import com.gxljc.bear.crawler.itaogao.NewsPageTemplate;
import com.gxljc.bear.crawler.itaogao.util.HbaseUtil;
import com.gxljc.bear.crawler.itaogao.util.HttpUtil;
import com.gxljc.bear.crawler.itaogao.util.MongodbUtil;
import com.gxljc.bear.crawler.util.DateUtil;
import com.gxljc.bear.crawler.util.HtmlUtil;
import com.gxljc.bear.crawler.util.SparkUtil;

/**
 * 广州日报 PAGE 数据爬虫。
 * 
 * @author yujunjie
 * @since 2022-9-12
 */
public class GzDailyPageCrawler implements Serializable {

	private static final long serialVersionUID = 1L;
	public static Logger LOG = Logger.getLogger(GzDailyPageCrawler.class);
	// Abandon author extraction when the article body is shorter than this many characters.
	private static final int CONTENT_LESS_NUM = 80;
	// Author-credit patterns, compiled once instead of on every line scanned; group 1 captures the name(s).
	private static final Pattern[] AUTHOR_PATTERNS = {
			Pattern.compile("广州日报记者(.*?) 通讯员"),
			Pattern.compile("广州日报记者(.*?)$"),
			Pattern.compile("电 （记者(.*?)）"),
			Pattern.compile("讯 （记者[ ]?(.*?)[ ）]") };
	// Gson is thread-safe; share one instance instead of allocating a new one per page.
	private static final Gson GSON = new Gson();
	// Whether page fetches go through a proxy.
	private Boolean proxy = false;

	public GzDailyPageCrawler(Boolean proxy) {
		this.proxy = proxy;
	}

	/**
	 * Entry point: loads pending seeds from MongoDB and crawls them sequentially
	 * in the current JVM.
	 *
	 * @throws Exception if loading the seed list fails
	 */
	public void crawl() throws Exception {
		List<NewsPageTable> seeds = getSeed();
		if (CollectionUtils.isEmpty(seeds)) {
			LOG.error("page is empty");
			return;
		}
		crawlNoSpark(seeds);
	}

	/**
	 * Crawls the given seeds one by one without Spark. A failure on one seed is
	 * logged and the remaining seeds are still processed (previously the whole
	 * batch was silently abandoned on the first error, inconsistent with the
	 * Spark variant which continues).
	 */
	public void crawlNoSpark(List<NewsPageTable> seeds) {
		if (CollectionUtils.isEmpty(seeds)) {
			LOG.error("page is empty");
			return;
		}
		for (NewsPageTable table : seeds) {
			try {
				crawl(table);
			} catch (Exception ex) {
				LOG.error("crawlNoSpark error! url=" + table.getCrawlUrl(), ex);
			}
		}
	}

	/**
	 * Crawls the given seeds in parallel on Spark. Each partition crawls its
	 * share of seeds and returns its success count; per-seed failures are
	 * logged (not printed to the executor's stdout) and skipped.
	 */
	public void crawl(List<NewsPageTable> seeds) {
		int coreMax = 20;
		JavaSparkContext jsc = SparkUtil.createCommonsSparkContext("bear-广州日报 page详情爬取-" + seeds.size(),
				coreMax, coreMax * 2, GzDailyPageCrawler.class);
		JavaRDD<NewsPageTable> seedsRDD = jsc.parallelize(new ArrayList<NewsPageTable>(seeds));
		long count = seedsRDD.mapPartitions(new FlatMapFunction<Iterator<NewsPageTable>, Integer>() {
			private static final long serialVersionUID = 1L;

			@Override
			public Iterable<Integer> call(Iterator<NewsPageTable> seeds) throws Exception {
				int successCnt = 0;
				while (seeds.hasNext()) {
					try {
						NewsPageTable seed = seeds.next();
						if (crawl(seed) == DolphinCrawlerConsts.RESULT_YES) {
							successCnt++;
							// Progress heartbeat; only logged on increments so failed seeds
							// no longer spam "successCnt=0".
							if (successCnt % 100 == 0) {
								LOG.info("successCnt=" + successCnt);
							}
						}
					} catch (Exception er) {
						LOG.error("crawl seed error!", er); // was printStackTrace(), lost on executors
					}
				}
				return Arrays.asList(successCnt);
			}
		}).count();
		LOG.info("all count=" + count);
		jsc.stop();
	}

	/**
	 * Crawls a single page: fetches the URL, extracts the fields, persists the
	 * raw bytes to HBase and the parsed record to MongoDB.
	 *
	 * @return {@code DolphinCrawlerConsts.RESULT_YES} on success,
	 *         {@code RESULT_NO} when the fetch returned nothing
	 * @throws Exception on extraction or persistence failure
	 */
	public int crawl(NewsPageTable table) throws Exception {
		String url = table.getCrawlUrl();
		LOG.info("crawl = " + url);
		DolphinFetchData fetchData = DolphinFetchData.getInstance(DolphinCrawlerConsts.CrawlerChannelType.bear
				.getName());
		Pair<String, byte[]> pair = fetchData.getResult2Pair(url, proxy, ItaogaoConst.CRAWL_TIMEOUT);
		if (pair == null) {
			LOG.error("page result is empty url = " + url);
			return DolphinCrawlerConsts.RESULT_NO;
		}
		extract(table, pair.first);
		HbaseUtil.saveHbase(table, pair.second);
		MongodbUtil.saveMongodb(table, ItaogaoConst.MONGODB_TABLE.GZDAILY.getValue());
		return DolphinCrawlerConsts.RESULT_YES;
	}

	/**
	 * Joins the template's content paragraphs into a single string separated by
	 * the default line-break token, trimming each paragraph.
	 */
	private void extractContent(NewsPageTable table, NewsPageTemplate template) {
		List<String> contents = template.getContent();
		if (CollectionUtils.isEmpty(contents))
			return;
		StringBuilder sb = new StringBuilder(); // no shared state; StringBuffer's locking was unnecessary
		for (String content : contents) {
			if (sb.length() > 0)
				sb.append(DolphinCrawlerConsts.DEFAULT_LINE_BREAK_WORD);
			content = content.trim();
			// NOTE(review): trim() already strips ASCII spaces; this loop only has an
			// effect if the literal below is a non-ASCII space (e.g. full-width
			// U+3000) — confirm the intended character.
			while (content.startsWith(" ")) {
				content = content.substring(1);
			}
			sb.append(content);
		}
		table.setContent(sb.toString());
	}

	/**
	 * Parses the publish date, normalizing the Chinese date markers (年/月/日)
	 * to "yyyy-MM-dd" form first.
	 * Bugfix: the empty check now runs BEFORE the string manipulation (the old
	 * order dereferenced a possibly-null/empty value), and a normalized string
	 * shorter than 10 characters no longer throws StringIndexOutOfBoundsException.
	 */
	private void extractPublishDate(NewsPageTable table, NewsPageTemplate template) {
		String publishDate = template.getPublishDate();
		if (StringUtils.isEmpty(publishDate))
			return;
		// Plain literal replacement — no regex semantics needed, so replace() not replaceAll().
		publishDate = publishDate.replace("年", "-").replace("月", "-").replace("日", "");
		if (publishDate.length() < 10)
			return;
		Date date = DateUtil.parseFormat(publishDate.substring(0, 10), "yyyy-MM-dd");
		table.setPublishDate(date);
	}

	/**
	 * Title extraction: the first title entry becomes the title; the subtitle
	 * (if present) is stored with the "——" marker removed.
	 */
	private void extractTitle(NewsPageTable table, NewsPageTemplate template) {
		List<String> title = template.getTitle();
		if (CollectionUtils.isEmpty(title))
			return;
		table.setTitle(title.get(0));
		String subTitle = template.subTitle;
		if (StringUtils.isEmpty(subTitle))
			return;
		table.setSubTitle(subTitle.replace("——", ""));
	}

	/**
	 * Scans the extracted content line by line for an author credit. Skips
	 * articles that are too short or contain no line breaks. Never propagates
	 * exceptions — author extraction is best-effort.
	 */
	private void extractAuthor(NewsPageTable table) {
		try {
			if (StringUtils.isBlank(table.content) || table.content.length() < CONTENT_LESS_NUM)
				return;
			if (table.content.indexOf(DolphinCrawlerConsts.DEFAULT_LINE_BREAK_WORD) < 0)
				return;
			String[] lines = table.content.split(DolphinCrawlerConsts.DEFAULT_LINE_BREAK_WORD);
			for (String line : lines) {
				String author = parseAuthor(line);
				if (StringUtils.isNotEmpty(author)) {
					table.setAuthor(author);
					return;
				}
			}
		} catch (Exception er) {
			LOG.error("extract author error, url=" + table.crawlUrl, er);
		}
	}

	/**
	 * Tries each known author pattern against one line of text.
	 *
	 * @return the captured author string, or {@code null} when no pattern matches
	 */
	private String parseAuthor(String text) {
		// Patterns are pre-compiled (AUTHOR_PATTERNS); previously they were
		// re-compiled for every line of every article.
		for (Pattern pattern : AUTHOR_PATTERNS) {
			Matcher m = pattern.matcher(text);
			if (m.find()) {
				return m.group(1);
			}
		}
		return null;
	}

	/**
	 * Refreshes each image URL through ImageFresh, stores the refreshed list on
	 * the table, and uses the first refreshed image as the thumbnail (logoUrl)
	 * if one is not already set.
	 */
	private void extractImage(NewsPageTable table, NewsPageTemplate template) throws Exception {
		List<String> images = template.images;
		if (CollectionUtils.isEmpty(images))
			return;
		List<String> saveImages = new LinkedList<>();
		ImageFresh fresh = ImageFresh.getInstance();
		for (String image : images) {
			String absoluteUrl = HtmlUtil.pictureUrlPref(image, table.crawlUrl);
			Pair<Integer, String> newPicPair = fresh.freshOne(absoluteUrl);
			if (newPicPair == null) { // defensive: skip images the refresher could not handle
				LOG.warn("image fresh failed, url=" + absoluteUrl);
				continue;
			}
			String newPic = newPicPair.second;
			if (StringUtils.isEmpty(table.logoUrl)) { // only the first image becomes the thumbnail
				table.setLogoUrl(newPic);
			}
			saveImages.add(newPic);
		}
		if (!CollectionUtils.isEmpty(saveImages)) {
			table.setImages(saveImages);
		}
	}

	/**
	 * Deserializes the fetched JSON into a template, populates the table's
	 * extracted fields, and marks the row as crawled (status=0, crawlFlag=1).
	 */
	private void extract(NewsPageTable table, String result) throws Exception {
		// Non-generic target type: fromJson(Class) suffices, no TypeToken needed.
		NewsPageTemplate template = GSON.fromJson(result, NewsPageTemplate.class);
		extractContent(table, template);
		extractPublishDate(table, template);
		extractImage(table, template);
		HttpUtil.insertImage2Content(table);
		extractAuthor(table);
		extractTitle(table, template);
		table.setStatus(0);
		table.setCrawlFlag(1);
	}

	/**
	 * Loads all not-yet-crawled seeds (crawlFlag == 0) from MongoDB, fetching
	 * only the id and crawl-URL columns.
	 * Bugfix: the no-timeout cursor is now closed in a finally block — it was
	 * previously leaked, which keeps a server-side cursor open indefinitely.
	 *
	 * @return the seed list; rows with missing fields are logged and skipped
	 */
	public List<NewsPageTable> getSeed() throws Exception {
		BaseMongoRepository repo = BaseMongoTools
				.getInstance(DolphinCrawlerConsts.MongoDBName.ITAOGAO.getName());
		BasicDBObject keys = new BasicDBObject();
		keys.put(DolphinCrawlerConsts.MONGODB_DEFAULT_ID, 1);
		keys.put(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_URL.getValue(), 1);
		DBObject query = new BasicDBObject();
		query.put(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_FLAG.getValue(), new BasicDBObject("$eq", 0));
		DBCursor cursor = repo.getCollection(ItaogaoConst.MONGODB_TABLE.GZDAILY.getValue()).find(query, keys)
				.addOption(Bytes.QUERYOPTION_NOTIMEOUT);
		List<NewsPageTable> seeds = new LinkedList<>();
		try {
			// Stream the cursor directly instead of materializing it twice via toArray().
			while (cursor.hasNext()) {
				DBObject data = cursor.next();
				try {
					Object id = data.get(DolphinCrawlerConsts.MONGODB_DEFAULT_ID);
					Object url = data.get(ItaogaoConst.SCHEMA_COLUMN_NAME.CRAWL_URL.getValue());
					NewsPageTable table = new NewsPageTable();
					table.setId(id.toString());
					table.setCrawlUrl(url.toString());
					seeds.add(table);
				} catch (Exception ex) {
					LOG.error("getSeed error!", ex);
				}
			}
		} finally {
			cursor.close();
		}
		return seeds;
	}

}