package com.gxljc.bear.crawler.itaogao.chinanews;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;

import com.alibaba.fastjson.JSONObject;
import com.gxljc.commons.util.Pair;
import com.gxljc.commons.util.StringUtil;
import com.gxljc.bear.crawler.itaogao.NewsPageTable;
import com.gxljc.bear.crawler.itaogao.util.DayUtil;
import com.gxljc.bear.crawler.itaogao.util.FetchDataUtil;
import com.gxljc.bear.crawler.itaogao.util.HbaseUtil;
import com.gxljc.bear.crawler.itaogao.util.HttpUtil;
import com.gxljc.bear.crawler.itaogao.util.ImageUtil;
import com.gxljc.bear.crawler.itaogao.util.MongodbUtil;
import com.gxljc.bear.crawler.util.DateUtil;

/**
 * @author haitao E-mail:haitao@summba.com
 * @version createTime：2021年9月20日 下午4:44:11
 * 
 */

/**
 * Crawler for www.chinanews.com scroll-news channels.
 *
 * <p>Two-phase operation: first the seed phase collects detail-page URLs per
 * channel/day listing page ({@link #genDefaultSeeds()} for the whole year so
 * far, {@link #genDefaultSeedsUpdate()} for today only), then
 * {@link #crawlerNews()} fetches each detail page, extracts the article
 * fields, and persists the document to MongoDB and the raw HTML to HBase.
 *
 * <p>Not thread-safe: seed and document state is held in instance fields.
 */
public class ChinanewsCrawler {
	// Channel codes appended to URL_HEAD. Populated once in a static
	// initializer: the original code added them in the constructor, so every
	// `new ChinanewsCrawler()` appended six duplicate entries to this shared
	// static list and multiplied the crawl work.
	private final static List<String> crawlerSite = new ArrayList<String>();
	private final static String URL_HEAD = "http://www.chinanews.com/scroll-news/";
	private final static String MONGONAME_COLLECTION = "t_site_chinanews";
	private final static Logger LOG = Logger.getLogger(ChinanewsCrawler.class);
	// Base fields shared by every article document; per-article documents are
	// built as fresh copies so optional keys never leak between articles.
	private Map<String, Object> newsMap = new HashMap<String, Object>();
	// Detail-page URLs collected by the seed phase, consumed by crawlerNews().
	private List<String> titleSeeds = new ArrayList<String>();
	private final static String SOURCE = "中国新闻网";
	// Whether detail pages are fetched through a proxy (listing pages never are).
	protected boolean proxy;

	static {
		crawlerSite.add("gj");      // international
		crawlerSite.add("cj");      // economy/finance
		crawlerSite.add("fortune"); // financial markets
		crawlerSite.add("ny");      // energy
		crawlerSite.add("it");      // IT
		crawlerSite.add("estate");  // real estate
	}

	public ChinanewsCrawler() {
		proxy = false;
		newsMap.put("status", 0);
		newsMap.put("mediaName", SOURCE);
	}

	/**
	 * Entry point: crawl all listing pages from January of the current year
	 * up to the current month, then fetch every collected detail page.
	 */
	public void startCrawlerAll() {
		genDefaultSeeds();
		LOG.info("Start crawler detail china news!");
		crawlerNews();
		LOG.info("china news crawler finished!");
	}

	/**
	 * Entry point: crawl only today's listing pages, then fetch every
	 * collected detail page (incremental update run).
	 */
	public void startCrawlerUpdate() {
		genDefaultSeedsUpdate();
		LOG.info("Start crawler detail china news!");
		crawlerNews();
		LOG.info("china news crawler finished!");
	}

	/**
	 * Collects seed URLs for every channel/day from January of the current
	 * year through the current month. Days 1..31 are tried for every month;
	 * non-existent listing pages are skipped by the URL-existence check.
	 */
	void genDefaultSeeds() {
		LOG.info("======Start chinanews seeds crawler!=======");
		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
		String[] ymd = sdf.format(new Date()).split("-");
		String year = ymd[0];
		int currentMonth = Integer.parseInt(ymd[1]);
		for (int month = 1; month <= currentMonth; month++) {
			String monthStr = DayUtil.handleValueLessThan10(month);
			for (int day = 1; day <= 31; day++) {
				String dayStr = DayUtil.handleValueLessThan10(day);
				for (String site : crawlerSite) {
					String visitUrl = makeVisitUrl(year, monthStr, dayStr, site);
					if (visitUrl.isEmpty() || !HttpUtil.checkUrlexists(visitUrl)) {
						continue;
					}
					crawlerTitleSeeds(visitUrl, "titleUrl");
				}
			}
		}
		LOG.info(String.format("seeds crawler finished! and seeds size is %s", titleSeeds.size()));
	}

	/**
	 * Collects seed URLs for today's listing page of every channel.
	 */
	void genDefaultSeedsUpdate() {
		LOG.info("======Start chinanews seeds crawler!=======");
		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
		String[] ymd = sdf.format(new Date()).split("-");
		String yearStr = ymd[0];
		String monthStr = ymd[1];
		String dayStr = ymd[2];
		for (String site : crawlerSite) {
			String visitUrl = makeVisitUrl(yearStr, monthStr, dayStr, site);
			if (visitUrl.isEmpty() || !HttpUtil.checkUrlexists(visitUrl)) {
				continue;
			}
			crawlerTitleSeeds(visitUrl, "titleUrl");
		}
		LOG.info(String.format("seeds crawler finished! and seeds size is %s", titleSeeds.size()));
	}

	/**
	 * Fetches one listing page and appends every detail-page URL from the
	 * JSON result to {@link #titleSeeds}, skipping URLs that MongoDB marks as
	 * not needing a re-crawl.
	 *
	 * @param url listing-page URL to fetch
	 * @param key JSON key holding the list of detail-page URLs ("titleUrl")
	 */
	@SuppressWarnings("unchecked")
	void crawlerTitleSeeds(String url, String key) {
		Pair<String, byte[]> pair = FetchDataUtil.fetResult(url, false);
		if (pair == null) {
			return;
		}
		String result = pair.first;
		if (result == null) {
			return;
		}
		JSONObject jsonResult = (JSONObject) JSONObject.parse(result);
		if (jsonResult == null || jsonResult.get(key) == null) {
			return;
		}
		for (String titleUrl : (List<String>) jsonResult.get(key)) {
			if (!MongodbUtil.needReCrawl(titleUrl, MONGONAME_COLLECTION)) {
				titleSeeds.add(titleUrl);
			}
		}
		// Periodic progress log every 1000 collected seeds.
		if (titleSeeds.size() % 1000 == 0) {
			LOG.info(String.format("titleSeeds size is %s", titleSeeds.size()));
		}
	}

	/**
	 * Fetches and persists every collected seed URL.
	 */
	void crawlerNews() {
		if (titleSeeds.isEmpty()) {
			LOG.info(String.format("titleSeeds Size is %s", titleSeeds.size()));
			return;
		}
		for (String seed : titleSeeds) {
			LOG.info(String.format("crawlering seed is %s", seed));
			processSeed(seed);
		}
	}

	/**
	 * Fetches and persists a single detail-page URL (ad-hoc/manual runs).
	 *
	 * @param seed detail-page URL
	 */
	void crawlerNewsSingle(String seed) {
		processSeed(seed);
	}

	/**
	 * Fetches one detail page, extracts the article fields, writes the
	 * document to MongoDB and the raw HTML to HBase. Shared by
	 * {@link #crawlerNews()} and {@link #crawlerNewsSingle(String)} — the two
	 * methods previously duplicated this logic with subtle drift.
	 *
	 * @param seed detail-page URL
	 */
	@SuppressWarnings("unchecked")
	private void processSeed(String seed) {
		Pair<String, byte[]> pair = FetchDataUtil.fetResult(seed, proxy);
		if (pair == null) {
			return;
		}
		String result = pair.first;
		byte[] html = pair.second;
		if (result == null) {
			return;
		}
		JSONObject jsonResult = (JSONObject) JSONObject.parse(result);
		if (jsonResult == null) {
			return;
		}

		// Fresh per-article document seeded with the base fields; the old
		// code reused one shared map, so optional keys (images, publishDate)
		// set for a previous article leaked into later documents.
		Map<String, Object> doc = new HashMap<String, Object>(newsMap);

		String id = ObjectUtils.toString(StringUtil.genMD5Val(seed));
		String title = ObjectUtils.toString(jsonResult.get("title"));
		String subTitle = ObjectUtils.toString(jsonResult.get("subTitle"));
		String logoUrl = ObjectUtils.toString(jsonResult.get("logoUrl"));
		String rawAuthor = ObjectUtils.toString(jsonResult.get("author"));
		String rawPublicTime = ObjectUtils.toString(jsonResult.get("publicTime"));

		String source = extractSource(jsonResult, rawPublicTime);
		String author = extractAuthor(rawAuthor);
		String publishDate = extractPublishDate(rawPublicTime);

		List<String> contentList = new ArrayList<String>();
		Object content = jsonResult.get("content");
		if (content != null) {
			contentList = (List<String>) content;
		}

		Object images = jsonResult.get("images");
		if (images != null) {
			List<String> imagesNew = ImageUtil.ImageUrlService((List<String>) images, seed);
			if (!imagesNew.isEmpty()) {
				doc.put("images", imagesNew);
			}
		}

		if (!logoUrl.isEmpty()) {
			try {
				// Absolutize a relative image path against the page URL.
				logoUrl = ImageUtil.imageUrlSupply(seed, logoUrl);
				// Convert to the in-house image platform URL.
				logoUrl = ImageUtil.imageConvertplatform(logoUrl);
			} catch (Exception e) {
				// Best-effort: keep the last value of logoUrl and continue.
				LOG.warn(String.format("logoUrl convert failed! logoUrl is %s", logoUrl), e);
			}
		}

		Date pd = DateUtil.parseFormat(publishDate, "yyyy年MM月dd日 HH:mm");
		if (pd != null) {
			doc.put("publishDate", pd);
		}

		doc.put("_id", id);
		doc.put("title", title);
		doc.put("subTitle", subTitle);
		doc.put("logoUrl", logoUrl);
		if (!contentList.isEmpty()) {
			// Paragraphs joined with "####"; full-width indent spaces removed.
			doc.put("content", StringUtils.join(contentList, "####").replaceAll("　　####|　　", "").trim());
		}
		doc.put("author", author);
		doc.put("crawlUrl", seed);
		doc.put("crawlFlag", 1);
		doc.put("crawlDate", new Date());
		doc.put("source", source);

		MongodbUtil.update2Mongodb(doc, MONGONAME_COLLECTION);

		NewsPageTable table = new NewsPageTable();
		table.setTitle(title);
		table.setId(id);
		HbaseUtil.saveHbase(table, html);

		LOG.info(String.format("insert the id/rowkey is %s ", id));
	}

	/**
	 * Derives the article source. The "publicTime" field usually embeds it as
	 * "...来源：XXX ..."; otherwise falls back to the explicit "source" field.
	 * Guards the split indexing that previously could throw
	 * ArrayIndexOutOfBoundsException on malformed pages.
	 */
	private String extractSource(JSONObject jsonResult, String publicTime) {
		if (publicTime.contains("来源")) {
			String[] parts = publicTime.split("来源：|来源:");
			if (parts.length > 1) {
				return parts[1].trim().split(" ")[0];
			}
		}
		return ObjectUtils.toString(jsonResult.get("source"));
	}

	/**
	 * Strips the "作者："/"编辑：" prefixes from the raw author field. Accepts
	 * both full-width and half-width colons (the single-page path previously
	 * handled only the full-width form).
	 */
	private String extractAuthor(String author) {
		if (author.contains("作者")) {
			String[] parts = author.split("作者：|作者:");
			if (parts.length == 2) {
				return parts[1].trim();
			}
		}
		if (author.contains("编辑")) {
			String[] parts = author.split("编辑：|编辑:");
			if (parts.length > 1) {
				return parts[1].split("】")[0];
			}
		}
		return author;
	}

	/**
	 * Isolates the "yyyy年MM月dd日 HH:mm" portion of the raw "publicTime"
	 * value by cutting off trailing "来源"/"编辑" fragments or a
	 * "发布时间：" prefix, guarding all split indexing.
	 */
	private String extractPublishDate(String publishDate) {
		if (publishDate.isEmpty()) {
			return publishDate;
		}
		if (publishDate.contains("来源")) {
			return publishDate.split("　来源")[0];
		}
		if (publishDate.contains("发布时间：")) {
			String[] parts = publishDate.split("发布时间：");
			String tail = parts.length > 1 ? parts[1] : "";
			if (tail.contains("编辑")) {
				tail = tail.split("编辑")[0];
			}
			return tail;
		}
		return publishDate.split("　")[0];
	}

	/**
	 * Builds a scroll-news listing URL, e.g.
	 * {@code http://www.chinanews.com/scroll-news/gj/2021/0920/news.shtml}.
	 *
	 * @return the listing URL, or "" when any date component is empty
	 */
	String makeVisitUrl(String year, String month, String day, String site) {
		String url = "";
		if (!year.isEmpty() && !month.isEmpty() && !day.isEmpty()) {
			url = URL_HEAD + site + "/" + year + "/" + month + day + "/news.shtml";
		}
		return url;
	}

}
