package com.happyfamily.springboot.crowdsourcingplatform.service.weng.impl;

import com.happyfamily.springboot.crowdsourcingplatform.dao.weng.IndustryDynamicsDao;
import com.happyfamily.springboot.crowdsourcingplatform.dao.weng.LastInfomationDao;
import com.happyfamily.springboot.crowdsourcingplatform.model.IndustryDynamics;
import com.happyfamily.springboot.crowdsourcingplatform.model.LastestInfomation;
import com.happyfamily.springboot.crowdsourcingplatform.service.weng.ProcessService;
import com.happyfamily.springboot.crowdsourcingplatform.util.WormUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.util.ArrayList;
import java.util.List;

@Service
@Transactional(rollbackFor = Exception.class)
public class ProcessServiceImpl implements ProcessService
{
	/**
	 * Category id used for the general "互联网资讯" (internet news) feed.
	 */
	private static final int NEWS_PID = 35;

	/**
	 * CSS selector matching headline anchor tags on the crawled pages.
	 */
	private static final String LINK_SELECTOR = "h4 > a[target]";

	/**
	 * URL of the page to crawl, injected from configuration ({@code worm.url}).
	 */
	@Value("${worm.url}")
	private String url;

	/**
	 * Injected DAOs for persisting the crawled entries.
	 */
	@Autowired
	LastInfomationDao lastInfomationDao;
	@Autowired
	IndustryDynamicsDao industryDynamicsDao;

	/**
	 * Crawls the configured news page, parses every headline link and persists
	 * the results as {@link LastestInfomation} rows.
	 *
	 * <p>NOTE(review): the parsed entries were previously accumulated in a
	 * mutable instance field of this singleton bean, which is not thread-safe
	 * under concurrent requests; they are now collected in a local list.
	 */
	@Override
	public void process()
	{
		// Fetch the page source for parsing.
		String content = WormUtils.sendGet(url);
		// Parse the page with Jsoup.
		Document document = Jsoup.parse(content);
		lastInfomationDao.save(parseList(document));
	}

	/**
	 * Extracts every headline link from the parsed page.
	 *
	 * @param document parsed HTML page
	 * @return one {@link LastestInfomation} per matched anchor
	 */
	private List<LastestInfomation> parseList(Document document)
	{
		// Select headline anchors by the page's tag structure.
		Elements elements = document.select(LINK_SELECTOR);
		List<LastestInfomation> result = new ArrayList<>(elements.size());
		for (Element element : elements)
		{
			LastestInfomation lastestInfomation = new LastestInfomation();
			lastestInfomation.set(NEWS_PID, "互联网资讯", element.select("a").text(), element.select("a").attr("href"));
			result.add(lastestInfomation);
		}
		return result;
	}

	/**
	 * Crawls the Sohu IT channel page for the given industry and persists every
	 * headline as an {@link IndustryDynamics} row.
	 *
	 * @param industry industry display name; one of "数码通讯行业", "生活服务行业",
	 *                 "智能硬件行业" or "软件科学行业" (surrounding whitespace ignored)
	 * @throws IllegalArgumentException if the industry is not one of the four
	 *                                  supported names (previously an unknown
	 *                                  industry silently crawled an empty URL)
	 */
	@Override
	public void processToIndustry(String industry)
	{
		// Local name pageUrl avoids shadowing the injected 'url' field.
		String pageUrl;
		String category;
		int pid;
		switch (industry.trim())
		{
			case "数码通讯行业":
				pageUrl = "http://it.sohu.com/934?spm=smpc.null.side-nav.19.1582264002410BcvrMZu";
				pid = 45;
				category = "数码通讯";
				break;
			case "生活服务行业":
				pageUrl = "http://it.sohu.com/913?spm=smpc.null.side-nav.29.1582264626473765lg9m";
				pid = 46;
				category = "生活服务";
				break;
			case "智能硬件行业":
				pageUrl = "http://it.sohu.com/882?spm=smpc.null.side-nav.24.1582264362602Xl3QaZj";
				pid = 47;
				category = "智能硬件";
				break;
			case "软件科学行业":
				pageUrl = "http://it.sohu.com/880?spm=smpc.null.side-nav.39.15822643828624bIv362";
				pid = 48;
				category = "软件科学";
				break;
			default:
				throw new IllegalArgumentException("Unsupported industry: " + industry);
		}
		// Fetch the page source for parsing.
		String content = WormUtils.sendGet(pageUrl);
		// Parse the page with Jsoup.
		Document document = Jsoup.parse(content);

		// Select headline anchors by the page's tag structure.
		Elements elements = document.select(LINK_SELECTOR);

		// Collect into a local list: this singleton previously accumulated into a
		// shared instance field, which is not thread-safe.
		List<IndustryDynamics> dynamics = new ArrayList<>(elements.size());
		for (Element element : elements)
		{
			IndustryDynamics industryDynamics = new IndustryDynamics();
			// BUG FIX: the per-industry pid (45-48) was computed but never used;
			// a hard-coded 35 was stored instead. Pass the computed pid.
			industryDynamics.set(pid, category, element.select("a").text(), element.select("a").attr("href"));
			dynamics.add(industryDynamics);
		}
		industryDynamicsDao.save(dynamics);
	}

}
