package com.files.crawler.controller;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;


/**
 * @author 袁强
 * @version 2024年10月17日 21:31:30
 */
@RestController
@RequestMapping("/")
public class WebController {

	private static final Logger logger = LoggerFactory.getLogger(WebController.class);

	/**
	 * Simple liveness/health-check endpoint.
	 *
	 * @return the literal string "success~"
	 */
	@RequestMapping("/test")
	public Object bootstrap() {
		return "success~";
	}

	/**
	 * Crawls every URL listed in the request body and returns, per source URL,
	 * the anchor links found on that page.
	 * Expected body shape: {@code {"urls":["https://a","https://b",...]}}
	 *
	 * NOTE(review): fetching arbitrary caller-supplied URLs is an SSRF vector —
	 * validate against an allow-list before exposing this publicly.
	 *
	 * @param object JSON request body containing a "urls" array
	 * @return map keyed by "源【url】", each value being that page's link-text → href map
	 */
	@RequestMapping("/web")
	public Map<String, Object> jsoupWeb(@RequestBody JSONObject object) {
		logger.info("抓取的网页地址: {}", object);
		Map<String, Object> map = new HashMap<>();
		JSONArray urls = object.getJSONArray("urls");
		if (urls == null) {
			// A missing "urls" array previously threw a NullPointerException; fail soft instead.
			logger.warn("请求体缺少 urls 数组: {}", object);
			return map;
		}
		for (Object url : urls) {
			map.put("源【" + url + "】", fetchLinks(url.toString()));
		}
		return map;
	}

	/**
	 * Fetches the page at {@code url} and collects every anchor's text → href.
	 * Note: duplicate link texts overwrite earlier entries (HashMap keyed by link text).
	 *
	 * @param url address of the page to fetch, e.g. "https://www.example.com"
	 * @return link-text to href map; empty when the fetch fails
	 */
	private Map<String, String> fetchLinks(String url) {
		Map<String, String> urlMap = new HashMap<>();
		try {
			// Connect to the target page and parse it.
			Document doc = Jsoup.connect(url).get();
			// All anchors carrying an href attribute.
			Elements links = doc.select("a[href]");
			for (Element link : links) {
				String linkUrl = link.attr("href");
				String linkText = link.text();
				// Parameterized logging instead of string concatenation.
				logger.info("标题: {}，链接: {}", linkText, linkUrl);
				urlMap.put(linkText, linkUrl);
			}
		} catch (IOException e) {
			// Pass the Throwable as the final argument (no {} for it) so SLF4J logs the
			// stack trace, and include the failing URL in the message.
			logger.error("抓取网页失败~ url: {}", url, e);
		}
		return urlMap;
	}

}
