package org.example;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * A minimal web crawler: starting from a seed URL it fetches pages over HTTP,
 * extracts absolute http/https links with a regular expression, and follows
 * them breadth-first until {@link #MAX_PAGES} pages have been visited.
 *
 * <p>Not thread-safe: state is held in static fields and the crawl is
 * single-threaded.
 */
public class SimpleWebCrawler {

    // Compiled once and reused — recompiling a Pattern per page is wasted work.
    // Matches an absolute http/https URL followed by an optional path.
    private static final Pattern LINK_PATTERN =
            Pattern.compile("(https?://[\\w\\-\\.]+(/\\S*)?)");

    // Hard cap on pages fetched so the crawl terminates instead of trying to
    // walk the entire reachable web.
    private static final int MAX_PAGES = 200;

    // URLs already fetched, so each page is requested at most once.
    private static final Set<String> visitedLinks = new HashSet<>();

    public static void main(String[] args) {
        String startUrl = "https://www.nipic.com"; // seed URL — replace with the site you want to crawl
        crawl(startUrl);
    }

    /**
     * Crawls breadth-first starting from {@code url}.
     *
     * <p>Uses an explicit work queue instead of recursion: the original
     * recursive form overflowed the call stack on any realistically linked
     * site, since every discovered link deepened the stack by one frame.
     *
     * @param url the seed URL to start crawling from
     */
    private static void crawl(String url) {
        Deque<String> queue = new ArrayDeque<>();
        queue.add(url);

        while (!queue.isEmpty() && visitedLinks.size() < MAX_PAGES) {
            String current = queue.poll();
            // add() returns false if already present — skip pages seen before.
            if (!visitedLinks.add(current)) {
                continue;
            }
            System.out.println("正在爬取: " + current);
            try {
                String body = fetch(current);
                if (body != null) {
                    queue.addAll(extractLinks(body));
                }
            } catch (Exception e) {
                System.out.println("爬取 " + current + " 出错: " + e.getMessage());
            }
        }
    }

    /**
     * Downloads the page body at {@code url}.
     *
     * @param url the page to fetch via HTTP GET
     * @return the response body, or {@code null} if the server did not answer 200 OK
     * @throws Exception on connection or read failure
     */
    private static String fetch(String url) throws Exception {
        HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
        connection.setRequestMethod("GET");
        try {
            int responseCode = connection.getResponseCode();
            if (responseCode != HttpURLConnection.HTTP_OK) {
                System.out.println("无法访问: " + url + ", 响应码: " + responseCode);
                return null;
            }
            StringBuilder content = new StringBuilder();
            // try-with-resources guarantees the reader closes even if readLine
            // throws mid-stream (the original leaked it on failure).
            // Charset is pinned: pre-Java-18 the platform default would apply.
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = in.readLine()) != null) {
                    content.append(line);
                }
            }
            return content.toString();
        } finally {
            // Release the underlying connection resources.
            connection.disconnect();
        }
    }

    /**
     * Extracts every absolute http/https link found in {@code html}.
     *
     * @param html the raw page text to scan
     * @return the matched links, in order of appearance (may contain duplicates)
     */
    static List<String> extractLinks(String html) {
        List<String> links = new ArrayList<>();
        Matcher matcher = LINK_PATTERN.matcher(html);
        while (matcher.find()) {
            links.add(matcher.group());
        }
        return links;
    }
}
