package com.zyx.spider.common;

import lombok.extern.slf4j.Slf4j;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/**
 * Minimal robots.txt parser: downloads {@code <websiteUrl>/robots.txt} and records
 * every non-empty {@code Disallow} rule it contains.
 *
 * <p>Limitations: {@code User-agent} groups are ignored — every Disallow rule in the
 * file is applied regardless of which agent group it belongs to. {@code Allow} rules
 * and wildcard patterns ({@code *}, {@code $}) are not supported; matching is a plain
 * prefix check.
 */
public class RobotsTxtParser {

    /** Disallow field prefix; robots.txt field names are case-insensitive (RFC 9309). */
    private static final String DISALLOW_FIELD = "disallow:";

    private final List<String> disallowedPaths = new ArrayList<>();

    /**
     * Fetches and parses {@code websiteUrl + "/robots.txt"}.
     *
     * @param websiteUrl site root, e.g. {@code "https://example.com"} (no trailing slash)
     * @throws IOException if robots.txt cannot be fetched or read
     */
    public RobotsTxtParser(String websiteUrl) throws IOException {
        URL robotsTxtUrl = new URL(websiteUrl + "/robots.txt");
        // RFC 9309 specifies robots.txt is UTF-8; without an explicit charset,
        // InputStreamReader falls back to the platform default (pre-JDK 18).
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(robotsTxtUrl.openStream(), StandardCharsets.UTF_8))) {
            parse(reader);
        }
    }

    /**
     * Package-private constructor for tests: parses rules from an already-open reader
     * without performing any network I/O. The reader is not closed by this constructor.
     *
     * @throws IOException if reading fails
     */
    RobotsTxtParser(BufferedReader reader) throws IOException {
        parse(reader);
    }

    /** Reads the robots.txt body line by line and collects non-empty Disallow paths. */
    private void parse(BufferedReader reader) throws IOException {
        String line;
        while ((line = reader.readLine()) != null) {
            // Strip inline comments ("#" to end of line) before matching fields.
            int hash = line.indexOf('#');
            if (hash >= 0) {
                line = line.substring(0, hash);
            }
            String trimmed = line.trim();
            // Field names are case-insensitive: "Disallow:", "disallow:", "DISALLOW:".
            if (trimmed.regionMatches(true, 0, DISALLOW_FIELD, 0, DISALLOW_FIELD.length())) {
                String path = trimmed.substring(DISALLOW_FIELD.length()).trim();
                // An empty Disallow value means "allow everything". Recording "" here
                // would make every path match startsWith("") and block the whole site.
                if (!path.isEmpty()) {
                    disallowedPaths.add(path);
                }
            }
        }
    }

    /**
     * Returns {@code true} if {@code path} is not covered by any recorded Disallow rule.
     * Matching is a simple prefix test: rule {@code /private/} blocks
     * {@code /private/page.html}.
     *
     * @param path URL path to check, e.g. {@code "/some/path"}
     * @return {@code true} when crawling the path is allowed
     */
    public boolean isPathAllowed(String path) {
        for (String disallowedPath : disallowedPaths) {
            if (path.startsWith(disallowedPath)) {
                return false;
            }
        }
        return true;
    }
}
