local http = require("http")
local io = require("io")
local url = "http://example.com" -- URL of the target site to crawl
local filename = "links.txt"     -- file that extracted links are appended to

-- Below is a complete example: a simple crawler that fetches the given page
-- and extracts the links it contains.
--- Fetch a web page, extract every href="..." attribute value from its HTML,
-- and append the links to a file, one per line.
-- Both parameters are optional and default to the module-level configuration,
-- so the original zero-argument call `fetch_links()` keeps working.
-- @tparam[opt] string target_url page to fetch (defaults to `url`)
-- @tparam[opt] string out_file file to append links to (defaults to `filename`)
local function fetch_links(target_url, out_file)
    target_url = target_url or url
    out_file = out_file or filename

    local options = { method = "GET" }
    local response = http.request(target_url, options)
    if not response then
        print("Failed to fetch page")
        return
    end

    -- NOTE(review): `http` is not a standard Lua module; the original assumed
    -- a file-like response with :read(). Accept either a plain body string
    -- (LuaSocket-style) or a readable handle — confirm against the real library.
    local body
    if type(response) == "string" then
        body = response
    else
        body = response:read("*all")
    end

    -- Extract href attribute values directly with a Lua pattern.
    -- The original shelled out to sed via io.popen with the unquoted body
    -- concatenated into the command line — non-functional and a shell-injection
    -- risk — and never applied its own href pattern.
    local links = {}
    for link in body:gmatch('href="([^"]*)"') do
        links[#links + 1] = link
    end

    local file, err = io.open(out_file, "a")
    if not file then
        -- Surface the reason io.open gave us instead of discarding it.
        print("Failed to open file for writing" .. (err and (": " .. err) or ""))
        return
    end
    for _, link in ipairs(links) do
        file:write(link .. "\n")
    end
    file:close()
    print("Links saved to " .. out_file)
end

fetch_links() -- kick off the crawler: fetch the page and save its links
