import requests
from lxml import etree

# Scrape service-listing cards from zbj.com search results for keyword "saas".
# https://www.zbj.com/fw/?k=saas
url = "https://www.zbj.com/fw/?k=saas&p=2"
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "priority": "u=0, i",
    "sec-ch-ua": "\"Chromium\";v=\"130\", \"Microsoft Edge\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0"
}
res = requests.get(url, headers=headers)
# Fail fast on HTTP errors instead of silently parsing an error page.
res.raise_for_status()

html = etree.HTML(res.text)

# Locate the div for each result card.
# NOTE(review): this absolute xpath is tied to the current page layout — verify
# it still matches if the site markup changes.
divs = html.xpath("//*[@id='__layout']/div/div[3]/div[1]/div[4]/div/div[2]/div[1]/div[2]/div")

for div in divs:
    # Price text carries a leading "¥" currency sign; strip it off.
    # Guard every [0] lookup: a card missing a node would otherwise raise IndexError.
    price_nodes = div.xpath("./div/div[3]/div[1]/span/text()")
    price = price_nodes[0].strip("¥") if price_nodes else ""
    # The title is split into multiple <span>s wherever the highlighted search
    # keyword "saas" appears; re-join the fragments with the keyword itself.
    # (The original joined with "sass" — a typo that corrupted every title.)
    name = "saas".join(div.xpath("./div/div[3]/div[2]/a/span/text()"))
    shop_nodes = div.xpath("./div/div[5]/div/div/div/text()")
    shop = shop_nodes[0] if shop_nodes else ""
    print("价格:", price, "标题:", name, "厂家:", shop)