import requests

# Target page for the scraping exercise.
URL = "https://spiderbuf.cn/web-scraping-practice/scraper-practice-c01"

# Browser-like headers: the site checks User-Agent/Referer, and the session
# cookie was captured from a real browser visit (may expire — refresh if
# requests start failing).
HEADERS = {
    "user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
    "cookie":"_ga=GA1.1.269610293.1743563940; __cgf3t=G0gzgFKDRlLtmZH7NrzqOb1x4pek1xNQk12KKc4g21Y-1731624199; __gads=ID=d314684659e4aeb0:T=1743563942:RT=1743581742:S=ALNI_MZsTS-A5W7-_pcyxJ0av49u-QslvQ; __gpi=UID=000010858eab5ab1:T=1743563942:RT=1743581742:S=ALNI_MbSIg_wieH6nKTxPNrlXHEzGhOubw; __eoi=ID=3689acd366c98345:T=1743563942:RT=1743581742:S=AA-Afjah66_aJboJVjMIHNblH6FX; _ga_7B42BKG1QE=GS1.1.1743581681.2.1.1743581811.0.0.0",
    "referer":"https://spiderbuf.cn/web-scraping-practices/1",
}


def main() -> None:
    """Fetch the practice page and print its HTML to stdout.

    Raises:
        requests.Timeout: if the server does not respond within 10 seconds.
        requests.HTTPError: if the server returns a 4xx/5xx status.
    """
    # timeout is mandatory: requests.get with no timeout can block forever
    # if the server accepts the connection but never responds.
    res = requests.get(URL, headers=HEADERS, timeout=10)
    # Fail loudly on an error page instead of printing it as if it were data.
    res.raise_for_status()
    print(res.content.decode())


if __name__ == "__main__":
    main()
