import urllib.request
from urllib import request
from bs4 import BeautifulSoup


# Demo target URL fetched by the crawl() call at the bottom of this file.
url = 'https://www.baidu.com'
def crawl(url):
    """Fetch *url* over HTTP(S), print the raw body, and return it.

    Parameters
    ----------
    url : str
        Fully-qualified URL to fetch.

    Returns
    -------
    bytes
        The raw, undecoded response body.

    Raises
    ------
    urllib.error.URLError
        On connection failure or timeout (20 s).
    """
    # Browser-like User-Agent: many sites reject urllib's default agent string.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}
    req = request.Request(url, headers=headers)
    # `with` guarantees the underlying connection is closed even if read()
    # raises — the original leaked the response object on every call.
    # Use the same `request` alias throughout instead of mixing it with
    # `urllib.request` (the file imports the module twice).
    with request.urlopen(req, timeout=20) as page:
        contents = page.read()
    print(contents)
    return contents

# Guard the demo fetch so importing this module does not trigger a live
# network request as a side effect (original called crawl() unconditionally).
if __name__ == '__main__':
    crawl(url)
# NOTE: the crawler itself works fine; sex8 has strong anti-scraping measures.

