import requests
def get_html(url):
    """Fetch *url* and return the decoded response body, or None on failure.

    Sends a desktop Chrome User-Agent because some sites reject the default
    python-requests UA.
    """
    headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"}
    try:
        r = requests.get(url=url, headers=headers, timeout=10)
        # Validate the HTTP status BEFORE touching the body (the original
        # decoded the body first and only then checked the status).
        r.raise_for_status()
        # apparent_encoding sniffs the real charset from the payload; avoids
        # mojibake when the server omits or misreports the charset header.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as err:
        # Only network/HTTP errors are expected here; anything else is a bug
        # and should propagate instead of being silently printed.
        print(err)
        return None


if __name__ == '__main__':
    # Smoke test: fetch Tencent's homepage and dump the HTML to stdout.
    target = 'https://www.tencent.com/zh-cn/'
    html = get_html(target)
    print(html)

import requests
try:
    # Direct image URL; the response body is binary JPEG data.
    url = 'https://img0.baidu.com/it/u=1414485057,4020434543&fm=253&fmt=auto&app=120&f=JPEG?w=500&h=519'
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    # (Removed the `r.encoding = r.apparent_encoding` line: encoding only
    # affects `r.text`; it is a misleading no-op when writing binary content.)
    # NOTE(review): the URL serves a JPEG but it is saved as '444.gif' -- the
    # extension is misleading; confirm whether that name is intentional.
    with open('444.gif', 'wb') as f:
        f.write(r.content)
except (requests.RequestException, OSError) as err:
    # Network/HTTP failures and file-write failures are both expected here.
    print(err)

import requests
try:
    # Batch-download a couple of images, saving each under d:\ with the last
    # path segment of its URL as the file name.
    url_list = ["http://www.bspider.top/static/yh31/images/202103302056530751.gif",
                "http://www.bspider.top/static/yh31/images/202103182029425871.jpg"]
    for url in url_list:
        r = requests.get(url, timeout=3)
        # Don't write an error page's bytes to disk on a 4xx/5xx response.
        r.raise_for_status()
        save_dir = "d:\\"  # renamed: `dir` shadowed the builtin
        file_name = save_dir + url.split("/")[-1]
        with open(file_name, "wb") as f:
            f.write(r.content)
except (requests.RequestException, OSError) as err:
    print(err)


# NOTE(review): in the original file everything below was indented INSIDE the
# `except` handler above, so it only executed when the downloads failed --
# almost certainly an indentation accident. Dedented to module level here.
def get_html(url):
    """Fetch *url* and return the decoded page text, or None on failure."""
    headers = {  # renamed: the original local was called `heapq`, shadowing the stdlib module name
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"}
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        # Sniff the real charset so r.text decodes correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as error:
        print(error)
        return None


if __name__ == '__main__':
    url = "http://www.baidu.com"
    print(get_html(url))
import requests
def get_html(url, params=None):
    """Fetch *url* (optionally with query-string *params*) and append the
    decoded page text to aaa.txt.

    The original read a module-global ``data`` for the query string, which
    raises NameError whenever the function is called before the __main__
    loop defines it. *params* makes that dependency explicit while keeping
    the old one-argument call working (params=None sends no query string).
    """
    headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"}
    try:
        r = requests.get(url=url, params=params, headers=headers, timeout=10)
        # Check the status before decoding/writing the body.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        # Append mode so pages accumulate in one file across loop iterations.
        with open("aaa.txt", 'a', encoding='utf-8') as fp:
            fp.write(r.text)
    except (requests.RequestException, OSError) as error:
        print(error)


if __name__ == '__main__':
    # Scrape pages 1-5 of the listing, passing the page number explicitly.
    for i in range(1, 6):
        url = "http://www.bspider.top/qidian/"
        get_html(url, {'page': i})