import urllib.request as ur
from urllib.error import HTTPError

def test():
    """Fetch each URL listed in urls.txt and save every response body
    to url_1.txt, url_2.txt, ...

    Unreachable URLs (HTTP errors) are reported and skipped; the output
    counter only advances for successful downloads.
    """
    count = 1
    # Context manager ensures urls.txt is closed (original leaked the handle).
    with open('urls.txt', 'r', encoding='utf-8') as f1:
        for each_line in f1:
            # Lines read from a file keep their trailing newline, which is
            # not a valid part of a URL — strip it before requesting.
            url = each_line.strip()
            if not url:
                continue  # skip blank lines
            try:
                # Pass the Request object to urlopen — the original built it
                # (for the browser User-Agent header) but then ignored it.
                req = ur.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
                response = ur.urlopen(req).read()
            except HTTPError:
                print('无法访问%s' % each_line)
                continue
            filename = "url_%d.txt" % count
            count += 1
            # response is bytes; write the raw bytes instead of str(bytes),
            # which would save the literal "b'...'" repr.
            with open(filename, 'wb') as f:
                f.write(response)

# NOTE(review): this runs at import time, before the definitions below;
# consider guarding it with `if __name__ == "__main__":` like the second
# script section does — left as-is to preserve module-level behavior.
test()
import urllib.request
import chardet

def main():
    """Download every URL in urls.txt and save each page as url_<i>.txt,
    decoded with the character encoding detected by chardet.
    """
    with open("urls.txt", "r") as f:
        # Read the list of URLs to visit, one per line (no trailing newlines).
        urls = f.read().splitlines()

    # enumerate replaces the manual counter; numbering starts at 1
    # to match the original url_1.txt, url_2.txt, ... naming.
    for i, each_url in enumerate(urls, start=1):
        # Context manager closes the HTTP response (original leaked it).
        with urllib.request.urlopen(each_url) as response:
            html = response.read()

        # Detect the page encoding. chardet returns encoding=None when it
        # cannot tell (e.g. empty body); fall back to UTF-8 so decode()
        # doesn't raise TypeError.
        encode = chardet.detect(html)['encoding'] or 'utf-8'
        if encode == 'GB2312':
            # GBK is a superset of GB2312; pages labelled GB2312 frequently
            # contain GBK-only characters, so decode with the wider codec.
            encode = 'GBK'

        filename = "url_%d.txt" % i

        with open(filename, "w", encoding=encode) as each_file:
            # "ignore" drops any bytes invalid in the detected encoding.
            each_file.write(html.decode(encode, "ignore"))

# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
