import requests
import random
import time
import os
# Browser-like User-Agent so the target site does not reject scripted requests.
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.139 Safari/537.36"}
whole = 3964  # total number of URLs to read from url.txt
# NOTE(review): rotating-proxy support, currently disabled (see the matching
# commented-out lines inside the crawl loop below).
# proxies_list =\
#      ['120.194.55.139:6969', '120.220.220.95:8085', '223.96.90.216:8085', '223.96.90.216:8085',
#      '120.194.55.139:6969', '47.106.105.236:80', '47.106.105.236:80', '117.114.149.66:55443',
#      '210.5.10.87:53281', '121.13.252.61:41564']
err_list: list = []     # URLs whose page text could not be saved as GBK (UnicodeError)
failed_list: list = []  # URLs whose fetch/save is treated as failed


def result(posr):
    """Print and persist the crawl report.

    posr: short status label ("完成"/"中止") interpolated into the report
    headers.  Dumps `err_list` (GBK-encoding failures) and `failed_list`
    (other failed fetches) to stdout, then writes both lists to
    result.txt, one URL per line.
    """
    print("爬取%s,以下为因编码错误导致的爬取失败url:" % posr)
    for idx, url in enumerate(err_list, start=1):
        print("[%d]:[%s]" % (idx, url))
    print("爬取%s,以下为爬取失败的url:" % posr)
    for idx, url in enumerate(failed_list, start=1):
        print("[%d]:[%s]" % (idx, url))
    # The URLs carry no trailing newline, so the original writelines() calls
    # produced one unreadable run-together line; append "\n" per entry.
    # (Also: name the handle `report`, not `re`, to avoid shadowing the
    # stdlib regex module name.)
    with open('result.txt', 'w', encoding="utf-8") as report:
        report.writelines(url + "\n" for url in err_list)
        report.writelines(url + "\n" for url in failed_list)


def main():
    """Crawl up to `whole` URLs listed in url.txt and save each page under html/.

    For each non-blank line in url.txt the first whitespace-separated token is
    fetched and the response body is written to html/<last-path-segment> as
    GBK.  Pages that cannot be encoded as GBK go to `err_list`; pages that are
    suspiciously small (< 35 KB, likely an error page) or whose fetch raised a
    network error go to `failed_list`.  A report is emitted at the end via
    result() — "中止" on Ctrl-C, "完成" on normal completion.
    """
    file_path = 'html/'
    os.makedirs(file_path, exist_ok=True)  # output dir must exist before writing
    with open("url.txt", "r", encoding="utf-8") as source:
        try:
            for i in range(whole):
                # Polite random delay so we don't hammer the server.
                time.sleep(random.randint(1, 3))
                # random_proxies = random.choice(proxies_list)
                # proxies = {'http': random_proxies}
                line = source.readline()
                if not line.strip():
                    # Blank line, or url.txt ran out before `whole` lines.
                    continue
                target = line.split()[0]
                # File name = everything after the last '/' in the URL.
                file_name = target.rsplit('/', 1)[-1]
                size = 0
                situation = "failed"
                try:
                    resp = requests.get(target, timeout=10, headers=headers)
                    # Trust charset detection over the (often missing) header.
                    resp.encoding = resp.apparent_encoding
                    with open(file_path + file_name, 'w', encoding="gbk") as out:
                        out.write(resp.text)
                    # Only stat the file after a successful write; the original
                    # code crashed with FileNotFoundError when the write failed.
                    size = os.path.getsize(file_path + file_name)
                    # Pages below ~35 KB are assumed to be error/placeholder pages.
                    if size >= 35840:
                        situation = "success"
                    else:
                        failed_list.append(target)
                except UnicodeError:
                    # Page text cannot be represented in GBK — record and go on.
                    err_list.append(target)
                except requests.RequestException:
                    # Timeout / connection error: record it and keep crawling
                    # instead of aborting the whole run on one bad URL.
                    failed_list.append(target)
                now = int(i / whole * 100) + 1  # rough progress percentage
                print("[当前正在爬取:%25s][%4d|%4d][%2d%s][爬取结果:%7s][文件大小:%6d]"
                      % (file_name, i + 1, whole, now, '%', situation, size))
        except KeyboardInterrupt:
            # User aborted: dump whatever was collected so far.
            result("中止")
        else:
            # Report exactly once on normal completion (the original `finally`
            # printed a second, misleading "完成" report even after an abort).
            result("完成")


if __name__ == "__main__":
    main()

# 我目前的想法是 让程序不断地去跑 轮番扫描文件 不合格的文件就重新爬

