import requests
import os
import time
import random
from colorama import Fore


def printf(name, now, total, situation, size):
    """Overwrite the current console line with crawl progress.

    name: file name being fetched; now/total: position in the batch;
    situation: "success"/"failed" label; size: bytes written to disk.
    """
    percent = int(now * 100 / total)
    line = ("\r|[当前正在爬取:%25s][%4d|%4d][%2d%s][爬取结果:%7s][文件大小:%6d]"
            % (name, now, total, percent, "%", situation, size))
    # "\r" + end="" keeps everything on one line that updates in place.
    print(line, end="")


def convert(dest):
    """Return the final path component of *dest* — the text after the last '/'.

    Used to turn a URL into a local file name.

    Bug fix: the original scanned backwards character by character and, for a
    string containing no '/', wrapped into negative indices and eventually
    raised IndexError. ``rsplit`` returns the whole string in that case, which
    is the sensible file name for a slash-free input; behavior for inputs that
    do contain '/' is unchanged.
    """
    return dest.rsplit('/', 1)[-1]


def save(name_list):
    """Persist the still-missing URLs to result.txt, one URL per line.

    Overwrites any previous result.txt so the file always reflects the
    latest failed batch.
    """
    with open("result.txt", "w", encoding="utf-8") as result:
        result.writelines(f"{url}\n" for url in name_list)


# Accepts a list of URLs to crawl.
def crawling(origin, path, min_size=32000, max_consecutive_failures=80):
    """Download every URL in *origin* into directory *path*, retrying failures.

    A download counts as failed when the request raises or when the saved
    file is smaller than *min_size* bytes (the site apparently serves a small
    error page in that case). Failed URLs are collected, written to
    result.txt via ``save``, and re-crawled until none remain. The function
    aborts entirely once *max_consecutive_failures* downloads fail in a row
    (likely an IP ban — the operator is told to switch IPs).

    Bug fixes vs. the original:
      * the ``except`` branch reset ``failed_num`` to 0 instead of
        incrementing it, so exception failures could never trip the abort
        threshold; it also printed progress and then fell into ``finally``,
        printing the same progress line twice;
      * the bare ``except:`` is narrowed to request/file errors;
      * when every URL succeeded the original recursed forever on an empty
        list; the retry recursion is now a loop with an explicit exit.
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/"
                      "98.0.4758.139 Safari/537.36"}
    proxies_list = \
        ['120.194.55.139:6969', '120.220.220.95:8085', '223.96.90.216:8085', '223.96.90.216:8085',
         '120.194.55.139:6969', '47.106.105.236:80', '47.106.105.236:80', '117.114.149.66:55443',
         '210.5.10.87:53281', '121.13.252.61:41564']
    pending = list(origin)
    while pending:
        failed_num = 0          # consecutive failures in the current pass
        new_missing_list = []   # URLs to retry on the next pass
        for now, url in enumerate(pending, start=1):
            situation = ""
            size = 0
            try:
                # Random pause so the target site is not hammered.
                time.sleep(random.randint(1, 3))
                # NOTE(review): only plain-http requests are proxied; https
                # URLs go direct — confirm that is intended.
                proxies = {'http': random.choice(proxies_list)}
                resp = requests.get(url, timeout=10, headers=headers, proxies=proxies)
                resp.encoding = resp.apparent_encoding
                target = path + convert(url)
                with open(target, 'w') as out:
                    out.write(resp.text)
                size = os.path.getsize(target)
                if size < min_size:
                    # Too small to be a real page — treat as failed.
                    situation += "failed"
                    new_missing_list.append(url)
                    failed_num += 1
                else:
                    situation += "success"
                    failed_num = 0
            except (requests.RequestException, OSError):
                new_missing_list.append(url)
                failed_num += 1
                situation += "failed"
            finally:
                printf(convert(url), now, len(pending), situation, size)
                if failed_num >= max_consecutive_failures:
                    print("|超过%2d个网页连续爬取失败,程序退出,建议更换ip!" % failed_num)
                    return
        if not new_missing_list:
            return  # everything fetched — done
        print("\n|有{}个网页未爬取成功,现在程序将继续从目标网站爬取:".format(len(new_missing_list)))
        save(new_missing_list)
        pending = new_missing_list


# --- script entry: find saved pages that are too small and re-crawl them ---
path_tmp = "html\\"  # NOTE(review): Windows-style separator — confirm target OS
missing_list = []
# Parallel lists: url_list holds full URLs, source_list the local file names
# derived from them (text after the last '/').
source_list = []
url_list = []
# Read every non-blank line of url.txt; the first whitespace-separated token
# is the URL. (The original hard-coded exactly 3964 lines, which crashed on a
# shorter file and silently ignored extra lines.)
with open("url.txt", "r", encoding="utf-8") as source:
    for line in source:
        parts = line.split()
        if not parts:
            continue  # skip blank lines
        url = parts[0]
        url_list.append(url)
        source_list.append(convert(url))

# A saved file under 32000 bytes is assumed to be an error page and its URL
# is queued for re-crawling (same threshold crawling() uses).
for url, filename in zip(url_list, source_list):
    if os.path.getsize(path_tmp + filename) < 32000:
        missing_list.append(url)
print("|有{}个网页未爬取成功,现在程序将继续从目标网站爬取:".format(len(missing_list)))
crawling(missing_list, path_tmp)
