import os
import random
import re
import time
import urllib.parse
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from PIL import Image
# Raise the recursion limit to reduce the chance of hitting Python's default stack-depth cap
import sys

# NOTE(review): a very large limit; deep recursion can still crash the C stack — confirm this is needed
sys.setrecursionlimit(10000000)
# Local directory where downloaded pages/images are stored (Windows path)
targetPath = "E:\\郭朝靖\\pythonwork\\doubanpage"


# Build the local save path for a downloaded resource.
def saveFile(path):
    """Return the local file path under ``targetPath`` for a resource URL.

    Ensures ``targetPath`` exists (creating missing parent directories).

    Args:
        path: Resource URL or path; the text after the last ``/`` is used
            as the file name (the whole string if it contains no ``/``).

    Returns:
        str: ``targetPath`` joined with the extracted file name.
    """
    # makedirs(exist_ok=True) fixes two defects of the original os.mkdir:
    # it creates missing parent directories and does not raise if the
    # directory already exists (race between isdir check and mkdir).
    os.makedirs(targetPath, exist_ok=True)

    # rsplit never raises, unlike str.rindex which throws ValueError when
    # the path contains no "/" at all.
    filename = path.rsplit("/", 1)[-1]
    return os.path.join(targetPath, filename)


# Crawler fetch routine.
def pachong(url):
    """GET *url* with browser-like headers and return the raw Response.

    Redirects are deliberately not followed so the driver loop only sees
    the page it actually requested.

    Args:
        url: Absolute URL to fetch.

    Returns:
        requests.Response: the response (redirects not followed).

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    heard = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        # BUG FIX: header key was misspelled 'Accpet-Encoding', so the
        # encoding preference was never actually communicated.
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        "connection": 'keep-alive',
        # BUG FIX: Host was hard-coded to www.douban.com, but this crawler
        # follows links to arbitrary https domains; derive it from the URL.
        'Host': urllib.parse.urlparse(url).netloc,
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64) AppleWebKit/537.36 (KHTML, likeGecko) Chrome/47.0.2526.108 safari/527.36 2345Explorer/8.7.0.16013'
    }
    # A timeout keeps the crawl loop from hanging forever on a dead host.
    return requests.get(url, headers=heard, allow_redirects=False, timeout=10)


# ---- crawl driver ----------------------------------------------------------
url = "https://www.douban.com/"

all_url = set()  # every unique page URL discovered so far
t_url = set()    # frontier: URLs discovered but not yet fetched
t_url.add(url)
# BUG FIX: the original stored the start timestamp in `a`, but `a` was
# reused inside the loop for the URL suffix (a = t[-4:]), so the final
# elapsed-time report (b - a) was meaningless.  The timer gets its own name.
start_time = time.time()

while t_url:  # loop until the frontier is exhausted
    url1 = t_url.pop()
    try:
        resport = pachong(url1)
    except Exception:  # narrowed from bare except: so Ctrl-C still works
        print("===")
        continue
    data = resport.text

    # Extract every href target from the fetched page.
    for link, t in set(re.findall(r"(href=[\"\'](.*?)[\"\'])", str(data))):
        if "https:" in link:
            # Skip static assets; only page-like URLs are crawled further.
            if not t.endswith((".jpg", ".png", ".gif", ".css")):
                # Plain membership test replaces the before/after length
                # comparison the original used to detect "newly added".
                if t not in all_url:
                    all_url.add(t)
                    t_url.add(t)  # queue the newly discovered link
                    print(len(all_url), t)

end_time = time.time()
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-123123=-=12=-===-=-=-=-=-=-=-=-=-=-=-=-")
print(all_url)
print(end_time - start_time)

# try:
#     print(req.headers['Location'])
# except:
#     print(req.text)

# for link,t in set(re.findall(r'(https[^s]*?(jpg|png|gif))',str(data))):
#    photo_path = saveFile(link)
#    photo_request = requests.get(link,stream=True)
#    f = open(photo_path,"wb")
#     for chunk in photo_request.iter_content(chunk_size=512):
#         if chunk:
#             f.write(chunk)
