import requests
from bs4 import BeautifulSoup

# HTTP headers sent with every request in this script.
# NOTE(review): the Cookie below is a hard-coded, logged-in Douban session.
# It will expire, it ties the scraper to one account, and it should not be
# committed to source control — consider loading it from an environment
# variable or a local config file instead.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/90.0.4430.212 Safari/537.36',
    'Cookie': 'bid=RJSPRK1rFbY; push_noty_num=0; push_doumail_num=0; dbcl2="226248034:/LZTT2nFj7A"; ct=y; '
              '__utmv=30149280.22624; '
              '_vwo_uuid_v2=DA406FF64603A72C3E921E35574304627|b75b5b79a8b38752f96ee3915bd3b5f1; '
              '__utmz=223695111.1621084205.3.3.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; '
              'ap_v=0,6.0; __utmz=30149280.1621157885.9.4.utmcsr=cn.bing.com|utmccn=('
              'referral)|utmcmd=referral|utmcct=/; ck=5eiX; _pk_ref.100001.4cf6=["","",1621161708,'
              '"https://www.douban.com/"]; _pk_ses.100001.4cf6=*; '
              '__utma=30149280.1866918799.1592112879.1621157885.1621161709.10; '
              '__utmb=30149280.0.10.1621161709; __utmc=30149280; '
              '__utma=223695111.588405269.1620652637.1621084205.1621161709.4; '
              '__utmb=223695111.0.10.1621161709; __utmc=223695111; '
              '_pk_id.100001.4cf6=ab4cca199d791460.1620652637.4.1621161716.1621086046. '
}


def douban():
    """Fetch the Douban group front page and return the topic links.

    Returns:
        list[str]: ``href`` values of the anchor inside every
        ``<td class="title">`` cell, in page order. Cells that lack an
        anchor are skipped instead of raising ``AttributeError``.
    """
    url = 'https://www.douban.com/group/665266/'
    r = requests.get(url=url, headers=headers)
    try:
        page = BeautifulSoup(r.text, 'html.parser')
        cells = page.find_all('td', class_='title')
        # Guard against title cells without an <a> child: the original
        # `cell.find('a').get('href')` crashed on such cells.
        anchors = (cell.find('a') for cell in cells)
        return [a.get('href') for a in anchors if a is not None]
    finally:
        # Release the connection even if parsing raises (the original
        # leaked it on any exception between the request and r.close()).
        r.close()


def get_page_count(url):
    """Return the total number of paginated pages for a Douban topic.

    Args:
        url: the topic URL (without a pagination query string).

    Returns:
        int: total page count; ``1`` when the page has no pagination
        widget (no ``<span class="thispage">``) or the widget lacks the
        ``data-total-page`` attribute.
    """
    r = requests.get(url=url, headers=headers)
    try:
        page = BeautifulSoup(r.text, 'html.parser')
        thispage = page.find('span', class_='thispage')
    finally:
        # Release the connection even if parsing raises.
        r.close()
    if thispage is None:
        return 1
    total = thispage.get('data-total-page')
    # The original returned the raw attribute string here (while returning
    # int 1 above), forcing callers to int() it, and would have crashed the
    # caller with int(None) if the attribute were absent. Normalize to int;
    # existing callers that wrap the result in int() keep working.
    return 1 if total is None else int(total)


def info(url):
    """Collect the matching ``<img>`` tags from every page of a topic.

    Args:
        url: topic URL (without a pagination query string).

    Returns:
        list: BeautifulSoup ``Tag`` objects for each ``<img>`` whose
        ``alt`` and ``title`` are empty and whose ``height`` is ``auto``
        — the attribute combination used by inline reply images here.
    """
    page_count = int(get_page_count(url))
    images = []
    for page_index in range(page_count):
        # Douban paginates topic replies 100 per page via ?start=N.
        page_url = url + '?start=%s' % (page_index * 100)
        r = requests.get(url=page_url, headers=headers)
        try:
            soup = BeautifulSoup(r.text, 'html.parser')
            images.extend(
                soup.find_all('img', attrs={'alt': '', 'title': '', 'height': 'auto'})
            )
        finally:
            # Release the connection even if parsing raises (the original
            # skipped r.close() on any exception).
            r.close()
    return images


# Entry point: walk every topic in the group and download its images.
link_list = douban()

for link in link_list:
    for tag in info(link):
        image = tag.get('src')
        print(image)
        if not image:
            # An <img> without a src would make requests.get(url=None)
            # raise; skip it (the original crashed here).
            continue
        r = requests.get(url=image, headers=headers)
        try:
            # Check the status BEFORE opening the file: the original opened
            # first, leaving a zero-byte file on disk for every failed
            # download. (It also called f.close() redundantly inside `with`.)
            if r.status_code == 200:
                # Derive "<name>.<ext>" from the last path segment of the URL.
                name = image[image.rfind('/') + 1:image.rfind('.')]
                ext = image[image.rfind('.') + 1:]
                with open('D:/images/%s.%s' % (name, ext), 'wb') as f:
                    f.write(r.content)
        finally:
            # Release the connection even if the disk write raises.
            r.close()
