# -*- coding: utf-8 -*-
import requests
import re
import os
import time

# The album page to crawl (other bloggers' pages should work too)
root_url = "https://weibo.com/p/1005053109260091/photos?from=page_100505&mod=TAB#place"

# NOTE(review): this cookie is a hard-coded session credential — it will
# expire and should not be committed to version control. Replace with your own.
headers = {
    'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/87.0.4280.141 Safari/537.36",
    'cookie': 'JSESSIONID=J8866281-8ECNNGOXYNHB3J09SN4N1-9XIT1DLK-IMNV3; maliyun_temporary_console0=1AbLByOMHeZe3G41KYd5WWZvrM%2BGErkaLcWfBbgveKB6rjUdbBcar6Hh%2FUCRU4Tf5God05%2FMh13drj1R4s09YXU3d4KNz8sGJFHjBpgkpoMzbwqHyOlXAWM%2BwddANtsW3o6ebfWwzjLw%2FWSY5jf4XA%3D%3D; aliyun_choice=CN; cna=UvunGJSxf3UCAXAkj2UhIn/V; xlly_s=1; ping_test=true; t=480beca51a2fc7691097d3d1c9f6ead0; _tb_token_=fb3eb7e57ee53; cookie2=14b4d8972990b5901852be09389422e4; _samesite_flag_=true; _hvn_login=6; csg=aa48950e; login_aliyunid="%E4%BD%A0%E8%8B%A5%E6%88%90%E9%A3%8E521"; login_aliyunid_ticket=oJ415kiPzj6Nf_4NpoU_BOTwChTBoNM1ZJeedfK9zxYnbN5hossqIZCr6t7SGxRigm2Cb4fGaCdBZWIzmgdHq6sXXZQg4KFWufyvpeV*0*Cm58slMT1tJw3_A$$EqXNM8NbTu6dfJB8*SUjfMjsKvmQM0; login_aliyunid_luid="BG+tH55hq4Wa70d51ef22753ab03d2c4065c3552045+8GlxDn9A3BXpqoCm7usPig=="; login_aliyunid_csrf=_csrf_tk_1249313784843271; login_aliyunid_abi="BG+8E3J5MW7d39c8c233e5dcb8e95828f9a93768a1b+1lFYCusPtbRHW/B+5fDmXao/+MPAOWBPTqa58sNqQb5liC8wpus="; login_aliyunid_pk=1271067061321500; login_aliyunid_pks="BG+Zr59puS50WVwhrlaYzlRhbuve7Vnkp/SZUGowOeVDvs="; hssid=1DoOHOqtvz-3Zzvhlq8_U8Q1; hsite=6; aliyun_country=CN; aliyun_site=CN; aliyun_lang=zh; FECS-XSRF-TOKEN=1c1fd7b8-2086-40ff-8ab2-95f8ce8e2ea9; FECS-UMID=%7B%22token%22%3A%22Yb8c6e2ca70f0aa6a64c41ca1dade7629%22%2C%22timestamp%22%3A%228820291659545B405340647B%22%7D; currentRegionId=cn-hangzhou; console_base_assets_version=3.22.5; tfstk=cepNI9NeIAHZx1BjwO645XmgHQzl3OwBjn2FV1AYUFcGVPVQcwxNiJ4Axp4RYi_-K1f..; l=eBN9nrLRjSMOwbXXKO5Zhurza77tPQAf5sPzaNbMiIncB6qsSFvsuEAQD-3fwkxPWhQNnsGeL3lS3OYMBqLSHyznixv9-ewGZbQZndK1.; isg=BEhJEAaGvFuCO9Df1BW4PK1UGbZa8az79kL4pwL7vEO23eFHjgUziiiRUbWtbWTT',  # fill in your own cookie
    # bugfix: was misspelled 'refer' — servers only recognize 'referer'
    'referer': root_url
}


def get_params():
    """Fetch the album landing page and scrape the AJAX request parameters.

    Parses the page's inline ``$CONFIG[...]`` JavaScript assignments.

    Returns:
        dict with keys 'oid', 'title', 'page_id', 'uid', or None when
        any value is missing (typically an unset/expired cookie).
    """
    # timeout so a stalled connection cannot hang the script forever
    res = requests.get(root_url, headers=headers, timeout=30)
    try:
        # Raw strings for the regexes; the [0] lookups raise IndexError
        # when a pattern finds no match (i.e. we are not logged in).
        # NOTE(review): \d{10} / \d{16} assume fixed-width ids — verify
        # against other bloggers' pages before generalizing.
        param_dict = {
            'oid': re.findall(r"\$CONFIG\['oid'\]='(\d{10})'; ", res.text)[0],
            'title': re.findall(r"\$CONFIG\['title_value'\]='(.*?)'; ", res.text)[0],
            'page_id': re.findall(r"\$CONFIG\['page_id'\]='(\d{16})'; ", res.text)[0],
            'uid': re.findall(r"\$CONFIG\['uid'\]='(\d{10})'; ", res.text)[0]
        }
    except IndexError:
        # The $CONFIG variables only appear for authenticated sessions
        print('【设置cookie了没】')
        return None
    return param_dict


def get_one_page():
    """Scrape page 1 of the album.

    Page 1 is special: the photo ids are embedded directly in the HTML
    rather than served via the AJAX endpoint.

    Returns:
        (jpg_list, since_id): full-size image URLs and the pagination
        cursor for page 2.

    Raises:
        IndexError: if no ``since_id`` is present in the page
            (single-page albums / unexpected markup).
    """
    # timeout so a stalled connection cannot hang the script forever
    res = requests.get(root_url, headers=headers, timeout=30)
    # Rewrite each photo id into its full-resolution image URL
    jpg_list = [f'https://wxt.sinaimg.cn/large/{pic_id}.jpg'
                for pic_id in re.findall(r'photo_id=(.*?)&', res.text)]
    since_id = re.findall(r'&since_id=(.*?)"', res.text)[0]
    return jpg_list, since_id


def get_per_page(param_dict, page, since_id):
    """Fetch one album page via Weibo's AJAX photo-loading endpoint.

    Args:
        param_dict: dict from get_params() with 'oid'/'uid'/'page_id'.
        page: 1-based page number to request.
        since_id: pagination cursor returned by the previous page.

    Returns:
        (jpg_list, since_id): full-size image URLs on this page and the
        cursor for the next page.

    Raises:
        IndexError: when the response carries no further ``since_id``,
            i.e. the last page has been reached. run() relies on this
            to terminate the crawl.
    """
    url = "https://weibo.com/p/aj/album/loading"
    params = {
        "ajwvr": "6",
        "type": "photo",
        "owner_uid": param_dict['oid'],
        "viewer_uid": param_dict['uid'],
        "since_id": since_id,
        "page_id": param_dict['page_id'],
        "page": page,
        "ajax_call": "1",
        # cache-buster: current timestamp in milliseconds
        "__rnd": round(time.time() * 1000),
    }
    # timeout so a stalled connection cannot hang the script forever
    res = requests.get(url, headers=headers, params=params, timeout=30)
    data = res.json()['data']
    # Rewrite each photo id into its full-resolution image URL
    jpg_list = [f'https://wxt.sinaimg.cn/large/{pic_id}.jpg'
                for pic_id in re.findall(r'photo_id=(.*?)&', data)]
    # Cursor for the next page, parsed from the raw response body
    since_id = re.findall(r'&since_id=(.*?)"', res.text)[0]
    return jpg_list, since_id


def download(url, root):
    """Download a single image into directory *root*.

    The local filename is the last path segment of *url*.

    Args:
        url: direct image URL.
        root: existing destination directory.
    """
    filename = os.path.join(root, url.split('/')[-1])
    # timeout prevents one stalled image request from hanging the crawl
    content = requests.get(url, timeout=30).content
    with open(filename, 'wb') as f:
        f.write(content)
    print(url.split('/')[-1], ' 下载完成')


def run():
    """Crawl every page of the configured album and save all photos.

    Stops early when the cookie is invalid (no params could be scraped)
    or when the last page is reached (no further since_id).
    """
    # Scrape the request parameters from the album landing page
    param_dict = get_params()

    # Without parameters (usually a missing cookie), stop the script
    if not param_dict:
        return

    # Destination folder named after the album title.
    # NOTE(review): hard-coded to the D: drive (Windows-only path).
    root = f'D://{param_dict["title"]}'
    # makedirs handles nested paths and is a no-op if it already exists
    os.makedirs(root, exist_ok=True)

    # Page 1 is special: photos are embedded directly in the HTML
    print('-----------------正在爬取第1页-----------------------')
    pic_list, since_id = get_one_page()
    # Download the images
    for pic in pic_list:
        download(pic, root)
    print('-----------------------------------------------------')
    # Subsequent pages are served as JSON by the AJAX endpoint
    for i in range(2, 1000):
        print(f'-----------------正在爬取第{i}页-----------------------')
        # Crawl delay to avoid anti-scraping IP/account bans
        time.sleep(1)
        try:
            # Fetch the image URLs for this page
            pic_list, since_id = get_per_page(param_dict, i, since_id)
        except IndexError:
            # No further since_id in the response: last page reached
            break
        # Download the images
        for pic in pic_list:
            download(pic, root)
        print('-------------------------------------------------------')


# Run the crawler only when executed as a script (not on import)
if __name__ == '__main__':
    run()