import json
import os
import re
import time

import requests
from bs4 import BeautifulSoup

def get_user_agent():
    """
    Read the User-Agent string from the shared config file.

    :return: the first line of the file with the trailing newline stripped,
             so it is safe to use as an HTTP header value
    """
    # Explicit utf-8: the path contains non-ASCII text and the platform
    # default encoding is not reliable across systems.
    with open('../../99 文件/user-agent.txt', 'r', encoding='utf-8') as f:
        # strip() removes the trailing '\n' that readline() keeps — a raw
        # newline inside a header value makes requests raise InvalidHeader.
        return f.readline().strip()


# Module-level request defaults: the Weibo login cookie is loaded once at
# import time and combined with the User-Agent into the headers sent on
# every request this script makes.
with open('../../99 文件/微博cookie.txt', 'r', encoding='utf-8') as f:
    # strip() drops the trailing newline that readline() keeps — a raw
    # newline inside a header value is rejected by requests.
    cookie = f.readline().strip()

headers = {
    'user-agent': get_user_agent(),
    'cookie': cookie
}


def get_page_id(user_id):
    """
    Fetch a user's profile page and extract the album ``page_id`` embedded
    in the page's inline ``$CONFIG`` JavaScript object.

    :param user_id: numeric Weibo user ID
    :return: the page_id string, or None when it cannot be found
             (e.g. not logged in, or the page layout changed) — the caller
             checks for None before proceeding
    """
    url = f'https://weibo.com/u/{user_id}'
    r = requests.get(url=url, headers=headers)
    try:
        r.encoding = 'utf-8'
        match = re.search(r'\$CONFIG\[\'page_id\'\]=\'(?P<page_id>.*?)\'', r.text)
        # group('page_id') is the correct accessor for a named group; the
        # original groups('page_id')[0] only worked by accident, and a
        # missing match would have raised AttributeError instead of
        # returning None as the caller expects.
        return match.group('page_id') if match is not None else None
    finally:
        # close the response even if parsing raises
        r.close()


def get_since_id(text):
    """
    Extract the pagination token ``since_id`` from raw response text.

    :param text: response body containing a ``since_id=...&quot;`` marker
    :return: the since_id string, or None when no marker is present
             (i.e. this was the last page)
    """
    # Parameter renamed from ``str`` (all callers pass positionally):
    # shadowing the builtin made the body unable to use str() safely.
    match = re.search(r'since_id=(?P<since_id>.*?)"', text)
    # group() is the correct accessor for a named group; the original
    # groups('since_id')[0] only worked by accident.
    return match.group('since_id') if match is not None else None


def get_first_page_images(page_id):
    """
    Fetch the first page of a user's photo album.

    The photo grid is delivered inside an ``FM.view({...})`` JavaScript
    call in the last <script> tag; its argument is a JSON object whose
    'html' field is the grid's HTML fragment.

    :param page_id: album page_id obtained from get_page_id()
    :return: (since_id, urls) — pagination token for the next page
             (None when there is no next page) and the list of image
             ``src`` URLs on this page
    """
    # 'type=photo' is passed once via params; the original duplicated it
    # in both the URL and params, sending '?type=photo&type=photo'.
    url = f'https://weibo.com/p/{page_id}/photos'
    params = {
        'type': 'photo'
    }
    r = requests.get(url=url, headers=headers, params=params)
    try:
        r.encoding = 'utf-8'
        bs = BeautifulSoup(r.text, 'html.parser')
        script = bs.find_all('script')[-1].string
        # dot escaped: the original r'^FM.view' matched any character
        # between FM and view
        match = re.search(r'^FM\.view\((?P<data>.*?)\)$', script)
        html = json.loads(match.group('data'))['html']
        bs_html = BeautifulSoup(html, 'html.parser')
        images = bs_html.find_all('img', class_='photo_pict')
        urls = [image.get('src') for image in images]
        return get_since_id(r.text), urls
    finally:
        # close the response even if parsing raises
        r.close()


def get_other_page_images(owner_uid, viewer_uid, page_id, since_id, page):
    """
    Fetch one follow-up page of the photo album via the AJAX endpoint.

    :param owner_uid: Weibo ID of the album owner (the account being scraped)
    :param viewer_uid: Weibo ID of the logged-in viewer
    :param page_id: album page_id from get_page_id()
    :param since_id: pagination token returned by the previous page
    :param page: 1-based number of the page being requested
    :return: (since_id, urls) — next pagination token (None on the last
             page) and the list of image ``src`` URLs on this page
    """
    url = 'https://weibo.com/p/aj/album/loading'
    params = {
        'ajwvr': 6,
        'type': 'photo',
        'owner_uid': owner_uid,
        'viewer_uid': viewer_uid,
        # since_id is already a string; the original wrapped it in a
        # redundant f-string
        'since_id': since_id,
        'page_id': page_id,
        'page': page,
        'ajax_call': 1,
        # cache-buster: current time in milliseconds
        '__rnd': int(round(time.time() * 1000)),
    }
    r = requests.get(url=url, headers=headers, params=params)
    try:
        r.encoding = 'utf-8'
        # the endpoint returns JSON whose 'data' field is an HTML fragment
        html = r.json()['data']
        bs_html = BeautifulSoup(html, 'html.parser')
        images = bs_html.find_all('img', class_='photo_pict')
        urls = [image.get('src') for image in images]
        return get_since_id(r.text), urls
    finally:
        # close the response even if parsing raises
        r.close()


def get_image_urls(owner_uid, viewer_uid):
    """
    Crawl the full photo-album URL list of a Weibo user.

    NOTE(review): the original docstring had the two parameters' meanings
    swapped relative to how ``__main__`` actually calls this function —
    the owner is the account being scraped.

    :param owner_uid: Weibo ID of the account whose album is scraped
    :param viewer_uid: Weibo ID of the logged-in account (your own ID)
    :return: list of full-size image URLs, or None when the page_id
             could not be determined
    """
    page_id = get_page_id(owner_uid)
    # guard clause: without a page_id nothing else can be fetched
    if page_id is None:
        return None
    # original message had a typo ('获取成' -> '获取成功')
    print('page_id获取成功: %s' % page_id)
    since_id, urls = get_first_page_images(page_id)
    print('第1页爬取成功')
    page = 1
    # keep paging while the previous response carried a since_id token
    while since_id is not None:
        page += 1
        since_id, page_urls = get_other_page_images(
            owner_uid, viewer_uid, page_id, since_id, page)
        print('第%s页爬取成功' % page)
        urls.extend(page_urls)
    for i, url in enumerate(urls):
        # some srcs are protocol-relative ('//...'); make them absolute
        if url.find('https:') == -1:
            url = 'https:' + url
        # request the larger mw1024 rendition instead of the 300px thumbnail
        urls[i] = url.replace('thumb300', 'mw1024')
    return urls


if __name__ == '__main__':
    # your own logged-in Weibo ID
    my_uid = '6794598292'
    # Weibo ID of the account whose album is downloaded
    view_uid = '6794598292'
    urls = get_image_urls(view_uid, my_uid)
    # get_image_urls returns None when page_id lookup fails; the original
    # crashed with a TypeError in enumerate() in that case
    if not urls:
        print('未获取到图片链接')
    else:
        # local directory the images are saved into
        file_path = 'D:/image/weibo/%s/' % view_uid
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        for index, image in enumerate(urls):
            try:
                r = requests.get(url=image, headers=headers, timeout=10)
                try:
                    # only create the file on success; the original opened
                    # (and left empty) a file even for non-200 responses
                    if r.status_code == 200:
                        # name files 1.jpg, 2.jpg, ... keeping the extension
                        file_name = '%s.%s' % (index + 1, image[image.rfind('.') + 1:])
                        with open(file_path + file_name, 'wb') as f:
                            f.write(r.content)
                        # success is reported only after the file is written;
                        # the original printed "success" before downloading
                        print('第%d/%d张图片下载成功,url=%s' % (index + 1, len(urls), image))
                finally:
                    r.close()
            except requests.RequestException:
                # narrowed from bare BaseException, which also swallowed
                # KeyboardInterrupt; timeouts/connection errors skip one image
                print('读取图片超时')