import requests, re
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

def take_html(url: str) -> tuple:
    '''
    Fetch a book page and extract the lazy-loaded image URLs.

    param
    -----------
    url: the book's page URL (must contain the numeric book id, e.g.
         'https://ahmog.com/cn/s/322512/')

    return
    -----------
    (url_list, book_id):
        url_list: list of image URLs ('pic' host segment swapped to 'img')
        book_id: the first run of digits found in *url*

    raises
    -----------
    IndexError: if *url* contains no digits.
    AttributeError: if the page has no <ul class="img_list"> element.
    requests.RequestException: on network failure or timeout.
    '''
    # First digit run in the URL is taken as the book id.
    book_id = re.findall(r'\d+', url)[0]
    ua = UserAgent().random
    headers = {
        "Sec-Fetch-Dest": "image",
        # BUG FIX: the correct HTTP header name is "User-Agent";
        # "UserAgent" was silently ignored by the server.
        "User-Agent": ua,
    }
    # Timeout added so a stalled server cannot hang the scraper forever.
    req = requests.get(url, headers=headers, timeout=30).text
    soup = BeautifulSoup(req, 'html.parser')
    ul = soup.find('ul', class_='img_list')
    html_list = ul.find_all('img', class_='lazy')

    # Rewrite each image src ('pic' -> 'img'); use a distinct local name
    # instead of shadowing the *url* parameter.
    url_list = []
    for img in html_list:
        img_url = img.get('src').replace('pic', 'img')
        url_list.append(img_url)
    return url_list, book_id

if __name__ == '__main__':
    # Quick manual smoke test: scrape a sample book page and show the result.
    result = take_html('https://ahmog.com/cn/s/322512/')
    print(result)

