import os

import requests
from bs4 import BeautifulSoup

# Request headers shared by every request in this script.
# The active User-Agent impersonates Baiduspider; the commented one is a
# regular desktop Chrome UA kept as an alternative.
headers = {
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
    # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}

# NOTE(review): hard-coded session cookie — this will expire and is a
# credential committed to source; consider loading it from the environment.
cookies = {
    'session': '.eJyrViotTi1SsqpWyiyOT0zJzcxTsjLQUcrJTwexSopKU3WUcvOTMnNSlayUDM3gQEkHrDE-M0XJyhjCzkvMBSmKKTVNMjMDkiamFkq1tQDfeR3n.YPg9yw.LdJE3XAyox2uu4dLBZ-BsYt97bM'
}

def download_img(url):
    """Download one image into the local ``imgs/`` directory.

    Parameters
    ----------
    url : str
        Direct URL of the image; its last path segment becomes the
        local filename.

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status.
    """
    # Last URL path segment is the filename (e.g. .../abc.jpg -> abc.jpg).
    filename = url.split('/')[-1]
    # Ensure the target directory exists instead of assuming it was pre-created.
    os.makedirs('imgs', exist_ok=True)
    req = requests.get(url, headers=headers, cookies=cookies)
    # Fail loudly on HTTP errors rather than saving an error page as an image.
    req.raise_for_status()
    # Bug fix: the original wrote every image to the same literal path
    # 'imgs/(unknown)' and never used `filename`, so each download
    # overwrote the previous one. Use the computed per-image filename.
    with open(os.path.join('imgs', filename), 'wb') as f:
        f.write(req.content)

def crawler():
    """Fetch the movie listing page and download every poster image.

    Scrapes the 'movie-list' container, then for each list entry reads
    the image URL from the second ``<p>`` element and downloads it via
    :func:`download_img`.

    Raises
    ------
    requests.HTTPError
        If the listing page responds with a 4xx/5xx status.
    ValueError
        If the expected 'movie-list' container is missing from the page.
    """
    # NOTE(review): verify=False only affects HTTPS; it is a no-op for this
    # plain-http URL but is kept for parity with the original call.
    response = requests.get(
        'http://47.103.13.124:8001/poison_url',
        headers=headers,
        cookies=cookies,
        verify=False,
    )
    # Surface HTTP failures immediately instead of parsing an error page.
    response.raise_for_status()

    soup = BeautifulSoup(response.text, 'lxml')
    container = soup.find('div', class_='movie-list')
    if container is None:
        # Clearer failure than the original's bare AttributeError when the
        # page markup changes.
        raise ValueError("'movie-list' container not found in page")
    movie_list = container.find_all('a', class_='list-group-item')

    for movie in movie_list:
        # The image URL is the text of the second <p> inside each entry.
        img_url = movie.find('p').find_next_sibling('p').get_text()
        download_img(img_url)
        print(f'{img_url} downloader finish.')

# Run the crawl only when executed as a script, so importing this module
# for its helpers does not trigger network I/O as a side effect.
if __name__ == '__main__':
    crawler()

