import requests
import os
import json
from bs4 import BeautifulSoup

# Sample image URL used by the commented-out direct-download example at the
# bottom of the file; not referenced by the crawl itself.
img_url = 'https://d1.hucdn.com/upload/item/1810/06/09573836565596_800x800.jpg'
# Mobile product-detail page that carries the banner image carousel.
base_url = 'https://m.beidian.com/detail/detail.html'
# Query parameters for the detail page; "iid" is the product (item) id and
# also names the local output directory.
params = {
    "iid": 30648034
}
# Ensure the per-product output directory exists before any download starts.
os.makedirs('../../img/'+ str(params["iid"]), exist_ok= True)
def chunk_download(imgUrl, idx):
    """Stream-download one image to ../../img/<iid>/head<idx>.jpg.

    Args:
        imgUrl: Absolute URL of the image to fetch.
        idx: Sequence number used to build the output filename.

    Errors are caught and printed (best-effort download), matching the
    script's existing error-handling style.
    """
    try:
        # timeout so a stalled server cannot hang the script forever
        res = requests.get(imgUrl, stream=True, timeout=30)
        # fail fast on HTTP errors instead of saving an error page as a .jpg
        res.raise_for_status()
        dest = '../../img/' + str(params["iid"]) + '/head' + str(idx) + '.jpg'
        with open(dest, 'wb') as outfile:
            # 8 KiB chunks (was 32 bytes): far fewer write calls per image
            for chunk in res.iter_content(chunk_size=8192):
                outfile.write(chunk)
    except Exception as e:
        print('error')
        print(e)



def crawlerPage(param):
    """Fetch the product detail page and download every banner image.

    Args:
        param: Query-parameter dict for the detail page (expects "iid").

    The first carousel <img> carries its URL in 'src'; the lazy-loaded
    rest store it in the 'dataimg' attribute.
    """
    try:
        # timeout so a stalled server cannot hang the crawl
        res = requests.get(base_url, params=param, timeout=30)
        # surface HTTP errors instead of parsing an error page
        res.raise_for_status()
        html = BeautifulSoup(res.text, "html.parser")
        images = html.select(' #J_banner .slider-wrap img')
        for i, img in enumerate(images):
            attr = 'src' if i == 0 else 'dataimg'
            chunk_download(img.attrs[attr], i)
        print('download finish')
    except Exception as e:
        print('error')
        print(e)

# Run the crawl only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    crawlerPage(params)