'''
Download product images from JD.com (jd.com) keyword-search result pages.

The JD crawler is fairly simple; constructing a plausible request header
is the key part.
'''
from fake_useragent import UserAgent # 用于构建请求头
from bs4 import BeautifulSoup #用于URL解析
import requests # 用于请求资源
import os,time # 其他辅助

# Browser-like request headers; JD serves the normal search page only when
# the request looks like a real desktop browser.
# NOTE(review): the previous `ua = UserAgent()` instance was never used
# (the user-agent below is hard-coded), and constructing it triggers a
# network fetch of the UA database — removed as dead code.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9',
}

# Crawl search-result pages 1..4 and download every product thumbnail.
for page in range(1, 5):

    # One output folder per result page, e.g. "cqww1/".
    folder_path = 'cqww' + str(page) + '/'
    os.makedirs(folder_path, exist_ok=True)  # race-free create-if-missing

    url = ('https://search.jd.com/Search?keyword=%E5%85%85%E6%B0%94%E5%A8%83%E5%A8%83'
           '&suggest=2.his.0.0&wq=&page=' + str(page) + '&s=90&click=0')
    # timeout prevents the script from hanging forever on a stalled connection
    html1 = requests.get(url, headers=headers, timeout=30).content.decode('utf-8')
    soup = BeautifulSoup(html1, 'html.parser')
    li_list = soup.find_all('li', class_='gl-item')  # one <li> per product

    counter = 0
    for li in li_list:
        start = time.time()
        counter = counter + 1
        img_tag = li.find('div', class_='p-img').find('a').find('img')
        # Lazy-loaded items carry the URL in 'data-lazy-img' instead of 'src';
        # fall back so we neither crash on None nor skip those products.
        src = img_tag.get('src') or img_tag.get('data-lazy-img')
        if not src:
            continue  # no usable image URL for this item
        image = 'https:' + src
        print(image)
        # Reuse the browser-like headers for the image request as well.
        data = requests.get(image, headers=headers, timeout=30).content
        # Name the file after the last URL path segment (a raw [-16:] slice
        # could contain '/' and break the open() below).
        name = folder_path + image.rsplit('/', 1)[-1]
        with open(name, 'wb') as file:
            file.write(data)
        end = time.time()
        print('image'+str(counter)+' in page '+str(page)+' is downloaded. It costs %.2f seconds.'%(end-start))
        time.sleep(1)  # polite delay between downloads
    
