from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import requests
import time,json


def getPage(url):
    """Fetch the given URL and return the response body as text.

    Sends a GET request with browser-like headers (including a hard-coded
    session cookie, so the request is authenticated as that JD account).

    :param url: absolute URL to fetch.
    :return: response text on HTTP 200, otherwise None (also on any
             requests-level failure such as connection error or timeout).
    """
    try:
        # Browser-like request headers; the Cookie carries the login session.
        headers={
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding':'gzip, deflate, br',
            'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Cache-Control':'max-age=0',
            'Connection':'keep-alive',
            'Cookie':'user-key=a662e32f-b565-4fc0-94d9-54c6282b74cf; cd=0; cn=1; shshshfp=c664be4a428f304335055d7a428d9f7d; shshshfpa=6cc4a6b0-cdf3-a196-cb02-a7adba4a89fb-1542542559; shshshsID=2f175710b35f14b5ccdc197dfd7ece8b_11_1542545550640; __jda=122270672.1542542559636740513868.1542542560.1542542560.1542542560.1; __jdb=122270672.27.1542542559636740513868|1.1542542560; __jdc=122270672; __jdv=122270672|direct|-|none|-|1542542559637; shshshfpb=0b32a234ca6f271a253b005041b99468ba08ffd10aac3d4fa5bf154e0b; __jdu=1542542559636740513868; pinId=Zd0m2YWv5cdGID1tlqy4kA; pin=happyxiaer; unick=%E9%A1%B6%E7%BA%A7%E7%8C%AB%E7%8C%AB; _tp=hqUyrkcRL7%2F8aERku5rIsQ%3D%3D; _pst=happyxiaer; PCSYCityID=1000; cart-main=xx; TrackID=1JeN9VHZqgcbBpvW0QEUD57OZ3-oGTD1wGJiEw2WwjMFcWNtz7UmYfprJZ7FiD11TuvEw2mb2xJZV4qKukQa1N_E63Hd6iZTr6D3T0PkoOt3I28KvojrErlCjzpZvIErR; ipLoc-djd=1-72-2819; 3AB9D23F7A4B3C9B=PFSA65LIJP4UVKB7XST5OV6QHXPKMQPZ4AN7S76I3DKOQV7YVPR6IKZ3YXBCABXZTSRIBZHBQNNN2W5IZ2XAK3A6YI',
            'Host':'cart.jd.com',
            'Referer':'https://www.jd.com/',
            'Upgrade-Insecure-Requests':'1',
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0'
            }
        # Perform the request. A timeout is mandatory: without one a stalled
        # server would hang this script indefinitely (requests has no default).
        res = requests.get(url, headers=headers, timeout=10)
        # Only a 200 response is considered a successful fetch.
        if res.status_code == 200:
            return res.text
        else:
            return None
    except RequestException:
        # Any network-level failure (DNS, connection, timeout, ...) -> None,
        # so the caller can treat "no page" uniformly.
        return None

def parsePage(content):
    """Parse cart-page HTML and yield one dict per product entry.

    Each yielded dict has keys:
      - 'proName': product title text (whitespace-stripped)
      - 'image':   product thumbnail URL from the <img src> attribute
      - 'price':   price text (whitespace-stripped)

    :param content: HTML document string (the JD cart page).
    """
    # Build the parse tree with the lxml backend.
    soup = BeautifulSoup(content, 'lxml')
    # Each product row on the cart page lives in a div.item-form container.
    for item in soup.find_all(name="div", attrs={"class": "item-form"}):
        name_tag = item.select_one("div.p-name a")
        img_tag = item.select_one("div.p-img a img")
        price_tag = item.select_one("p.plus-switch strong")
        # Skip malformed rows instead of crashing: select(...)[0] would raise
        # IndexError when a selector matches nothing, and .string is None for
        # tags with mixed children (AttributeError on .strip()).
        if not (name_tag and img_tag and price_tag):
            continue
        yield {
            # get_text(strip=True) is robust where .string.strip() is not:
            # it concatenates nested text nodes instead of returning None.
            'proName': name_tag.get_text(strip=True),
            'image': img_tag.attrs.get('src'),
            'price': price_tag.get_text(strip=True),
        }
    

def main():
    """Entry point: fetch the JD cart page and print every parsed item."""
    cart_url = 'https://cart.jd.com/cart.action'
    # Fetch the raw HTML; getPage returns None on any failure.
    page = getPage(cart_url)
    if not page:
        return
    # Walk the generator of parsed products and dump each one.
    for record in parsePage(page):
        print(record)

# Run the scraper only when this file is executed directly (not on import).
if __name__ == '__main__':
    main()
