
# coding: utf-8

# In[2]:


from requests.exceptions import RequestException
from pyquery import PyQuery
import requests
import time,json
import os
# Raw string: in "E:\pythonstudy" the sequence \p is an invalid escape,
# which modern Python flags with a SyntaxWarning; r"..." keeps the exact
# same runtime value without the warning.
os.chdir(r"E:\pythonstudy")


# In[4]:


def getpage(url, timeout=10):
    """Fetch *url* with browser-like headers and return the page text.

    Parameters:
        url: the address to crawl.
        timeout: seconds to wait for the server before giving up
            (new keyword, default 10 — the original call had no timeout,
            so a stalled connection could hang the crawler forever).

    Returns:
        The response body decoded as UTF-8 on HTTP 200, or None on any
        other status code or on a request-level error.
    """
    try:
        # Browser-impersonating header set.  NOTE(review): the Cookie
        # carries a captured JD session and will expire — refresh it
        # from a logged-in browser when requests start returning the
        # login page instead of the cart.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': '__jdu=15218961992551579821768; shshshfpa=5903cf8c-bd8b-322c-bcfc-a64ab265084c-1535382369; shshshfpb=21d56d35afff24df4a237de0555f5a3955b850f902c275487849413622; ipLocation=%u5317%u4EAC; ipLoc-djd=1-72-2799-0; __jdv=122270672|direct|-|none|-|1537670167939; PCSYCityID=1000; user-key=2662c9b9-4aff-41fe-be49-8d580d393593; __jdc=122270672; _gcl_au=1.1.1283386478.1537782639; cart-main=xx; cn=2; 3AB9D23F7A4B3C9B=7WAWS5UUIWOGINTWNRX2NIMT7LMNXL4SPQYL3TW4MFYZ7VUWSJJLKQCU6W73JNTC7DWXKISFJSCMGLD7XIGPIJYSAU; cd=0; shshshfp=06c74d78cb886af22a36a15dec95e30c; __jda=122270672.15218961992551579821768.1521896199.1537782607.1537795027.5; __jdb=122270672.3.15218961992551579821768|5.1537795027; wlfstk_smdl=xa79gqajguk35psagfcyyx37engujdig',
            'Host': 'cart.jd.com',
            'referer': 'https://cart.jd.com/addToCart.html?rcd=1&pid=13512114031&pc=1&eb=1&rid=1537782709448&em=',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        }

        # Issue the request; timeout keeps a dead server from blocking us.
        res = requests.get(url, headers=headers, timeout=timeout)
        # Only a clean 200 counts as success.
        if res.status_code == 200:
            return res.content.decode('utf-8')
        return None
    except RequestException:
        # Network/transport failure — signal "no page" to the caller.
        return None


# In[9]:


def parsepage(content):
    """Parse the cart HTML and yield one dict per product row.

    Parameters:
        content: the cart page HTML as a string.

    Yields:
        {'title': ..., 'price': ...} for each "div.item-form" row.
    """
    # PyQuery uses CSS selectors (the original comment said xpath —
    # that was inaccurate; this is jQuery-style selection).
    doc = PyQuery(content)
    items = doc("div.item-form")

    for item in items.items():
        yield {
            'title': item.find("div.p-name a").text(),
            # BUGFIX: the original selector was "pstrong", which matches
            # a nonexistent <pstrong> element, so price was always "".
            # "p strong" selects the <strong> inside the price <p>
            # (presumably JD's "p-price" markup — TODO confirm against
            # a live cart page).
            'price': item.find("p strong").text(),
        }
        


# In[10]:


def main():
    """Entry point: fetch the JD cart page and print every parsed item."""
    url = 'https://cart.jd.com/cart.action?r=0.24326644391940544#none'
    # BUGFIX: the original called getPage(), but the function is defined
    # as getpage() — that raised NameError on every run.
    html = getpage(url)
    # The original also had a first `if html: parsepage(html)` whose
    # generator was created and immediately discarded — dead code, removed.
    if html:
        for item in parsepage(html):
            print(item)


# In[11]:


if __name__ == '__main__':
    main()

