# -*- coding:utf-8 -*-
# 爬取京东购物车信息
import requests # 请求库模块
from bs4 import BeautifulSoup
import re

def getPage():
    """Fetch the JD.com shopping-cart page and return its HTML.

    Returns:
        str | None: the response body on HTTP 200, otherwise None
        (including on any request error such as timeout or DNS failure).

    NOTE(review): the hard-coded cookie carries a login session and will
    expire; refresh it from a logged-in browser session when requests
    start returning the login page instead of the cart.
    """
    url = "https://cart.jd.com/cart.action"

    headers = {
        # Session cookie copied from a logged-in browser — required because
        # the cart page is only served to authenticated users.
        'cookie':'__jdu=97554093; shshshfpa=ec9b42f4-a7b0-890b-423a-b2e7648bd5c8-1559354924; shshshfpb=f9muHpTFkRO0ARIRLaJLRdg%3D%3D; user-key=380bdead-a819-4b24-9150-113949fed190; pinId=pOlmXKyDEt25oyDb-PFLZQ; pin=1099671704_m; unick=Bianca_27; _tp=PhaglPaByHpn1c1QU0MDRQ%3D%3D; _pst=1099671704_m; unpl=V2_ZzNtbUoAFh0mX05QchlcBmJXEl8RXhRCIA5BVylMDABuVBRfclRCFX0UR1NnGVwUZgoZWEJcQxJFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZHMfWAFiAhpfcmdEJUU4RVZ4HFgAVwIiXHIVF0l1C05ceB8RDWEHFlhDX0EldDhF; __jdv=122270672|kong|t_0_|zssc|fe676bb2-cf73-4166-8a73-f321f0e58207-p_113102|1560214910496; __jda=122270672.97554093.1559354922.1560214910.1560473458.6; __jdc=122270672; areaId=27; PCSYCityID=2376; ipLoc-djd=27-2376-4343-53945; cart-main=xx; mt_xid=V2_52007VwsUVlldU1IdeRFUV2MLRQFZRAFbHUkFWVE3Aw4CDllXRhkaTlRRNVZBWggIB2ocSBlUGWYEDlJYSVNTFE0aVwdgMxBiXWhbXB1IGFgCZgoSVFRcUF0fQR9cNWcFFlY%3D; cd=0; cn=9; shshshfp=6e93d113db8a65a1974e84f3fb41124e; 3AB9D23F7A4B3C9B=XVYFEJDBZNEBT6FHFAZHCAVOJKL3XKAAXPRF2KWMDIA43S6UQFDBXCR3JG5JJHMQVP3HTX5LDDAYQWN5DW64MPYMLM; shshshsID=2d741a984a9a07aa8ee680daae0f702c_25_1560478441721; __jdb=122270672.38.97554093|6.1560473458; thor=22649DF429687F2A821DF3F42548B9DBD49E58CB2C260D8BADEA11AA4F8E1DFB9B6FFB7445C090AFEEFA6BBD7585DC59B3BB0646E46A14AC8F586109DA26B6D7838942BB948607D9E273F8B218797E32FE8A8E287F826A9F1721D684E6E22054D26C80CA8DE0A3DA0379F62E7973D2101440CA816ED00A10BB82AE96B5E1B55FD8482CA5BD24D910B7757CE605F47DCC',
        # Browser user-agent so the server does not reject the scraper.
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36',
    }

    try:
        # Timeout prevents the script from hanging forever on a dead
        # connection (requests has no default timeout).
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        # Non-200 (e.g. redirect to login when the cookie expired).
        return None
    except requests.RequestException:
        # Catch only request-level failures (timeout, DNS, connection
        # reset) — a bare `except:` would also hide KeyboardInterrupt
        # and programming errors.
        return None

def parsePage(html):
    """Parse the cart-page HTML and yield one dict per cart item.

    Args:
        html (str): full HTML of the JD cart page.

    Yields:
        dict: {'id', 'name', 'image_url', 'price', 'count', 'sum'} for
        each item found; values other than 'id' are strings (or None
        when the corresponding tag has no single string child).
    """
    soup = BeautifulSoup(html, "lxml")
    # Five parallel CSS selections, one per displayed field of an item.
    imgs = soup.select("div a img")
    names = soup.select('div[class="item-msg"] div[class="p-name"] a')
    prices = soup.select('p[class="plus-switch"] strong')
    counts = soup.select('div[class="quantity-form"] input[autocomplete="off"]')
    sums = soup.select('div[class="cell p-sum"] strong')
    # zip instead of index-by-range: if the page markup shifts and the
    # selector lists differ in length, we stop at the shortest list
    # rather than raising IndexError mid-iteration.
    for n, (img, name, price, count, total) in enumerate(
            zip(imgs, names, prices, counts, sums), start=1):
        yield {
            'id': n,
            'name': name.text.strip(),
            # src attributes are protocol-relative ("//img..."), so
            # prefix the scheme to get a usable URL.
            'image_url': 'http:' + img.attrs['src'],
            'price': price.string,
            'count': count.attrs['value'],
            'sum': total.string,
        }

def main():
    """Entry point: fetch the cart page and print every parsed item."""
    # Fetch the raw HTML of the cart page.
    page = getPage()
    # Guard clause: nothing to do if the request failed or came back empty.
    if not page:
        return
    # Print each item dict produced by the parser.
    for item in parsePage(page):
        print(item)

# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()