#爬取京东购物车信息
import os,time,sys
import re,requests,json
from pyquery import PyQuery
from bs4 import BeautifulSoup
from urllib.parse import urlencode
from urllib.request import urlretrieve

def getPage():
    '''Fetch the JD.com shopping-cart page.

    Sends an authenticated GET (session cookie baked into the headers) to
    https://cart.jd.com/cart.action and returns the HTML body as a string,
    or None when the request fails or the server does not answer HTTP 200.
    '''
    headers = {
        'authority':'cart.jd.com',
        'method':'GET',
        'path':'/cart.action',
        'scheme':'https',
        'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding':'gzip, deflate, br',
        'accept-language':'zh-CN,zh;q=0.9',
        'cookie':'__jdu=1077446858; shshshfpa=89dc48b1-f080-335a-70cc-b3e82718e280-1543278221; shshshfpb=22a04982ec89d4cd48a9d36a6bbed39fe52aa17a211e196676bfc8ec10; ipLoc-djd=1-72-2799-0; ipLocation=%u5317%u4EAC; __jdc=122270672; PCSYCityID=1; unpl=V2_ZzNtbUBVEBEgDUBXeR4IB2IDEg8SV0EVdFpAUX8aCQY0ABdZclRCFXwURldnGlgUZwYZX0BcQBNFCHZXcxFYA2YCEVRyZ3MWdThGVUsZXQNhBhdaS1NKJUUPdmRLGlQNYwQVXkdWcxRFCXYZexhaA2IGFVRGXnMURQs%3d; __jdv=122270672|kong|t_299570028_|tuiguang|33b5d47236e3411ca1310c7452d2b245|1545719555784; user-key=f1888d3b-8a2d-45af-84e0-0a904ad28351; cart-main=xx; _gcl_au=1.1.1275071159.1545719592; cd=0; shshshfp=e65eaf9559be383086c54f4a64c915c6; cn=7; __jda=122270672.1077446858.1543278216.1545719556.1545724509.6; wlfstk_smdl=azpj32t3e7k26noso78xhthmt5phxls7; TrackID=1RSif_ve81j5ZJydM5BJvOgAdn9HxC8HMhOgXkt94rFr0ev2V3LkPD-TiLSLMkbOQMC-eCcNBEcLyA-HKpVZv2kGhuGeklRC-w5D7pd6wpnx9M9FNq69ev_a-ssf-qKpQ; pinId=5KU3ETrB6ky-5PTyQ664pQ; pin=%E4%B8%9C%E4%B8%9Claileo; unick=jd_%E4%B8%9C%E4%B8%9C; ceshi3.com=103; _tp=b0b0F8T%2BhTrAQAQTSLJ%2FetkjdLgAyG6FdfWkkjNXNxw%3D; _pst=%E4%B8%9C%E4%B8%9Claileo; 3AB9D23F7A4B3C9B=GYQK2WOSKXPUOU7KZODULJH32XF2TNMJCYAUF5BLMXJ6ABOKXMTPGBTAM7YWXY3S7N3QYCHYLCOJLGE6QO75V5PMRY; shshshsID=4071707a4c4a14df232b4b48b67c5d3c_6_1545726568078; __jdb=122270672.12.1077446858|6.1545724509; thor=C6A4FD84B1A2464FA653828C5EE2BE26C726A0C7D8169777C0206C1F54F632048C91D00929EF49B98239FD3D74ABDBFED95101372075984DFD7E969D148FAFD90E7E6F7B156CE07DB46FEA437348ACC803E8B2064DAE58A2C230F02159809F6A0FD7747BD086EC513E605BC50F9DBDB4023690270BA19492B6B659A7347547A9B9BE3E6CF01CE5B10B17FE6F1CBA2571',
        'upgrade-insecure-requests':'1',
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2767.400',
    }
    url = 'https://cart.jd.com/cart.action'

    try:
        # Timeout keeps the script from hanging forever on a stalled connection.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        # Non-200 (e.g. redirect to login when the cookie expired): no data.
        return None
    except requests.RequestException:
        # Network / DNS / timeout failure — report and let the caller bail out.
        print('没有数据')
        return None

def parsePage(html):
    '''Parse the cart page HTML and yield one dict per cart row.

    Each yielded dict carries: title (product name), image (img src,
    may be None), num (quantity input value), price (unit price text)
    and allprice (line-total text).
    '''
    doc = PyQuery(html)
    # Every cart row lives in a div.item-form container.
    for row in doc("div[class='item-form']").items():
        yield {
            'title': row.find("div[class='p-name'] a").text(),
            'image': row.find('div[class="goods-item"] div[class="p-img"] a img').attr('src'),
            'num': row.find("input[class='itxt']").attr('value'),
            'price': row.find("div[class*='cell p-price'] strong").text(),
            'allprice': row.find("div[class='cell p-sum'] strong").text(),
        }

def savePage(item):
    '''Persist one cart item: download its image and append the record as a JSON line.

    item: dict produced by parsePage(); 'image' may be None when the page
    row had no img src (e.g. a lazy-loaded image), in which case the
    download is skipped but the record is still written.
    '''
    title = item.get('title')
    pic = item.get('image')
    if pic:
        # Replace characters that are illegal in file names so urlretrieve
        # does not fail on titles containing e.g. '/' or '?'.
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', str(title))
        imgurl = 'https:' + pic
        urlretrieve(imgurl, './images/jingdong/' + safe_title + '.jpg')
    # Append the full record as one JSON object per line, keeping CJK readable.
    with open('./jingdong.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')

def main():
    '''Entry point: fetch the cart page, parse it, and save every item.'''
    # Ensure the image output directory exists before any download.
    if not os.path.exists('./images/jingdong'):
        os.makedirs('./images/jingdong')
    html = getPage()
    # getPage() returns None on request failure; stop instead of feeding
    # None into PyQuery, which would raise.
    if html is None:
        return
    for item in parsePage(html):
        print(item)
        savePage(item)

if __name__ == '__main__':
    main()