# 访问京东商城网址，选择多个商品放入购物车后查看自己的购物车。使用Python爬取京东商城网址购物车中的所有商品信息，并将信息写入文件中。
# 第八周作业2)
# 班级：Python五期
# 学员：李子坚

from requests.exceptions import RequestException
#from lxml import etree
#from bs4 import BeautifulSoup
from pyquery import PyQuery
import requests
import time,json
import os

def mkdir(path):
    '''Create the directory at *path* (and any parents) if it does not exist.

    Leading/trailing whitespace and trailing backslash separators are
    stripped from the path before it is used.
    '''
    # Normalise: drop surrounding spaces and any trailing "\" separator.
    path = path.strip().rstrip("\\")

    # exist_ok=True makes the call idempotent and removes the race window
    # between an explicit exists() check and the makedirs() call.
    os.makedirs(path, exist_ok=True)

def getPage(url):
    '''Fetch *url* and return its body decoded as UTF-8, or None on failure.

    Sends browser-like headers, including a hard-coded JD.com session
    Cookie, so the cart page renders as for a logged-in user.
    NOTE(review): the Cookie/session values below expire — they must be
    refreshed from a live browser session for the scrape to keep working.
    '''
    try:
        # Browser-equivalent request headers captured from a real session.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'user-key=b7fe0d54-375c-443b-b035-6d29689afadf; __jdu=508871407; PCSYCityID=1607; shshshfpa=73ed7925-f8df-a174-7a13-d377b993f5af-1529999248; shshshfpb=0cbee5b56246f456f054835c2d28f0c3db93113e95c8cfe555b31ef8cc; TrackID=173fVy_iHJ5Il_-WIijr743rB_FSKkx7yWCg0fYgxJGSWSKh9Bo1Nr-CKXR_7Br5BJ4WNfzWQSasUmkiblJAE3X0zAicz8qDTvzSiWKmfyqI; pinId=TKxKag2iPWsjRG1gFSCy4A; pin=wdjejwhkKeUYQX; unick=jejwhkKeUYQX; _tp=76qgvizbv%2FmUaAqJ1tmdYw%3D%3D; _pst=wdjejwhkKeUYQX; __jdc=122270672; ipLoc-djd=1-72-4137-0; areaId=1; shshshfp=b9c8f6f29de524535dae1ddc06761301; cart-main=xx; wlfstk_smdl=1qn5m1op0wnbj1b3o84fiuqk5c34u5ew; 3AB9D23F7A4B3C9B=PTK4NHSWI73PYNOCDP6HF3PWWRJ4REQ4DXITCWPSWSVV2QJ4BLXREKBWC4T4BYPZSBITCM4W4QRWHKMELZVOWSD5WM; __jda=122270672.508871407.1529999245.1530064935.1530116263.4; ec_cl_st=1; unpl=V2_ZzNtbUYDEEB2AEJRKUpaDGILQltLXhEXIAxABi8QDg1vUxpYclRCFXwUR1NnGlUUZgIZXUtcQxVFCEdkexhdBGYGEF9KU3MJdThBUXIRWAJnCiJeQmdDJXEJT119GVkEYTMiWnJRQRxwDEZSS2kVQSRXbg0AEhdWRQ1GU3sRVQdgCyJcclZzQxsIR1V6GF0FbwQSEEZWShxzCENVfSldNWQ%3d; CCC_SE=ADC_hU8%2bE8xAGVWRupoJYzUA38aYsK50uJ07eDmPEJ%2fQXle%2fJM1z9QxUi7vABFVVvcwAZUK%2fq63%2bls5O2l%2fYa4ppaNV2P5MrAT7M4ybdTN%2boqi3lEP9%2bFzKEzk3Z7cDuL4aWY2n4Ruq3Ev4eGuJdRbvXgqTlc9Z6ZnxNDkc7O3X%2bX2jDRI0QzQWVWE0A87w9CuV0GcqwRm7qo44KMWDHTSC1kC84bIN7kV6MsmCBsOpfRXvfWcNaUpDHIcRD1KBAtVkGSk7Gamm433vOzkLTSKtTE6yU%2foCPGMkIy7kgv4jvFw%2b%2bYD2Jf3CCSl%2b22%2fiTVIhfCeCdxK6BZflByjpeukKwAXfTCUStijPoLRbT%2fF9OB5t5MDsp7t7UobyOGWyq8LD8b1HWVhnyj8nRgiIsTuWp6SMeLAnCZXVkrck5%2fhu56SCpfwjBTmnnVUY1AbUqrofrMzXtPXKBihoKytKmEuwblyEnBX1Uvhr9ulClR6ESO3sgv%2fsI0kmq8QhBgXEH8kok1HcnxEUTesqh3wsjjbXr4QfJ763xwxIDZMKXGU1u6BBANPZeKSy7mpDCYEH3zycYTEWfSTjwRyJ%2bh8pJXDPIM7rcosBdKlZY5OOATopaKQ%2f8KH2JQu36Wvcm5sxdflxik7%2bkDFAdRyHlzn%2fNIXfD78yg%2b4gAI4J1qY1Vgr8lpKI%3d; __jdv=122270672|www.fanqianbb.com|t_1000043395_-1|tuiguang|5ebd2954cb7849a788c3d57ce8c99a94|1530116292427; mt_xid=V2_52007VwcTW1ReUl8eTylcBWQHE1JaXk4PHE8QQABnVkdOVFsFXgNNH1kNYgQbAA1ZBg8vShhcDHsCFU5eUUNbHkIZVQ5nASJQbVtiXhxPGVkCVwMWUw%3D%3D; cn=9; cd=0; shshshsID=b1ab131da0b39a6e3ebf8ca2ab92c92d_7_1530116504661; __jdb=122270672.11.508871407|4.1530116263',
            'Host': 'cart.jd.com',
            'referer': 'https://search.jd.com/Search?keyword=nubia%E7%BA%A2%E9%AD%94&enc=utf-8&suggest=9.def.0.V13&wq=nubia&pvid=42e4a6d7794e46cfaece809f7bbcaa13',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
        }

        # requests has no default timeout: without one a dead connection
        # would hang the whole script forever.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            # Decode explicitly instead of trusting res.text's guessed encoding.
            return res.content.decode('utf-8')
        else:
            return None
    except RequestException:
        return None

def parsePage(content):
    '''Parse the cart-page HTML and yield one info dict per cart item.

    Each yielded dict carries the row index plus the item's image URL,
    title, properties text, unit price, quantity and line total.
    Side effect: downloads every available item image into ./jdcartimg/.
    '''
    # Build a PyQuery document from the raw HTML.
    doc = PyQuery(content)

    # Each <div class="item-form"> is one product row in the cart.
    items = doc("div.item-form")
    index = 0

    # Walk the rows and extract the per-item details.
    for item in items.items():
        index += 1

        # Thumbnail src is protocol-relative ("//img..."); prefix https:.
        img_url = item.find("div.p-img a img").attr('src')
        if img_url:
            # Guard: rows whose image is lazy-loaded may have no src at
            # all — the original code crashed on None here.
            file_name = img_url.split('/').pop()
            file_path = os.path.join("./jdcartimg", file_name)
            downloadImg("https:" + img_url, file_path)

        # Emit the extracted fields for this row.
        yield {
            'index': str(index),
            'image': img_url,
            'title': item.find('div.item-msg div.p-name a').text(),
            'props': item.find('div.props-txt:first').text(),
            'price': item.find("div.p-price strong").text(),
            'quantity': item.find("div.quantity-form input.itxt").attr('value'),
            'sum': item.find("div.p-sum strong").text(),
        }
    
def downloadImg(image_url, file_path):
    '''Download the image at *image_url* into *file_path* (best effort).

    Network failures are swallowed deliberately: a missing thumbnail
    should not abort the whole cart scrape.
    '''
    try:
        # Timeout so one stalled image download cannot hang the run.
        res = requests.get(image_url, timeout=10)

        # Only persist a successful response — the original wrote the
        # body unconditionally, saving HTML error pages as image files.
        if res.status_code == 200:
            with open(file_path, 'wb') as f:
                f.write(res.content)
    except RequestException:
        pass
        
def writeFile(content, file_path="./jdcart.txt"):
    '''Append *content* as one JSON-encoded line to *file_path*.

    The default target keeps the original hard-coded destination
    (./jdcart.txt); callers may now redirect the output.  Double quotes
    are stripped so plain strings appear unquoted in the file.
    '''
    with open(file_path, 'a', encoding="utf-8") as f:
        f.write(json.dumps(content, ensure_ascii=False).replace("\"", "") + "\n")

def main():
    '''Drive the scrape: fetch the JD cart page, parse it, persist rows.'''
    cart_url = 'https://cart.jd.com/cart.action?'
    page = getPage(cart_url)
    if not page:
        return

    # Fixed-width row layout shared by console output and the file.
    row_fmt = "{:<5}{:<96}{:<44}{:<15}{:>8}{:>4}{:>10}"
    for row in parsePage(page):
        line = row_fmt.format(row['index'], row['image'], row['title'],
                              row['props'], row['price'], row['quantity'],
                              row['sum'])
        print(line)       # echo each cart item to the console
        writeFile(line)   # and append it to the output file

# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    print("亲，正在下载京东商城购物车的商品信息，请稍候……")

    # Remove any previous output file so each run starts fresh.
    if os.path.exists("./jdcart.txt"):
        os.remove("./jdcart.txt")
    # Make sure the product-image download folder exists.
    mkdir("./jdcartimg")

    # Build the column-header row once (the original duplicated this
    # format expression verbatim for print and writeFile).
    header = "{:<3}{:<94}{:<56}{:<28}{:^8}{:^2}{:^10}".format(
        "序号", "图片", "商品名称", "参数", "价格", "数量", "金额")
    print(header)
    writeFile(header)

    # Crawl, parse and persist the cart contents.
    main()

    print("==========京东商城购物车的商品信息下载完成！==========")
    print("请查看商品信息文件<jdcart.txt>和图片文件夹<jdcartimg>!")

