import requests,json,time
from requests.exceptions import RequestException
from urllib.request import urlretrieve

from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery

# m = 1

def getPage(url):
    '''Fetch the page at *url* and return its HTML text, or None on any failure.

    Masquerades as a desktop Chrome browser and carries a hard-coded JD
    session Cookie (must be refreshed manually from the browser dev tools
    when it expires — an expired cookie yields a login page, not an error).
    '''
    try:
        # Browser-impersonation headers; the Cookie keeps the logged-in session.
        headers = {
            'Accept':'*/*',
            'Accept-Encoding':'deflate, sdch, br',
            'Accept-Language':'zh-CN,zh;q=0.8',
            'Cookie':'__jdu=2062983254; shshshfpa=957453a7-865c-beed-4104-665cdb2f071b-1551100467; shshshfpb=hfAwY1mUtQSU38PGs8i%2Fx9g%3D%3D; PCSYCityID=23; user-key=55860b47-4823-4c9c-abe0-54ecd489e831; ipLoc-djd=1-72-4137-0; areaId=1; mt_xid=V2_52007VwATVV5QW1gcTRxsV2BXGwFbDQBGGEgfDhliUBFbQVAGWhdVHloEMgMXVFsIVFlIeRpdBW8fE1dBWVVLH04SXgBsABdiX2hSahZBHVkCZAoTUVpaVl8XSB1VAmIzElRZXA%3D%3D; pin=%E8%B4%B9%E9%9B%B7cute; _tp=RGRZ0lKiPwn0L%2BerHk4zkZgZid0IzNUMeKAv%2BbH5Oqw%3D; _pst=%E8%B4%B9%E9%9B%B7cute; unick=%E8%B4%B9%E9%9B%B7cute; pinId=AcGEAf0kohUIeJHLLzNwqQ; cd=0; cart-main=""; unpl=V2_ZzNtbUJRSkVxAEFUckpYUGIAG1sRVkUddVhEBikaXAFkBRUNclRCFX0UR1FnGFQUZwoZXEFcRxRFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZHgYWwZvChBeRlJzJXI4dmR4G14DbwoiXHJWc1chVEVdeR9aBioAE1pBX0oXdgxDZHopXw%3d%3d; __jdv=122270672|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_178a59618b5d4287b0791a3cc215276a|1554804175074; shshshfp=fa1ec6f0bd7af1f5973c5e6ed7bc48f5; 3AB9D23F7A4B3C9B=V66FEO6RHIWKGJQCKUNB3LEOSBV2N6VJGNE6B5CXAL476HOJMLV3USUAL6UOWDQ47QASGGENS4S3HFLLZ26KYAXILI; __jdc=122270672; cn=4; wlfstk_smdl=562hds03jfru89jvzuhttw6hbzyomlts; TrackID=1pprKIP_FD_Sx7jMMnE0QH2hBPC7Ha1Tyeq_sRRFZ0C0LBDzEAuFQGhJdpvoxLZpIChGRKYXEA_sM-aU1zaNa0yjJxSk6IWYrmkU1n_RWZyA; ceshi3.com=000; __jda=122270672.2062983254.1551100463.1554806310.1554809327.12; thor=E3505B39BB7B06414E5041FE94E3230738F8F8E2846D61350CC2E57A8DFF34977670B6E75007D3D43A4946D09BCC4BE439F39A2A3E919AE4E191EA29924154BFB09F87E4158ED0CE15CA2C7D71B82693FFF0C6C3E6DF3EE2E9DA8F173718E0C8D25FAC4B00517D11E41E0BB7A56F7B5A21F28D91A080887AC43C412DBB7588FC; shshshsID=afa4394cd26fe198579703c1a63ccc5e_2_1554809480964; __jdb=122270672.2.2062983254|12.1554809327',
            #============ Cookie value must be captured manually from browser dev tools ============
            'Referer':'https://cart.jd.com/cart?rd=0.27009384905451417',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER',
        }

        # timeout keeps a stalled connection from hanging the crawler forever
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    # BUGFIX: original caught the undefined name `ReuestsException`, which
    # would raise NameError on any request error. RequestException (imported
    # at file top) is the base class of all requests failures.
    except RequestException:
        return None

def parsePage(content):
    '''Parse the crawled cart-page HTML and yield one result dict per item.

    Each yielded dict has three keys: 'title', 'image' and 'price'.
    Uses lxml's XPath engine for extraction.
    '''
    doc = etree.HTML(content)
    name_link = ".//div[@class='item-msg']/div[@class='p-name']/a"
    for node in doc.xpath(".//div[@class='item-form']"):
        link_texts = node.xpath(name_link + "/text()")
        # When the product link contains an <em> tag, the real title sits in
        # the second text node; otherwise the first text node is the title.
        title = link_texts[1] if node.xpath(name_link + "/em") else link_texts[0]
        yield {
            'title': title.strip(),
            'image': node.xpath(".//div[@class='p-img']/a/img/@src")[0],
            'price': node.xpath(".//p[@class='plus-switch']/strong/text()")[0],
        }

def writeFile(content):
    '''Serialize *content* as one JSON line and append it to ./jd_cart.txt.

    ensure_ascii=False keeps Chinese product titles human-readable in the file.
    '''
    record = json.dumps(content, ensure_ascii=False)
    with open("./jd_cart.txt", "a", encoding="utf-8") as out:
        out.write(record + "\n")

def main():
    '''Entry point: fetch the JD cart page, then print and persist each parsed item.'''
    cart_url = 'https://cart.jd.com/cart.action'
    page = getPage(cart_url)
    if not page:
        # Fetch failed (network error or non-200 status) — nothing to do.
        return
    for record in parsePage(page):
        print(record)
        writeFile(record)

if __name__ == '__main__':
    main()