from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import requests,json,os


def getPage(url):
    """Fetch the page at *url* and return its HTML body.

    Returns:
        The response text on HTTP 200; None on any other status code or
        on any network error (connection failure, timeout, ...).
    """
    # Browser-like request headers.
    # BUG FIX: the original header names contained spaces
    # ('accept - encoding', 'cache - control', ...), which are invalid
    # HTTP header names and are rejected by requests / ignored by servers.
    # The Referer URL was likewise garbled with spaces around '/', '?' and '&'.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Referer': 'https://cart.jd.com/addToCart.html?rcd=1&pid=10277973830&pc=1&eb=1&rid=1542695920153&em=',
        'Upgrade-Insecure-Requests': '1',
        # Session cookie carries the JD login; kept verbatim from the original.
        'Cookie': 'shshshfpa=c8d4d2b8-094c-ccac-10e0-4442507caa2f-1540026453; __jdv=122270672|direct|-|none|-|1542456189581; __jdu=1540026431680644610216; PCSYCityID=988; ipLoc-djd=1-72-4137-0; areaId=1; __jda=122270672.1540026431680644610216.1540026431.1542456190.1542695876.3; __jdc=122270672; user-key=d9a00ab5-6f1b-4e11-952c-83a454bb78bc; _gcl_au=1.1.1853840687.1542695893; cart-main=xx; wlfstk_smdl=bbyesulqm3ojqs68w4eayl1gwe4h59gg; TrackID=1QAzeGk7QArALGTaUEx9g9NRz5BA9f1SgWWOutnTQ6Cp0DYZzTeqA6_RG96lX9Mvo8Hfsy0oDFCT_TfLOI6J_5kjBlQAluYVCkc229gbQJ4ReXpTWVVt_1aq_aJf2GLI3; thor=1F39A2F4E962BE237C47D5CBCCFF32F11781A78DEA612892CE0090E6D635ACD86A3CF82011B7FA4ABF66F15BDDB2F9562C4ADCAC3D5FFCD992ADF0807BA09CCFEFE2CC7D368E2DA3F40A7C2B9F5953708303601DC13BB8B002B2049B9A27EAB9C3F8B1AF43AC6DF541FAF228A0F309E1E3FEF224B62F3EC27C0F904B0AB6603928518853D6934F29E172293F38DA50C1; pinId=tMfZwKGaMxZ9n6sWxuk4zg; pin=wdxoqRZNcxFeNF; unick=iarmy; ceshi3.com=000; _tp=old3NIDs09A5XKmGJ7%2F2Dg%3D%3D; _pst=wdxoqRZNcxFeNF; 3AB9D23F7A4B3C9B=JCTDROZIUER5F3OTM4DPSUFDGNHA5YSNZ37UZ7PW2MSZ66MRVFYXAPLPQLI4RYVFEILSIOMFY66Q5336ZU2LPK2Y7A; cn=3; cd=0; shshshfp=320a0dbec390e6847fbb3aa3d24e4126; hf_time=1542695877188; shshshsID=0ed5d5e5b96a5b1e2b3ad49baa96689b_4_1542695961222; __jdb=122270672.9.1540026431680644610216|3.1542695876; shshshfpb=119593d25c05e47bb9a847d95a95781566aaa8441f5abf5b95bcaf0552',
    }
    try:
        # timeout prevents the crawl from hanging forever on a dead connection.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.text
        return None
    except RequestException:
        # Best-effort crawl: any network failure is reported as "no page".
        return None

def parsePage(content):
    """Parse the cart page HTML and lazily yield one dict per cart item.

    Each yielded dict holds the item's image URL ('image'), display
    name ('name') and price string ('price').
    """
    dom = BeautifulSoup(content, "lxml")
    # Every cart row lives in a <div class="item-form"> container.
    for node in dom.find_all("div", attrs={"class": "item-form"}):
        image_tag = node.find("img")
        name_tag = node.find("a", attrs={'clstag': 'clickcart|keycount|xincart|cart_sku_name'})
        price_tag = node.find("strong")
        record = {
            'image': image_tag.attrs['src'],
            'name': name_tag.string,
            'price': price_tag.string,
        }
        yield record


def writeFile(content, path="./jd.txt"):
    """Append *content* as one JSON line to *path*.

    Args:
        content: any JSON-serializable object (here: one cart-item dict).
        path: output file; defaults to the original hard-coded ./jd.txt,
            so existing callers are unaffected.
    """
    with open(path, 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese text human-readable instead of
        # escaping it to \uXXXX sequences.
        f.write(json.dumps(content, ensure_ascii=False) + "\n")


def main():
    """Entry point: fetch the JD cart page, parse it, persist each item."""
    cart_url = "https://cart.jd.com/cart.action"

    html = getPage(cart_url)
    # Nothing fetched (network error / non-200) -> nothing to parse.
    if not html:
        return
    for record in parsePage(html):
        writeFile(record)

# Script entry point: clear the previous run's output, then run the crawler.
if __name__ == '__main__':
    # BUG FIX: the crawler writes to ./jd.txt (see writeFile), but the old
    # cleanup deleted ./result.txt, so stale results kept accumulating in
    # jd.txt across runs. Delete the actual output file instead.
    if os.path.isfile('./jd.txt'):
        os.remove('./jd.txt')
    main()
