from requests.exceptions import RequestException
from pyquery import PyQuery
import requests
import time,json

def clear(content):
    """Normalize a scraped book name by removing whitespace noise.

    Deletes every newline, space, and backslash character from *content*.

    Args:
        content: raw text extracted from the page.

    Returns:
        The cleaned string.
    """
    # One C-level pass via str.translate instead of three chained
    # .replace() calls; also fixes the tab indentation that clashed
    # with the 4-space style used by the rest of the file.
    return content.translate(str.maketrans("", "", "\n \\"))

def getPage(url):
    '''Fetch the page at *url* and return its HTML text, or None on failure.

    Returns None when the request raises a RequestException or the server
    responds with a non-200 status code.
    '''
    try:
        # Request headers: the Cookie/User-Agent pair mimics a real browser
        # session so the server returns the full page content.
        headers = {'Cookie': 'BDqhfp=%E8%A1%97%E6%8B%8D%26%260-10-1undefined%26%260%26%261; BAIDUID=E8932FED3FD7F1A2D85AF74EE7F0BE76:FG=1; BIDUPSID=E8932FED3FD7F1A2D85AF74EE7F0BE76; PSTM=1534581562; PRY=1; BDUSS=JXLTM5NlJZR1lhSDdmV3ZRUmlQc0ZLZDgydTREeUhyREhFVy05WHpMN3Z6d0JjQVFBQUFBJCQAAAAAAAAAAAEAAADvw4cCbHh5aXp5NTIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAO9C2VvvQtlbej; H_PS_PSSID=1458_21084_28206_28132_26350_27750_28139_22073; delPer=0; PSINO=2; BDRCVFR[X_XKQks0S63]=mk3SLVN4HKm; userFrom=null; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; firstShowTip=1; indexPageSugList=%5B%22%E8%A1%97%E6%8B%8D%22%2C%22%E5%9B%BE%E7%89%87%20KFC%22%2C%22%E7%9B%B8%E6%9C%BA%22%2C%22%E9%AD%94%E9%81%93%E7%A5%96%E5%B8%88%22%2C%22%E7%BE%8E%E5%A5%B3%20%E9%9D%92%E6%98%A5%22%2C%22%E6%96%AF%E5%98%89%E4%B8%BD%E7%BA%A6%E7%BF%B0%E9%80%8A%22%2C%22%E7%BE%8E%E5%A5%B3%22%5D; cleanHistoryStatus=0; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm',
                 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',}

        # Fetch with a timeout so a dead server cannot hang the scraper.
        res = requests.get(url, headers=headers, timeout=10)
        # BUG FIX: the original called res.content.decode("utf-8") and
        # discarded the result (a no-op). Setting res.encoding actually
        # makes res.text decode the body as UTF-8.
        res.encoding = "utf-8"
        # Only a 200 response is considered a successful fetch.
        if res.status_code == 200:
            return res.text
        return None
    except RequestException:
        # Network-level failure (timeout, DNS, connection error, ...).
        return None

def parsePage(content):
    '''Parse the fetched HTML and yield one dict of fields per product entry.'''
    # Wrap the raw HTML in a PyQuery document so we can run CSS selectors.
    document = PyQuery(content)
    # Every "div.item-form" node is one product card on the page; walk them
    # lazily and yield a plain dict of the fields we care about.
    for entry in document("div.item-form").items():
        record = {
            'title': entry.find('.p-name a').text(),
            'img': entry.find('.p-img a img').attr("src"),
            'price': entry.find('div p strong').text(),
            'good_size': entry('.props-txt').text(),
            'nums': entry.find('div.quantity-form input.itxt').attr("value"),
        }
        yield record

def writeFile(content):
    '''Append *content* as one JSON line to the local result file.'''
    # Serialize first (ensure_ascii=False keeps non-ASCII text readable),
    # then append a single line to the output file.
    line = json.dumps(content, ensure_ascii=False)
    with open("./result_jd.txt", 'a', encoding="utf-8") as out:
        out.write(line + "\n")

def main():
    '''Entry point: fetch the target page, parse it, and persist each record.'''
    print("main")
    # NOTE(review): this URL is a Baidu image-search results page, but
    # parsePage() uses JD-style product CSS selectors ("div.item-form",
    # ".p-name", ...) — the two do not match, so parsing will likely yield
    # nothing. Looks like a leftover from another script; confirm the
    # intended target URL.
    url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=index&fr=&hs=0&xthttps=111111&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E8%A1%97%E6%8B%8D&oq=%E8%A1%97%E6%8B%8D&rsp=-1'
    html = getPage(url) # fetch the page (None on failure)
    # print(html)
    if html:
        for item in parsePage(html): # parse and iterate over records
            print(item)
            writeFile(item) # append the record to the result file

# Run the scraper only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
