# Scrape book listings from dangdang.com (当当网)
import requests
import pymongo
import json,time
import requests
from bs4 import BeautifulSoup
from pyquery import PyQuery

from urllib.request import urlretrieve
# Running record counter, incremented by parse_page once per scraped book.
# NOTE(review): this shadows the builtin `id`; renaming it would also require
# updating the `global id` declaration inside parse_page.
id = 1

# 初始化数据库
def mangodb():
    """Connect to the local MongoDB server and return the `info` collection.

    The client connects lazily, so this never touches the network by itself.
    Returns the collection object that save_page inserts records into.
    """
    mongo_client = pymongo.MongoClient(host='localhost', port=27017)
    # Database `db`, collection `info`.
    database = mongo_client.db
    return database.info

#爬取数据
def get_page(url):
    try:
        #定义请求信息头
        headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'}
        #执行爬取
        res = requests.get(url,headers=headers)
        #判断并且返回结果
        if res.status_code == 200:
            return res.text
        else:
            return None
    except RequestException:
        return None

#解析数据
# Parse the fetched HTML
def parse_page(content):
    """Parse a Dangdang search-result page and yield one dict per book.

    Each dict has keys: id, name, author, price, img. Increments the
    module-level `id` counter for every record yielded.
    """
    global id  # sequential record id shared across calls/pages
    doc = PyQuery(content)

    # Each result is an <li> carrying a ddt-pit attribute inside the list.
    items = doc("#search_nature_rg li[ddt-pit]")
    for item in items.items():
        # Lazily-loaded images keep the real URL in data-original;
        # fall back to src when that attribute is absent.
        img = item.find("img").attr("data-original")
        if img is None:
            img = item.find("img").attr("src")

        # BUG FIX: attr("title") returns None when the attribute is missing,
        # so calling .strip() unconditionally crashed the generator.
        title = item.find("a").attr("title")
        yield {
            "id": str(id),
            "name": title.strip() if title else "",
            "author": item.find("p[class='search_book_author']").find("span a[name='itemlist-author']").attr("title"),
            "price": item.find("p[class='price']").find("span[class='search_now_price']").text(),
            "img": img,
        }
        id += 1
    

#保存数据
# Persist one record
def save_page(im, info):
    """Download a book's cover image to ./pic/ and insert the record into Mongo.

    `im` is one dict yielded by parse_page; `info` is the pymongo collection
    returned by mangodb(). Note: insert_one mutates `im` by adding `_id`.
    """
    import os  # local import keeps this fix self-contained

    print(im)
    # Make sure the image directory exists before urlretrieve writes into it.
    os.makedirs("./pic", exist_ok=True)
    # BUG FIX: the original wrote str(im.get("id") + ".jpg") — the str() was
    # wrapped around the wrong expression and only worked because "id" was
    # already a string.
    save_path = "./pic/p" + im.get("id") + ".jpg"
    # Some items have no image URL at all; skip the download rather than crash.
    if im.get("img"):
        urlretrieve(im.get("img"), save_path)
    # BUG FIX: Collection.insert() was removed in pymongo 4; use insert_one.
    info.insert_one(im)
    

#主函数
# Entry point
def main():
    """Scrape one Dangdang search-result page and store every book found."""
    info = mangodb()
    url = "http://search.dangdang.com/?key=python&act=input"
    # BUG FIX: the original did `print = ("="*25 + ...)`, which rebound the
    # builtin `print` to a string instead of printing the banner.
    print("="*25 + "爬取当当网" + "="*25)
    html = get_page(url)  # perform the fetch
    # get_page returns None on failure; don't hand None to the parser.
    if html is None:
        return
    for im in parse_page(html):
        save_page(im, info)

# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()

