from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
import requests
import re,time,json

def getPage(url):
  '''Fetch the HTML of *url* and return it as text.

  Returns None on any network failure or non-200 status code so callers
  can simply truth-test the result.
  '''
  try:
    # Spoof a desktop Chrome UA; Douban rejects the default requests UA.
    # NOTE: the original value mistakenly embedded a duplicated
    # "User-Agent:" prefix inside the header value — fixed here.
    headers = {
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    # timeout prevents a stalled connection from hanging the whole crawl.
    res = requests.get(url, headers=headers, timeout=10)
    # Only a 200 response is treated as a successful fetch.
    if res.status_code == 200:
      return res.text
    else:
      return None
  except RequestException:
    # Best-effort crawler: swallow network errors, signal failure via None.
    return None


def parsePage(content):
  '''Parse one Top-250 list page and yield a dict per book entry.

  Each yielded dict has the keys 'image', 'title', 'author' and 'score';
  a field missing from the page is reported as None instead of raising.
  '''
  # Parse the HTML document with lxml's XPath support.
  html = etree.HTML(content)
  # Each book row on the list page is a <tr class="item">.
  for item in html.xpath('//tr[@class="item"]'):
    # xpath() returns a (possibly empty) list; guard every [0] access so a
    # row missing a field (e.g. no rating yet) cannot raise IndexError and
    # kill the generator mid-page.
    image = item.xpath('.//img[@width="90"]/@src')
    title = item.xpath('.//div[@class="pl2"]/a/@title')
    author = item.xpath('.//p[@class="pl"]/text()')
    score = item.xpath('.//span[@class="rating_nums"]/text()')
    yield {
      'image': image[0] if image else None,
      'title': title[0] if title else None,
      'author': author[0] if author else None,
      'score': score[0] if score else None,
    }


def writeFile(content, filename="./result-xpath.txt"):
  '''Append *content* to *filename* as a single JSON line.

  The default path preserves the original behavior; pass *filename* to
  write elsewhere. ensure_ascii=False keeps non-ASCII text (e.g. Chinese
  titles) human-readable instead of \\uXXXX escape sequences.
  '''
  with open(filename, 'a', encoding='utf-8') as f:
    f.write(json.dumps(content, ensure_ascii=False) + "\n")



def main(offset):
  '''Crawl one page of the Top-250 list and persist every parsed record.'''
  page = getPage('http://book.douban.com/top250?start=' + str(offset))
  # Guard clause: nothing to parse if the fetch failed.
  if not page:
    return
  for record in parsePage(page):
    writeFile(record)
  

if __name__ == '__main__':
  # 10 pages x 25 entries per page covers the full Top 250; pause one
  # second between requests to stay polite to the server.
  for start in range(0, 250, 25):
    main(offset=start)
    time.sleep(1)
  print("数据存入完毕，请检查同目录下result-xpath.txt文件")