# -*- coding: utf-8 -*-
import json
from lxml import etree

import jsonpath
import requests
import scrapy


class DoubanSpider(scrapy.Spider):
    """Spider that lists chapter "reader" links for one Douban Read column.

    Crawls the column-chapters JSON API (column id 59130922) and extracts
    each chapter's ``links.reader`` path, printing the absolute reader URL.
    Currently it only prints; it yields no items or follow-up requests.
    """

    name = 'douban'
    allowed_domains = ['douban.com']
    # Chapter-listing JSON API: start = pagination offset, limit = page size.
    start_urls = ['https://read.douban.com/j/column_v2/59130922/chapters?start=0&limit=10&latestFirst=0']
    page_num = 0  # pagination offset, intended to advance by the page size (10)
    # NOTE(review): this Cookie embeds a personal logged-in session. Move it to
    # project settings (DEFAULT_REQUEST_HEADERS) or strip it before sharing.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Cookie': 'bid=ACykF3cjYLQ; ll="118171"; __gads=ID=f18a5ef0f13be6e5-226b2e7035c90020:T=1622797740:RT=1622797740:S=ALNI_MbPb6KPKKy0Vy5jFez7fSktrxbMjA; douban-fav-remind=1; __utma=30149280.2071024962.1622797580.1624347309.1624515974.4; __utmz=30149280.1624515974.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; gr_user_id=e88d9073-cb17-4bd0-8ccb-a11b1f7c9d0a; viewed="34809659"; _ga=GA1.3.2071024962.1622797580; _gid=GA1.3.2036785073.1624516001; _pk_ref.100001.a7dd=%5B%22%22%2C%22%22%2C1624603570%2C%22https%3A%2F%2Fbook.douban.com%2F%22%5D; _pk_ses.100001.a7dd=*; _pk_id.100001.a7dd=99d14a852ebfec26.1624516000.2.1624603581.1624518896.'
    }

    def parse(self, response):
        """Parse one chapters-API response and print every reader URL.

        Parameters
        ----------
        response : scrapy.http.Response
            Response whose body is the JSON chapter listing.
        """
        # The API returns JSON, not HTML, so decode the body directly.
        json_r = json.loads(response.text)
        # BUG FIX: jsonpath.jsonpath() returns False (not []) when the path
        # matches nothing, which would make the for-loop below raise
        # TypeError — fall back to an empty list in that case.
        section_url = jsonpath.jsonpath(json_r, '$..links.reader') or []
        print('*******************', section_url)
        for url in section_url:
            book_url = 'https://read.douban.com/' + url
            print('----------------------', book_url)
        # TODO: fetch each chapter body by yielding scrapy.Request objects
        # (not blocking requests.get calls, which bypass Scrapy's scheduler
        # and middleware), extract the paragraph text, and paginate by
        # advancing self.page_num in steps of 10 up to offset 50, yielding a
        # Request for the next chapters page with callback=self.parse.
