# -*- coding: utf-8 -*-
import scrapy
import urllib
import json
import requests
import os


class QdSpider(scrapy.Spider):
    """Crawl Qidian's "all books" listing, fetch each book's chapter list
    through the ajax category API, and save every chapter's text into a
    per-book directory on disk.

    Pipeline: parse (listing page) -> parse_1 (chapter-list JSON API)
    -> parse_2 (chapter content page, written to <book>/<chapter>.txt).
    """
    name = 'qd2'
    allowed_domains = ['qidian.com', 'book.qidian.com', 'read.qidian.com']
    start_urls = ['https://www.qidian.com/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=1']
    # Headers for requests that need a session: the ajax API expects the
    # _csrfToken cookie; Referer mimics a normal browsing path.
    headers = {
        'Referer': 'https://book.qidian.com/info/1004608738',
        'Cookie' : '_csrfToken=XZuQ6hTsaxODjsqNpO0jJXKc6EzSLmFx0CMq2PPs; newstatisticUUID=1550583254_477222931; qdrs=0%7C3%7C0%7C0%7C1; qdgd=1; pgv_pvi=8283689984; e1=%7B%22pid%22%3A%22qd_P_all%22%2C%22eid%22%3A%22qd_B58%22%2C%22l1%22%3A5%7D; e2=%7B%22pid%22%3A%22qd_P_all%22%2C%22eid%22%3A%22qd_B58%22%2C%22l1%22%3A5%7D; lrbc=1004608738%7C339991957%7C0%2C1003306811%7C308794862%7C0%2C1009480992%7C385931448%7C0; rcr=1004608738%2C1003306811%2C1009480992; bc=1003306811%2C1009480992%2C1004608738'
    }
    # Download root for book directories; was hard-coded inside parse().
    base_dir = '/home/python/Desktop/qidian_/'

    def parse(self, response):
        """Parse one listing page: create a directory per book and schedule
        a request to the chapter-list API for each book.
        """
        for li in response.xpath('//ul[@class="all-img-list cf"]/li'):
            book_name = li.xpath('.//h4//text()').get()      # book title
            if not book_name:
                continue                                      # skip malformed entries
            path = os.path.join(self.base_dir, book_name)     # per-book directory

            # Create the directory once; makedirs also creates base_dir on
            # the first run. (The original printed the "created" message in
            # the wrong branch — when the directory already existed.)
            if not os.path.exists(path):
                os.makedirs(path)
                print('新文件被创建')

            book_url = li.xpath('.//h4/a/@href').get()        # e.g. //book.qidian.com/info/<id>
            # Strip any trailing slash so the last path segment is the id.
            book_id = book_url.rstrip('/').split('/')[-1]
            params = {
                '_csrfToken': 'XZuQ6hTsaxODjsqNpO0jJXKc6EzSLmFx0CMq2PPs',
                'bookId': book_id,
            }
            # Chapter-list JSON endpoint for this book.
            book_api_url = ('https://book.qidian.com/ajax/book/category?'
                            + urllib.parse.urlencode(params))
            yield scrapy.Request(
                book_api_url,
                callback=self.parse_1,
                meta={'path': path},
            )
        # TODO(review): pagination follow-up (the "next page" link,
        # //a[@class="lbf-pagination-next "] — note the trailing space in
        # the class attribute) is not implemented yet; only page 1 is crawled.

    def parse_1(self, response):
        """Parse the chapter-list JSON and schedule one request per chapter.

        The original left this body commented out, which made the spider a
        dead end (parse_2 was unreachable and meta['path'] unused).
        """
        json_d = json.loads(response.text)
        path = response.meta['path']
        # NOTE(review): only the first volume (vs[0]) is crawled, matching
        # the original intent — confirm whether later volumes are wanted.
        for info in json_d['data']['vs'][0]['cs']:
            dir_name = info['cN']               # chapter title
            cu = info['cU']                     # chapter URL slug
            chapter_url = 'https://read.qidian.com/chapter/' + cu
            yield scrapy.Request(
                chapter_url,
                callback=self.parse_2,
                meta={
                    'path': os.path.join(path, dir_name)   # chapter file path (without .txt)
                },
            )

    def parse_2(self, response):
        """Extract a chapter's paragraphs and write them to <path>.txt.

        Skips chapters whose .txt file already exists, so re-running the
        spider does not re-download finished chapters.
        """
        path = response.meta['path']
        content = ''.join(
            response.xpath('//div[@class="read-content j_readContent"]/p/text()').getall()
        )
        file_path = path + '.txt'
        # Original bug: the existence check tested `path` (no suffix) while
        # the file written was `path + '.txt'`, so the guard never matched.
        if not os.path.exists(file_path):
            # Explicit utf-8: chapter text is Chinese; the platform default
            # encoding is not guaranteed to handle it.
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)
                print('{}  写入成功'.format(path))

