# -*- coding: utf-8 -*-
import scrapy
import re
from bs4 import BeautifulSoup
from myText.items import MytextItem
import json
import os
from scrapy_redis.spiders import RedisSpider


class GettextSpider(RedisSpider):
    """Redis-driven spider that scrapes book listings from book.zongheng.com.

    Seed it through Redis, e.g.:
        LPUSH getText:start_urls http://book.zongheng.com/store/.../ALL.html
    """

    name = 'getText'
    allowed_domains = ['book.zongheng.com']

    def parse(self, response):
        """Parse a store listing page and yield one ``MytextItem`` per book.

        Each ``div.bookbox`` entry holds a cover image, a detail link whose
        URL embeds the numeric book id, the book name, and an info row with
        author and category links.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        for box in soup.find_all('div', class_="bookbox"):
            try:
                img_div = box.find('div', class_="bookimg")
                info_div = box.find('div', class_="bookilnk")
                book_img = img_div.find('img')['src']
                book_url_old = img_div.find('a')['href']
                # The detail URL embeds the numeric book id (first run of digits).
                book_id = re.findall(r'\d+', book_url_old)[0]
                book_name = box.find('div', class_="bookname").text
                # contents[1] / contents[3] are the author and category <a> tags;
                # the even indices are whitespace text nodes between them.
                book_auto = info_div.contents[1].text
                book_link = info_div.contents[3].text
            except (AttributeError, IndexError, KeyError, TypeError) as exc:
                # Malformed listing entry: log and skip it instead of
                # letting one bad box abort the whole crawl.
                self.logger.warning("Failed to extract book info: %s", exc)
                continue

            # Chapter index lives at a fixed path keyed by the book id.
            # Base URL: http://book.zongheng.com/showchapter/
            book_url = 'http://book.zongheng.com/showchapter/' + str(book_id) + ".html"

            item = MytextItem()
            item['book_id'] = book_id
            item['book_name'] = book_name
            item['book_auto'] = book_auto
            item['book_link'] = book_link
            item['book_url'] = book_url
            item['book_img'] = book_img
            # To also crawl each book's chapter list, yield a Request to
            # book_url with callback=self.parse_list and meta={'Text': item}
            # instead of yielding the item directly.
            yield item

    # Chapter index page
    def parse_list(self, response):
        """Parse a book's chapter index and yield one item per chapter.

        Expects the book-level ``MytextItem`` in ``response.meta['Text']``.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        chapters = soup.find_all('div', class_="volume-list")[0].find_all('li')

        for li in chapters:
            title = li.a.string
            url = li.a['href']
            if title and url:
                # Copy the meta item per chapter: re-yielding the same
                # mutable item would leave every previously yielded
                # reference pointing at the last chapter's title/url.
                item = response.meta['Text'].copy()
                item['title'] = title
                item['url'] = url
                yield item

# LPUSH getText:start_urls http://book.zongheng.com/store/c0/c0/b0/u5/p8/v0/s9/t0/u0/i1/ALL.html
