#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @作者: 大数据20班 黄德攒
# @软件： Pycharm Pro 2021.4
# @GitHub仓库： https://github.com/YELLOWZAN/Bigdata

import scrapy
from guji.items import GujiItem

# 数据结构：
# 古籍名：
#       |章节：
#           |章节内容
#       |篇（如果有）：
#                   |章节（如果有）：
#                               |章节内容
class GjSpider(scrapy.Spider):
    """Crawl classical Chinese texts (古籍) from so.gushiwen.cn.

    Crawl flow:
        index page (``parse``)
          -> book detail page with chapter links (``parse_second``)
             -> chapter content page (``parse_xiangqing``) yielding a ``GujiItem``.
    """
    name = 'gj'
    allowed_domains = ['so.gushiwen.cn']
    start_urls = ['https://so.gushiwen.cn/guwen/']

    # Paginated index URL template; next pages are fetched as base_url + page number.
    base_url = 'https://so.gushiwen.cn/guwen/default.aspx?p='
    # Current index page (1-based); pagination stops after page 20.
    page = 1

    def parse(self, response):
        """Parse one index page: follow each book's detail link, then paginate.

        :param response: index-page response listing books in ``div.sonspic`` blocks.
        :yields: ``scrapy.Request`` for each book detail page and for the next
            index page (up to page 20).
        """
        site_root = 'https://so.gushiwen.cn'
        text_div = response.xpath('/html/body/div[2]/div[1]/div[2]/div[@class="sonspic"]')
        for guji_list in text_div:
            page_src = guji_list.xpath('./div/p[1]/a/@href').extract_first()
            guji_name = guji_list.xpath('./div/p[1]/a/b/text()').extract_first()
            if page_src is None:
                # extract_first() found no link in this entry; the original code
                # tried to catch this with `except EnvironmentError`, but a None
                # href actually raises TypeError on concatenation — test explicitly.
                self.logger.warning("获取主页src信息失败，跳过本次中")
                continue
            # urljoin handles both absolute and site-relative hrefs safely.
            xiangqing_url = response.urljoin(page_src) if page_src.startswith('/') else site_root + page_src
            self.logger.debug('%s : %s', guji_name, xiangqing_url)
            yield scrapy.Request(url=xiangqing_url, callback=self.parse_second)

        if self.page < 20:
            self.page += 1
            yield scrapy.Request(url=self.base_url + str(self.page), callback=self.parse)

    def parse_second(self, response):
        """Parse a book detail page and follow every chapter link.

        Books come in two layouts: chapters directly under a <ul> (e.g. 论语)
        or inside a second <div> (e.g. 老子). Try the first layout and fall
        back to the second when it matches nothing.

        :param response: book detail-page response.
        :yields: ``scrapy.Request`` for each chapter content page.
        """
        # 无章节xpath（论语） /html/body/div[2]/div[1]/div[3]/div/ul/span
        # 有章节xpath（老子） /html/body/div[2]/div[1]/div[3]/div/div[2]/span
        zhangjie = response.xpath('/html/body/div[2]/div[1]/div[3]/div/ul/span')
        if not zhangjie:
            zhangjie = response.xpath('/html/body/div[2]/div[1]/div[3]/div/div[2]/span')
        for text_list in zhangjie:
            page_src = text_list.xpath('./a/@href').extract_first()
            if page_src is None:
                # Missing href: the original bare `except:` silently hid this
                # case (and everything else); skip just this chapter instead.
                self.logger.warning("章节识别错误!")
                continue
            # Chapter hrefs may be relative; resolve against the current page.
            yield scrapy.Request(url=response.urljoin(page_src), callback=self.parse_xiangqing)

    def parse_xiangqing(self, response):
        """Parse one chapter content page into a ``GujiItem``.

        :param response: chapter-page response.
        :yields: ``GujiItem`` with the chapter title and full body text.
        """
        title = response.xpath('/html/body/div[2]/div[1]/div/div[1]/h1/span[1]/b/text()').get()
        page_text = response.xpath('/html/body/div[2]/div[1]/div/div[1]/div//text()').extract()
        content = ''.join(page_text).strip()
        self.logger.debug('%s %s', title, page_text)
        # Field names are Chinese and must match the declarations in guji/items.py.
        item = GujiItem(文章内容=content, 标题=title)
        yield item


