# -*- coding: utf-8 -*-
import scrapy


class WenzhangSpider(scrapy.Spider):
    """Crawl tech-news articles from newskj.org and append the body
    paragraphs (p[2]..p[5] of each article) to the local file p2.txt."""

    name = 'wenzhang'
    allowed_domains = ['newskj.org']
    start_urls = ['https://www.newskj.org/news/kejixun/index.html']

    def parse(self, response):
        """Follow every article link on a listing page, then enqueue the
        remaining listing pages (index_2.html .. index_1716.html).

        The pagination requests are not dont_filter'ed, so Scrapy's
        duplicate filter prevents re-crawling pages already scheduled even
        though this loop runs on every listing page.
        """
        href_list = response.xpath(
            '//div[@class="list-box"]/div[@class="list-items"]/ul/li/a/@href'
        ).extract()
        for href in href_list:
            # urljoin resolves relative hrefs; dont_filter keeps articles
            # that appear on more than one listing page.
            yield scrapy.Request(url=response.urljoin(href),
                                 callback=self.getcontet, dont_filter=True)

        for page in range(2, 1717):
            src = "https://www.newskj.org/news/kejixun/index_" + str(page) + ".html"
            print(src)
            yield scrapy.Request(url=src, callback=self.parse)

    def getcontet(self, response):
        """Extract paragraphs 2-5 of an article body and append each
        non-empty one (plus a newline) to p2.txt.

        Fix: the original opened p2.txt four separate times and never
        closed any handle (file-descriptor leak); a single `with` block
        now owns the file for all writes.
        """
        # Same xpaths as before, generated by index instead of copy-paste.
        paragraphs = [
            response.xpath(
                'normalize-space(//div[@id="main_content"]/p[%d])' % i
            ).extract_first('')
            for i in range(2, 6)
        ]

        print("p---------------------------------------------" + paragraphs[0])
        with open("p2.txt", "a", encoding="utf-8") as fo:
            for text in paragraphs:
                if text != "":
                    fo.write(text + "\n")







