# -*- coding: utf-8 -*-
import scrapy

# Page numbers 1..50 of the forum index.
# NOTE: crawling all 50 pages at once may get the IP banned — if that
# happens, crawl smaller sub-ranges (e.g. range(1, 6), range(6, 11), ...)
# one batch at a time by narrowing this range.
num1_50 = list(range(1, 51))

# One listing URL per page: https://lt.cjdby.net/forum-4-<page>.html
url_list = [f'https://lt.cjdby.net/forum-4-{page}.html' for page in num1_50]

class CjdbySpiderSpider(scrapy.Spider):
    """Spider for the cjdby.net forum (board 4) thread listings.

    Crawls the listing pages in ``url_list`` and yields one dict per
    thread row with author, title, post time, view count and reply count.
    """

    name = 'cjdby_spider'
    # BUG FIX: allowed_domains must contain bare domains, not URLs with a
    # path — Scrapy's OffsiteMiddleware would warn about and mishandle
    # 'lt.cjdby.net/forum-4-1.html'.
    allowed_domains = ['lt.cjdby.net']
    start_urls = url_list

    def parse(self, response):
        """Extract one item per thread row on a listing page.

        Each Discuz thread is rendered as its own <tbody> inside the
        table with id 'threadlisttableid', so iterating the tbody
        SelectorList visits one thread per iteration.
        """
        # SelectorList: one <tbody> per thread row.
        rows = response.xpath("//*[@id='threadlisttableid']/tbody")
        for row in rows:
            # Selector-relative lookups; .get() returns None when a
            # field is missing (e.g. sticky/ad rows) rather than raising.
            author = row.xpath(".//td[@class='by']/cite/a/text()").get()
            title = row.xpath(".//th/a[@class='s xst']/text()").get()
            the_data = row.xpath(".//td[@class='by']/em/span/text()").get()
            visitor = row.xpath(".//td[@class='num hm']/em/text()").get()
            reply = row.xpath(".//td[@class='num hm']/a/text()").get()
            # Keys are deliberately Chinese ("OP", "title", "post time",
            # "views", "replies") — downstream consumers depend on them.
            yield {"楼主": author, "标题": title, "发表时间": the_data, "访客": visitor, "回复": reply}