# -*- coding: utf-8 -*-
import scrapy


class QingdaoSpider(scrapy.Spider):
    """Scrape the complaint listing table from the Qingdao site.

    Yields one dict per data row (序号/标题/上诉时间/回复时间) and follows
    pagination up to page 5 by re-requesting ``parse``.
    """

    name = 'qingdao'
    allowed_domains = ['27.223.1.57:10000']
    start_urls = ['http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?lkocok_pageNo=1&oneClassGuid=171030103404278262']
    # Page counter shared across callbacks; advanced once per parsed page.
    page = 1

    def parse(self, response):
        """Extract one item per table row, then request the next page.

        Yields:
            dict: one record per row that has a first-cell value.
            scrapy.Request: the next listing page while ``self.page`` <= 5.
        """
        # Rows of the nested listing table.
        rows = response.xpath("//tr/td//tr/td//tr")

        for row in rows:
            num = row.xpath("./td[1]/text()").extract_first()
            # Header/spacer rows have no text in the first cell; skip them.
            if num is None:
                continue
            yield {
                '序号': num,
                '标题': row.xpath("./td[2]/@title").extract_first(),
                '上诉时间': row.xpath("./td[3]/text()").extract_first(),
                '回复时间': row.xpath("./td[4]/text()").extract_first(),
            }

        # BUG FIX: pagination used to live inside the row loop, so
        # ``self.page`` was bumped once per ROW and duplicate next-page
        # requests were yielded while later pages were skipped entirely.
        # Advance the counter exactly once per parsed page instead.
        self.page += 1
        if self.page <= 5:
            next_url = 'http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?lkocok_pageNo=%d&oneClassGuid=171030103404278262' % self.page
            yield scrapy.Request(url=next_url, callback=self.parse)
