# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from scrapy.selector import Selector
import requests
from ..items import PolicesItem
def get_next(num):
    """Return a Splash Lua script that jumps the gov.cn listing to page *num*.

    The script loads ``args.url``, types the page number into the site's
    ``whj_toPage`` jump box, clicks the ``whj_confirm`` button, waits for
    the table to refresh, and returns the rendered HTML.
    """
    script = """
        function main(splash, args)
          assert(splash:go(args.url))
          assert(splash:wait(1))
          local input = splash:select('[name="whj_toPage"]')
          input:send_text('%s')
          assert(splash:wait(1))
          local button = splash:select('[name="whj_confirm"]')
          button:mouse_click()
          assert(splash:wait(2))
          return {
            html = splash:html(),
          }
        end
    """ % num
    return script

class GovSpider(scrapy.Spider):
    """Crawl the State Council information-disclosure index on gov.cn.

    The index page is JavaScript-paginated, so every request goes through
    Splash; pagination is driven by the Lua script from ``get_next``, which
    types the target page number into the site's page-jump box.  Each index
    row yields a detail request whose metadata rides along in ``meta`` and
    is merged into a ``PolicesItem`` in ``parseDetail``.
    """

    name = "gov"
    allowed_domains = ["gov.cn"]
    start_urls = ["http://www.gov.cn/zhengce/xxgk/index.htm"]

    # Last index page to request (checked before incrementing meta['page']).
    MAX_PAGE = 100

    def start_requests(self):
        """Seed the crawl with page 0 so parse() can track pagination."""
        for url in self.start_urls:
            yield SplashRequest(url, args={'image': 0, 'timeout': 5},
                                meta={'page': 0})

    def parse(self, response):
        """Parse one index page: yield a detail request per table row,
        then schedule the next index page via the Splash Lua script."""
        self.logger.info('parsing index page %s', response.meta['page'])
        sel = Selector(response)
        # Listing rows occupy tr[2]..tr[11]; tr[1] is the table header.
        for i in range(2, 12):
            row = '//*[@id="xxgkzn_list"]/div[1]/table/tbody/tr[%s]' % i
            title = sel.xpath(row + '/td[2]/a/text()').extract()
            part = sel.xpath(row + '/td[3]/text()').extract()
            startDate = sel.xpath(row + '/td[4]/text()').extract()
            publishDate = sel.xpath(row + '/td[5]/text()').extract()
            # extract_first() instead of extract()[0]: a short last page or a
            # row without a link would otherwise raise IndexError.
            url = sel.xpath(row + '/td[2]//@href').extract_first()
            if url is None:
                continue
            yield SplashRequest(url, args={'image': 0, 'timeout': 5},
                                callback=self.parseDetail,
                                meta={'title': title,
                                      'part': part,
                                      'startDate': startDate,
                                      'publishDate': publishDate})
        page = int(response.meta['page'])
        if page <= self.MAX_PAGE:
            next_page = page + 1
            lua = get_next(next_page)
            # Debug trace: keep the last pagination script that was sent.
            with open('logs.txt', 'w') as f:
                f.write(lua)
            yield SplashRequest("http://www.gov.cn/zhengce/xxgk/index.htm",
                                endpoint='execute',
                                args={'lua_source': lua},
                                meta={'page': next_page})

    def parseDetail(self, response):
        """Merge the index-row metadata from ``meta`` with the article body
        extracted from the rendered detail page, and yield a PolicesItem."""
        sel = Selector(response)
        # string(.) flattens all nested text inside the content container;
        # extract_first() yields None (instead of IndexError) if it is absent.
        detail = sel.xpath('//*[@id="UCAP-CONTENT"]').xpath('string(.)').extract_first()
        item = PolicesItem()
        item['title'] = response.meta['title']
        item['part'] = response.meta['part']
        item['startDate'] = response.meta['startDate']
        item['publishDate'] = response.meta['publishDate']
        item['detail'] = detail
        yield item



