import scrapy
from Hue.basepro import ZhengFuBaseSpider


class SanyaSpider(ZhengFuBaseSpider):
    """Search spider for the Sanya municipal government site (sanya.gov.cn).

    Supplies the configuration and the three hook methods that the shared
    ``ZhengFuBaseSpider`` workflow calls: ``edit_page`` (total page count),
    ``edit_items_box`` (result-item container nodes) and ``edit_item``
    (per-item field extraction).
    """

    name = 'Sanya-pro'
    allowed_domains = ['sanya.gov.cn']
    # BUGFIX: original value was 'http://http://www.sanya.gov.cn//' — the
    # scheme was duplicated, making the URL invalid/unfetchable.
    start_urls = ['http://www.sanya.gov.cn/']
    # Search API template; the base spider substitutes {keyword} and {page}.
    # Query-string values are already percent-encoded (column=全站, etc.).
    api = 'http://search.sanya.gov.cn/s?searchWord={keyword}&column=%25E5%2585%25A8%25E7%25AB%2599&pageSize=10&pageNum={page}&siteCode=4602000035&sonSiteCode=&checkHandle=1&searchSource=0&govWorkBean=%257B%257D&sonSiteCode=&areaSearchFlag=-1&secondSearchWords=&topical=&docName=&label=&countKey=0&uc=0&isSonSite=false&left_right_index=0&searchBoxSettingsIndex=&manualWord={keyword}&orderBy=0&startTime=&endTime=&timeStamp=0&strFileType=&wordPlace=0'
    keywords = ['煤炭']  # search terms ("coal")
    method = 'GET'
    # Presumably pageNum is zero-based (first request uses pageNum=0) —
    # TODO(review): confirm against ZhengFuBaseSpider's pagination loop.
    start_page = 0

    def edit_page(self, response):
        """Return the total number of result pages for this search.

        Reads the total-hit count from the results header span and divides
        by the fixed page size of 10.

        NOTE(review): ``int(all_result)`` raises if the selector matches
        nothing, and ``// 10 + 1`` yields one extra page when the count is
        an exact multiple of 10 — behavior kept as-is since the base class
        contract is not visible here.
        """
        all_result = response.css('div.leftSide-layer.fl > div > div.fl > p.fl > span::text').get()
        all_page = int(all_result) // 10 + 1
        return all_page

    def edit_items_box(self, response):
        """Return the selector list of per-result container nodes."""
        items_box = response.xpath('//div[@class="wordGuide Residence-permit"]')
        return items_box

    def edit_item(self, item):
        """Extract one search result into a metadata dict.

        Returns a dict with ``title``, ``url``, ``pre_content`` (summary
        text with surrounding whitespace stripped) and ``date``; any field
        whose selector misses comes back as ``None`` (or an empty string
        for ``pre_content``).
        """
        meta_info = {
            "title": item.css('div:first-child > a::attr(title)').get(),
            "url": item.css('div:first-child > a::attr(href)').get(),
            "pre_content": "".join(item.css('div.listInfoCon.clearfix > div.listIntro.wh100.fl > p.summaryFont *::text').getall()).strip(),
            "date": item.css('div.listInfoCon.clearfix > div.listIntro.wh100.fl > p.time > span::text, p.time > span::text').get()
        }
        return meta_info
