import logging

from pydispatch import dispatcher

from scrapy.spiders import Spider
from scrapy import Request
from government.items import GovernmentItem

from government.db.dbhelper import ArticleModel
from government.spiders.tools import is_expired, md5sum

#from amazon_scrapy.sql import RankingSql

# https://github.com/dynamohuang/amazon-scrapy/tree/master/amazon
from datetime import datetime
import json

class gdstcSpider(Spider):
    """
    Spider for the Foshan Municipal Financial Affairs Bureau service guide
    (佛山市人民政府金融工作局): http://www.fsjrj.gov.cn/wsfw/bszn/
    """
    name = 'gdstcSpider'
    allowed_domains = ["fsjrj.gov.cn"]
    start_urls = ['http://www.fsjrj.gov.cn/wsfw/bszn/']

    def parse(self, response):
        """Parse the listing page and yield one detail Request per fresh article.

        Skips articles whose publish date fails ``is_expired`` and rows that
        carry no date text at all.

        TODO: compare against the newest DB record to detect updates.
        TODO: follow pagination if a next-page link exists.
        """
        articles = response.css('.ny_wz_zi ul')[0].css('li')

        for article in articles:
            # The publish date is the trailing text between </a> and </li>.
            date_matches = article.re(r'</a>(.*)</li>')
            if not date_matches:
                # Malformed row without a date: skip instead of crashing.
                continue
            publish_date = date_matches[0]
            if is_expired(publish_date):
                continue

            title = article.css('::text')[0].extract()
            # urljoin correctly resolves relative ("./x.html"), root-relative
            # and absolute hrefs, unlike the previous manual string slicing
            # which assumed every href begins with "./".
            url = response.urljoin(article.css('::attr(href)')[0].extract())

            yield Request(
                url=url,
                callback=self.page_detail,
                meta={
                    'publish_date': publish_date,
                    'title': title,
                },
            )

    def page_detail(self, response):
        """Parse one article detail page into a GovernmentItem and yield it."""
        item = GovernmentItem()

        # Fixed metadata for this source.
        item['province'] = '广东'
        item['city'] = '佛山'
        item['department'] = '市金融局'
        item['title'] = response.meta['title']
        item['url'] = response.request.url

        # TODO: derive tags from the page instead of hard-coding them.
        item['tags'] = json.dumps({
            'type': ['债券融资扶持项目'],
            'function': ['投资类', '产业扶持']
        })

        item['publish_date'] = response.meta['publish_date']

        body = response.css('.TRS_Editor')[0]
        item['content'] = body.extract()
        item['md5_check'] = md5sum(item['content'])

        # Abstract: first paragraph when present. TODO: better summarisation.
        paragraphs = body.css('p')
        item['abstract'] = paragraphs[0].extract() if paragraphs else ''

        # Attachments: resolve every linked document against the page URL.
        attachments = []
        for link in body.css('a'):
            hrefs = link.css('::attr(href)')
            if not hrefs:
                # Anchor without an href — nothing to download.
                continue
            texts = link.css('::text')
            attachments.append({
                'name': texts[0].extract() if texts else '',
                'url': response.urljoin(hrefs[0].extract()),
            })
        item['attachments'] = json.dumps(attachments)

        yield item