import logging

from pydispatch import dispatcher

from scrapy.spiders import Spider
from scrapy import Request
from government.items import GovernmentItem

from government.db.dbhelper import ArticleModel
from government.spiders.tools import is_expired, md5sum

#from amazon_scrapy.sql import RankingSql

# https://github.com/dynamohuang/amazon-scrapy/tree/master/amazon
from datetime import datetime
import json

class gzboftecSpider(Spider):
    """
    Spider for the Guangzhou Municipal Commission of Commerce
    (广州市商务委员会):
    http://www.gzboftec.gov.cn/web/column/column.jsp?columnId=2c9081ee2cbf9809012cc03769f4061d
    """
    name = 'gzboftecSpider'
    allowed_domains = ["gzboftec.gov.cn"]
    start_urls = ['http://www.gzboftec.gov.cn/web/column/column.jsp?columnId=2c9081ee2cbf9809012cc03769f4061d']


    def parse(self, response):
        """Parse the column listing page.

        Collects publish dates from <td> cells and article links from <a>
        tags, pairs them positionally (i-th non-expired date with the i-th
        article link), and yields a detail-page Request per article.
        """
        # TODO: compare against the newest row in the DB to detect updates.
        # TODO: follow pagination ("next page") links.
        link_list = response.css('a')
        cell_list = response.css('td')
        base_url = "http://www.gzboftec.gov.cn"

        article_dates = []
        for cell in cell_list:
            texts = cell.css('::text')
            if not texts:
                continue
            raw = texts[0].extract()
            if not raw:
                continue
            try:
                # NOTE(review): the cell text carries one wrapper character
                # on each side (hence the [1:-1] slice) — confirm against
                # the live page. strptime both validates and normalizes the
                # date; a non-date cell raises ValueError and is skipped.
                published = datetime.strptime(raw[1:-1], "%Y-%m-%d").strftime("%Y-%m-%d")
            except ValueError:
                # Narrowed from a bare `except:` — only skip cells that
                # fail date parsing; anything else should surface.
                continue
            if not is_expired(published):
                article_dates.append(published)

        article_index = 0
        for link in link_list:
            # Stop once every collected date is consumed. Checking BEFORE
            # the lookup also guards the empty-dates case, which previously
            # raised IndexError on the first matching link.
            if article_index >= len(article_dates):
                break

            url = link.css('::attr(href)')[0].extract()
            if not url.startswith('/article.jsp'):
                continue

            title = link.css('::attr(title)')[0].extract()
            # Open the article detail page.
            yield Request(
                    url=base_url + url,
                    callback=self.page_detail,
                    meta={
                        'title': title,
                        'publish_date': article_dates[article_index]
                    }
                )
            article_index += 1


    def page_detail(self, response):
        """Parse one article page into a GovernmentItem."""
        item = GovernmentItem()

        # Fixed metadata for this source.
        item['province'] = '广东'
        item['city'] = '广州'
        #item['zone'] = ''
        item['department'] = '市商务委员会'
        item['title'] = response.meta['title']
        item['url'] = response.request.url

        # Directory part of the article URL; used to resolve relative
        # attachment links below.
        base_url = ('/').join(item['url'].split('/')[:-1])
        # TODO: move the tag vocabulary into a named mapping.
        item['tags'] = json.dumps({
            'type': ['电子商务'],
            'function': ['资金扶持', '产业扶持']
        })

        item['publish_date'] = response.meta['publish_date']
        # NOTE(review): assumes the article body lives in the 10th <table>
        # of the page — brittle; verify against the live page layout.
        body = response.css('table')[9]
        item['content'] = body.extract()
        item['md5_check'] = md5sum(item['content'])
        # TODO: build a proper abstract later; for now take the raw HTML of
        # the first four paragraphs of the body.
        item['abstract'] = ''.join([p.extract() for p in body.css('p')[:4]])

        # Attachments: some hrefs are already absolute, others relative.
        item['attachments'] = []
        for link in body.css('a'):
            url = link.css('::attr(href)')[0].extract()
            if not url.startswith('http'):
                # NOTE(review): assumes relative hrefs start with "./" —
                # the first character is dropped and the rest appended to
                # the article's directory; confirm against real hrefs.
                url = base_url + url[1:]

            item['attachments'].append({
                'name': link.css('::text')[0].extract(),
                'url': url
            })
        item['attachments'] = json.dumps(item['attachments'])

        yield item