import logging

from pydispatch import dispatcher

from scrapy.spiders import Spider
from scrapy import Request
from government.items import GovernmentItem

from government.db.dbhelper import ArticleModel
from government.spiders.tools import is_expired, md5sum

#from amazon_scrapy.sql import RankingSql

# https://github.com/dynamohuang/amazon-scrapy/tree/master/amazon
from datetime import datetime
import json

class szjmxxwSpider(Spider):
    """
    Spider for the Shenzhen Economic, Trade and Information Commission
    (深圳市经济贸易和信息化委员会):
    http://zj.szjmxxw.gov.cn/sfwweb/ApplicationGuide/guide2016.aspx

    Crawls the application-guide listing table, then follows each row's
    link to scrape the article body in :meth:`page_detail`.
    """
    name = 'szjmxxwSpider'
    allowed_domains = ["zj.szjmxxw.gov.cn"]
    # TODO: the listing URL may be year-based (2016 exists, 2015/2017 do not);
    # revisit if the site changes its scheme again.
    start_urls = ['http://zj.szjmxxw.gov.cn/sfwweb/ApplicationGuide/guide2018.aspx']

    @staticmethod
    def _to_iso_date(text):
        """Convert a page date string like '2018年01月02日' to 'YYYY-MM-DD'.

        Returns None when the text does not match the expected format.
        """
        try:
            return datetime.strptime(text, "%Y年%m月%d日").strftime("%Y-%m-%d")
        except ValueError:
            # Malformed / non-date cell text; treat as unknown.
            return None

    def parse(self, response):
        # TODO: compare against the newest DB entries to detect updates.
        # TODO: handle pagination if the listing ever spans multiple pages.
        rows = response.css('.table_simple tbody').css('.table_simple tr')
        base_url = 'http://zj.szjmxxw.gov.cn/sfwweb'

        for row in rows:
            cells = row.css('td')

            # NOTE: a *fresh* tag dict per row. The original code reused one
            # dict across iterations and passed it by reference in meta, so
            # every callback saw the last row's values (and the 7-column
            # branch leaked the previous row's 'type').
            tag = {
                'type': '',
                'function': '',
            }

            # The table uses rowspan merging, so rows carry 8, 7 or fewer
            # cells. Locate the link cell; the date/status cells sit at
            # fixed offsets after it.
            if len(cells) == 8:
                tag['type'] = cells[0].css('::text').extract()
                tag['function'] = cells[1].css('::text').extract()
                link_idx = 2
            elif len(cells) == 7:
                tag['function'] = cells[0].css('::text').extract()
                link_idx = 1
            else:
                link_idx = 0

            # hrefs are relative like './path'; drop the leading './'.
            url = base_url + cells[link_idx].css('::attr(href)')[0].extract()[2:]
            title = cells[link_idx].css('a::text').extract()[0]
            publish_raw = cells[link_idx + 2].css('::text').extract()
            expired_raw = cells[link_idx + 3].css('::text').extract()
            status_raw = cells[link_idx + 4].css('font::text').extract()

            publish_date = self._to_iso_date(publish_raw[0]) if publish_raw else None

            if expired_raw:
                # Some cells split into three text nodes; the date is the third.
                expired_text = expired_raw[2] if len(expired_raw) == 3 else expired_raw[0]
                expired_date = self._to_iso_date(expired_text)
            else:
                expired_date = None

            # '(过期)' marks an expired notice; anything else counts as active.
            status = 'expired' if status_raw and status_raw[0] == '(过期)' else 'active'

            # Follow the article link; carry the listing metadata along.
            yield Request(
                    url=url,
                    callback=self.page_detail,
                    meta={
                        'title': title,
                        'publish_date': publish_date,
                        'expired_date': expired_date,
                        'status': status,
                        'tag': tag
                    }
                )

    def page_detail(self, response):
        """Build a GovernmentItem from an article page plus listing meta."""
        item = GovernmentItem()

        # Fixed fields for this source.
        item['province'] = '广东'
        item['city'] = '深圳'
        #item['zone'] = ''
        item['department'] = '市经贸信息委'
        item['title'] = response.meta['title']
        item['url'] = response.request.url

        # TODO: rename tag keys to a proper schema.
        item['tags'] = json.dumps(response.meta['tag'])

        item['publish_date'] = response.meta['publish_date']
        item['expired_date'] = response.meta['expired_date']
        item['status'] = response.meta['status']

        body = response.css('#content')[0]  # article body
        item['content'] = body.extract()
        item['md5_check'] = md5sum(item['content'])
        # TODO: generate a proper abstract later; first 10 <p> tags for now.
        item['abstract'] = ''.join([p.extract() for p in body.css('p')[:10]])

        # Collect attachment links (some hrefs are already absolute).
        item['attachments'] = []
        for anchor in body.css('a'):
            href = anchor.css('::attr(href)')
            if not href:
                continue
            href = href[0].extract()

            label = anchor.css('::text')
            # Fall back to a generic label when the anchor has no text.
            label = label[0].extract() if label else '附件'

            item['attachments'].append({
                'name': label,
                'url': href
            })

        item['attachments'] = json.dumps(item['attachments'])

        yield item