# coding=utf-8
import logging
import re
import sys

import time
from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_sinahealth import utils
from scrapy_sinahealth.items import ScrapyHealthItem

# Python 2 only: reload() re-exposes sys.setdefaultencoding (which CPython
# removes from the module namespace during startup) so the process-wide
# default encoding can be forced to UTF-8 for the Chinese text this spider
# scrapes.
# NOTE(review): this is a well-known anti-pattern (it masks implicit
# str/unicode mixing bugs) and does not exist under Python 3 — confirm
# before any migration.
reload(sys);
sys.setdefaultencoding("utf-8");

class BXJG(CrawlSpider):
    """Spider for CIRC regulatory pages on bxjg.circ.gov.cn.

    Seeds the crawl with paginated listing pages (``start_page`` through
    ``end_page``, inclusive) and extracts title, body content and issue
    date from each detail page matched by ``rules``.
    """
    name = "bxjg"
    download_delay = 1
    allowed_domains = ["bxjg.circ.gov.cn"]
    # Inclusive range of listing pages used to build start_urls.
    start_page = 1
    end_page = 13
    # start_date = "2018-02-01"
    # end_date = "2018-02-12"

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.RandomUserAgentMiddleware': 501,
            # 'scrapy_sinahealth.spiders.qqhealth.QQHealthDownloadMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'scrapy_sinahealth.pipelines.FlatFilePipeline': 590,
        }
    }

    rules = (
        Rule(SgmlLinkExtractor(allow=(r'http://bxjg.circ.gov.cn/web/site0/tab5240/info\d+.htm',)),
             callback='parse_content'),
    )

    def __init__(self, *args, **kwargs):
        # Forward scrapy-supplied arguments (the original swallowed them,
        # which breaks `scrapy crawl bxjg -a key=value`).
        super(BXJG, self).__init__(*args, **kwargs)
        self.start_urls = self.generate_url(self.start_page, self.end_page)

    def generate_url(self, start_page, end_page):
        """Return listing-page URLs for pages start_page..end_page, inclusive.

        Fix: the original used range(start_page, end_page), which silently
        dropped the final page (page 13 with the class defaults).
        """
        base_url = "http://bxjg.circ.gov.cn/web/site0/tab5240/module14430/page%s.htm"
        return [base_url % page for page in range(start_page, end_page + 1)]

    def parse_content(self, response):
        """Extract one ScrapyHealthItem from a detail page response."""
        logging.info("processing content url %s..." % response.url)
        item = ScrapyHealthItem()

        item["url"] = response.url.strip()
        # The Referer header may be absent (e.g. a retried or direct
        # request); headers.get returns None then, so default to ''.
        item["referer"] = (response.request.headers.get('Referer') or '').strip()
        item["spider"] = self.name
        item["category"] = 'BXJG'
        item["tag"] = ''

        title_xpath_list = ['//table[@id="tab_content"]/tbody/tr[1]/td/text()']
        item['title'] = utils.try_select_default(response, title_xpath_list)

        item['source'] = ''

        content_xpath_list = ['//span[@id="zoom"]']
        item["content"] = utils.try_select_default(response, content_xpath_list)

        issue_date_xpath_list = ['//table[@id="tab_content"]/tbody/tr[2]/td']
        issue_date_text = utils.try_select_default(response, issue_date_xpath_list)
        # Guard against pages without a recognizable yyyy-mm-dd date:
        # the original indexed findall(...)[0] unconditionally, raising
        # IndexError and losing the entire item.
        dates = re.findall(r"\d\d\d\d-\d\d-\d\d", issue_date_text)
        if dates:
            item['issue_date'] = time.strptime(dates[0], '%Y-%m-%d')
        else:
            logging.warning("no issue date found on %s" % response.url)
            item['issue_date'] = None

        yield item