import logging
import sys

from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import Rule, CrawlSpider

from scrapy_sinahealth import utils
from scrapy_sinahealth.customizers import EchinalifeCustomizer
from scrapy_sinahealth.items import ScrapyHealthItem

# Python 2 compatibility hack: force the interpreter's default string
# encoding to UTF-8 so implicit str<->unicode coercions of Chinese page
# text don't raise UnicodeDecodeError. `sys.setdefaultencoding` is deleted
# from `sys` by site.py at startup, hence the reload(sys) to restore it.
# NOTE(review): this is a process-wide, widely-discouraged py2-only hack
# (`reload` is not a builtin on Python 3); it masks missing explicit
# encode/decode calls elsewhere in the codebase.
reload(sys);
sys.setdefaultencoding("utf-8");

class Echinalife(CrawlSpider):
    """Scrape investor-relations report listings from www.e-chinalife.com.

    The link-extraction ``rules`` are intentionally disabled; instead a
    fixed list of report-index pages is supplied as ``start_urls`` and each
    response is converted into one ``ScrapyHealthItem`` whose ``image_urls``
    field carries the PDF links found on the page (downloaded downstream by
    ``ImageLoadPipeline``).

    NOTE(review): overriding ``parse()`` on a ``CrawlSpider`` conflicts with
    its rule-dispatch machinery; it only works here because ``rules`` is
    empty. Subclassing plain ``scrapy.Spider`` would be safer.
    """

    name = "echinalife"
    download_delay = 1  # seconds between requests; be polite to the site
    allowed_domains = ["www.e-chinalife.com"]
    # Kept for the (currently unused) generate_url pagination helper.
    start_page = 1
    end_page = 13

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.RandomUserAgentMiddleware': 501,
        },
        'ITEM_PIPELINES': {
            'scrapy_sinahealth.pipelines.ImageLoadPipeline': 600,
        }
    }

    def __init__(self, *args, **kwargs):
        # Forward spider arguments (e.g. from `scrapy crawl -a key=value`)
        # to the base class; the original hard-coded signature dropped them.
        super(Echinalife, self).__init__(*args, **kwargs)
        self.customizer = EchinalifeCustomizer()
        # Fixed report-listing pages; pagination is encoded in the
        # `curtPage` suffix of each URL.
        self.start_urls = [
            'http://www.e-chinalife.com/IRchannel/http/gb2312/quarterly_reports_1.html&curtPage=1',
            'http://www.e-chinalife.com/IRchannel/http/gb2312/annual_interim_reports.html&curtPage=1',
            'http://www.e-chinalife.com/IRchannel/http/gb2312/annual_interim_reports.html&curtPage=2',
            'http://www.e-chinalife.com/IRchannel/http/gb2312/annual_interim_reports.html&curtPage=3',
            'http://www.e-chinalife.com/IRchannel/http/gb2312/quarterly_reports.html&curtPage=1',
            'http://www.e-chinalife.com/IRchannel/http/gb2312/quarterly_reports.html&curtPage=2',
        ]

    def replace_img_src(self, html, image_url, new_image_full_path):
        """Return *html* unchanged.

        The src-rewriting logic is deliberately disabled for this spider;
        the hook is kept so shared pipeline code can call it uniformly.
        """
        return html

    def parse(self, response):
        """Convert one report-listing page into a ``ScrapyHealthItem``.

        ``image_urls`` receives every PDF href found under the
        ``new_201301`` spans; ``image_appendixes`` receives the visible
        report titles — presumably used as file names downstream (TODO
        confirm against ImageLoadPipeline).
        """
        logging.info("processing content url %s..." % response.url)

        item = ScrapyHealthItem()
        item["url"] = response.url.strip()
        item["referer"] = ''
        item["spider"] = self.name
        item["category"] = 'Echinalife'
        item["tag"] = ''
        item['issue_date'] = ''
        item['source'] = ''
        item["content"] = ''

        title_xpath_list = ['//span[@class="new_201301"]/text()']
        item["title"] = utils.try_select_default(response, title_xpath_list)

        # Visible report names accompanying each PDF link.
        filename_xpath_list = ['//span[@class="new_201301"]/text()[1]']
        item["image_appendixes"] = utils.try_select_all(response, filename_xpath_list)

        # All PDF hrefs on the page; ImageLoadPipeline fetches these.
        pdf_xpath_list = ['//span[@class="new_201301"]/a/@href']
        item["image_urls"] = utils.try_select_all(response, pdf_xpath_list)

        yield item
