# coding=utf-8
import logging
import sys
import time

import scrapy
from scrapy import Spider
from selenium.webdriver.support.wait import WebDriverWait

from scrapy_sinahealth import utils
from scrapy_sinahealth.customizers import QQHealthCustomizer
from scrapy_sinahealth.items import ScrapyHealthItem
from scrapy_sinahealth.middlewares import JavaScriptListPageMiddleware

# Python 2 only: re-expose sys.setdefaultencoding (hidden by site.py at startup)
# and force UTF-8 as the process-wide default codec so implicit str<->unicode
# conversions of the scraped Chinese text do not raise UnicodeDecodeError.
# NOTE(review): this is a global, well-known Py2 hack with no Py3 equivalent;
# it affects every module in the process.
reload(sys);
sys.setdefaultencoding("utf-8");

class QQHealth(Spider):
    """Spider for health articles on health.qq.com.

    The list pages are rendered client-side by JavaScript, so the paired
    ``QQHealthDownloadMiddleware`` (registered in ``custom_settings``) drives
    Selenium to paginate the list and exposes the collected article URLs on
    the response as ``response.url_set``; ``parse`` then schedules one
    content request per URL.
    """

    name = "qqhealth"
    download_delay = 1  # seconds of politeness delay between requests
    page_total = 2      # how many list pages the download middleware paginates

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.RandomUserAgentMiddleware': 501,
            'scrapy_sinahealth.spiders.qqhealth.QQHealthDownloadMiddleware': 543,
        },
        'SPIDER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.TimelineFilterSpiderMiddleware': 545,
        },
        'ITEM_PIPELINES': {
            'scrapy_sinahealth.pipelines.SQLStorePipeline': 610,
            'scrapy_sinahealth.pipelines.ImageLoadPipeline': 600,
        },
        'SELENIUM_REQUEST_ENABLED': True,
        'SELENIUM_ENGINE': 'CHROME',  # one of CHROME / FIREFOX / PHANTOMJS
        'ENABLE_RELATIVE_DIGGING': True,
        'DUPEFILTER_TABLE': 't_seen_url',
        'TIMELINEFILTER_DATETIME': '2018-01-24'
    }

    def __init__(self):
        super(QQHealth, self).__init__()
        self.customizer = QQHealthCustomizer()
        self.start_urls = [
            # "http://health.qq.com/winds/shbj.htm",  # wellness
            "http://health.qq.com/winds/jbkp.htm",    # diseases
            # "http://health.qq.com/winds/jkzx.htm",  # health news
        ]

    def parse(self, response):
        """Schedule one content request per article URL.

        ``response.url_set`` is attached by QQHealthDownloadMiddleware after
        it has paginated the JS-rendered list pages with Selenium.
        """
        for url in response.url_set:
            yield scrapy.Request(url, callback=self.parse_content)

    def parse_content(self, response):
        """Extract a ScrapyHealthItem from an article page.

        Two page templates are known, so each field tries a list of xpaths
        via ``utils.try_select_default``. When ENABLE_RELATIVE_DIGGING is on,
        the "related reading" links on the page are followed recursively.
        """
        logging.info("processing content url %s..." % response.url)
        item = ScrapyHealthItem()

        item["url"] = response.url.strip()
        # Use .get() with a default: the request may carry no Referer header
        # (e.g. if the referer middleware is disabled), and a plain indexed
        # lookup would raise KeyError and lose the whole item.
        item["referer"] = response.request.headers.get('Referer', '').strip()

        item["spider"] = self.name
        item["category"] = ""
        item["tag"] = ""

        # One xpath per known page template; first match wins.
        issue_date_xpath_list = ['//span[@class="article-time"]/text()', '//span[@class="a_time"]/text()']
        issue_date = utils.try_select_default(response, issue_date_xpath_list)
        # Parsed into a time.struct_time; format like "2018-01-24 10:30".
        item['issue_date'] = time.strptime(issue_date, '%Y-%m-%d %H:%M')

        source_xpath_list = ['//span[@class="color-a-1"]/text()', '//span[@class="a_source"]/text()']
        item['source'] = utils.try_select_default(response, source_xpath_list)

        title_xpath_list = ['//div[@id="C-Main-Article-QQ"]/div[@class="hd"]/h1/text()', '//div[@class="qq_article"]/div[@class="hd"]/h1/text()']
        item["title"] = utils.try_select_default(response, title_xpath_list)

        content_xpath_list = ['//div[@id="Cnt-Main-Article-QQ"]']
        item["content"] = utils.try_select_default(response, content_xpath_list)

        item["image_urls"] = utils.try_select_all(response, '//img/@src')

        yield item

        if self.settings['ENABLE_RELATIVE_DIGGING']:
            logging.info("enabled relative read digging...")
            about_read_xpath_list = ['//td/a[@class="RelaLinkStyle"]/@href', '//div[@class="qq_readBox"]/div/div[@class="bd"]/ul/li/a/@href']
            read_boxes = utils.try_select_all(response, about_read_xpath_list)
            logging.info('relative reading count ... %d' % len(read_boxes))

            for reading in read_boxes:
                # NOTE(review): extract() yields unicode on Py2; the decode
                # round-trips through the utf-8 default encoding set above.
                reading_url = reading.extract().decode('utf-8')
                logging.info("going to get relative reading... %s" % reading_url)
                yield scrapy.Request(reading_url, callback=self.parse_content)

    def replace_img_src(self, html, image_url, new_image_full_path):
        """Return *html* with every occurrence of *image_url* replaced by
        *new_image_full_path* (used after images are downloaded locally)."""
        return html.replace(image_url, new_image_full_path)


class QQHealthDownloadMiddleware(JavaScriptListPageMiddleware):
    """Selenium-driven middleware that paginates the JS-rendered list pages
    of health.qq.com and collects every article URL into a set."""

    def _parse_list(self, request, response, spider):
        """Walk up to ``spider.page_total`` list pages in ``self.driver``
        and return the set of article URLs found on them.

        Pagination stops early when the "next page" control can no longer
        be located (i.e. the last page was reached).
        """
        logging.debug("entering _parse_list method...")
        logging.info("total %d page spider would process..." % spider.page_total)
        url_set = set()
        current_page = 1
        while True:
            if current_page > spider.page_total:  # process at most page_total pages
                logging.info("#####Arrive the target page.#####")
                break
            logging.info("current page number is %d... url: %s" % (current_page, request.url))
            # Wait for the footer element so the list has finished rendering.
            WebDriverWait(self.driver, 10).until(
                lambda driver: driver.find_element_by_xpath('//div[@id="tcopyright"]'))
            # Grace period for late-loading entries. Relies on the module-level
            # `import time` instead of re-importing inside the loop.
            time.sleep(1)
            logging.debug("start to find url list element...")
            content_list = self.driver.find_elements_by_xpath('//div[@id="listZone"]/div[@class="sBox"]/div/h2/a')
            logging.debug("end to find url list element and the length is %d..." % len(content_list))
            # Pre-initialize so a failure inside the try cannot leave url_list
            # unbound (the original raised NameError right after the except).
            url_list = []
            try:
                url_list = [sel.get_attribute("href") for sel in content_list]
            except Exception as e:
                logging.error("error occurred...")
                logging.error(e)

            url_set |= set(url_list)
            logging.info("url set now have %d records... url: %s" % (len(url_set), request.url))

            try:
                WebDriverWait(self.driver, 10).until(
                    lambda driver: driver.find_element_by_xpath('//div[@id="pageZone"]/span[@title="转到下一页"]/a'))
                next_page = self.driver.find_element_by_xpath('//div[@id="pageZone"]/span[@title="转到下一页"]/a')
                next_page.click()
                logging.info("going to next page... url is %s..." % request.url)
                current_page += 1
            except Exception as e:
                # No (clickable) "next page" link: treat as the last page.
                logging.warning(e)
                logging.warning("#####Arrive the last page.#####")
                break
        return url_set
