# coding=utf-8
import logging
import sys

import scrapy
from scrapy import Spider
from selenium.webdriver.support.wait import WebDriverWait

from scrapy_sinahealth import utils
from scrapy_sinahealth.items import ScrapyHealthItem
from scrapy_sinahealth.middlewares import JavaScriptListPageMiddleware

# Python 2-only hack: re-import sys to restore setdefaultencoding (which
# site.py deletes at startup) so implicit str<->unicode conversions use
# UTF-8 instead of ASCII. NOTE(review): fragile, process-global side effect;
# has no equivalent (and is unnecessary) on Python 3.
reload(sys);
sys.setdefaultencoding("utf-8");

# Route the root logger through the project's shared logging configuration.
utils.configure_logging(logging.getLogger())

class IBaodian(Spider):
    """Spider for ibaodian.com article pages (展业咨询 section).

    List-page URLs are discovered by DownloadMiddleware (below), which
    attaches the collected article links to the response as ``url_set``.
    """
    name = "ibaodian"
    download_delay = 1
    page_total = 10

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.RandomUserAgentMiddleware': 501,
            'scrapy_sinahealth.spiders.ibaodian.DownloadMiddleware': 543,
        },
        'LOG_FILE': "scrapy_ibaodian.log",
        'LOG_LEVEL': 'DEBUG',
        'SELENIUM_REQUEST_ENABLED': True,
        'SELENIUM_ENGINE': 'CHROME'  # one of CHROME / FIREFOX / PHANTOMJS
    }

    def __init__(self):
        super(IBaodian, self).__init__()
        self.enable_relative = True
        self.start_urls = [
            "https://www.ibaodian.com/information.jsp",  # 展业咨询
        ]

    def parse(self, response):
        """Fan out one content request per URL gathered by the middleware."""
        # ``url_set`` is attached to the response by
        # DownloadMiddleware._parse_list — TODO confirm against the base
        # JavaScriptListPageMiddleware contract.
        for content_url in response.url_set:
            yield scrapy.Request(content_url, callback=self.parse_content)

    def parse_content(self, response):
        """Extract a single article page into a ScrapyHealthItem."""
        logging.info("processing content url %s..." % response.url)

        item = ScrapyHealthItem()
        item["url"] = response.url.strip()
        # NOTE(review): assumes a Referer header is always present (the
        # request is always generated from a list page) — would KeyError
        # otherwise.
        item["referer"] = response.request.headers['Referer'].strip()
        item["spider"] = self.name
        item["category"] = ""
        item["tag"] = ""
        item['issue_date'] = utils.select_default(
            response, '//div[@class="inforD-time"]/span[1]/text()')
        item['source'] = utils.select_default(
            response, '//div[@class="inforD-time"]/span[2]/text()')
        item["title"] = utils.select_default(
            response, '//div[@class="informationDetail"]/h3/text()')
        item["content"] = utils.select_default(
            response, '//div[@class="inforD-word"]')

        yield item


class DownloadMiddleware(JavaScriptListPageMiddleware):
    """Selenium-driven middleware that pages through the ibaodian list view
    by clicking the "点击查看更多" (load more) control, collecting article
    hrefs along the way."""

    def _parse_list(self, request, response, spider):
        """Walk up to ``spider.page_total`` pages in ``self.driver`` and
        return the set of article URLs found.

        Stops early when the "load more" control can no longer be located
        (last page) or when the page cap is reached.
        """
        import time  # hoisted out of the loop; was re-imported every iteration

        logging.debug("entering _parse_list method...")
        logging.info("total %d page spider would process..." % spider.page_total)
        url_set = set()
        current_page = 1
        while True:
            if current_page > spider.page_total:
                logging.info("#####Arrive the target page.#####")
                break
            logging.info("current page number is %d... url: %s" % (current_page, request.url))
            # Wait until the pager container is present before scraping links.
            WebDriverWait(self.driver, 30).until(
                lambda driver: driver.find_element_by_xpath('//div[@class="page7-bottom"]'))
            time.sleep(1)
            logging.debug("start to find url list element...")
            content_list = self.driver.find_elements_by_xpath('//div[@class="infor-contabczll"]/a')
            logging.debug("end to find url list element and the length is %d..." % len(content_list))
            # BUGFIX: url_list was previously unbound (or stale from the prior
            # iteration) when the extraction raised, causing a NameError or
            # duplicated data at the union below.
            url_list = []
            try:
                url_list = [sel.get_attribute("href") for sel in content_list]
            except Exception as e:  # "as" form works on Python 2.6+ and 3
                logging.error("error occurred...")
                logging.error(e)

            url_set |= set(url_list)
            logging.info("url set now have %d records... url: %s" % (len(url_set), request.url))

            try:
                # The "load more" control disappears on the last page; the
                # resulting timeout/NoSuchElement ends the loop below.
                WebDriverWait(self.driver, 30).until(
                    lambda driver: driver.find_element_by_xpath('//div[@class="infor-contabcz"]/div[@class="infor-contabm"]/div[text()="点击查看更多"]'))
                next_page = self.driver.find_element_by_xpath('//div[@class="infor-contabcz"]/div[@class="infor-contabm"]/div[text()="点击查看更多"]')
                time.sleep(2)
                next_page.click()
                logging.info("going to next page... url is %s..." % request.url)
                current_page = current_page + 1
            except Exception as e:
                logging.warning(e)
                logging.warning("#####Arrive the last page.#####")
                break

        return url_set
