# coding=utf-8
import json
import sys

import scrapy
from scrapy import Spider
from scrapy.http import HtmlResponse
from selenium.webdriver.support.wait import WebDriverWait

from scrapy_sinahealth.items import ScrapyHealthItem
from scrapy_sinahealth.middlewares import JavaScriptListPageMiddleware

# NOTE(review): Python 2-only hack to force UTF-8 as the process-wide default
# string encoding. `reload(sys)` is needed because setup deletes
# `sys.setdefaultencoding` after startup. Both are gone in Python 3 — this
# must be dropped when the project is ported.
reload(sys);
sys.setdefaultencoding("utf-8");

class Winbaoxian(Spider):
    """Spider for app.winbaoxian.com "learning" articles.

    A Selenium download middleware (WinbaoxianDownloadMiddleware) paginates
    the JS-rendered list page and attaches the collected article ids to the
    response as ``response.url_set``; ``parse`` then fetches each article's
    JSON detail endpoint and ``parse_content`` turns it into an item.
    """
    name = "winbaoxian"
    download_delay = 1

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.RandomUserAgentMiddleware': 501,
            'scrapy_sinahealth.spiders.winbaoxian.WinbaoxianDownloadMiddleware': 543,
        },
    }

    def __init__(self):
        super(Winbaoxian, self).__init__()
        # Flags consumed by the Selenium list-page middleware
        # (JavaScriptListPageMiddleware) -- TODO confirm exact semantics.
        self.enabled_selenium_request = True
        self.enable_relative = True
        self.start_urls = [
            "https://app.winbaoxian.com/learning/index#&pageHome",  # "learning" section
        ]

    def parse(self, response):
        """Yield one detail-page request per article id found by the middleware.

        ``response.url_set`` is attached by
        WinbaoxianDownloadMiddleware._parse_list (not a standard Scrapy attr).
        """
        # 'article_id' instead of 'id' -- avoid shadowing the builtin.
        for article_id in response.url_set:
            url = "https://app.winbaoxian.com/learning/newsInfo/8/" + article_id
            yield scrapy.Request(url, callback=self.parse_content)

    def parse_content(self, response):
        """Parse a JSON article-detail response into a ScrapyHealthItem."""
        print("processing content url %s..." % response.url)
        payload = json.loads(response.text)
        # Hoist the repeated nested lookup; all fields come from this dict.
        news = payload["data"]["news"]
        print(news)

        item = ScrapyHealthItem()
        item["url"] = response.url.strip()
        item["referer"] = response.request.headers['Referer'].strip()
        item["spider"] = self.name

        # Not derivable from this endpoint; left blank intentionally.
        item["category"] = ""
        item["tag"] = ""

        item["issue_date"] = news["time"]
        item["source"] = news["source"]
        item["title"] = news["title"]
        item["content"] = news["content"]

        yield item


class WinbaoxianDownloadMiddleware(JavaScriptListPageMiddleware):
    """Selenium middleware that walks the paginated article list.

    Drives ``self.driver`` (provided by the base class) through the
    JS-rendered "load more" pagination and collects the ``data-id``
    attribute of every article link it sees.
    """

    # Stop after this many list pages (same limit the code hard-coded before).
    MAX_PAGES = 2

    def _parse_list(self, request, response, spider):
        """Return the set of article id strings from up to MAX_PAGES pages.

        The spider's ``parse`` turns each id into a detail-page request.
        """
        import time  # local import kept: 'time' is not imported at module level

        article_id_set = set()
        page_no = 1
        while True:
            if page_no > self.MAX_PAGES:
                print("#####Arrive the target page.#####")
                break
            print("current pageNo is %d... cate: %s" % (page_no, request.url))
            # Wait until the "load more" control exists, i.e. the list rendered.
            WebDriverWait(self.driver, 10).until(
                lambda driver: driver.find_element_by_xpath('//div[@class="load-more"]'))
            time.sleep(1)  # give freshly appended list items a moment to settle
            print('start...')
            elems = self.driver.find_elements_by_xpath('//a[@class="article-link"]')
            article_id_set.update(elem.get_attribute("data-id") for elem in elems)

            print("url set have %d ... cate: %s" % (len(article_id_set), request.url))

            try:
                WebDriverWait(self.driver, 10).until(
                    lambda driver: driver.find_element_by_xpath('//div[@class="load-more"]'))
                next_page = self.driver.find_element_by_xpath('//div[@class="load-more"]')
                next_page.click()
                print("goto next page... cate: %s" % request.url)

                page_no += 1
            except Exception as e:  # Py2's "except Exception, e" is a Py3 syntax error
                # Best-effort: any failure to find/click "load more" means the
                # last page was reached; return what we have so far.
                print(e)
                print("#####Arrive the last page.#####")
                break

        return article_id_set
