# -*- coding: utf-8 -*-
import scrapy
import time
import re
import rsa
import base64
import binascii
import random
import os
import json
import datetime
import urllib.parse
from scrapy_redis import defaults
from scrapy_redis.utils import bytes_to_str
from selenium import webdriver
from scrapy import signals

import settings
from utils import errors, common
from .myspider import MySpider
from items import WeiboItem, LabcrawlerItemLoader

if not settings.DEBUG:
    from pyvirtualdisplay import Display


class WeiboSpider(MySpider):
    """Redis-fed spider that crawls Weibo keyword-search result pages.

    Workflow:
      1. scrapy_redis pops a start URL from ``weibo:start_urls``.
      2. If the URL points at our own web host, :meth:`parse` extracts a
         crawler task id from the page, resolves it to a keyword and
         schedules the first search-results page.
      3. Search-result pages are parsed into :class:`WeiboItem` objects and
         pagination continues (up to page 50, Weibo's hard limit for search)
         until the task is marked done.

    A Selenium-driven Chrome instance is kept for the spider's lifetime
    (used by downstream middleware; see ``self.browser``). Outside DEBUG
    mode the browser runs inside a virtual framebuffer display.
    """

    name = 'weibo'
    # allowed_domains = ['www.weibo.com']
    redis_key = 'weibo:start_urls'          # scrapy_redis queue with seed URLs
    searchUrl = "http://s.weibo.com/weibo/" # base URL for keyword search
    # Weibo replies with 302 (e.g. login walls / anti-spider redirects);
    # let our callbacks see those responses instead of Scrapy swallowing them.
    handle_httpstatus_list = [302, ]

    def __init__(self, **kwargs):
        """Start the (optionally headless) Chrome browser for page rendering.

        In production (``settings.DEBUG`` false) a virtual display is started
        first, since no X server is available there.
        """
        super(WeiboSpider, self).__init__(**kwargs)
        if not settings.DEBUG:
            self.display = Display(visible=0, size=(800, 600))
            self.display.start()
        chrome_opt = webdriver.ChromeOptions()
        # 2 == "block": disable image loading to speed up rendering.
        prefs = {"profile.managed_default_content_settings.images": 2}
        chrome_opt.add_experimental_option("prefs", prefs)
        # NOTE(review): `chrome_options=` is deprecated (removed in Selenium
        # 4.10; use `options=`). Kept as-is for the Selenium version this
        # project pins -- confirm before upgrading.
        self.browser = webdriver.Chrome(chrome_options=chrome_opt)
        self.log.logger.debug('spider {0} initialized'.format(self.name))
        # Fixed: original called .format(self.name) on a string with no
        # placeholder, which was a no-op.
        self.log.logger.debug('selenium webdriver chrome initialized')

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and hook :meth:`spider_closed` to the
        ``spider_closed`` signal so browser/display resources get released."""
        spider = super(WeiboSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Signal handler: quit Chrome and, outside DEBUG, stop the
        virtual display started in :meth:`__init__`."""
        self.log.logger.debug('spider {0} closed'.format(self.name))
        self.browser.quit()
        if not settings.DEBUG:
            self.display.stop()

    # Parse pages from the Weibo keyword-search workflow.
    def parse(self, response):
        """Dispatch on the response's domain.

        * Our own web host: extract a task id, resolve it to a keyword and
          schedule page 1 of the Weibo search for that keyword.
        * Weibo search page: yield one :class:`WeiboItem` per feed entry,
          then either schedule the next page (carrying ``task_id`` in
          ``meta``) or mark the task done.
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))
        domain = self.get_domain(response.url)
        if domain == settings.WEB_HOST:
            # Seed request from our own service: turn the embedded task id
            # into the first search-results request.
            task_id = self.get_taskid(response.text)
            if task_id:
                keyword = self.get_keyword(task_id)
                next_url = self.searchUrl + urllib.parse.quote(keyword) + "&page=1"
                yield scrapy.Request(url=next_url, dont_filter=True, callback=self.parse, meta={'task_id': task_id})
        else:
            task_id = response.meta.get("task_id", 0)

            # Weibo's literal "sorry, nothing found" marker -- no results
            # for this keyword, so the task is finished.
            if "抱歉，未找到" in response.text:
                self.set_task_done(task_id)
                return

            # NOTE(review): assumes extract_num always yields an int when
            # "page=" is present in the URL (it is, for every URL we build);
            # a None here would make `page < 50` below raise -- confirm
            # against utils.common.extract_num.
            page = common.extract_num(response.url, prefix="page=")
            item_nodes = response.xpath('//div[@action-type="feed_list_item"]')

            for item_node in item_nodes:
                item_loader = LabcrawlerItemLoader(item=WeiboItem(), selector=item_node)
                item_loader.add_value("crawler_task_id", task_id)
                item_loader.add_xpath("item_id", './@mid')
                item_loader.add_value("page", page)
                item_loader.add_value("url", response.url)
                item_loader.add_value("url_md5", common.get_md5(response.url))
                item_loader.add_css("content", ".comment_txt")
                item_loader.add_xpath("created_time", './/a[@node-type="feed_list_item_date"]/@title')
                item_loader.add_value("crawled_time", datetime.datetime.now())

                weibo_item = item_loader.load_item()
                yield weibo_item

            # Weibo search caps results at 50 pages.
            if not self.is_task_done(task_id) and page < 50:
                # Yield the next page URL, carrying task_id along in meta.
                next_page = page + 1
                next_url = response.url.replace("page={0}".format(page), "page={0}".format(next_page))
                self.log.logger.debug('Next URL: {0}'.format(next_url))
                yield scrapy.Request(url=next_url, dont_filter=True, callback=self.parse, meta={'task_id': task_id})
            else:
                self.set_task_done(task_id)
