# -*- coding: utf-8 -*-
import hashlib
from urllib.parse import urljoin

import scrapy
from bs4 import BeautifulSoup

from utils.helper import get_random_user_agent
from utils.constant import SPLASH_URL

# Search for the keyword '百度' (Baidu) and scrape the returned results.
# Lua script run by Splash's /execute endpoint: load the page, wait for it
# to render, type the keyword into the search box (input[name=srchtxt]),
# click the submit button (#scform_submit), wait for the result page, and
# return the fully rendered HTML for parsing.
script = """
function main(splash)
    splash:go(splash.args.url)
    splash:wait(5)

    splash:select('input[name=srchtxt]'):focus()
    splash:send_text("百度")
    splash:wait(0)
    splash:select('button[id=scform_submit]'):mouse_click()

    assert(splash:wait(6))

    return {
        html = splash:html()
    }
    end
"""


class SpiderZuanke8Spider(scrapy.Spider):
    """Search zuanke8.com for the keyword '百度' via a Splash-rendered
    request and scrape the result list (URL, title, post time), following
    pagination links.
    """

    name = 'spider_zuanke8'
    allowed_domains = ['zuanke8.com']
    start_urls = ['http://www.zuanke8.com/search.php']

    # scrapy-splash wiring: Splash endpoint, required downloader/spider
    # middlewares and the Splash-aware duplicate filter.
    custom_settings = {
        'SPLASH_URL': SPLASH_URL,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810 # noqa
        },
        'SPIDER_MIDDLEWARES': {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100
        },
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter'
    }

    def start_requests(self):
        """Render the search page through Splash, executing the module-level
        Lua `script` that types the keyword and submits the search form.
        """
        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                headers={'User-Agent': get_random_user_agent()},
                callback=self.parse,
                meta={
                    'splash': {
                        'args': {'lua_source': script},
                        'endpoint': 'execute'
                    }
                }
            )

    def parse(self, response):
        """Extract search-result items and follow pagination links.

        Yields dicts with keys: content_url, title, post_time, md5.
        NOTE: the 'md5' field actually holds a SHA-224 hex digest; the key
        name is kept for backward compatibility with downstream consumers.
        """
        soup = BeautifulSoup(response.body, 'html.parser')
        for li in soup.find_all('li', attrs={'class': 'pbw'}):
            # Guard against malformed <li> entries that lack the expected
            # <a>/<span> children — skip them instead of crashing the parse
            # with AttributeError/KeyError.
            anchor = li.find('a')
            span = li.find('span')
            if anchor is None or span is None or not anchor.get('href'):
                continue
            content_url = urljoin(response.url, anchor['href'])
            title = anchor.get_text().strip()
            post_time = span.get_text().strip()
            if content_url and title and post_time:
                yield {
                    'content_url': content_url,
                    'title': title,
                    'post_time': post_time,
                    'md5': hashlib.sha224('|'.join([content_url, title, post_time]).encode('utf8')).hexdigest() # noqa
                }
        # Pagination: follow every link in the pager; the Splash-aware
        # dupefilter drops pages already visited.
        pager = soup.find('div', attrs={'class': 'pg'})
        if pager is not None:
            for anchor in pager.find_all('a'):
                href = anchor.get('href')
                if not href:  # skip pager anchors without a target
                    continue
                yield scrapy.Request(
                    url=urljoin(response.url, href),
                    # Bug fix: the original forwarded *response* headers
                    # (Content-Type, Set-Cookie, ...) as request headers;
                    # send a fresh User-Agent instead, as start_requests does.
                    headers={'User-Agent': get_random_user_agent()},
                    callback=self.parse,
                )
