# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from scrapy_redis.spiders import RedisSpider
import time
from ..items import NewssiteItem


class BaidusearchSpider(RedisSpider):
    """Distributed (scrapy-redis) spider that runs a Baidu search restricted
    to a single site and yields every result URL as a ``NewssiteItem``,
    following "next page" links until pagination runs out.

    Usage::

        scrapy crawl baiduSearch -a kw=<keyword> -a site=<site>

    Known ``site`` values: news.sina.com.cn, news.163.com
    """
    name = 'baiduSearch'
    allowed_domains = ['baidu.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'news_spiders.pipelines.BaiduPipeline': 200,
        },
        # Polite throttling between requests. Replaces the original
        # time.sleep(2) inside parse(), which blocked the Twisted reactor
        # and stalled every concurrent request.
        'DOWNLOAD_DELAY': 2,
    }
    kw = ''    # search keyword
    site = ''  # site the search is restricted to (Baidu "q6" parameter)
    baseUrl = 'http://www.baidu.com/s?'
    pg = 0

    def __init__(self, kw=None, site=None, *args, **kwargs):
        """Accept the keyword and target site from ``-a`` command-line args."""
        super(BaidusearchSpider, self).__init__(*args, **kwargs)
        self.kw = kw
        self.site = site

    def start_requests(self):
        """Issue the initial advanced-search request (tn=baidurt layout)."""
        param = 'q1={}&q2=&q3=&q6={}&tn=baidurt&pnw=1&pbl=0&pbs=0&bsst=1&ie=utf-8'.format(
            self.kw, self.site)

        yield scrapy.Request(self.baseUrl + param, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Yield one item per search result, then follow the next-page link.

        Fixes over the original:
        - the response is parsed into a soup once, not twice;
        - a fresh ``NewssiteItem`` is created per result instead of mutating
          and re-yielding a single shared instance;
        - a page without pagination no longer raises ``IndexError``.
        """
        soup = BeautifulSoup(response.body, 'lxml')

        # Result links.
        for tag in soup.select('#main .content h3 a'):
            self.logger.debug('result: %s', tag['href'])
            item = NewssiteItem()
            item['url'] = self.url_process(tag['href'])
            yield item

        # Pagination: the last '#page .n' anchor is "next page" when present.
        pagers = soup.select('#page .n')
        if not pagers:
            return  # last page (or layout changed) — nothing to follow
        pageInfo = pagers[-1]
        if '下一页' in pageInfo.text:
            nextUrl = pageInfo.get('href')
            if nextUrl is not None:
                nextUrl = response.urljoin(nextUrl)
                self.logger.debug('next page: %s', nextUrl)
                yield scrapy.Request(nextUrl, callback=self.parse, dont_filter=True)

    def url_process(self, url):
        """Upgrade the URL scheme from http to https.

        Only the leading scheme is rewritten; the original blanket
        ``replace`` would also corrupt any 'http:' embedded elsewhere in
        the URL (e.g. in a redirect query parameter).
        """
        if url.startswith('http:'):
            url = 'https:' + url[len('http:'):]
        return url
