# -*- coding: utf-8 -*-

import scrapy
import urllib
from bs4 import BeautifulSoup as BS
from ..items import MonitorPublicOpinionItem
import time
import redis
import datetime


redis_db = redis.Redis(host='127.0.0.1', port=6379, db=0) # Redis connection (analogous to a MySQL conn)
redis_key = "crawl2018:keyword"  # name of the Redis list holding the keywords to crawl


baseUrl = 'http://www.baidu.com/s'
channel = '百度搜索'
page = 1;  # which result page to fetch (1-based)
limit = 120 # number of results requested per page (Baidu 'rn' parameter)


class mpo_baidu(scrapy.Spider):
    """Baidu-search spider.

    Reads a list of keywords from Redis, fetches one Baidu result page per
    keyword, yields one item per search hit, then sleeps and starts the
    whole round over.  The keyword list is re-read from Redis on every
    request, so new keywords are picked up without a restart.
    """

    name = 'mpo_baidu'
    allowed_domains = ['www.baidu.com']

    def start_requests(self):
        """Entry point: start crawling with the first keyword."""
        self.keyword_i = 0  # index of the keyword currently being crawled
        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback)

    def errback(self, failure):
        """Log a failed request instead of silently dropping it."""
        print(failure)

    def e_get_url(self):
        """Build and return the Baidu search URL for the current keyword.

        Side effect: refreshes ``self.keywork_box`` from Redis so the
        keyword list stays current across rounds.
        """
        self.keywork_box = redis_db.lrange(redis_key, 0, -1)  # raw bytes keywords
        keyword = self.keywork_box[self.keyword_i].decode("UTF-8")
        params = {
            'wd': keyword,
            'pn': str(page - 1) + '0',  # result offset; page 1 -> '00'
            'rn': limit,                # results per page
            'tn': 'baidurt',            # plain-HTML result template
            'ie': 'utf-8',
            'bsst': '1',
        }
        url = baseUrl + '?' + urllib.parse.urlencode(params)
        stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print(stamp + ' [' + keyword + ']：' + url)
        return url

    def parse(self, response):
        """Extract search hits from a result page and yield one item each.

        After the page is processed, advances to the next keyword; when the
        list is exhausted, sleeps 900 s and restarts from the first keyword.
        """
        # Explicit parser: without it BeautifulSoup warns and may pick a
        # different parser per environment, changing extraction results.
        soup = BS(response.body, 'html.parser')
        keyword = self.keywork_box[self.keyword_i].decode("UTF-8")
        position = 0  # 1-based rank of the hit on this page
        for t in soup.find_all(class_='f'):
            position += 1

            # BUG FIX: realtime_str was previously unbound when the first hit
            # had no 'realtime' div, and stale (inherited from the previous
            # hit) on later ones.  Reset it for every result.
            realtime_str = ''
            realtime = t.find_all('div', attrs={'class': 'realtime'})
            if realtime:
                realtime_str = realtime[0].get_text().strip()
                if realtime_str.find('天前') > 0:
                    # Relative date like "3天前" ("3 days ago"): convert to
                    # an absolute YYYY-MM-DD date.
                    days = realtime_str.rstrip('天前')
                    now_time = datetime.datetime.now()
                    realtime_str = (now_time + datetime.timedelta(days=-int(days))).strftime('%Y-%m-%d')

            item = MonitorPublicOpinionItem()
            item['title'] = t.h3.a.get_text().strip()
            item['href'] = t.h3.a['href']
            item['date'] = realtime_str
            item['created_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            item['keyword'] = keyword
            item['channel'] = channel
            item['position'] = position
            yield item

        self.keyword_i += 1
        if self.keyword_i >= len(self.keywork_box):
            # Message fixed: the old text claimed 10 seconds but the code
            # sleeps 900.  NOTE(review): time.sleep blocks the whole Scrapy
            # reactor; a DOWNLOAD_DELAY or deferred reschedule would be
            # cleaner — confirm single-spider use before changing.
            print('休息900秒，准备开启新的一轮..')
            time.sleep(900)
            self.keyword_i = 0
        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback)



