# -*- coding: utf-8 -*-

import datetime
import re
import time
import urllib
import urllib.parse  # explicit: `import urllib` alone does not bind the `parse` submodule

import redis
import scrapy
from bs4 import BeautifulSoup as BS

from ..items import MonitorPublicOpinionItem

redis_db = redis.Redis(host='127.0.0.1', port=6379, db=0)  # Redis connection used to fetch keywords (plays the role of a MySQL conn)
redis_key = "crawl2018:keyword"  # name of the Redis list holding the crawl keywords (a key name, not a value)
baseUrl = 'http://www.sogou.com/web'  # Sogou web-search endpoint
channel = '搜狗网页'  # channel label stored on every item ("Sogou web")
sleep = 900  # seconds to sleep between full crawl rounds

class sougou_web(scrapy.Spider):
    """Poll Sogou web search for every keyword stored in the Redis list.

    Each keyword is crawled for ``page_total`` result pages; when all
    keywords are exhausted the spider sleeps ``sleep`` seconds and starts
    a new round (the keyword list is re-read from Redis each round).
    """
    name = 'sougou_web'
    custom_settings = {
        'DOWNLOAD_DELAY': 10,
    }

    def start_requests(self):
        """Initialise paging/keyword state and issue the first request."""
        self.page_total = 6     # pages to crawl per keyword
        self.page_current = 1   # current page number (1-based)
        self.keyword_index = 0  # index of the keyword being crawled

        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback)

    def errback(self, msg):
        """Print request failures (best-effort logging only; the loop is not resumed)."""
        print(msg)

    def e_get_url(self):
        """Build and return the search URL for the current keyword/page.

        Re-reads the keyword list from Redis at the start of every round
        (i.e. whenever ``keyword_index`` is 0).
        """
        if self.keyword_index == 0:
            self.keyword_box = redis_db.lrange(redis_key, 0, -1)  # raw keyword bytes
            self.keyword_len = len(self.keyword_box)
        self.keyword = self.keyword_box[self.keyword_index].decode("UTF-8")
        # A keyword entry may hold several space-separated terms.
        self.keyword_list = self.keyword.strip(' ').split(' ')

        data = urllib.parse.urlencode({
            'query': self.keyword,
            'page': self.page_current,
            'ie': 'utf8',
        })
        url = baseUrl + '?' + data
        ret = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + ' [' + self.keyword + ']：' + url
        print(ret)
        return url

    def parse(self, response):
        """Extract matching result entries, then schedule the next page/keyword."""
        # Explicit parser: avoids bs4's "no parser specified" warning and
        # keeps results independent of which parsers are installed.
        soup = BS(response.body, 'html.parser')
        td = soup.find(class_='results').find_all(class_=re.compile("rb|vrwrap"))
        position = 0  # rank of the matched entry within this page
        for t in td:
            item = MonitorPublicOpinionItem()
            item['source'] = ''
            item['date'] = ''
            try:
                if t.h3 is None:
                    continue

                txt_info = t.find(class_=re.compile("str_info|ft|vr-west-")).get_text()

                # Keep only entries whose snippet mentions one of the current
                # keyword terms.  (Fix: this used to be hard-coded to the
                # literal '因赛', which broke the filter for every other
                # keyword stored in Redis; the guard `if k` skips empty terms
                # produced by double spaces, which would match everything.)
                if not any(k in txt_info for k in self.keyword_list if k):
                    continue

                position += 1
                if t.cite:
                    # Normalise the cite line and split it into source/date parts.
                    tbox = (t.cite.get_text(strip=True).replace(' ', '').replace('\xa0', '')
                            .replace('\n', '').rstrip('翻译此页').split('-'))
                    if tbox:
                        len_t_box = len(tbox)
                        if len_t_box == 5:       # source-...-Y-M-D
                            item['source'] = tbox[0]
                            item['date'] = tbox[2] + '-' + tbox[3] + '-' + tbox[4]
                        elif len_t_box == 4:     # ...-Y-M-D, no source field
                            item['source'] = ''
                            item['date'] = tbox[1] + '-' + tbox[2] + '-' + tbox[3]
                        elif len_t_box == 3:     # source + relative date
                            item['source'] = tbox[0]
                            item['date'] = self.change_to_date(tbox[2])
                        elif len_t_box == 2:     # relative date, no source
                            item['source'] = ''
                            item['date'] = self.change_to_date(tbox[1])
                        if item['date'] == '':
                            # Fallback: pull the date out of the snippet's gray text.
                            item['date'] = (t.select('.strBox .str_info .gray-color')[0]
                                            .get_text(strip=True).rstrip('-')
                                            .replace('年', '-').replace('月', '-').replace('日', ''))

                item['href'] = t.h3.a['href']
                # Relative link -> absolute (was `.index('/') == 0`, which
                # used exception flow when '/' was absent).
                if item['href'].startswith('/'):
                    item['href'] = "http://www.sogou.com" + item['href']

                item['title'] = t.h3.a.get_text(strip=True)
                if item['title'] == '':
                    continue
                item['created_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                item['keyword'] = self.keyword
                item['channel'] = channel
                item['position'] = position
                yield item
            except Exception as e:
                # Malformed entries are skipped; the error is only printed.
                print(e)
                continue

        # Advance paging state: next page, then next keyword, then a new round.
        self.page_current += 1
        if self.page_current > self.page_total:
            self.page_current = 1
            self.keyword_index += 1
            if self.keyword_index >= self.keyword_len:
                self.keyword_index = 0
                print("休息" + str(sleep) + "，准备开启新的一轮..")
                time.sleep(sleep)
        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback)

    def change_to_date(self, timestr):
        """Convert relative Chinese dates ('N天前', 'N小时前', 'N月前') to
        'YYYY-MM-DD'; any other string is returned unchanged.

        Only called from inside parse()'s try block, so a malformed number
        (ValueError from int()) skips the entry rather than crashing.
        """
        now_time = datetime.datetime.now()
        # Fix: the original used `find(...) > 0`, which misses a match at
        # index 0; `!= -1` is the correct containment test.
        if timestr.find('天前') != -1:
            days = timestr.rstrip('天前')
            timestr = (now_time - datetime.timedelta(days=int(days))).strftime('%Y-%m-%d')
        if timestr.find('小时前') != -1:
            timestr = now_time.strftime('%Y-%m-%d')
        if timestr.find('月前') != -1:
            from dateutil.relativedelta import relativedelta
            mon = timestr.rstrip('月前')
            timestr = (now_time - relativedelta(months=int(mon))).strftime('%Y-%m-%d')
        return timestr

