# -*- coding: utf-8 -*-

import datetime
import random
import time
import urllib
import urllib.parse

import redis
import requests
import scrapy
from bs4 import BeautifulSoup as BS

from ..items import MonitorPublicOpinionItem

redis_db = redis.Redis(host='127.0.0.1', port=6379, db=0) # Redis connection handle (analogous to a MySQL conn)
redis_key = "crawl2018:keyword"  # Name of the Redis list that holds the search keywords (the key name, not a value)
baseUrl = 'http://weixin.sogou.com/weixin'
channel = '搜狗微信'
sleep = 900  # Sleep duration in seconds (applied after each parsed page)

class sougou_wx(scrapy.Spider):
    """Poll Sogou's WeChat article search for a rotating list of keywords
    stored in a Redis list, yielding one item per matching article.

    State kept on the instance: ``page_current``/``page_total`` for
    pagination and ``keyword_index`` into the Redis keyword list.
    """
    name = 'sougou_wx'
    # duplicates_model = 1  # mode 0 (default): dedupe on url; mode 1: dedupe on title
    custom_settings = {
        'DOWNLOAD_DELAY': 30,
    }

    def start_requests(self):
        """Initialise pagination/keyword state and issue the first request."""
        self.page_total = 6    # pages to crawl per keyword
        self.page_current = 1  # current result page (1-based)
        self.keyword_index = 0  # index of the current keyword in the Redis list

        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback)

    def errback(self, msg):
        """Print request failures handed over by Scrapy."""
        print(msg)

    def _advance_keyword(self):
        """Switch to the next keyword, wrapping around at the end of the list.

        Resets pagination so the next keyword starts at page 1 (previously
        the antispider/404 paths advanced the keyword without resetting the
        page, silently skipping pages of the next keyword).

        Returns:
            bool: True when a full round over all keywords just completed.
        """
        self.page_current = 1
        self.keyword_index += 1
        if self.keyword_index >= self.keyword_len:
            self.keyword_index = 0
            return True
        return False

    def e_get_url(self):
        """Build the search URL for the current keyword and page.

        Reloads the keyword list from Redis at the start of every round
        (``keyword_index == 0``) so external edits to the list are picked up.
        """
        if self.keyword_index == 0:
            self.keyword_box = redis_db.lrange(redis_key, 0, -1)
            self.keyword_len = len(self.keyword_box)
        self.keyword = self.keyword_box[self.keyword_index].decode("UTF-8")
        # self.keyword_list = self.keyword.strip(' ').split(' ')

        query = urllib.parse.urlencode({
            'query': self.keyword,
            'page': self.page_current,
            'ie': 'utf8',
            'type': '2',
        })
        return baseUrl + '?' + query

    def parse(self, response):
        """Extract article items from one result page, then schedule the
        next request (next page, or next keyword when this one is done)."""
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
              + ' [' + self.keyword + ']：' + response.url)
        soup = BS(response.body, "lxml")

        if 'antispider' in response.url:
            # Redirected to Sogou's anti-bot page: give up on this keyword.
            print('频繁访问，跳转了')
            self._advance_keyword()
        elif soup.find(class_='b404-box'):
            # Empty-result page for this keyword.
            print('该关键词没有找到相关的微信公众号文章')
            self._advance_keyword()
        else:
            entries = soup.find(class_='news-list').find_all(class_="txt-box")
            for position, entry in enumerate(entries, start=1):
                try:
                    # Entries without an <em> in the title did not match the
                    # query highlighting — skip them.
                    if entry.h3.em is None:
                        continue
                    item = MonitorPublicOpinionItem()
                    item['title'] = entry.h3.a.get_text(strip=True)
                    txt_info = entry.find(class_='txt-info').get_text()
                    # NOTE(review): relevance filter is hard-coded to '因赛'
                    # instead of self.keyword — confirm this is intentional.
                    if txt_info.find('因赛') == -1:
                        continue

                    sp = entry.find(class_='s-p')
                    item['href'] = ("http://web.idea.3xy.me/insight_crawl2018/index.php"
                                    "?g=home&m=Public&a=sogou_wx_url&query=" + item['title'])
                    # item['href'] = entry.h3.a['href']
                    item['source'] = sp.a.get_text(strip=True)
                    item['date'] = time.strftime("%Y-%m-%d", time.localtime(int(sp['t'])))
                    item['created_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    item['keyword'] = self.keyword
                    item['channel'] = channel
                    item['position'] = position
                    print(str(item['position']) + " | " + item['title'])
                    yield item
                except Exception as e:
                    # Best-effort per-entry scraping: log and keep going.
                    print(e)
                    continue
            self.page_current += 1
            if self.page_current > self.page_total:
                if self._advance_keyword():
                    print("休息" + str(sleep) + "，准备开启新的一轮..")

        # NOTE(review): this sleeps after *every* page (on top of
        # DOWNLOAD_DELAY), not only at the end of a round — confirm intended.
        time.sleep(sleep)
        # NOTE(review): meta receives a requests-style proxies dict; Scrapy's
        # HttpProxyMiddleware expects meta={'proxy': url} — verify the
        # project's middleware consumes this format.
        yield scrapy.Request(self.e_get_url(), callback=self.parse,
                             dont_filter=True, errback=self.errback,
                             meta=self.get_random_proxy())

    def change_to_date(self, timestr):
        """Convert relative Chinese time strings ('N天前', 'N小时前',
        'N月前') to a 'YYYY-MM-DD' date; other strings pass through."""
        now_time = datetime.datetime.now()
        if timestr.find('天前') > 0:
            days = timestr.rstrip('天前')
            timestr = (now_time + datetime.timedelta(days=-int(days))).strftime('%Y-%m-%d')
        if timestr.find('小时前') > 0:
            # "N hours ago" is still today.
            timestr = now_time.strftime('%Y-%m-%d')
        if timestr.find('月前') > 0:
            from dateutil.relativedelta import relativedelta
            mon = timestr.rstrip('月前')
            timestr = (now_time - relativedelta(months=+int(mon))).strftime('%Y-%m-%d')
        return timestr

    def get_random_proxy(self):
        """Pick a random working proxy from the Redis pool.

        Dead proxies are evicted both from Redis and from the local
        candidate list (previously the local list was never pruned, so an
        already-evicted proxy could be drawn and tested again).

        Returns:
            dict | None: requests-style ``{scheme: address}`` mapping, or
            None when no proxy in the pool responds.
        """
        redis_proxy_key = 'crawl2018:proxie'
        candidates = list(redis_db.smembers(redis_proxy_key))

        while candidates:
            raw = random.choice(candidates)
            proxy = raw.decode('utf-8')
            parts = proxy.split('://')
            proxies = {parts[0]: parts[1]}
            try:
                if requests.get(baseUrl, proxies=proxies, timeout=2).status_code == 200:
                    print('合法ip %s' % proxy)
                    return proxies
                print('不合法ip %s' % proxy)
            except Exception:
                # Connect error / timeout: treat as dead below.
                pass
            redis_db.srem(redis_proxy_key, raw)
            candidates.remove(raw)