# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
from datetime import datetime
from datetime import timedelta
from bs4 import BeautifulSoup
from ..items import AdvSearchItem
import re
from ..utils.time_trans import caltime


class advSearchSpider(RedisSpider):
    """Advanced-search spider for the weibo.cn mobile site.

    Searches microblogs matching a keyword within a [stime, etime] date
    range. weibo.cn caps the number of result pages per query, so when
    pagination runs out before reaching the start date, the spider
    re-issues the search with the end date pulled back to the timestamp
    of the oldest blog seen, until the whole range is covered.

    Launch:
        scrapy crawl advSearch -a kw=xxx -a stime=20200101 -a etime=20200201
    """
    name = 'advSearch'
    allowed_domains = ['weibo.cn']

    # Per-spider settings: dedicated pipeline, short timeout, many retries
    # (weibo.cn frequently serves anti-crawl / empty pages).
    custom_settings = {
        'ITEM_PIPELINES': {
            'weibo_spiders.pipelines.AdvSearchPipeline': 200,
        },
        'DOWNLOAD_TIMEOUT': 5,
        'RETRY_TIMES': 50
    }
    kw = ''          # search keyword
    stime_str = ''   # start date, e.g. '20200123'
    etime_str = ''   # end date, e.g. '20200124'
    sort = 'time'    # 'time' (real-time) or 'hot' (popular)
    pg = 0           # page number (unused; the server drives paging)
    hasori = 1       # original posts only (baseUrl hard-codes hasori=1)
    baseUrl = 'https://weibo.cn/search/mblog?' + \
        'advancedfilter=1' + \
        '&hideSearchFrame=1' + \
        '&keyword={}' + \
        '&hasori=1' + \
        '&sort={}' + \
        '&starttime={}' + \
        '&endtime={}'

    def __init__(self, kw=None, stime=None, etime=None, *args, **kwargs):
        """Store CLI arguments (-a kw=... -a stime=... -a etime=...).

        A missing start date falls back to '20000101' (a very early
        date); a missing end date falls back to today.
        """
        super(advSearchSpider, self).__init__(*args, **kwargs)
        self.kw = kw
        # BUG FIX: the original wrapped plain assignments in
        # try/except Exception — assignments never raise, so the default
        # dates were dead code and omitting -a stime crashed later in
        # start_requests() inside strptime(None, ...).
        self.stime_str = stime if stime is not None else '20000101'
        self.etime_str = (etime if etime is not None
                          else datetime.now().strftime("%Y%m%d"))

    def start_requests(self):
        """Issue the initial advanced-search request for the full range."""
        url = self.baseUrl.format(
            self.kw, self.sort, self.stime_str, self.etime_str)
        self.startTime = datetime.strptime(
            self.stime_str, "%Y%m%d")        # configured start date
        self.c_endTime_str = self.etime_str  # end date of the current query
        yield scrapy.Request(url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse one search-result page.

        Yields one AdvSearchItem per microblog on the page, then either
        follows the '下页' (next page) link, or — when pagination is
        exhausted — narrows the end date and issues a fresh search until
        the configured start date is reached.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        # Each blog entry carries id="M_<mid>".
        blog_tags = soup.find_all(id=re.compile('M_.+'))
        item_count = 0
        for blog_tag in blog_tags:
            item = AdvSearchItem()
            # mid: strip the 'M_' prefix.
            item['mid_str'] = blog_tag['id'][2:]
            # Author link and display name.
            nk_tag = blog_tag.select_one('.nk')
            item['user_url'] = nk_tag['href']
            item['user_name'] = nk_tag.string

            # Verification type. weibo.cn only distinguishes yellow-V and
            # blue-V (m.weibo.cn is finer-grained): plain user / yellow-V
            # use -1 / 0 (same codes as the m side); blue-V uses 10 (no
            # m-side code exists). The badge image filename encodes the type.
            item['verified_type'] = -1          # plain user by default
            verify = nk_tag.parent.find(attrs={'alt': 'V'})
            if verify is not None:
                badge = verify['src'][-8:-4]
                if badge == '5338':             # yellow V
                    item['verified_type'] = 0
                elif badge == '5337':           # blue V
                    item['verified_type'] = 10

            # Blog text (leading ':' character stripped).
            item['text'] = blog_tag.select_one(
                '.ctt').get_text("", strip=True)[1:]

            # Timestamp and posting client, joined by '来自' ("from").
            datesource_str = blog_tag.select_one(
                '.ct').get_text("", strip=True)
            date_str = datesource_str.split('来自')[0]
            # Remember the last (oldest) timestamp on the page; results
            # are sorted newest-first, so this drives window narrowing.
            # (Original called caltime() twice — once just for the print.)
            self.lastTime = caltime(date_str)
            print(self.lastTime)
            item['created_at'] = self.lastTime.strftime('%Y-%m-%d %H:%M:%S')
            try:
                item['source'] = datesource_str.split('来自')[1]
            except IndexError:
                # No client segment on this blog; narrowed from the
                # original blanket `except Exception`.
                pass
            # Repost / comment / attitude counters, e.g. '转发[12]'.
            item['reposts_count'] = self._count(blog_tag, r'转发\[(\d+)\]')
            item['comments_count'] = self._count(blog_tag, r'评论\[(\d+)\]')
            item['attitudes_count'] = self._count(blog_tag, r'赞\[(\d+)\]')
            item_count += 1
            yield item
        # ----- page fully parsed; decide what to fetch next -----

        nextpage_tag = soup.find('a', text='下页')
        if nextpage_tag is not None:
            # A next-page link exists: follow it.
            next_url = response.urljoin(nextpage_tag['href'])
            yield scrapy.Request(next_url, callback=self.parse,
                                 dont_filter=True)
            return

        # No next-page link: compare dates to decide whether to stop,
        # retry, or re-query with a narrower window.
        print('【未找到下页！！】')
        print(soup)

        # BUG FIX (hoisted): a page with zero items and no next link is an
        # anti-crawl/error page. The original consulted self.lastTime
        # first, which is stale from a previous page — or unset on the
        # very first page (AttributeError) — when nothing was parsed here.
        if item_count == 0:
            print('页面异常！！重试。。。')
            yield response.request.replace(dont_filter=True)
            return

        # Results are newest-first, so the oldest timestamp seen is
        # compared against the configured start; within 5 hours counts
        # as fully covered.
        if self.lastTime < self.startTime + timedelta(hours=5):
            # BUG FIX: original concatenated str + datetime -> TypeError.
            print('[ALL FINISH!!]' +
                  self.lastTime.strftime('%Y-%m-%d %H:%M:%S'))
            return

        # Otherwise re-issue the search with the end date pulled back.
        next_etime_str = self.lastTime.strftime('%Y%m%d')
        if next_etime_str == self.c_endTime_str:
            # Next window equals the current one (no progress possible).
            if next_etime_str == self.stime_str:
                print('[FINISH!!]' + next_etime_str)
                return
            # Window unchanged but the range is not exhausted: step one
            # day back so the next query makes progress.
            next_etime_str = (
                self.lastTime - timedelta(days=1)).strftime('%Y%m%d')

        next_url = self.baseUrl.format(
            self.kw, self.sort, self.stime_str, next_etime_str)
        self.c_endTime_str = next_etime_str     # track current end date
        print('[NEW REQUEST] %s - %s' %
              (self.stime_str, self.c_endTime_str))
        yield scrapy.Request(next_url, callback=self.parse, dont_filter=True)

    @staticmethod
    def _count(blog_tag, pattern):
        """Extract the integer from a counter label like '转发[12]'.

        Factors out the repost/comment/attitude extraction that the
        original duplicated three times.
        """
        counter_tag = blog_tag.find(text=re.compile(pattern))
        return int(re.findall(r'\d+', counter_tag.string)[0])