# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
import redis
import json
import re
from ..items import FindSonItem
import time
import requests
from scrapy import signals


# Launch command:
# scrapy crawl findSon


class FindSonSpider(RedisSpider):
    """Crawl the repost (forward) tree of a Weibo status from m.weibo.cn.

    Start URLs (detail pages of root statuses) are fed in through
    scrapy-redis.  The spider records the root status as a FindSonItem,
    then walks the repost-timeline API page by page, visiting every
    repost's detail page and emitting one item per repost so the pipeline
    can rebuild the propagation tree (``mid`` / ``pid`` / ``rootknot``).

    NOTE(review): a Redis-driven restart/geturl fallback and a manual
    ``start_requests`` used for testing were previously kept here as
    commented-out code; see VCS history if they need to be restored.
    """

    name = 'findSon'
    allowed_domains = ['m.weibo.cn']
    kw = ''      # optional keyword label supplied on the command line
    pages = 0    # page count of the repost timeline reported by the API

    # Pre-compiled patterns, hoisted out of the per-response callbacks.
    # m.weibo.cn embeds the status JSON as ``render_data = [{...}][0] ||``;
    # callbacks strip all spaces/newlines first, so the pattern has none.
    _RENDER_DATA_RE = re.compile(r'render_data=\[(.+)\]\[0\]\|\|')
    # Matches HTML tags so they can be stripped from the root status text.
    _HTML_TAG_RE = re.compile(r'<[^>]*>')

    # Per-spider pipeline / middleware overrides.
    custom_settings = {
        'ITEM_PIPELINES': {
            'weibo_spiders.pipelines.FindSonPipeline': 200,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'weibo_spiders.middlewares.CookiesMiddleware': None,
            'weibo_spiders.middlewares.ProxyMiddleware': 541,
            'weibo_spiders.middlewares.MyRetryMiddleware': 560,
        },
        # 'LOG_FILE': "./log/findson.log",
        # 'LOG_LEVEL': 'DEBUG',
        # 'LOG_STDOUT': True
    }

    def __init__(self, kw=None, *args, **kwargs):
        """``kw``: optional keyword label (``scrapy crawl findSon -a kw=...``)."""
        super(FindSonSpider, self).__init__(*args, **kwargs)
        self.kw = kw

    # ---- shared helpers --------------------------------------------------

    def _extract_status(self, response):
        """Return the ``status`` dict embedded in a detail page's render_data.

        Spaces and newlines are stripped from the whole page first (this is
        what the original extraction relied on), so the JSON — including
        ``created_at`` — comes back space-free.
        """
        compact = response.text.replace(' ', '').replace('\n', '')
        render_data = self._RENDER_DATA_RE.findall(compact)[0]
        return json.loads(render_data)['status']

    @staticmethod
    def _format_created_at(created_at):
        """Convert Weibo's space-stripped ctime (e.g. ``SunDec0112:00:00+08002019``)
        into ``YYYY-mm-dd HH:MM:SS``."""
        return time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.strptime(created_at, '%a%b%d%H:%M:%S%z%Y'))

    def _base_item(self, status):
        """Fill the FindSonItem fields shared by the root and every repost."""
        item = FindSonItem()
        item['mid'] = status['id']
        item['userid'] = status['user']['id']
        item['verified_type'] = status['user']['verified_type']
        item['created_at'] = self._format_created_at(status['created_at'])
        item['reposts_count'] = status['reposts_count']
        item['comments_count'] = status['comments_count']
        item['attitudes_count'] = status['attitudes_count']
        item['followers_count'] = status['user']['followers_count']
        item['follow_count'] = status['user']['follow_count']
        item['source'] = status['source']
        return item

    # ---- callbacks -------------------------------------------------------

    def parse(self, response):
        """Parse the root status detail page and fan out to its repost pages."""
        status = self._extract_status(response)

        item = self._base_item(status)
        item['pid'] = '0'                 # the root has no parent
        item['rootknot'] = item['mid']
        item['text'] = self._HTML_TAG_RE.sub('', status['text'])
        item['generation'] = 0

        # Nothing was reposted: emit the root item and stop here.
        if item['reposts_count'] == 0:
            yield item
            return

        # Fetch page 1 of the repost timeline through the Scrapy scheduler.
        # (The previous blocking ``requests.get`` stalled the reactor and
        # bypassed the proxy/retry middlewares.)  The root item travels in
        # ``meta`` and is emitted once the page count is known.
        yield scrapy.Request(
            'https://m.weibo.cn/api/statuses/'
            'repostTimeline?id={}&page=1'.format(item['mid']),
            callback=self._parse_repost_overview,
            meta={'item': item},
            dont_filter=True)

    def _parse_repost_overview(self, response):
        """Read the page count from timeline page 1, schedule every page,
        then emit the root item.

        ``pid == '-1'`` marks a root that has reposts but whose repost list
        came back empty (same convention as before).
        """
        item = response.meta['item']
        overview = json.loads(response.text)
        if overview['ok'] == 1:
            self.pages = overview['data']['max']
            for page in range(1, self.pages + 1):
                yield scrapy.Request(
                    'https://m.weibo.cn/api/statuses/repostTimeline?id={}&page={}'.
                    format(item['mid'], page),
                    callback=self.search_son_list,
                    dont_filter=True)
        else:
            item['pid'] = '-1'
        yield item

    def search_son_list(self, response):
        """Schedule a detail-page request for every repost on a timeline page."""
        payload = json.loads(response.body)
        if payload['ok'] != 1:
            # Empty / expired page: nothing to schedule.
            return
        for son in payload['data']['data']:
            # Yielding lets the scheduler handle the request;
            # ``engine.crawl(request, spider)`` is a deprecated internal API.
            yield scrapy.Request(
                'https://m.weibo.cn/detail/{}'.format(son['id']),
                callback=self.getinfo,
                dont_filter=True)

    def getinfo(self, response):
        """Parse one repost's detail page into a FindSonItem."""
        status = self._extract_status(response)

        item = self._base_item(status)
        item['text'] = status['raw_text']

        # pid prefix convention: '+' = parent known, '-' = parent post lost.
        # Reposts of the 2nd generation and later always contain '//@';
        # the 1st generation does not, so its parent IS the root.
        if 'pidstr' in response.text:
            item['pid'] = '+' + status['pidstr']
        else:
            pid = status['retweeted_status']['id']
            if '//@' in item['text']:
                item['pid'] = '-' + pid   # parent lost; fall back to root id
            else:
                item['pid'] = '+' + pid

        item['rootknot'] = status['retweeted_status']['id']
        item['generation'] = item['text'].count('//@') + 1
        yield item
