import scrapy
from weibo import settings
import json
import re
from weibo.items import WeiboItem,ArtilceItem
import time
from weibo.redisfilter import BloomFilter
from weibo.randomseed import Randomuser

class SinaWbSpider(scrapy.Spider):
    """Crawl Weibo (m.weibo.cn) user profiles and fan out through each
    user's follow/fan lists.

    Flow: profile JSON -> ``parse_user`` builds a :class:`WeiboItem`,
    then chains to the old desktop info page (``parse_userinfo``) for
    tags/gender, which in turn schedules the follow- and fan-list
    requests. Visited uids are recorded in a Redis-backed Bloom filter.
    """
    name = 'sn'

    # Mobile-site JSON endpoints.
    user_url = 'https://m.weibo.cn/profile/info?uid={uid}'
    follow_url = 'https://m.weibo.cn/api/container/getIndex?containerid=231051_-_followers_-_{uid}&page={page}'
    fan_url = 'https://m.weibo.cn/api/container/getIndex?containerid=231051_-_fans_-_{uid}&since_id={page}'
    weibo_url = 'https://m.weibo.cn/api/container/getIndex?containerid=230413{uid}_-_WEIBO_SECOND_PROFILE_WEIBO&page={page}'

    # Old desktop page; tags/gender are only available as HTML here.
    user_tags = 'https://weibo.cn/{uid}/info'
    # start_users = ['1669879400', '1223178222', '2970452952', '2334162530', '1291477752', '2706896955',2759751670]

    start_users = ['1890708734', '3266925912', '3291709562', '5892928892', '1902499001', '6027416124']

    def start_requests(self):
        """Seed the crawl with a single profile request.

        NOTE(review): ``start_users`` is defined but unused — only the
        hard-coded uid below is seeded. Confirm whether the list should
        be iterated here; left unchanged to preserve crawl behavior.
        """
        # ud = Randomuser()
        # uid = ud.randomuserid()
        yield scrapy.Request(url=self.user_url.format(uid='2543925962'), callback=self.parse_user, cookies=settings.COOKIE)

    # 用户的基本信息 (user's basic profile)
    def parse_user(self, response):
        """Parse the profile-info JSON into a WeiboItem, record the uid
        in the Bloom filter, and chain to the desktop info page."""
        results = json.loads(response.text)
        # 'data' can be absent (rate-limited / deleted account); guard the
        # chained lookup so it cannot raise AttributeError on None.
        result_info = (results.get('data') or {}).get('user')
        if not result_info:
            return

        uitem = WeiboItem()
        uitem['id'] = result_info.get('id')
        uitem['fans_counts'] = result_info.get('followers_count', 0)
        uitem['follow_count'] = result_info.get('follow_count', 0)
        uitem['profile_image_url'] = result_info.get('profile_image_url')  # avatar
        uitem['profile_url'] = 'https://weibo.com/u/{}'.format(uitem['id'])  # personal page url
        uitem['screen_name'] = result_info.get('screen_name')  # nickname
        uitem['description'] = result_info.get('description', '')  # bio
        uitem['verified_reason'] = result_info.get('verified_reason', '')  # verification text
        uitem['mbrank'] = result_info.get('mbrank')  # membership rank

        uid = uitem['id']
        bf = BloomFilter()
        # NOTE(review): the filter result is only logged — already-seen
        # users are still re-crawled below. Confirm whether a seen uid
        # should short-circuit instead.
        if bf.isContains(str(uid)):
            print('exists!')
        else:
            print('not exists!')
            bf.insert(str(uid))

        yield scrapy.Request(url=self.user_tags.format(uid=uid), callback=self.parse_userinfo, cookies=settings.COOKIE, meta={'uitem': uitem, 'uid': uid})

    def parse_userinfo(self, response):
        """Scrape tags and gender from the desktop info HTML, emit the
        completed item, then schedule follow- and fan-list requests."""
        uitem = response.meta['uitem']
        userinfo_html = response.text

        # The page lists exactly three tag anchors; a profile with fewer
        # tags simply won't match and yields no tags.
        tag = re.compile(r'标签:<a .+?>(.+?)</a>&nbsp;<a .+?>(.+?)</a>&nbsp;<a .+?>(.+?)</a>&nbsp;').findall(userinfo_html)
        tags = tag[0] if tag else ''

        gender = re.compile(r'性别:(.+?)<br/>').findall(userinfo_html)
        genders = gender[0] if gender else ''

        uitem['tags'] = ' '.join(tags) if tags else ''
        uitem['genders'] = genders
        uids = response.meta['uid']

        yield uitem
        # follows
        yield scrapy.Request(url=self.follow_url.format(uid=uids, page=1), callback=self.parse_follows, meta={'uid': uids, 'page': 1})
        # fans
        yield scrapy.Request(url=self.fan_url.format(uid=uids, page=1), callback=self.parse_fans, meta={'uid': uids, 'page': 1})
        # weibo_article
        # yield scrapy.Request(url=self.weibo_url.format(uid=uids, page=1), callback=self.parse_weibo,meta={'uid': uids, 'page': 1})

    def parse_follows(self, response):
        """关注列表 — schedule a profile request for every followed user
        in the last card of the response."""
        response_html = json.loads(response.text)
        # Guard the chained lookup: 'data' may be missing entirely.
        info = (response_html.get('data') or {}).get('cards')
        if not info:
            return
        follows_info = info[-1].get('card_group')
        try:
            if follows_info:
                for follows in follows_info:
                    uid = follows['user']['id']
                    yield scrapy.Request(url=self.user_url.format(uid=uid), callback=self.parse_user, cookies=settings.COOKIE)
            # next page
            # uid = response.meta.get('uid')
            # page = response.meta.get('page')+1
            # yield scrapy.Request(url=self.follow_url.format(uid=uid, page=page), callback=self.parse_follows, cookies=settings.COOKIE, meta={'uid': uid, 'page': page})
        except (KeyError, TypeError):
            # A card without the expected 'user'/'id' structure — skip it
            # instead of swallowing every exception with a bare except.
            print('follows search nothing...')

    def parse_fans(self, response):
        """粉丝列表 — schedule a profile request for every fan in the
        last card of the response."""
        fans_response = json.loads(response.text)
        # Guard the chained lookup: 'data' may be missing entirely.
        fans = (fans_response.get('data') or {}).get('cards')
        if not fans:
            return
        fans_info = fans[-1].get('card_group')
        try:
            if fans_info:
                # 'fan' (not 'fans') so the outer list isn't shadowed.
                for fan in fans_info:
                    uid = fan['user']['id']
                    yield scrapy.Request(url=self.user_url.format(uid=uid), callback=self.parse_user, cookies=settings.COOKIE)
                # next page
                # uid = response.meta.get('uid')
                # page = response.meta.get('page') + 1
                # yield scrapy.Request(url=self.fan_url.format(uid=uid, page=page), callback=self.parse_fans, meta={'uid': uid, 'page': page})
        except (KeyError, TypeError):
            # Malformed card entry — skip rather than bare-except.
            print('search nothing...')

    # def parse_weibo(self, response):
    #     """爬微博文章"""
    #     weibo_item = ArtilceItem()
    #     results = json.loads(response.text)
    #     weibos_info = results['data']['cards']
    #     if weibos_info:
    #         for weibo_info in weibos_info:
    #             if weibo_info['card_type'] == 9:
    #                 mblog = weibo_info['mblog']
    #                 field_map = {
    #                     'id': 'id',
    #                     'created_at': 'created_at',
    #                     'reposts_count': 'reposts_count',
    #                     'comments_count': 'comments_count',
    #                     'attitudes_count': 'attitudes_count',
    #                     'text': 'text',
    #                     'source': 'source',
    #                 }
    #                 for field, attr in field_map.items():
    #                     weibo_item[field] = mblog.get(attr)
    #                     weibo_item['user'] = response.meta.get('uid')
    #                 yield weibo_item
    #         # next page
    #         page = response.meta.get('page') + 1
    #         uid = response.meta.get('uid')
    #         yield scrapy.Request(url=self.weibo_url.format(uid=uid, page=page), callback=self.parse_weibo,meta={'uid': uid, 'page': page})