# -*- coding: utf-8 -*-
# Crawl the follow list and fan list of specified users

import scrapy, re, time
from scrapy.loader import ItemLoader
from scrapy.crawler import CrawlerProcess

from sina_crawl.items import SinaUserItem


class SpecialUserSpider(scrapy.Spider):
    """Crawl the follow list and fan list of specified Weibo users.

    Starting from the pages in ``start_urls``, the spider collects every
    linked user id; for each id not seen before it requests the user's
    profile page (``/<id>/info``) and home page (``/u/<id>``) and yields
    one ``SinaUserItem`` per page.
    """

    name = 'special_user'
    allowed_domains = ['weibo.cn']

    # Base URL of the mobile Weibo site being crawled.
    sina_domain = 'https://weibo.cn'

    custom_settings = {
        # MongoDB collection name the pipeline stores the items in.
        'MONGODB_COLLECTION': "sina_userinfo_1"
    }

    # Seed user id(s); start_urls below targets the same account.
    weibo_id = ['5835880390']

    # User ids seen so far, used for de-duplication across pages.
    # NOTE(review): this is a class-level list shared by all instances
    # of the spider — fine for a single crawl, surprising otherwise.
    accounts = []

    start_urls = ['https://weibo.cn/5835880390/follow', 'https://weibo.cn/5835880390/fans']

    def __init__(self, *args, **kwargs):
        # Fix: forward to the base Spider initializer so scrapy spider
        # arguments keep working (the original skipped super().__init__).
        super().__init__(*args, **kwargs)
        # Session cookies used to bypass the login page.
        # NOTE(review): hard-coded credentials — they expire and should be
        # supplied via settings or spider arguments instead.
        self.cookies = {
            '_T_WM': 'bf738a8d9356b7d2815ae47846cb24f3',
            'SCF': 'AofQRr0GJIZdQHetVSoWsF3eyYvf1NxsCaLLItkswFcPSpma2_tkxXgMWynk4LYOwzs5NCQ1gfJuAp7B1HtuF-c.',
            'SSOLoginState': '1559615522',
            'SUB': '_2A25x8ahyDeRhGeVG7FcZ9yvPzT-IHXVTHcg6rDV6PUJbkdANLWXwkW1NT5gs2FtXQuPZwI0EU5_IxPNBuUc_dcKZ',
            'SUHB': '0G0ug3NabzuPWk',
            'TMPTOKEN': 'DcDyru8O5wpES1fOJiCD631BXh5DzrM4zQ85NlJGtFb9zKQoeypUboTBmVvgDmev'
        }
        # Default request headers attached to every request.
        # Fix: the key was previously spelled 'USER_AGENT', which made
        # scrapy send a literal "USER_AGENT" header, not "User-Agent".
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'referer': 'https://weibo.cn/',
            'upgrade-insecure-requests': '1',
        }

    def start_requests(self):
        """Issue the initial follow/fans list requests with the shared
        cookies and headers attached."""
        return [scrapy.Request(url=url, callback=self.parse, headers=self.headers, cookies=self.cookies)
                for url in self.start_urls]

    def parse(self, response):
        """Parse one follow/fans list page.

        Collects the user ids linked on the page, schedules profile and
        home-page requests for ids not seen before, and follows the
        "next page" link until pagination ends.

        :param response: response for a ``/<id>/follow`` or ``/<id>/fans`` page
        """
        selector = scrapy.Selector(response)
        # Extract the user id from every profile link on the page.
        # Fix: raw string for the regex (avoids the invalid '\d' escape).
        accounts = selector.xpath('//a[starts-with(@href,"https://weibo.cn/u/")]/@href').re(
            r'https://weibo.cn/u/(\d+)')
        # Keep only ids we have not crawled yet (set difference de-dups too).
        new_accounts = list(set(accounts).difference(self.accounts))
        self.accounts += new_accounts
        # For every new user, request the profile page and the home page.
        # Fix: plain loop instead of a side-effect-only list comprehension.
        for acc in new_accounts:
            yield scrapy.Request(url='%s/%s/info' % (self.sina_domain, acc),
                                 callback=self.info_parse,
                                 cookies=self.cookies, headers=self.headers)
            yield scrapy.Request(url='%s/u/%s' % (self.sina_domain, acc),
                                 callback=self.weibo_info_parse,
                                 cookies=self.cookies, headers=self.headers)

        # Follow pagination while a "next page" link exists.
        next_link = selector.xpath('//div[@class="pa"]/form/div/a/@href').extract()
        if next_link:
            url = self.sina_domain + next_link[0]
            # Fix: stray debug print() replaced with the spider logger.
            self.logger.debug('next page: %s', url)
            yield scrapy.Request(url=url, callback=self.parse,
                                 cookies=self.cookies, headers=self.headers)
        else:
            # Last page reached; log which user's list just finished.
            match = re.findall(self.sina_domain + r'/(\d+)/.*', response.url)
            user_id = match[0] if match else ''
            if response.url.find('follow') > 0:
                self.logger.info(u'[%s] 关注人已爬取完毕！' % user_id)
            else:
                self.logger.info(u'[%s] 粉丝已爬取完毕！' % user_id)

    def weibo_info_parse(self, response):
        """Parse a user's home page (``https://weibo.cn/u/<id>``).

        Extracts the post / follow / fan counters and the user id, then
        yields a populated ``SinaUserItem``.

        :param response: response for a user's home page
        """
        load = ItemLoader(item=SinaUserItem(), response=response)
        selector = scrapy.Selector(response)
        # The tip2 bar reads like "微博[123] 关注[45] 粉丝[67]".
        # Fix: query the selector once instead of three times, and use
        # raw strings for the regexes.
        tip2 = selector.xpath('//div[@class="tip2"]')
        load.add_value('webo_num', int(tip2.re(r'微博\[(\d+)\]')[0]))
        load.add_value('follow_num', int(tip2.re(r'关注\[(\d+)\]')[0]))
        load.add_value('fans_num', int(tip2.re(r'粉丝\[(\d+)\]')[0]))
        # User id embedded in the bar's 4th link ("/at/weibo?uid=<id>").
        re_url = tip2.xpath('./a[4]/@href').re(r'/at/weibo\?uid=(\d+)')
        load.add_value('user_id', re_url[0] if re_url else '')
        # Crawl timestamp (local time); strftime defaults to localtime(now).
        load.add_value('crawl_time', time.strftime('%Y-%m-%d %H:%M:%S'))
        # Hand the item to the next pipeline.
        yield load.load_item()

    def info_parse(self, response):
        """Parse a user's profile page (``https://weibo.cn/<id>/info``).

        Profile lines look like "<label>:<value>"; each known label is
        mapped to its item field, later lines overriding earlier ones,
        and a populated ``SinaUserItem`` is yielded.

        :param response: response for a user's profile page
        """
        load = ItemLoader(item=SinaUserItem(), response=response)
        selector = scrapy.Selector(response)
        # Extract the user id from the URL itself.
        re_url = re.findall(self.sina_domain + r'/(.*)/info.*', response.url)
        load.add_value('user_id', re_url[0] if re_url else '')
        # Avatar image URL.
        load.add_xpath('head_img', '//div[@class="c"]/img[@alt="头像"]/@src')
        # Map profile-line labels to item fields.
        # Fix: table-driven extraction; each regex now runs once per line
        # instead of twice, removing seven copy-pasted conditionals.
        labels = {
            u'昵称': 'username',
            u'认证': 'identify',
            u'性别': 'gender',
            u'地区': 'district',
            u'生日': 'birthday',
            u'简介': 'brief_intro',
            u'标签': 'tag',
        }
        values = {field: '' for field in labels.values()}
        for info in selector.xpath('//div[@class="c"][3]/text()'):
            for label, field in labels.items():
                match = info.re(label + u':(.*)')
                if match:
                    values[field] = match[0]
        for field, value in values.items():
            load.add_value(field, value)
        yield load.load_item()


if __name__ == '__main__':
    # Run the spider standalone (outside of `scrapy crawl`).
    crawler_settings = {
        'USER_AGENT': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/74.0.3729.169 Safari/537.36'),
    }
    runner = CrawlerProcess(crawler_settings)
    runner.crawl(SpecialUserSpider)
    runner.start()
