# -*- coding: utf-8 -*-
# 无定向爬取微博用户

import scrapy, re, time
from scrapy.crawler import CrawlerProcess
from scrapy.loader import ItemLoader
from scrapy.conf import settings
from sina_crawl.utils.mongodb import MongoDbClient
from sina_crawl.items import SinaUserItem

from sina_crawl.analysis import user_analysis as analysis

# Read MongoDB connection parameters from the Scrapy project settings and
# open a single module-level client, shared by the spider below to check
# which users have already been crawled.
# NOTE(review): this connects at import time — a module-level side effect;
# consider moving it into the spider's from_crawler/__init__.
host = settings.get('MONGODB_HOST')
port = settings.get('MONGODB_PORT')
dbname = settings.get('MONGODB_DBNAME')
collection_name = settings.get('MONGODB_COLLECTION')
db = MongoDbClient(host=host, port=port, db_name=dbname, collection=collection_name)


class SinaUserSpider(scrapy.Spider):
    """Undirected Sina Weibo user spider.

    Seeds the crawl with the daily public ranking pages (star / grass /
    content / media), then walks every discovered user's follow and fans
    lists to find new users, and yields one populated ``SinaUserItem``
    per user.
    """

    # Base URL of the mobile Weibo site being crawled.
    sina_domain = 'https://weibo.cn'
    # Spider name used by `scrapy crawl`.
    name = 'sina_user'
    # Restrict the crawl to this domain.
    allowed_domains = ['weibo.cn']
    # Pending Weibo account ids whose requests have been scheduled.
    # NOTE(review): class-level mutable list — shared by all instances;
    # acceptable while only one spider runs per process.
    accounts_queue = []

    # Daily-updated ranking pages used as crawl entry points.
    start_urls = ['https://weibo.cn/pub/top?cat=star&pos=65', 'https://weibo.cn/pub/top?cat=grass&rl=0',
                  'https://weibo.cn/pub/top?cat=content&rl=0', 'https://weibo.cn/pub/top?cat=media&rl=0']

    def __init__(self, *args, **kwargs):
        # Forward standard Spider arguments (backward compatible with the
        # previous zero-argument constructor).
        super().__init__(*args, **kwargs)
        # Session cookies attached to every request so no login is needed.
        self.cookies = {
            '_T_WM': 'fc68c168a60567fcb0d58b865c5cf7fb',
            'TMPTOKEN': 'Kz9GncQThBRTpn17iaeFbOjdRd9vHiGQaE1AFk6U5uds2DMeXOU5ynRlmhsL2MNq',
            'SUB': '_2A25x6iCsDeRhGeVG7FcZ9yvPzT-IHXVTFUDkrDV6PUJbkdAKLXSgkW1NT5gs2A5TqgCcdi4EPNxFNOnVo2G3akr_',
            'SUHB': '0LcSkLiTbexIiF',
            'SCF': 'Al6oYEOIHG6uNyht6TbHJp7SLcrD339k-BxY3pGt5lGPL03gdHFPhO5TkxAu4dqS5KGeP3uTUCfpJBl6bFuO8X4.',
            'SSOLoginState': '1559122172'
        }
        # Default request headers attached to every request.
        self.headers = {
            'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'referer': 'https://weibo.cn/',
            'upgrade-insecure-requests': '1',
        }
        # Ids of users already stored in MongoDB, used to skip re-crawling.
        # Always a set so follow_fans_parse can rely on it existing.
        self.users = set()
        if settings.get('COMPARE_WITH_DB', False):
            crawled_users = db.find({}, {'user_id': 1, '_id': 0})
            self.users = {user.get('user_id') for user in crawled_users if user}
        # Refresh the aggregated statistics before the crawl starts.
        analysis.freash_analysis()

    def start_requests(self):
        """Issue the seed requests for the daily ranking pages.

        :return: generator of `scrapy.Request`
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers, cookies=self.cookies)

    def parse(self, response):
        """Extract user ids from a ranking page and schedule their pages.

        :param response: response of one `/pub/top` ranking page
        :return: generator of `scrapy.Request`
        """
        # Every "add attention" link on the page carries a user id.
        hrefs = response.xpath('//a[starts-with(@href,"/attention/add")]/@href').extract()
        uid_re = re.compile(r'/attention/add\?uid=(\d+)\&.*')
        accounts = set()
        for href in hrefs:
            found = uid_re.findall(href)
            # BUGFIX: previously an empty string was kept when the pattern
            # did not match, producing bogus requests such as `/u/`.
            if found:
                accounts.add(found[0])
        accounts = list(accounts)
        # Track the scheduled ids in the shared pending queue.
        self.accounts_queue += accounts
        # Schedule home page, fans list and follow list for each id.
        for req in self.construct_request(self._build_user_urls(accounts)):
            yield req

    def _build_user_urls(self, accounts):
        """Return home / fans / follow URLs for each of the given ids.

        :param accounts: iterable of numeric user-id strings
        :return: flat list of URLs (three per account)
        """
        urls = []
        for acc in accounts:
            urls.append(self.sina_domain + '/u/%s' % acc)
            urls.append(self.sina_domain + '/%s/fans' % acc)
            urls.append(self.sina_domain + '/%s/follow' % acc)
        return urls

    def construct_request(self, urls: list) -> list:
        """Build requests for the given URLs, choosing the right callback.

        :param urls: user home / fans / follow URLs
        :return: list of `scrapy.Request`
        """
        requests = []
        for url in urls:
            # Follow and fans list pages share one parser...
            if 'follow' in str(url) or 'fan' in str(url):
                requests.append(scrapy.Request(url=url, callback=self.follow_fans_parse, cookies=self.cookies,
                                               headers=self.headers))
            else:
                # ...everything else is a user home page.
                requests.append(
                    scrapy.Request(url=url, callback=self.base_parse, cookies=self.cookies, headers=self.headers))
        return requests

    def base_parse(self, response):
        """Parse a user's home page for basic counters, then fetch details.

        :param response: response of a `/u/<uid>` home page
        :return: generator yielding the follow-up `/info` request
        """
        load = ItemLoader(item=SinaUserItem(), response=response)
        selector = scrapy.Selector(response)
        tip_bar = selector.xpath('//div[@class="tip2"]')
        # Counters shown in the "tip2" bar: posts / following / fans.
        for field, pattern in (('webo_num', r'微博\[(\d+)\]'),
                               ('follow_num', r'关注\[(\d+)\]'),
                               ('fans_num', r'粉丝\[(\d+)\]')):
            found = tip_bar.re(pattern)
            load.add_value(field, int(found[0]) if found else 0)
        # The 4th link in the bar embeds the numeric user id.
        re_url = selector.xpath('//div[@class="tip2"]/a[4]/@href').re(r'/at/weibo\?uid=(\d+)')
        user_id = re_url[0] if re_url else ''
        load.add_value('user_id', user_id)
        # Record the crawl timestamp.
        load.add_value('crawl_time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
        # BUGFIX: guard against a missing id — previously the malformed URL
        # `https://weibo.cn//info` was requested when extraction failed.
        if not user_id:
            self.logger.warning('无法从 %s 提取用户 id' % response.url)
            return
        # Hand the partially-filled item over to the detail-page parser.
        yield scrapy.Request(url=self.sina_domain + '/%s/info' % user_id, callback=self.info_parse,
                             cookies=self.cookies, headers=self.headers, meta={'item': load.load_item()})

    def follow_fans_parse(self, response):
        """Parse a follow / fans list page, discovering new user ids.

        :param response: response of a `/<uid>/follow` or `/<uid>/fans` page
        :return: generator of follow-up `scrapy.Request`
        """
        selector = scrapy.Selector(response)
        # Stop expanding while the pending queue exceeds the configured cap.
        if len(self.accounts_queue) > settings.get('ACCOUNT_QUEUE_SIZE', 100):
            self.logger.info('爬取请求队列已满，暂停构造爬取请求!')
            return
        self.logger.info('开始构造爬取请求...')
        # Every profile link on the page yields a candidate user id.
        candidates = set(selector.xpath('//a[starts-with(@href,"https://weibo.cn/u/")]/@href').re(
            r'https://weibo.cn/u/(\d+)'))
        # Drop ids whose requests are already pending.
        new_accounts = list(candidates.difference(self.accounts_queue))
        if settings.get('COMPARE_WITH_DB', False):
            # Also drop ids already stored in the database.
            new_accounts = list(set(new_accounts).difference(self.users))
            # BUGFIX: the original called `self.users.union(...)`, which
            # returns a new set and mutates nothing — its result was
            # discarded, so crawled ids were never remembered. `update`
            # mutates in place.
            self.users.update(new_accounts)
        self.accounts_queue += new_accounts
        # Schedule home page, fans list and follow list for each new id.
        for req in self.construct_request(self._build_user_urls(new_accounts)):
            yield req

        # Follow the pagination link if one exists.
        next_link = selector.xpath('//div[@class="pa"]/form/div/a/@href').extract()
        if next_link:
            url = self.sina_domain + next_link[0]
            self.logger.info('下一页：%s' % url)
            yield scrapy.Request(url=url, callback=self.follow_fans_parse, cookies=self.cookies,
                                 headers=self.headers)
        else:
            # Last page reached: log which list has been exhausted.
            found = re.compile(self.sina_domain + r'/(\d+)/.*').findall(response.url)
            user_id = found[0] if found else ''
            if 'follow' in str(response.url):
                self.logger.info(u'[%s] 关注人已爬取完毕！' % user_id)
            else:
                self.logger.info(u'[%s] 粉丝已爬取完毕！' % user_id)

    def info_parse(self, response):
        """Parse the `/info` detail page and yield the completed item.

        :param response: response of a `/<uid>/info` page; ``meta['item']``
            carries the item started by :meth:`base_parse`
        :return: generator yielding the finished ``SinaUserItem``
        """
        # Item begun by base_parse, completed here.
        item = response.meta['item']
        load = ItemLoader(item=item, response=response)
        selector = scrapy.Selector(response)
        # "label:value" lines inside the 3rd "c" block; patterns are
        # evaluated once per line (the original ran each regex twice).
        # Later occurrences of a label overwrite earlier ones, as before.
        patterns = {
            'nick_name': u'昵称:(.*)',
            'identify': u'认证:(.*)',
            'gender': u'性别:(.*)',
            'district': u'地区:(.*)',
            'birthday': u'生日:(.*)',
            'brief_intro': u'简介:(.*)',
            'tag': u'标签:(.*)',
        }
        profile = dict.fromkeys(patterns, '')
        for info in selector.xpath('//div[@class="c"][3]/text()'):
            for key, pattern in patterns.items():
                found = info.re(pattern)
                if found:
                    profile[key] = found[0]
        # The district field is "<province> <city>" when both are present.
        province, city = '', ''
        district = profile['district']
        if district:
            parts = district.split(' ')
            province = parts[0] if parts else ''
            city = parts[1] if len(parts) > 1 else ''
        # Merge the detail fields into the item.
        load.add_value('province', province)
        load.add_value('city', city)
        load.add_xpath('head_img', '//div[@class="c"]/img[@alt="头像"]/@src')
        load.add_value('username', profile['nick_name'])
        load.add_value('identify', profile['identify'])
        load.add_value('gender', profile['gender'])
        load.add_value('district', district)
        load.add_value('birthday', profile['birthday'])
        load.add_value('brief_intro', profile['brief_intro'])
        load.add_value('tag', profile['tag'])
        user_id = item.get('user_id')
        self.logger.info('[%s] 用户信息已爬取完毕！' % user_id)
        # BUGFIX: `list.remove` raises ValueError when the id is absent
        # (e.g. after a retried request); guard to keep the spider alive.
        if user_id in self.accounts_queue:
            self.accounts_queue.remove(user_id)
        yield load.load_item()


if __name__ == '__main__':
    # Run the spider standalone (outside of `scrapy crawl`).
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    crawler = CrawlerProcess(settings={'USER_AGENT': user_agent})
    crawler.crawl(SinaUserSpider)
    crawler.start()
