import logging
import random
import re
import urllib.parse

import scrapy
from bs4 import BeautifulSoup

from facebook_crawler.items import FacebookProfileItem


class FacebookProfileSpider(scrapy.Spider):
    """Crawl a random sample of public Facebook pages and extract basic
    profile metadata (page name, pageID, username, follower count,
    cover/profile-picture URLs) into ``FacebookProfileItem`` objects.
    """

    name = "facebook_profile"

    # Pool of candidate Facebook pages; each run samples 5 of them.
    START_URL_POOL = [
        'https://www.facebook.com/bbcnewstrad',
        'https://www.facebook.com/DonaldTrump',
        'https://www.facebook.com/nogizaka46',
        'https://www.facebook.com/masa.abc',
        'https://www.facebook.com/bilibiliweb',
        'https://www.facebook.com/IamGuoJieRui',
        'https://www.facebook.com/CanalTownsGame',
        'https://www.facebook.com/ninjatribes',
        'https://www.facebook.com/PlayOverwatch',
        'https://www.facebook.com/overwatchleague',
        'https://www.facebook.com/CrashOnTheRun',
        'https://www.facebook.com/powerrangers',
        'https://www.facebook.com/Blizzard',
        'https://www.facebook.com/ClashofClans',
        'https://www.facebook.com/HayDayOfficial',
        'https://www.facebook.com/PGbiz',
        'https://www.facebook.com/DEATHLOOPgame',
        'https://www.facebook.com/PokemonMastersGame',
        'https://www.facebook.com/dota2',
        'https://www.facebook.com/xbox',
        'https://www.facebook.com/PlayGwent',
        'https://www.facebook.com/ArenaofValor',
        'https://www.facebook.com/AstracraftGame',
        'https://www.facebook.com/IdentityV',
        'https://www.facebook.com/gogcom',
        'https://www.facebook.com/PUBGMOBILE',
        'https://www.facebook.com/ChessRushOfficial',
        'https://www.facebook.com/playbiped',
        'https://www.facebook.com/Kiehls',
        'https://www.facebook.com/loreal',
        'https://www.facebook.com/lancome',
        'https://www.facebook.com/Hyundai',
        'https://www.facebook.com/gamefamitsu',
        'https://www.facebook.com/gmc',
        'https://www.facebook.com/TECNOMobile',
        'https://www.facebook.com/ToysforBob',
        'https://www.facebook.com/Marvel',
        'https://www.facebook.com/moonlightblog',
        'https://www.facebook.com/flmutineers',
        'https://www.facebook.com/TheDivisionGame',
        'https://www.facebook.com/GameSpot',
        'https://www.facebook.com/WikoWorld',
        'https://www.facebook.com/apple',
        'https://www.facebook.com/ireddeadr',
        'https://www.facebook.com/rollsroycemotorcars',
        'https://www.facebook.com/PorscheIndiaOfficial',
        'https://www.facebook.com/porsche',
        'https://www.facebook.com/MINI',
        'https://www.facebook.com/MGMiddleEast',
        'https://www.facebook.com/McLaren.Racing',
        'https://www.facebook.com/Valve',
        'https://www.facebook.com/DrMarioWorld',
        'https://www.facebook.com/Nintendo',
        'https://www.facebook.com/MarvelGames2020',
        'https://www.facebook.com/thingsfromanotherworld',
        'https://www.facebook.com/CDPROJEKTRED',
        'https://www.facebook.com/BMW',
        'https://www.facebook.com/CodeDragonBlood',
        'https://www.facebook.com/MetalRevolutionGame',
        'https://www.facebook.com/TencentGlobal',
        'https://www.facebook.com/bafta',
        'https://www.facebook.com/syncedoffplanet',
        'https://www.facebook.com/DC.DeathComing',
        'https://www.facebook.com/tencentgames',
        'https://www.facebook.com/bugatti',
        'https://www.facebook.com/Powerrangerslegacywars',
        'https://www.facebook.com/Mazda.Japan',
        'https://www.facebook.com/MahindraMarazzo',
        'https://www.facebook.com/landrover',
        'https://www.facebook.com/Honda',
        'https://www.facebook.com/ford',
        'https://www.facebook.com/BMWUSA',
        'https://www.facebook.com/audiindia',
        'https://www.facebook.com/JeepJapan',
        'https://www.facebook.com/JeepIndia',
        'https://www.facebook.com/ISUZUMahavir',
        'https://www.facebook.com/audi',
        'https://www.facebook.com/huaweimobile',
        'https://www.facebook.com/Cartier',
        'https://www.facebook.com/alfaromeousa',
        'https://www.facebook.com/infinitiglobal',
        'https://www.facebook.com/Hyundaiworldwide',
        'https://www.facebook.com/Playgendary',
        'https://www.facebook.com/toyota.aus',
        'https://www.facebook.com/TOYOTA.Global',
        'https://www.facebook.com/NissanElectric',
        'https://www.facebook.com/MazdaUSA',
        'https://www.facebook.com/subaruasia',
        'https://www.facebook.com/VolvoCarUSA',
        'https://www.facebook.com/skoda',
        'https://www.facebook.com/SkodaIndia',
        'https://www.facebook.com/Prada',
        'https://www.facebook.com/ByteDancer',
        'https://www.facebook.com/Tiffany',
        'https://www.facebook.com/Burberry',
        'https://www.facebook.com/Vertu',
        'https://www.facebook.com/uleFoneMobile',
        'https://www.facebook.com/tclmobile',
        'https://www.facebook.com/rolex',
        'https://www.facebook.com/GUCCI',
        'https://www.facebook.com/essieDE',
        'https://www.facebook.com/THOMBROWNENY',
        'https://www.facebook.com/DSQUARED2',
        'https://www.facebook.com/valentino',
        'https://www.facebook.com/CalvinKlein',
        'https://www.facebook.com/COMME.des.GARCONS.org',
        'https://www.facebook.com/Berluti',
        'https://www.facebook.com/AlexanderMcQueen',
        'https://www.facebook.com/AMQWorld.jp',
        'https://www.facebook.com/RamTrucks',
        'https://www.facebook.com/tomford',
        'https://www.facebook.com/balmainparis',
        'https://www.facebook.com/SamsungUK',
        'https://www.facebook.com/BenQClub',
    ]

    def start_requests(self):
        """Pick 5 distinct pages at random and request their en_US profile.

        FIX: the old code called ``random.choice`` 5 times, which could
        pick the same URL twice; Scrapy's dupefilter then drops the
        duplicate request, so fewer than 5 pages got crawled.
        ``random.sample`` guarantees 5 distinct picks.
        """
        for base_url in random.sample(self.START_URL_POOL, 5):
            my_url = '%s%s' % (base_url, '?locale=en_US')
            # Per-URL progress stat: 0 = requested, 1 = parsed, 2 = profile found.
            # Custom Stats and Email Notifications in Scrapy
            # http://forwardslash.tech/2017/11/16/custom-stats-in-scrapy/
            self.crawler.stats.set_value('fb_url:%s' % my_url, 0)
            yield scrapy.Request(my_url, self.parse)

    def parse(self, response):
        """Extract profile fields from a Facebook page response.

        Yields one ``FacebookProfileItem``; fields are filled only when
        their regex matches, so the item may be partially populated.
        """
        item = FacebookProfileItem()
        # Dump the full page body for offline debugging of the regexes.
        logging.info('-------------')
        logging.info(response.text)
        logging.info('-------------')
        # Mark this URL as parsed (stat value 1).
        self.crawler.stats.set_value('fb_url:%s' % response.request.url, 1)
        # Follower count, e.g. "<div>1,234 people follow this</div>".
        fw = re.findall("<div>([^>]*)people follow this</div>", response.text, re.S)
        # BUG FIX: re.findall returns a list, never None -- the old
        # "fw is not None" check always passed and fw[0] raised
        # IndexError whenever the pattern did not match.
        if fw:
            item['follow'] = int(fw[0].strip().replace(',', ''))
        soup = BeautifulSoup(response.text, "lxml")
        for tag in soup.find_all('script'):
            # Several <script> tags may carry a ServerJS payload; scan each.
            if tag.string is not None and 'ServerJS' in tag.string:
                text = tag.string
                # Cover photo: "original":..."uri":<escaped URL>
                m = re.findall(r'"original":(.*)"uri":([^}]*)', text, re.S)
                if m:  # findall returns [] (never None) on no match
                    item['cover_url'] = urllib.parse.unquote(
                        m[0][1].replace('\\', '')).replace('"', '')
                # Page identity blob: name / pageID / username / profile pic.
                m = re.findall(
                    r'"name":(.*)"pageID":(.*)"username":(.*)"usernameEditDialogProfilePictureURI":([^}]*)',
                    text, re.S)
                if m:
                    m = m[0]
                    name = re.findall(r':(.*)"(.+?)"', m[0], re.S)
                    # Guard: the inner regex may find nothing; the old code
                    # indexed name[0] unconditionally and could IndexError.
                    if name:
                        # The raw match contains \uXXXX escapes; decode them.
                        item['name'] = bytes(
                            name[0][1], encoding='utf-8').decode("unicode_escape")
                        logging.info(item['name'])
                    item['pageID'] = m[1].replace('"', '').replace(',', '').strip()
                    item['username'] = m[2].replace('"', '').replace(',', '').strip()
                    item['url'] = urllib.parse.unquote(
                        m[3].replace('\\', '')).replace('"', '')
                    # Stat value 2: full profile block successfully extracted.
                    self.crawler.stats.set_value(
                        'fb_url:%s' % response.request.url, 2)
        yield item
