import json
import re
import scrapy
from scrapy import Request

# If there are multiple spiders, each one gets its own class here; mind the spider names.

from spider.item_search_blogger import SpiderItem, ChannelItem, SubChannelItem, SubChannelVideoItem
from spider.util import MysqlClient, transform_archive_count


class MarketIndustryoverviewSpiderChannel(scrapy.Spider):
    '''
    Crawl the full list of top-level channel categories from Bilibili's
    channel-category API and emit one ChannelItem per category.
    '''
    name = 'member_workbench_channel'

    def start_requests(self):
        # Single seed request: the channel category listing endpoint.
        yield Request(
            'https://api.bilibili.com/x/web-interface/web/channel/category/list',
            meta={},
        )

    def parse(self, response):
        # Decode the JSON body and walk every category entry,
        # e.g. {'id': 21, 'name': '...', 'channel_count': '70'}.
        payload = json.loads(response.text)
        for category in payload.get('data', {}).get('categories', []):
            item = ChannelItem()
            item['id'] = category.get('id', -1)
            item['name'] = category.get('name', '')
            item['channel_count'] = category.get('channel_count', -1)
            yield item


class MarketIndustryoverviewSpiderSubChannel(scrapy.Spider):
    '''
    Crawl every sub-channel under each Bilibili channel category.

    ``parse`` fans out one paginated request per category; ``parse2`` follows
    the pagination cursor, accumulating entries in ``meta`` until the API
    reports no further pages, then emits one SubChannelItem per sub-channel.
    '''
    name = 'member_workbench_subchannel'

    def start_requests(self):
        # Seed: the same category listing the channel spider uses.
        yield Request(
            'https://api.bilibili.com/x/web-interface/web/channel/category/list',
            meta={},
        )

    def parse(self, response):
        payload = json.loads(response.text)
        for category in payload.get('data', {}).get('categories', []):
            cid = category.get('id', -1)
            # First page of the sub-channel listing for this category
            # (empty offset = start of the cursor).
            first_page = (
                'https://api.bilibili.com/x/web-interface/web/channel/category/'
                'channel_arc/list?id={}&offset='.format(cid)
            )
            yield Request(
                first_page,
                meta={'channel_id': cid, 'archive_channels': []},
                callback=self.parse2,
            )

    def parse2(self, response):
        cid = response.meta['channel_id']
        if cid == 0:  # id 0 is the aggregate "all" pseudo-category; skip it
            return

        # Running accumulator of sub-channels collected across pages so far.
        collected = response.meta['archive_channels']

        data = json.loads(response.text).get('data', {})
        collected += data.get('archive_channels', [])

        if data.get('has_more', False):
            # More pages remain: follow the cursor, carrying the accumulator.
            next_page = (
                'https://api.bilibili.com/x/web-interface/web/channel/category/'
                'channel_arc/list?id={}&offset={}'.format(cid, data.get('offset', -1))
            )
            yield Request(
                next_page,
                meta={'channel_id': cid, 'archive_channels': collected},
                callback=self.parse2,
            )
        else:
            # Last page reached: emit one item per accumulated sub-channel.
            for entry in collected:
                item = SubChannelItem()
                item['cid'] = cid
                item['id'] = entry.get('id', -1)
                item['name'] = entry.get('name', '')
                item['cover'] = entry.get('cover', '')
                item['subscribed_count'] = entry.get('subscribed_count', -1)
                item['archive_count'] = transform_archive_count(entry.get('archive_count', -1))
                item['featured_count'] = entry.get('featured_count', -1)
                yield item


#     def parse3(self, response):
#         item = response.meta['item']
#         value = response.xpath('//span[@class="info__item"]/text()').getall()
#         value = [x for x in value if x.find('观看')>-1]
#         view_count = value[0] if value else -1
#         view_count = view_count.replace('观看', '')
#         view_count = transform_count(view_count)
#         item['view_count'] = view_count
#
#         yield item
#
#
#
class MarketIndustryoverviewSpiderVideo(scrapy.Spider):
    '''
    Crawl videos under a sub-channel. The endpoint supports three tabs
    (recently hot / most played / newest uploads via sort_type=hot/view/new);
    only the "hot" tab is currently enabled.
    '''
    name = 'member_workbench_subchannel_video'

    def start_requests(self):
        client = MysqlClient()
        # Hard-coded for the demo; corresponds to
        # https://www.bilibili.com/v/channel/24304?tab=multiple&cid=22
        subchannel_id = 178862
        row = client.select_one('SELECT *  FROM member_workbench_subchannel WHERE id={}'.format(subchannel_id))
        # NOTE(review): row[1] is later indexed with ['cid'], so index 1 is
        # presumably a dict-like column/row — confirm against MysqlClient.
        subchannel = row[1]

        # Recently hot. The other tabs use sort_type=view / sort_type=new on
        # the same endpoint with identical parameters.
        url = ('https://api.bilibili.com/x/web-interface/web/channel/multiple/list'
               '?channel_id={}&sort_type=hot&offset=&page_size=30').format(subchannel_id)
        yield Request(url, meta={'channel_id': subchannel['cid'],
                                 'subchannel_id': subchannel_id,
                                 'videolist': []})

    def parse(self, response, **kwargs):
        channel_id = response.meta['channel_id']
        subchannel_id = response.meta['subchannel_id']
        videolist = response.meta['videolist']

        content = json.loads(response.text)
        data = content.get('data', {})
        videolist += data.get('list', [])

        # Pagination (via data['has_more'] / data['offset'], re-requesting with
        # the new offset substituted into response.url) is intentionally
        # disabled: only the first page of results is emitted.

        for d in videolist:
            item = SubChannelVideoItem()
            item['cid'] = channel_id
            item['scid'] = subchannel_id
            item['id'] = d.get('id', -1)
            # Fix: string fields fall back to '' (was -1), matching the
            # conventions used by the other spiders in this file.
            item['name'] = d.get('name', '')
            item['author_name'] = d.get('author_name', '')
            item['author_id'] = d.get('author_id', -1)
            item['bvid'] = d.get('bvid', '')
            yield item


import pymysql


class MarketIndustryoverviewSpiderSubChannellogo(scrapy.Spider):
    '''
    Crawl uploader (UP) profile info: basic profile, then follower/following
    counts, then total like/play counts — chained across three requests, with
    the partially-filled item carried in request meta.
    '''
    name = 'upname'

    def start_requests(self):
        client = MysqlClient()
        row = client.select_many('SELECT author_id  FROM member_workbench_subchannel_video')
        # NOTE(review): row[2] selects a single element of the result set, yet
        # the loop below treats it as an iterable of dicts with an 'author_id'
        # key — this likely should iterate the whole result set; confirm
        # against MysqlClient.select_many.
        subchannel = row[2]
        for x in subchannel:
            author_id = x['author_id']
            headers = {'referer': 'https://space.bilibili.com/297173974?spm_id_from=333.788.b_765f7570696e666f.1'}
            url = 'https://api.bilibili.com/x/space/acc/info?mid={}&jsonp=jsonp'.format(author_id)
            # dont_filter=True bypasses Scrapy's duplicate-request filter.
            yield Request(url, headers=headers, meta={}, dont_filter=True)

    def parse(self, response):
        """Parse the profile payload; chain to the relation-stat endpoint."""
        content = json.loads(response.text)
        data = content.get('data', {})
        item = SpiderItem()
        item['mid'] = data.get('mid', -1)
        item['name'] = data.get('name', '')
        item['sex'] = data.get('sex', '')
        item['face'] = data.get('face', '')
        item['sign'] = data.get('sign', '')
        # Fix: fall back to {} (was '') in the nested lookups — ''.get(...)
        # raises AttributeError whenever 'vip' or 'label' is absent.
        item['vip'] = data.get('vip', {}).get('label', {}).get('text', '')
        # Next hop: follower and following counts.
        url = 'https://api.bilibili.com/x/relation/stat?vmid=' + str(item['mid'])
        yield Request(url=url, callback=self.parse_page1,
                      meta={'item': item})

    def parse_page1(self, response):
        """Record follow counts; chain to the upstat endpoint (needs cookies)."""
        item = response.meta['item']
        data = json.loads(response.body)['data']
        item['following'] = data.get('following', -1)
        item['follower'] = data.get('follower', -1)

        # SECURITY: hard-coded session cookies (SESSDATA, bili_jct, ...) are
        # account credentials committed to source — move them to config or
        # environment variables and rotate the exposed values.
        cookies = {
            'sid': 'd1ifenop',
            '_uuid':'DE407564-AFB5-14FC-16FE-5FBE7E0B7CEF62658infoc',
            'buvid3': 'C61F4AC3-51DB-418C-B2BF-9ECEE3FED33534768infoc',
            'CURRENT_FNVAL': '80',
            'blackside_state': '1',
            'rpdid': '|(m~Ru)lYRR0J',
            'buvid_fp': 'C61F4AC3-51DB-418C-B2BF-9ECEE3FED33534768infoc',
            'buvid_fp_plain': 'C61F4AC3-51DB-418C-B2BF-9ECEE3FED33534768infoc',
            'DedeUserID': '420238986',
            'DedeUserID__ckMd5': 'd6334e9b8ac93fb6',
            'SESSDATA': '13897202%2C1636373349%2Ce6365*51',
            'bili_jct': 'e91e631d02f1fdbe1faf8599bc8f59ef',
            'CURRENT_QUALITY': '64',
            'bp_video_offset_420238986': '531297006529662110',
            'fingerprint3': 'a5a5c59f02a362b3630de5eed749e581',
            'bp_t_offset_420238986': '534151888474144644',
            'fingerprint': '222777d313cc1b28751f7edfbddb0326',
            'fingerprint_s': 'ce3a6a980bc3febca56776c9adcc5590',
            'PVID': '3',
            'bfe_id': '1e33d9ad1cb29251013800c68af42315'
        }
        # Next hop: total like and play counts (requires an authed session).
        url = 'https://api.bilibili.com/x/space/upstat?mid=' + str(item['mid'])
        yield Request(url=url, callback=self.parse_page2, cookies=cookies,
                      meta={'item': item})

    def parse_page2(self, response):
        """Fill in total view/like counts and emit the finished item."""
        item = response.meta['item']
        data = json.loads(response.text)['data']
        item['view'] = data.get('archive', {}).get('view', -1)
        item['likes'] = data.get('likes', -1)
        yield item

