import json

import scrapy
from scrapy import Request

# 如果有多个爬虫，可以在这里写多个class，注意爬虫名称

from spider.item_rank_hot_video import SpiderItem, ChannelItem, subChannelItem, CommentItem, hot_video_lItem

import pymysql

class MarketIndustryoverviewSpiderComment(scrapy.Spider):
    """Crawl Bilibili comment threads for videos already stored in MySQL.

    Workflow:
      * read author ids (oid) and video ids (bvid) from the local
        ``b_video_stat.rank_hot_videodetail`` table,
      * request the first comment page for each (bvid, oid) pair,
      * follow ``data.cursor.next`` pagination until ``is_end``,
      * emit one :class:`CommentItem` per collected reply.
    """

    name = 'rank_hot_video_comment'

    def _query_column(self, sql):
        """Run *sql* against the local stats DB and return the first column
        of every row as a flat list.

        The connection/cursor are always closed, even when the query raises
        (the original two copies of this code leaked the connection on error).
        """
        conn = pymysql.connect(
            host="localhost",
            user="root",
            password="123456",
            db="b_video_stat",
            port=3306
        )
        try:
            with conn.cursor() as cur:
                cur.execute(sql)
                return [row[0] for row in cur.fetchall()]
        finally:
            conn.close()

    def get_oid(self):
        """Return every author_id stored in rank_hot_videodetail."""
        return self._query_column("SELECT author_id FROM rank_hot_videodetail;")

    def get_bvids(self):
        """Return every bvid stored in rank_hot_videodetail."""
        return self._query_column("SELECT bvid FROM rank_hot_videodetail;")

    def start_requests(self):
        """Yield the first comment-page request for every (bvid, oid) pair.

        NOTE(review): this is a full cross-product of bvids x oids.  If the
        two columns come from the same table rows they should probably be
        zipped row-wise instead -- confirm against the table layout before
        changing; the original cross-product behavior is kept here.
        """
        bvids = self.get_bvids()
        oids = self.get_oid()
        for bvid in bvids:
            for oid in oids:
                # First page starts at next=0.  BUG FIX: the original
                # template had a single '{}' fed two format args, so the
                # 'oid' query parameter was silently dropped from the URL.
                # (The jQuery callback/timestamp params of the browser URL
                # are not needed by the API and are omitted.)
                url = ('https://api.bilibili.com/x/v2/reply/main'
                       '?jsonp=jsonp&next=0&type=1&oid={}&mode=3').format(oid)
                # Some requests are rejected without a plausible Referer.
                headers = {"referer": "https://www.bilibili.com/video/{}/?p=2&spm_id_from=pageDriver".format(bvid)}
                # meta carries state down to parse(); 'replies' accumulates
                # across the pagination requests, so it starts empty here.
                yield Request(url, meta={'bvid': bvid, 'oid': oid, 'replies': []}, headers=headers)

    # BUG FIX: parse() was dedented to module level in the original file, so
    # the spider class had no parse callback at all and Scrapy would raise
    # NotImplementedError on the first response.  It is now a method.
    def parse(self, response):
        """Parse one comment page: follow pagination or emit items.

        The JSON payload carries ``data.cursor.{next,is_end}`` and
        ``data.replies`` (which may be null on empty pages).
        """
        bvid = response.meta['bvid']
        oid = response.meta['oid']
        replies = response.meta['replies']
        payload = json.loads(response.text)
        data = payload.get('data') or {}
        cursor = data.get('cursor') or {}

        next_page = cursor.get('next', -1)
        is_end = bool(cursor.get('is_end', True))

        # 'replies' can be null in the JSON, so guard with `or []`
        # (the original used a {} default, relying on list += dict quirks).
        replies += data.get('replies') or []

        if not is_end:
            # BUG FIX: the original condition was inverted -- it requested
            # the next page when is_end was True and stopped while pages
            # remained.  Also fixes the malformed referer
            # ('video/bvid=BV.../' -> 'video/BV.../').
            url = 'https://api.bilibili.com/x/v2/reply/main?jsonp=jsonp&next={}&type=1&oid={}&mode=3'.format(next_page, oid)
            headers = {"referer": "https://www.bilibili.com/video/{}/?p=2&spm_id_from=pageDriver".format(bvid)}
            yield Request(url, meta={'bvid': bvid, 'oid': oid, 'replies': replies}, headers=headers)
        else:
            for r in replies:
                # BUG FIX: one CommentItem was reused for every reply and
                # never yielded, so the pipeline received nothing; a fresh
                # item is now created and yielded per reply.
                item = CommentItem()
                item['sex'] = r.get('member', {}).get('sex')
                item['level'] = r.get('member', {}).get('level_info', {}).get('current_level')
                # BUG FIX: the reply text lives at reply['content']['message'];
                # the original read a misspelled 'conment' key under 'member'
                # and always got None.  (Assumes the standard reply schema --
                # TODO confirm against a live API response.)
                item['content'] = r.get('content', {}).get('message')
                print(item)
                yield item

# class Rank_hot_video(scrapy.Spider):
#     name='Rank_hot_video_total'
#
#      #Pn表示页数
#     #rid有点乱
#     def  start_requests(self):
#         for url in ['https://api.bilibili.com/x/web-interface/newlist?rid=24&type=0&pn=1&ps=20&jsonp=jsonp']:
#             yield Request(url, meta={})
#
# # parse方法去应答，拿到我们想要的内容
#     def parse(self, response):
#                 # str转dict
#         content = json.loads(response.text)
#                 #
#                 # 如果data和categories为空，则会报缺少key这个错，为了防止这种问题出现，要使用get方法
#                 # 如果data为空的时候返回一个空的字典,后面返回空的列表，表示data和categories都没有时也不会报错
#         categories = content.get('data', {}).get('archives', [])
#         for d in categories:
#     # 创建item
#             item = hot_video_lItem()
#     # 有些id没有值，给一个固定的值会覆盖那些有id的值，给了-1就不会出现
#             item['tid'] = d.get('tid', -1)
#             item['tname'] = d.get('tname', '')
#             item['title'] = d.get('title', '')
#             item['view'] = d.get('stat',{}).get('view') #播放
#             item['danmaku'] = d.get('stat', {}).get('danmaku') #评论
#             item['favorite'] = d.get('stat', {}).get('favorite') #收藏
#             item['coin'] = d.get('stat', {}).get('coin') #投币
#             item['like'] = d.get('stat', {}).get('like') #点赞
#             item['share'] = d.get('stat', {}).get('share') #分享
#
#
#     #"danmaku": 0,
#     #"reply": 0,
#     #"favorite": 0,
#     #"coin": 0,
#     #"share": 0,
#     #"now_rank": 0,
#     #"his_rank": 0,
#     #"like": 0,
#             print(item)






