import json
import time

import pymysql
import scrapy
import numpy as np
from hot_danmaku.items import HotDanmakuItem
from scrapy.utils.project import get_project_settings


def getcids():
    """Fetch all (cid, bvid) pairs from the ``hot_list`` table.

    Connection parameters are read from the Scrapy project settings
    (DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME, DB_CHARSET).

    Returns:
        numpy.ndarray with one row per video; column 0 is cid, column 1
        is bvid. NOTE(review): numpy will coerce mixed int/str rows to a
        common (string) dtype — callers already re-stringify cid, so this
        appears intentional, but confirm.
    """
    settings = get_project_settings()

    conn = pymysql.connect(
        host=settings['DB_HOST'],
        port=settings['DB_PORT'],
        user=settings['DB_USER'],
        password=settings['DB_PASSWORD'],
        db=settings['DB_NAME'],
        charset=settings['DB_CHARSET'],
    )
    try:
        # Cursor is a context manager in pymysql; it is closed on exit.
        with conn.cursor() as cursor:
            cursor.execute("select cid, bvid from hot_list")
            cidlist = np.array(cursor.fetchall())
    finally:
        # Always release the connection, even if the query raises.
        # No commit is needed: this is a read-only SELECT.
        conn.close()
    return cidlist



class DanmakuSpider(scrapy.Spider):
    """Spider that collects danmaku (bullet comments) for Bilibili videos.

    The start URL points at the "popular videos" API, but ``parse``
    currently targets a single hard-coded video and hands its comment XML
    feed to ``parse_second``, which yields :class:`HotDanmakuItem` objects.
    """

    name = 'danmaku'
    # allowed_domains = ['www.bilibili.com']
    start_urls = ['https://api.bilibili.com/x/web-interface/popular?ps=50&pn=1']

    # Maximum number of danmaku emitted per video (was a magic number 100
    # buried in parse_second).
    max_danmaku = 100

    def parse(self, response):
        """Request the comment XML for the target video.

        NOTE(review): bvid/cid are hard-coded to one video; the earlier
        commented-out flows that iterated ``getcids()`` or the popular-list
        JSON were removed as dead code — recover them from VCS history if
        that path is needed again.
        """
        bvid = "BV1ZT4y1S7VG"
        cid = 545276599
        # Derive the comment-feed URL from cid instead of duplicating the
        # id as a string literal.
        url = f'https://comment.bilibili.com/{cid}.xml'
        yield scrapy.Request(url=url, callback=self.parse_second,
                             meta={'bvid': bvid, 'cid': cid})

    def parse_second(self, response):
        """Parse a danmaku XML feed and yield up to ``max_danmaku`` items.

        Each ``<d>`` element carries a comma-separated ``p`` attribute;
        index 0 is the in-video offset and index 4 the Unix send timestamp.
        """
        bvid = response.meta['bvid']
        cid = response.meta['cid']

        for count, danmu in enumerate(response.xpath('//d'), start=1):
            info = danmu.xpath('./@p').extract_first().split(',')
            # Format the Unix send timestamp as a readable datetime string.
            # NOTE(review): time.localtime uses the host timezone — confirm
            # that is the intended reference zone for senddate.
            senddate = time.strftime("%Y-%m-%d %H:%M:%S",
                                     time.localtime(int(info[4])))
            content = danmu.xpath('./text()').extract_first()
            yield HotDanmakuItem(bvid=bvid, cid=cid, content=content,
                                 sendtime=info[0], senddate=senddate)
            if count >= self.max_danmaku:
                break



