# @Author: 唐奇才
# @Time: 2021/6/12 14:47
# @File: main.py
# @Software: PyCharm
from fakeagent import get_fake_p
from fakeagent import get_fake_ua

import requests
from lxml import etree


def test():
    """Smoke test: fetch the Baidu homepage through a random UA/proxy and print the body length."""
    response = requests.get(
        "https://www.baidu.com/",
        headers=get_fake_ua(),
        proxies=get_fake_p(),
    )
    response.encoding = 'utf-8'
    print(len(response.text))


# Annotating the return type gives IDE completion on the Response object.
def get_html(url: str, retries: int = 5) -> requests.Response:
    """GET *url* with a random UA/proxy, retrying on non-200 responses.

    Parameters:
        url     -- the page to fetch.
        retries -- remaining retry attempts (new, defaults to 5 so existing
                   callers are unaffected; the original recursed unboundedly).

    Returns:
        The Response on HTTP 200, or None once retries are exhausted.
    """
    res = requests.get(url=url, headers=get_fake_ua(), proxies=get_fake_p())
    print(res.status_code)
    if res.status_code == 200:
        return res
    if retries > 0:
        # BUG FIX: the original called get_html(url) without `return`, so
        # every caller received None whenever the first attempt was not 200.
        return get_html(url, retries - 1)
    return None


def parse(res):
    """Parse the bilibili rank page and fetch details for every listed video.

    Parameters:
        res -- a requests.Response whose body is the rank-page HTML.

    Side effects:
        Calls get_detail() once per ranked entry (which prints the record).
    """
    tree = etree.HTML(res.text)
    scores = tree.xpath('//ul[@class="rank-list"]/li//div[@class="pts"]/div/text()')
    urls = tree.xpath('//ul[@class="rank-list"]/li//div[@class="info"]/a/@href')
    # hrefs on the page are protocol-relative ("//www.bilibili.com/...").
    urls = ['https:' + u for u in urls]
    detail = tree.xpath('//div[@class="detail"]/span/text()')
    detail = [str(d).replace("\n", "").replace(" ", "") for d in detail]
    # The spans arrive as a flat list; pair them as [play count, danmaku count].
    detail = [[detail[i], detail[i + 1]] for i in range(0, len(detail), 2)]
    names = tree.xpath('//div[@class="detail"]/a//text()')
    names = [n.replace('\n', "").replace(" ", "") for n in names]
    vids = tree.xpath('//li/@data-id')  # renamed from `id`, which shadowed the builtin
    # NOTE: the original also built an `info` list here that was never used;
    # that dead code has been removed.
    for s, u, d, n, i in zip(scores, urls, detail, names, vids):
        get_detail(u, s, p=d[0], dm=d[1], name=n, id=i)


def get_detail(url, score, p, dm, name, id, remax=5):
    """Fetch one video page and print a flat record of its metadata.

    Parameters:
        url   -- video page URL.
        score -- ranking score (from the rank page).
        p     -- play count (from the rank page).
        dm    -- danmaku count (from the rank page).
        name  -- uploader name.
        id    -- the <li data-id> value. (Shadows the builtin `id`; the name
                 is kept because parse() passes it as a keyword argument.)
        remax -- remaining retries when the page content failed to load.

    Side effects:
        Prints the assembled record; retries recursively when the ops block
        is missing (usually a blocked/failed proxy fetch).
    """
    res = get_html(url)
    xpd = etree.HTML(res.text)
    fans = xpd.xpath('//div[@class="up-info_right"]//span[@class="has-charge"]//span/text()')

    oop = xpd.xpath('//div[@class="ops"]/span/text()')
    if len(oop) == 0 and remax > 0:
        print('重试的url', url)
        print('remax', remax)
        # `return` added so the retrying call's work is not silently discarded.
        return get_detail(url, score, p=p, dm=dm, name=name, id=id, remax=remax - 1)

    info = [id, url, score]

    # BUG FIX: the original did info.append(*fans), which raises TypeError
    # whenever the xpath matches more or fewer than exactly one node.
    info.append(fans[0] if fans else '-1')

    info.append(name)
    info.append(p)
    info.append(dm)

    title = xpd.xpath('//div[@id="viewbox_report"]/h1/@title')
    # BUG FIX: info.append(*title) crashed when the title attribute was
    # absent; fall back to the same '-1' placeholder used for fans.
    info.append(title[0] if title else '-1')

    oop = [str(op).replace("\n", "").replace(" ", "") for op in oop]
    info.extend(oop)

    tags = xpd.xpath('//div[@id="v_tag"]/ul/li//div//text()')
    tags = [tag.replace("\n", "").replace(" ", "") for tag in tags]
    tags = "'" + "|".join(tags) + "'"
    info.append(tags)

    detail = xpd.xpath('//div[@class="video-data"]/span/text()')
    detail = [d.replace("\n", "").replace("\xa0", "") for d in detail]
    info.extend(detail)

    dec = xpd.xpath('//div[@id="v_desc"]//text()')
    dec = ",".join(dec)
    info.append(dec)

    print(info)


def start_spider():
    """Entry point: download the all-categories rank page and parse each entry."""
    rank_page = get_html("https://www.bilibili.com/v/popular/rank/all")
    parse(rank_page)


def main():
    """Fetch one video's metadata from the web API and print the decoded JSON."""
    api_url = 'https://api.bilibili.com/x/web-interface/view?bvid=BV1xV41147Gw'
    response = get_html(api_url)
    print(response.json())


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
