import requests
from bs4 import BeautifulSoup
from lxml import etree
from requests_html import HTMLSession, UserAgent
import os
import csv

import time
import random
import re

# Pool of desktop-browser User-Agent strings used to disguise the scraper.
user_agents = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
# NOTE(review): random.choice runs once at import time, so every request made
# by this process reuses the same User-Agent string.
request_headers = {
    "User-Agent": random.choice(user_agents),
}


def get_up_info(up_str, up_name):
    """Scrape an uploader's profile page and record follower count + avatar.

    Fetches the profile, renders its JavaScript, extracts the follower count
    (strings like "12.3万") and the avatar image URL, then prints the result
    and appends one row to the module-level ``csv_writer1``.

    Args:
        up_str: Protocol-relative profile URL (e.g. "//space.bilibili.com/…").
        up_name: Display name of the uploader, written through to the CSV.
    """
    session = HTMLSession()
    try:
        full_url = "https:" + up_str  # hrefs on the video page omit the scheme
        resp = session.get(url=full_url, headers=request_headers, timeout=(5, 5))
        time.sleep(2)  # polite throttle between profile fetches
        if resp.status_code != 200:
            return
        # Follower counts are injected client-side; render JS before parsing.
        resp.html.render(sleep=3, retries=5)
        html_text = resp.html.html
        matches = re.findall(r'(\d+(?:\.\d+)?万)', html_text)
        # First "N万" occurrence is taken as the follower count; pages with
        # fewer than 10k followers show a plain number, hence the fallback.
        result = matches[0] if matches else "少于1万"
        xpath_selector = etree.HTML(html_text)
        try:
            face = xpath_selector.xpath(
                '//*[@id="app"]/div[1]/div[1]/div[2]/div[2]/div/div[1]/div/div/img/@data-src'
            )[0].strip()
        except IndexError:
            # Avatar node missing at the expected XPath (layout change?).
            face = "null"
        print(up_name, up_str, result, face)
        csv_writer1.writerow((up_name, up_str, result, face))
    finally:
        # The original leaked the session; always release its resources.
        session.close()


def get_bili_info(url_str, img):
    """Scrape one video page and write its stats to the global ``csv_writer``.

    Extracts the title, uploader(s), play/barrage/like/coin/favourite counts
    and tags, writes one CSV row, and calls ``get_up_info`` for each uploader
    found (single uploader or a joint-upload team).

    Args:
        url_str: Full https URL of the video page.
        img: Cover-image URL, written through to the CSV row.
    """
    web_data = requests.get(url=url_str, headers=request_headers)
    if web_data.status_code != 200:
        return
    html_text = web_data.text
    xpath_selector = etree.HTML(html_text)
    soup_doc = BeautifulSoup(html_text, "lxml")
    # up_num: 0 = no uploader scraped, 1 = single uploader, 2 = joint upload.
    up_num = 0
    try:
        video_name = xpath_selector.xpath('//*[@id="viewbox_report"]/h1/text()')[0].strip()
        up_name = ""
        up_cao = ""
        try:
            # Single-uploader page layout.
            up_name = xpath_selector.xpath(
                '//*[@id="app"]/div[2]/div[2]/div/div[1]/div[1]/div[2]/div[1]/div/div[1]/a[1]/text()'
            )[0].strip()
            up_str = xpath_selector.xpath(
                '//*[@id="app"]/div[2]/div[2]/div/div[1]/div[1]/div[2]/div[1]/div/div/a[1]/@href'
            )[0].strip()
            up_num = 1
            up_cao = up_name
        except IndexError:
            # Joint-upload layout: collect every collaborator's name and link.
            up_str = []
            up_cao = []
            up_team_elements = xpath_selector.xpath(
                '//*[@id="app"]/div[2]/div[2]/div/div[1]/div[1]/div/div[2]/div/div/div/a')
            for up_team_element in up_team_elements:
                if up_name != "":
                    up_name += '/'
                up_name += up_team_element.text.strip()
                up_str.append(up_team_element.get("href"))
                up_cao.append(up_team_element.text.strip())
            up_num = 2
        video_play_num = xpath_selector.xpath('//*[@id="viewbox_report"]/div/div/span[1]/text()')[0].strip()
        video_barrage_num = xpath_selector.xpath('//*[@id="viewbox_report"]/div/div/span[2]/text()')[0].strip()
        video_like_num = xpath_selector.xpath(
            '//*[@id="arc_toolbar_report"]/div[1]/div[1]/div/span/text()')[0].strip()
        video_coins_num = xpath_selector.xpath(
            '//*[@id="arc_toolbar_report"]/div[1]/div[2]/div/span/text()')[0].strip()
        video_collection_num = xpath_selector.xpath(
            '//*[@id="arc_toolbar_report"]/div[1]/div[3]/div/span/text()')[0].strip()
        # Tags appear under two different DOM shapes; collect both.
        video_tag = ""
        for video_tag_element in xpath_selector.xpath('//*[@id="v_tag"]/div/div/div/span/a/text()'):
            if video_tag != "":
                video_tag += '/'
            video_tag += video_tag_element.strip()
        for video_tags_element in soup_doc.select("#v_tag > div > div > div > a"):
            if video_tag != "":
                video_tag += '/'
            video_tag += video_tags_element.get_text().strip()
    except (IndexError, AttributeError):
        # Any missing node leaves every field as "null" (original behavior);
        # narrowed from a bare except so real bugs still surface.
        video_name = "null"
        up_name = "null"
        video_play_num = "null"
        video_barrage_num = "null"
        video_like_num = "null"
        video_coins_num = "null"
        video_collection_num = "null"
        video_tag = "null"
    if up_num == 1:
        get_up_info(up_str, up_cao)
    elif up_num == 2:
        for up_s, up_c in zip(up_str, up_cao):
            get_up_info(up_s, up_c)
    print(video_name, img, url_str, up_name, video_play_num, video_barrage_num, video_like_num,
          video_coins_num, video_collection_num, video_tag)
    csv_writer.writerow(
        (video_name, img, url_str, up_name, video_play_num, video_barrage_num, video_like_num,
         video_coins_num, video_collection_num, video_tag))


def get_bili_links(url_str):
    """Render a ranking page and scrape every hot-video link and cover image.

    Each (video URL, cover URL) pair found under ``div.img > a`` is handed to
    ``get_bili_info`` for detailed scraping.

    Args:
        url_str: Full URL of a ranking-category page.
    """
    session = HTMLSession()
    try:
        resp = session.get(url=url_str, headers=request_headers, timeout=(10, 10))
        time.sleep(5)  # polite throttle between category pages
        if resp.status_code != 200:
            return
        # The ranking list is populated by JavaScript; render before parsing.
        resp.html.render(sleep=10, retries=5)
        soup_doc = BeautifulSoup(resp.html.html, "lxml")
        link_elements = soup_doc.select("div.img>a")
        img_elements = soup_doc.select("div.img>a>img")
        for link_element, img_element in zip(link_elements, img_elements):
            hot_href = link_element.get("href").strip()
            img = img_element.get("src").strip()
            # hrefs are protocol-relative ("//www.bilibili.com/…").
            get_bili_info("https:" + hot_href, img)
    finally:
        # The original leaked the session; always release its resources.
        session.close()


if __name__ == "__main__":
    url = "https://www.bilibili.com/v/popular/rank/"
    # Every ranking category (partition) to crawl, appended to the base URL.
    webs = ["all", "guochuang", "douga", "music", "dance", "game", "knowledge",
            "tech", "sports", "car", "life", "food", "animal", "kichiku",
            "fashion", "ent", "cinephile", "origin", "rookie"]
    file_name1 = os.getcwd() + os.sep + "upinfo.csv"
    # utf-8-sig so spreadsheet apps detect the encoding of the Chinese headers.
    # Context managers replace the original manual open/close, which leaked
    # the file handles if any scrape raised.
    with open(file_name1, 'wt', newline="", encoding='utf-8-sig') as fp1:
        # csv_writer1 stays a module-level global consumed by get_up_info().
        csv_writer1 = csv.writer(fp1)
        csv_writer1.writerow(('up主', '主页链接', '粉丝数', '头像'))
        for web in webs:
            url_str = url + web
            file_name = os.getcwd() + os.sep + "{}hot100info.csv".format(web)
            with open(file_name, 'wt', newline="", encoding='utf-8-sig') as fp:
                # csv_writer is the global consumed by get_bili_info().
                csv_writer = csv.writer(fp)
                csv_writer.writerow((
                    '视频名', '图片链接', '视频链接', 'UP主', '播放量', '弹幕数', '点赞数', '投币数',
                    '收藏数', '标签'))
                get_bili_links(url_str)
            # Random pause between categories to avoid hammering the site.
            wait_time = random.uniform(1, 3)
            time.sleep(wait_time)
            print("---------------------------------------------------------------------------------")

    print("End....")