#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
注释说明
"""

__author__ = 'hubert'

import os
import requests
import random
from bs4 import BeautifulSoup
import re
import json
from datetime import datetime
import csv
import threading


# Generate a randomized browser User-Agent string.
def get_ua():
    """Build a randomized Chrome-on-desktop User-Agent string.

    The Chrome major/build/patch numbers are drawn at random so repeated
    requests do not present an identical fingerprint.
    """
    major = random.randint(55, 76)
    build = random.randint(0, 3800)
    patch = random.randint(0, 140)
    platforms = [
        '(Windows NT 6.1;WOW64)',
        '(Windows NT 10.0;WOW64)',
        '(X11;Linux x86_64)',
        '(Macintosh;Intel Mac OS X 10_14_5)',
    ]
    chrome_version = 'Chrome/{}.0.{}.{}'.format(major, build, patch)
    parts = [
        "Mozilla/5.0 ",
        random.choice(platforms),
        ' AppleWebKit/537.36 ',
        '(KHTML,like Gecko) ',
        chrome_version,
        ' safari/537.36 ',
        'Edg/100.0.1185.50',
    ]
    return ''.join(parts)

# Shared HTTP request headers for all requests made by this script.
# One randomized User-Agent is chosen at import time and reused for the session.
headers = {'User-Agent': get_ua()}


# Write one row to the CSV file.
def write_csv(f, video_id, video_title, video_pic_url, video_duration, video_detail_url):
    """Append a single video-metadata row to an already-open CSV file object.

    The caller owns the file handle: it is neither opened nor closed here.
    """
    row = [video_id, video_title, video_pic_url, video_duration, video_detail_url]
    csv.writer(f).writerow(row)

# def resolve_item(item):



"""
    video_d = 
    video_title = 
    pic_url = 
"""
def save_video_info(list_items, csv_path=None):
    """Write a CSV file (header plus one row per video) from YouTube grid items.

    Parameters
    ----------
    list_items : list of dict
        Items shaped like YouTube's ``gridMovieRenderer`` JSON nodes
        (see get_movie_list for how they are obtained).
    csv_path : str, optional
        Destination file path. Defaults to the original dated file under
        the hard-coded output directory.
    """
    if csv_path is None:
        csv_path = "/Users/hubert/Documents/home/csv/youtube_moive" + "：" + datetime.now().strftime('%Y-%m-%d') + ".csv"
    # newline='' is required by the csv module; without it, Windows output
    # gains a blank line after every row.
    with open(csv_path, 'w', encoding='utf-8-sig', newline='') as f_csv:
        writer = csv.writer(f_csv)
        # Header row.
        writer.writerow(["视频ID", "视频名称", "海报地址", "视频时长", "播放页地址"])
        print(len(list_items))
        for item in list_items:
            gridMovieRenderer = item["gridMovieRenderer"]
            video_id = gridMovieRenderer["videoId"]
            video_title = gridMovieRenderer["title"]["runs"][0]["text"]
            video_pic_url = gridMovieRenderer["thumbnail"]["thumbnails"][0]["url"]
            video_duration = gridMovieRenderer["lengthText"]["accessibility"]["accessibilityData"]["label"]
            detail_url = gridMovieRenderer["navigationEndpoint"]["commandMetadata"]["webCommandMetadata"]["url"]
            video_detail_url = "https://www.youtube.com" + str(detail_url).replace("\u0026", "&")
            print("视频ID："+ str(video_id), "视频名称："+video_title, "海报地址："+video_pic_url, "视频时长："+video_duration, "播放页地址："+video_detail_url)
            # Fix: this write was previously commented out, so the output file
            # only ever contained the header row.
            writer.writerow([video_id, video_title, video_pic_url, video_duration, video_detail_url])



"""
    #compile中的正则
        1."var ytInitialData ="表示我们需要开始截取的地方
        2."(.*?)"表示中间为任意字符串
        3.";$"表示第一个；结尾的地方结束
        4."re.MULTILINE",影响^与$ 锚点匹配的位置。
          没有开关，^并且$仅在整个文本的开头和结尾处匹配。使用该开关，它们也将在换行符之前或之后匹配
        5."re.DOTALL",re.DOTALL,影响.模式可以匹配的内容。
          如果没有切换，则.匹配除换行符之外的任何字符。通过该开关，换行符也将匹配
    """
def get_movie_list():
    """Fetch YouTube's movie storefront page, extract the embedded
    ytInitialData JSON, and pass the movie grid items to save_video_info.
    """
    # Movie listing page.
    url = "https://www.youtube.com/feed/storefront?bp=kgEDCOICogUCKAU%3D"
    session = requests.Session()
    response = session.get(url, headers=headers, timeout=20)
    response.encoding = "utf-8"
    # The page embeds its data as "var ytInitialData = {...};" in a <script>.
    pattern = re.compile(r"var ytInitialData =(.*?);$", re.MULTILINE | re.DOTALL)
    soup = BeautifulSoup(response.text, 'lxml')
    script = soup.find("body").find('script', text=pattern)
    payload = json.loads(pattern.search(script.text).group(1), strict=False)
    # Walk the nested renderer structure down to the grid of movie items.
    tabs = payload["contents"]["twoColumnBrowseResultsRenderer"]["tabs"]
    sections = tabs[0]["tabRenderer"]["content"]["sectionListRenderer"]["contents"]
    shelf = sections[0]["itemSectionRenderer"]["contents"][0]["shelfRenderer"]
    items = shelf["content"]["gridRenderer"]["items"]

    save_video_info(items)
    #print(list_items[0])


"""
    title
    "url": "/watch?v=60jex4kgHcc\u0026pp=sAQB"    # 详情页地址webCommandMetadata
    
    "thumbnails": [{  # 海报地址
        "url": "https://i.ytimg.com/vi_webp/60jex4kgHcc/movieposter.webp",
        "width": 279,
        "height": 402
"""
if __name__ == '__main__':
    # A threaded, paginated crawl (10 threads, start_page = i * 20 * 4) was
    # prototyped here but is disabled; a single synchronous fetch runs instead.
    get_movie_list()

