{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from googleapiclient.discovery import build\n",
    "from google_auth_oauthlib.flow import InstalledAppFlow\n",
    "from google.auth.transport.requests import Request\n",
    "\n",
    "import urllib.parse as p\n",
    "import re\n",
    "import os\n",
    "import pickle\n",
    "\n",
    "SCOPES = [\"https://www.googleapis.com/auth/youtube.force-ssl\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def youtube_authenticate():\n",
    "    \"\"\"Authenticate to the YouTube Data API v3 and return a service object.\n",
    "\n",
    "    Loads cached credentials from token.pickle when present, refreshes them\n",
    "    when expired, and otherwise runs the installed-app OAuth flow using\n",
    "    credentials.json.\n",
    "    \"\"\"\n",
    "    # NOTE(security): permits OAuth over plain HTTP for the local redirect;\n",
    "    # acceptable for local development only\n",
    "    os.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\n",
    "    api_service_name = \"youtube\"\n",
    "    api_version = \"v3\"\n",
    "    client_secrets_file = \"credentials.json\"\n",
    "    creds = None\n",
    "    # the file token.pickle stores the user's access and refresh tokens, and is\n",
    "    # created automatically when the authorization flow completes for the first time\n",
    "    if os.path.exists(\"token.pickle\"):\n",
    "        with open(\"token.pickle\", \"rb\") as token:\n",
    "            creds = pickle.load(token)\n",
    "    # if there are no (valid) credentials available, let the user log in\n",
    "    if not creds or not creds.valid:\n",
    "        if creds and creds.expired and creds.refresh_token:\n",
    "            try:\n",
    "                creds.refresh(Request())\n",
    "            except Exception:\n",
    "                # the refresh token may be revoked or expired --\n",
    "                # fall back to a fresh interactive login\n",
    "                creds = None\n",
    "        if not creds or not creds.valid:\n",
    "            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, SCOPES)\n",
    "            creds = flow.run_local_server(port=0)\n",
    "        # save the credentials for the next run\n",
    "        with open(\"token.pickle\", \"wb\") as token:\n",
    "            pickle.dump(creds, token)\n",
    "\n",
    "    return build(api_service_name, api_version, credentials=creds)\n",
    "\n",
    "# authenticate to YouTube API\n",
    "youtube = youtube_authenticate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_video_id_by_url(url):\n",
    "    \"\"\"\n",
    "    Return the video ID from the video `url`.\n",
    "\n",
    "    Supports regular watch URLs (?v=...), youtu.be short links,\n",
    "    and /embed/ or /shorts/ paths.\n",
    "\n",
    "    Raises an Exception when no video ID can be extracted.\n",
    "    \"\"\"\n",
    "    # split URL parts\n",
    "    parsed_url = p.urlparse(url)\n",
    "    # regular watch URL: the ID is the `v` query parameter\n",
    "    video_id = p.parse_qs(parsed_url.query).get(\"v\")\n",
    "    if video_id:\n",
    "        return video_id[0]\n",
    "    # youtu.be short link: the ID is the whole path\n",
    "    if parsed_url.netloc.endswith(\"youtu.be\"):\n",
    "        short_id = parsed_url.path.lstrip(\"/\")\n",
    "        if short_id:\n",
    "            return short_id\n",
    "    # embed/shorts URL: the ID is the first segment after the prefix\n",
    "    for prefix in (\"/embed/\", \"/shorts/\"):\n",
    "        if parsed_url.path.startswith(prefix):\n",
    "            embedded_id = parsed_url.path[len(prefix):].split(\"/\")[0]\n",
    "            if embedded_id:\n",
    "                return embedded_id\n",
    "    raise Exception(f\"Wasn't able to parse video URL: {url}\")\n",
    "\n",
    "def get_video_details(youtube, **kwargs):\n",
    "    \"\"\"Return the videos().list response (snippet, contentDetails, statistics).\"\"\"\n",
    "    return youtube.videos().list(\n",
    "        part=\"snippet,contentDetails,statistics\",\n",
    "        **kwargs\n",
    "    ).execute()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_video_infos(video_response):\n",
    "    \"\"\"Pretty-print title, stats and duration of the first item in `video_response`.\"\"\"\n",
    "    items = video_response.get(\"items\")[0]\n",
    "    # get the snippet, statistics & content details from the video response\n",
    "    snippet         = items[\"snippet\"]\n",
    "    statistics      = items[\"statistics\"]\n",
    "    content_details = items[\"contentDetails\"]\n",
    "    # get infos from the snippet\n",
    "    channel_title = snippet[\"channelTitle\"]\n",
    "    title         = snippet[\"title\"]\n",
    "    description   = snippet[\"description\"]\n",
    "    publish_time  = snippet[\"publishedAt\"]\n",
    "    # stats can be hidden (comments/likes disabled), so use .get() with a default\n",
    "    comment_count = statistics.get(\"commentCount\", \"N/A\")\n",
    "    like_count    = statistics.get(\"likeCount\", \"N/A\")\n",
    "    view_count    = statistics.get(\"viewCount\", \"N/A\")\n",
    "    # get duration from content details\n",
    "    duration = content_details[\"duration\"]\n",
    "    # duration is ISO-8601, e.g. 'PT5H50M15S'; parse it to '5:50:15'\n",
    "    # the seconds group must be optional too: 'PT2M' has no seconds, and the\n",
    "    # old pattern failed to match it, crashing on .groups() of None\n",
    "    match = re.search(r\"PT(\\d+H)?(\\d+M)?(\\d+S)?\", duration)\n",
    "    duration_str = \"\"\n",
    "    if match:\n",
    "        for d in match.groups():\n",
    "            if d:\n",
    "                duration_str += f\"{d[:-1]}:\"\n",
    "    duration_str = duration_str.strip(\":\")\n",
    "    print(f\"\"\"\\\n",
    "    Title: {title}\n",
    "    Description: {description}\n",
    "    Channel Title: {channel_title}\n",
    "    Publish time: {publish_time}\n",
    "    Duration: {duration_str}\n",
    "    Number of comments: {comment_count}\n",
    "    Number of likes: {like_count}\n",
    "    Number of views: {view_count}\n",
    "    \"\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# example: the very first YouTube video ever uploaded\n",
    "video_url = \"https://www.youtube.com/watch?v=jNQXAC9IVRw&ab_channel=jawed\"\n",
    "# parse video ID from URL\n",
    "video_id = get_video_id_by_url(video_url)\n",
    "# make API call to get video info\n",
    "response = get_video_details(youtube, id=video_id)\n",
    "# print extracted video infos\n",
    "print_video_infos(response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def search(youtube, **kwargs):\n",
    "    \"\"\"Call the search().list endpoint with part='snippet' plus `kwargs`.\"\"\"\n",
    "    request = youtube.search().list(\n",
    "        part=\"snippet\",\n",
    "        **kwargs\n",
    "    )\n",
    "    return request.execute()\n",
    "\n",
    "# search for the query 'python' and retrieve 2 items only\n",
    "response = search(youtube, q=\"python\", maxResults=2)\n",
    "items = response.get(\"items\")\n",
    "for result in items:\n",
    "    # each search result carries its video ID under the `id` object\n",
    "    details = get_video_details(youtube, id=result[\"id\"][\"videoId\"])\n",
    "    # show the details of this result\n",
    "    print_video_infos(details)\n",
    "    print(\"=\" * 50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_channel_url(url):\n",
    "    \"\"\"\n",
    "    Parse a channel `url` and return a (method, id) tuple, where `method`\n",
    "    is 'c' (display name), 'channel' (channel ID) or 'user' (username).\n",
    "\n",
    "    Raises an Exception for unrecognized channel URL formats (previously\n",
    "    this returned None, making the caller crash with an opaque unpacking\n",
    "    TypeError).\n",
    "    \"\"\"\n",
    "    path = p.urlparse(url).path\n",
    "    last_segment = path.split(\"/\")[-1]\n",
    "    if \"/c/\" in path:\n",
    "        return \"c\", last_segment\n",
    "    elif \"/channel/\" in path:\n",
    "        return \"channel\", last_segment\n",
    "    elif \"/user/\" in path:\n",
    "        return \"user\", last_segment\n",
    "    elif \"/@\" in path:\n",
    "        # modern handle URLs (youtube.com/@name); resolve like a display name\n",
    "        return \"c\", last_segment.lstrip(\"@\")\n",
    "    raise Exception(f\"Unrecognized channel URL: {url}\")\n",
    "\n",
    "\n",
    "def get_channel_id_by_url(youtube, url):\n",
    "    \"\"\"\n",
    "    Return the channel ID for the given channel `url`.\n",
    "\n",
    "    - 'channel' URLs already contain the ID, which is returned directly\n",
    "    - 'user' URLs are resolved via channels().list(forUsername=...)\n",
    "    - 'c' / handle URLs are resolved with a search (may be inaccurate)\n",
    "\n",
    "    Raises an Exception when the channel ID cannot be determined.\n",
    "    \"\"\"\n",
    "    # parse the channel URL\n",
    "    method, identifier = parse_channel_url(url)\n",
    "    if method == \"channel\":\n",
    "        # if it's a channel ID, then just return it\n",
    "        return identifier\n",
    "    elif method == \"user\":\n",
    "        # if it's a user ID, make a request to get the channel ID\n",
    "        response = get_channel_details(youtube, forUsername=identifier)\n",
    "        items = response.get(\"items\")\n",
    "        if items:\n",
    "            return items[0].get(\"id\")\n",
    "    elif method == \"c\":\n",
    "        # if it's a channel name, search for the channel using the name\n",
    "        # may be inaccurate\n",
    "        response = search(youtube, q=identifier, maxResults=1)\n",
    "        items = response.get(\"items\")\n",
    "        if items:\n",
    "            return items[0][\"snippet\"][\"channelId\"]\n",
    "    raise Exception(f\"Cannot find ID:{identifier} with {method} method\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_channel_videos(youtube, **kwargs):\n",
    "    \"\"\"Thin wrapper over search().list used for listing a channel's videos.\"\"\"\n",
    "    request = youtube.search().list(**kwargs)\n",
    "    return request.execute()\n",
    "\n",
    "\n",
    "def get_channel_details(youtube, **kwargs):\n",
    "    \"\"\"Fetch a channel's statistics, snippet and content details.\"\"\"\n",
    "    request = youtube.channels().list(\n",
    "        part=\"statistics,snippet,contentDetails\",\n",
    "        **kwargs\n",
    "    )\n",
    "    return request.execute()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "channel_url = \"https://www.youtube.com/channel/UC8butISFwT-Wl7EV0hUK0BQ\"\n",
    "# get the channel ID from the URL\n",
    "channel_id = get_channel_id_by_url(youtube, channel_url)\n",
    "# get the channel details\n",
    "response = get_channel_details(youtube, id=channel_id)\n",
    "# extract channel infos\n",
    "snippet = response[\"items\"][0][\"snippet\"]\n",
    "statistics = response[\"items\"][0][\"statistics\"]\n",
    "# `country` is optional in the snippet; avoid a KeyError for channels without one\n",
    "channel_country = snippet.get(\"country\", \"N/A\")\n",
    "channel_description = snippet[\"description\"]\n",
    "channel_creation_date = snippet[\"publishedAt\"]\n",
    "channel_title = snippet[\"title\"]\n",
    "channel_subscriber_count = statistics[\"subscriberCount\"]\n",
    "channel_video_count = statistics[\"videoCount\"]\n",
    "channel_view_count  = statistics[\"viewCount\"]\n",
    "print(f\"\"\"\n",
    "Title: {channel_title}\n",
    "Published At: {channel_creation_date}\n",
    "Description: {channel_description}\n",
    "Country: {channel_country}\n",
    "Number of videos: {channel_video_count}\n",
    "Number of subscribers: {channel_subscriber_count}\n",
    "Total views: {channel_view_count}\n",
    "\"\"\")\n",
    "# the following is grabbing channel videos\n",
    "# number of pages you want to get\n",
    "n_pages = 2\n",
    "# counting number of videos grabbed\n",
    "n_videos = 0\n",
    "next_page_token = None\n",
    "for i in range(n_pages):\n",
    "    params = {\n",
    "        'part': 'snippet',\n",
    "        'q': '',\n",
    "        'channelId': channel_id,\n",
    "        'type': 'video',\n",
    "    }\n",
    "    if next_page_token:\n",
    "        params['pageToken'] = next_page_token\n",
    "    res = get_channel_videos(youtube, **params)\n",
    "    channel_videos = res.get(\"items\")\n",
    "    for video in channel_videos:\n",
    "        n_videos += 1\n",
    "        video_id = video[\"id\"][\"videoId\"]\n",
    "        # easily construct video URL by its ID\n",
    "        video_url = f\"https://www.youtube.com/watch?v={video_id}\"\n",
    "        video_response = get_video_details(youtube, id=video_id)\n",
    "        print(f\"================Video #{n_videos}================\")\n",
    "        # print the video details\n",
    "        print_video_infos(video_response)\n",
    "        print(f\"Video URL: {video_url}\")\n",
    "        print(\"=\"*40)\n",
    "    print(\"*\"*100)\n",
    "    # proceed to the next page, or stop when this was the last one\n",
    "    if \"nextPageToken\" in res:\n",
    "        next_page_token = res[\"nextPageToken\"]\n",
    "    else:\n",
    "        # without this break the loop would re-request the same page\n",
    "        # and print duplicate videos\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_comments(youtube, **kwargs):\n",
    "    \"\"\"Retrieve one page of top-level comment threads matching `kwargs`.\"\"\"\n",
    "    request = youtube.commentThreads().list(\n",
    "        part=\"snippet\",\n",
    "        **kwargs\n",
    "    )\n",
    "    return request.execute()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# URL can be a channel or a video, to extract comments\n",
    "url = \"https://www.youtube.com/watch?v=jNQXAC9IVRw&ab_channel=jawed\"\n",
    "# parameters to send to commentThreads API endpoint\n",
    "params = {\n",
    "    'maxResults': 2,\n",
    "    'order': 'relevance', # default is 'time' (newest)\n",
    "}\n",
    "if \"watch\" in url:\n",
    "    # that's a video\n",
    "    video_id = get_video_id_by_url(url)\n",
    "    params['videoId'] = video_id\n",
    "else:\n",
    "    # should be a channel\n",
    "    # (fixed: the youtube service object is a required first argument)\n",
    "    channel_id = get_channel_id_by_url(youtube, url)\n",
    "    params['allThreadsRelatedToChannelId'] = channel_id\n",
    "# get the first 2 pages (2 API requests)\n",
    "n_pages = 2\n",
    "for i in range(n_pages):\n",
    "    # make API call to get all comments from the channel (including posts & videos)\n",
    "    response = get_comments(youtube, **params)\n",
    "    items = response.get(\"items\")\n",
    "    # if items is empty, breakout of the loop\n",
    "    if not items:\n",
    "        break\n",
    "    for item in items:\n",
    "        # all the useful fields live on the top-level comment\n",
    "        top_comment = item[\"snippet\"][\"topLevelComment\"]\n",
    "        comment = top_comment[\"snippet\"][\"textDisplay\"]\n",
    "        updated_at = top_comment[\"snippet\"][\"updatedAt\"]\n",
    "        like_count = top_comment[\"snippet\"][\"likeCount\"]\n",
    "        comment_id = top_comment[\"id\"]\n",
    "        print(f\"\"\"\\\n",
    "        Comment: {comment}\n",
    "        Likes: {like_count}\n",
    "        Updated At: {updated_at}\n",
    "        ==================================\\\n",
    "        \"\"\")\n",
    "    if \"nextPageToken\" in response:\n",
    "        # if there is a next page\n",
    "        # add next page token to the params we pass to the function\n",
    "        params[\"pageToken\"] = response[\"nextPageToken\"]\n",
    "    else:\n",
    "        # must be end of comments!!!!\n",
    "        break\n",
    "    print(\"*\"*70)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6.6 64-bit",
   "language": "python",
   "name": "python36664bitea6884f10f474b21a2a2f022451e0d09"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.7-final"
  },
  "orig_nbformat": 2
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
