{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "machine_shape": "hm",
      "gpuType": "A100",
      "authorship_tag": "ABX9TyPoovqqSqnso7vUVlJUoa6D",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ktynski/Marketing_Automations_Notebooks_With_GPT/blob/main/Automated_TikTok_Video_Understanding_for_Social_Media_Strategy_(public).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## An OpenAI API key and an Apify API key are required to run this script. It has only been tested on an A100 GPU, which is available through a Google Colab Pro+ subscription. It may work with less powerful GPUs such as the V100, but will likely error out. To change the runtime machine, go to Runtime --> Change Runtime Type and select the A100 GPU and High-RAM."
      ],
      "metadata": {
        "id": "pCVbhKbVXyMN"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "!pip install torch torchvision\n",
        "!pip install transformers\n",
        "!pip install opencv-python-headless\n",
        "!pip install accelerate\n",
        "!pip install sentencepiece\n",
        "!pip install omegaconf\n",
        "!pip install iopath\n",
        "!pip install timm\n",
        "!pip install decord\n",
        "!pip install gradio\n",
        "!pip install ftfy\n",
        "!pip install GPUtil\n",
        "!pip install pytorchvideo\n",
        "!pip install einops\n",
        "!pip install webdataset\n",
        "!pip install moviepy\n",
        "!pip install apify-client\n",
        "!pip install spleeter\n",
        "!pip install openai\n",
        "!pip install typer\n",
        "!pip install click==8.0.4\n",
        "!pip install pydub\n",
        "!pip install --upgrade httpx\n",
        "!pip install apify-client\n",
        "!pip install --upgrade httpx\n",
        "\n"
      ],
      "metadata": {
        "id": "OttGIWBB_uP5"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "!pip install spleeter"
      ],
      "metadata": {
        "id": "qqT6mAtKdIFs"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# You MUST now restart the runtime (Runtime --> Restart Runtime in the menu, or Ctrl+M .). This is required for the script to work from this point forward."
      ],
      "metadata": {
        "id": "Vvbohz6hXg8j"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "!curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash\n",
        "!sudo apt-get install git-lfs\n",
        "!git clone https://github.com/ktynski/MovieChat.git\n",
        "\n",
        "%cd /content/MovieChat/\n",
        "!python apply_delta.py \\\n",
        "    --base-model-identifier \"https://huggingface.co/dontito/llama-7b-hf-v0\" \\\n",
        "    --target-model-path \"ckpt/Vicuna/7B\" \\\n",
        "    --delta-repo-url \"https://huggingface.co/lmsys/vicuna-7b-delta-v0\"\n",
        "\n",
        "!mkdir -p /content/MovieChat/ckpt/pretrained_ckpt/\n",
        "!wget -O /content/MovieChat/ckpt/pretrained_ckpt/finetune-vicuna7b-v2.pth https://huggingface.co/DAMO-NLP-SG/Video-LLaMA-Series/resolve/main/finetune-vicuna7b-v2.pth\n",
        "!wget -O ckpt/pretrained_minigpt4.pth https://drive.google.com/file/d/1a4zLvaiDBr-36pasffmgpvH5P7CKmpze/view"
      ],
      "metadata": {
        "id": "XufPUyuJAHuW"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "!pip install --upgrade httpx"
      ],
      "metadata": {
        "id": "X-5iI7tutN6s"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "%cd /content/MovieChat/"
      ],
      "metadata": {
        "id": "NliwlDSPsGEt"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import concurrent.futures\n",
        "import threading\n",
        "from apify_client import ApifyClient\n",
        "import time\n",
        "\n",
        "APIFY_API_URL = 'https://api.apify.com/v2'\n",
        "ACTOR_NAME = 'mscraper/tiktok-search-autocomplete'\n",
        "APIFY_API_KEY = 'Your apify api key'\n",
        "\n",
        "client = ApifyClient(APIFY_API_KEY)\n",
        "\n",
        "# Create a semaphore with a maximum of 12 concurrent threads\n",
        "semaphore = threading.Semaphore(12)\n",
        "df = pd.DataFrame(columns=[\"search_term\", \"suggestion\", \"lang\"])\n",
        "\n",
        "def get_autocomplete(search):\n",
        "    run_input = {\n",
        "        \"proxy\": {\n",
        "            \"useApifyProxy\": True,\n",
        "            \"apifyProxyCountry\": \"US\",\n",
        "            \"apifyProxyGroups\": [\"RESIDENTIAL\"]\n",
        "        },\n",
        "        \"query\": [search]\n",
        "    }\n",
        "\n",
        "    run = client.actor(ACTOR_NAME).call(run_input=run_input)\n",
        "    run_id = run.get('id')\n",
        "\n",
        "    # Wait for the run to finish; wait_for_finish() blocks, and we retry every 5 s until it reports SUCCEEDED\n",
        "    while True:\n",
        "        details = client.run(run_id).wait_for_finish()\n",
        "        if details['status'] == 'SUCCEEDED':\n",
        "            break\n",
        "        time.sleep(5)\n",
        "\n",
        "    global df\n",
        "    suggestions = []\n",
        "\n",
        "    for item in client.dataset(details[\"defaultDatasetId\"]).iterate_items():\n",
        "        # add to dataframe\n",
        "        df = df.append({\"search_term\": search, \"suggestion\": item['content'], \"lang\": item['lang']}, ignore_index=True)\n",
        "        suggestions.append(item['content'])\n",
        "\n",
        "    return suggestions\n",
        "\n",
        "def recursive_search(search_term, level=1, max_level=3):\n",
        "    if level > max_level:\n",
        "        return\n",
        "\n",
        "    suggestions = get_autocomplete(search_term)\n",
        "\n",
        "    # Create a list to store futures\n",
        "    futures = []\n",
        "\n",
        "    with concurrent.futures.ThreadPoolExecutor() as executor:\n",
        "        for suggestion in suggestions:\n",
        "            # Use the semaphore to limit the number of active threads\n",
        "            semaphore.acquire()\n",
        "\n",
        "            try:\n",
        "                futures.append(executor.submit(recursive_search, suggestion, level + 1, max_level))\n",
        "            finally:\n",
        "                semaphore.release()\n",
        "\n",
        "        for future in concurrent.futures.as_completed(futures):\n",
        "            future.result()\n",
        "\n",
        "recursive_search('Camping', max_level=1)\n",
        "df = df.drop_duplicates(subset='suggestion')\n",
        "\n",
        "print(df)\n",
        "df.to_csv('/content/scraped_tiktok_searches.csv')"
      ],
      "metadata": {
        "id": "PXxHPhnSNBQG"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import concurrent.futures\n",
        "import threading\n",
        "from apify_client import ApifyClient\n",
        "\n",
        "APIFY_API_URL = 'https://api.apify.com/v2'\n",
        "ACTOR_NAME = 'clockworks/tiktok-scraper'\n",
        "APIFY_API_KEY = 'your apify api key'\n",
        "\n",
        "client = ApifyClient(APIFY_API_KEY)\n",
        "\n",
        "# Create a semaphore with a maximum of 4 concurrent threads\n",
        "semaphore = threading.Semaphore(4)\n",
        "\n",
        "def run_actor(hashtag, resultsPerPage):\n",
        "    run_input = {\n",
        "        \"hashtags\": [hashtag],\n",
        "        \"resultsPerPage\": resultsPerPage,\n",
        "        \"scrapeEmptyChannelInfo\": False,\n",
        "        \"shouldDownloadVideos\": True,\n",
        "        \"shouldDownloadCovers\": False,\n",
        "        \"videoKvStoreIdOrName\": \"mytiktokvideos\",\n",
        "        \"proxyConfiguration\": { \"useApifyProxy\": True },\n",
        "    }\n",
        "\n",
        "    run = client.actor(ACTOR_NAME).call(run_input=run_input)\n",
        "    results = []\n",
        "\n",
        "    for item in client.dataset(run[\"defaultDatasetId\"]).iterate_items():\n",
        "        result = item  # Store the entire response\n",
        "        result['hashtag'] = hashtag  # Add the used hashtag\n",
        "        results.append(result)\n",
        "\n",
        "    return results\n",
        "\n",
        "def concurrent_runs(df, resultsPerPage):\n",
        "    hashtags = df['suggestion'].tolist()\n",
        "    all_results = []\n",
        "\n",
        "    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n",
        "        futures = []\n",
        "        for hashtag in hashtags:\n",
        "            # Use the semaphore to limit the number of active threads\n",
        "            semaphore.acquire()\n",
        "            try:\n",
        "                futures.append(executor.submit(run_actor, hashtag, resultsPerPage))\n",
        "            finally:\n",
        "                semaphore.release()\n",
        "\n",
        "        for future in concurrent.futures.as_completed(futures):\n",
        "            all_results.extend(future.result())\n",
        "\n",
        "    return pd.DataFrame(all_results)\n",
        "\n",
        "# Set the number of videos to scrape per hashtag and the final DataFrame length\n",
        "resultsPerPage = 20\n",
        "final_df_length = 100\n",
        "\n",
        "# Drop rows with NaN values\n",
        "df = df.dropna()\n",
        "\n",
        "# Or replace NaNs with a placeholder value\n",
        "df = df.fillna('')\n",
        "\n",
        "# Then run the function\n",
        "df = df[:final_df_length]   # Limit the DataFrame to the desired length\n",
        "new_df = concurrent_runs(df, resultsPerPage)\n",
        "\n",
        "# Sort the DataFrame by the 'playCount' column in descending order\n",
        "new_df = new_df.sort_values(by='playCount', ascending=False)\n",
        "\n",
        "new_df.to_csv('/content/tiktokscrape.csv')\n",
        "print(new_df)"
      ],
      "metadata": {
        "id": "mGnencXENMkT"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "%cd /content/MovieChat/"
      ],
      "metadata": {
        "id": "3BpAzQwGg49A"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import concurrent.futures\n",
        "import openai\n",
        "import requests\n",
        "import pandas as pd\n",
        "import os\n",
        "import shutil\n",
        "import spleeter\n",
        "from spleeter.separator import Separator\n",
        "from pydub import AudioSegment\n",
        "import ast\n",
        "import threading\n",
        "\n",
        "new_df = pd.read_csv('/content/tiktokscrape.csv')\n",
        "df = new_df\n",
        "\n",
        "openai.api_key = \"your openai api key\"\n",
        "\n",
        "# Create a separator with 2 stems (vocals and accompaniment)\n",
        "separator = Separator('spleeter:2stems')\n",
        "separator_lock = threading.Lock()\n",
        "\n",
        "# Create a directory for the videos if it doesn't exist\n",
        "os.makedirs('TikToks', exist_ok=True)\n",
        "\n",
        "def download_and_transcribe(row):\n",
        "    video_url = ast.literal_eval(row['mediaUrls'])[0]\n",
        "    filename = f\"TikToks/{row['id']}.mp4\"  # Include the folder path in the filename\n",
        "\n",
        "    # Download the video\n",
        "    with requests.get(video_url, stream=True) as r:\n",
        "        r.raise_for_status()\n",
        "        with open(filename, 'wb') as f:\n",
        "            for chunk in r.iter_content(chunk_size=8192):\n",
        "                f.write(chunk)\n",
        "\n",
        "    # Convert mp4 to mp3 for audio separation\n",
        "    audio_filename = f\"TikToks/{row['id']}.mp3\"  # Include the folder path in the audio filename\n",
        "    os.system(f'ffmpeg -i {filename} -vn -ar 44100 -ac 2 -b:a 192k {audio_filename}')\n",
        "\n",
        "    # Separate the vocal from music\n",
        "    with separator_lock:\n",
        "        separator.separate_to_file(audio_filename, f'TikToks/{row[\"id\"]}')  # Include the folder path in the output path\n",
        "    os.remove(audio_filename)\n",
        "\n",
        "    # Convert wav to mp3\n",
        "    audio = AudioSegment.from_wav(f\"TikToks/{row['id']}/{row['id']}/vocals.wav\")\n",
        "    audio.export(f\"TikToks/{row['id']}/vocals.mp3\", format=\"mp3\")  # Include the folder path in the output path\n",
        "\n",
        "    # Transcribe the vocal\n",
        "    vocal_filename = f\"TikToks/{row['id']}/vocals.mp3\"  # Include the folder path in the filename\n",
        "    with open(vocal_filename, \"rb\") as vocal_file:\n",
        "        transcript = openai.Audio.translate(\"whisper-1\", vocal_file)\n",
        "\n",
        "    # Clean up the downloaded files\n",
        "    # os.remove(filename)  # Comment out this line to keep the video files\n",
        "    # shutil.rmtree(f\"TikToks/{row['id']}\")  # Include the folder path in the directory path\n",
        "\n",
        "    return (row['id'], transcript[\"text\"])\n",
        "\n",
        "\n",
        "# Create a ThreadPoolExecutor\n",
        "with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n",
        "    # Submit all tasks to the executor\n",
        "    future_to_row = {executor.submit(download_and_transcribe, row[1]): row[0] for row in df.iterrows()}\n",
        "\n",
        "    # Collect the results as they become available\n",
        "    for future in concurrent.futures.as_completed(future_to_row):\n",
        "        i = future_to_row[future]\n",
        "        try:\n",
        "            id, transcription = future.result()\n",
        "            df.loc[i, 'transcription'] = transcription\n",
        "        except Exception as exc:\n",
        "            print(f'Row {i} generated an exception: {exc}')\n",
        "\n",
        "df.to_csv('/content/transcribed.csv')"
      ],
      "metadata": {
        "id": "xCIBkj3RO8Bf"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "%cd /content/MovieChat/"
      ],
      "metadata": {
        "id": "qbpD2e73Y58v"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# The next cell is doing the heavy lifting of video understanding. It will take 2-3min per video to run. I have tested it up to 20 videos at a time, it may be able to do more, so feel free to try by editing line 483 to set the max number of videos to evaluate from your dataframe of scraped and transcribed videos."
      ],
      "metadata": {
        "id": "3enI4DHCBobq"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import gc\n",
        "import argparse\n",
        "import os\n",
        "import random\n",
        "import gc\n",
        "import pandas as pd\n",
        "import numpy as np\n",
        "import torch\n",
        "import torch.backends.cudnn as cudnn\n",
        "from MovieChat.common.config import Config\n",
        "from MovieChat.common.dist_utils import get_rank\n",
        "from MovieChat.common.registry import registry\n",
        "from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle\n",
        "import decord\n",
        "import cv2\n",
        "import time\n",
        "import subprocess\n",
        "from moviepy.editor import VideoFileClip\n",
        "from decord import VideoReader\n",
        "decord.bridge.set_bridge('torch')\n",
        "\n",
        "#%%\n",
        "# imports modules for registration\n",
        "from MovieChat.datasets.builders import *\n",
        "from MovieChat.models import *\n",
        "from MovieChat.processors import *\n",
        "from MovieChat.runners import *\n",
        "from MovieChat.tasks import *\n",
        "from moviepy.editor import*\n",
        "import os\n",
        "import random as rnd\n",
        "from transformers import StoppingCriteria, StoppingCriteriaList\n",
        "from PIL import Image\n",
        "import GPUtil\n",
        "import gradio as gr\n",
        "\n",
        "MAX_INT = 8\n",
        "N_SAMPLES = 32\n",
        "SHORT_MEMORY_Length = 10\n",
        "#%%\n",
        "def parse_args():\n",
        "    parser = argparse.ArgumentParser(description=\"Demo\")\n",
        "    parser.add_argument(\"--cfg-path\", required=True, help=\"path to configuration file.\")\n",
        "    parser.add_argument(\"--gpu-id\", type=int, default=0, help=\"specify the gpu to load the model.\")\n",
        "    parser.add_argument(\"--num-beams\", type=int, default=1)\n",
        "    parser.add_argument(\"--temperature\", type=float, default=1.0)\n",
        "    parser.add_argument(\"--text-query\", required=True, help=\"question the video\")\n",
        "    parser.add_argument(\"--video-path\", required=True, help=\"path to video file.\")\n",
        "    parser.add_argument(\"--fragment-video-path\", required=True, help=\"path to video fragment file.\")\n",
        "    parser.add_argument(\"--cur-sec\", type=int, default=2, help=\"current second\")\n",
        "    parser.add_argument(\"--cur-min\", type=int, default=15, help=\"current minute\")\n",
        "    parser.add_argument(\"--middle-video\", type=bool, default=False, help=\"whether the query refers to a timestamp in the middle of the video\")\n",
        "    parser.add_argument(\n",
        "        \"--options\",\n",
        "        nargs=\"+\",\n",
        "        help=\"override some settings in the used config, the key-value pair \"\n",
        "        \"in xxx=yyy format will be merged into config file (deprecate), \"\n",
        "        \"change to --cfg-options instead.\",\n",
        "    )\n",
        "    args = parser.parse_args()\n",
        "    return args\n",
        "\n",
        "\n",
        "def setup_seeds(config_seed):\n",
        "    seed = config_seed + get_rank()\n",
        "\n",
        "    random.seed(seed)\n",
        "    np.random.seed(seed)\n",
        "    torch.manual_seed(seed)\n",
        "\n",
        "    cudnn.benchmark = False\n",
        "    cudnn.deterministic = True\n",
        "\n",
        "class StoppingCriteriaSub(StoppingCriteria):\n",
        "\n",
        "    def __init__(self, stops=[], encounters=1):\n",
        "        super().__init__()\n",
        "        self.stops = stops\n",
        "\n",
        "    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):\n",
        "        for stop in self.stops:\n",
        "            if torch.all((stop == input_ids[0][-len(stop):])).item():\n",
        "                return True\n",
        "\n",
        "        return False\n",
        "\n",
        "\n",
        "def video_duration(filename):\n",
        "    result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n",
        "                             \"format=duration\", \"-of\",\n",
        "                             \"default=noprint_wrappers=1:nokey=1\", filename],\n",
        "                            stdout=subprocess.PIPE,\n",
        "                            stderr=subprocess.STDOUT)\n",
        "    return float(result.stdout)\n",
        "\n",
        "\n",
        "\n",
        "def capture_video(video_path, fragment_video_path, per_video_length, n_stage):\n",
        "    # Check if the video file exists\n",
        "    if not os.path.exists(video_path):\n",
        "        print(f\"Video file not found: {video_path}\")\n",
        "        return\n",
        "\n",
        "    start_time = n_stage * per_video_length\n",
        "    end_time = (n_stage+1) * per_video_length\n",
        "\n",
        "    # Check the duration of the video\n",
        "    video_clip = VideoFileClip(video_path)\n",
        "    if video_clip.duration < end_time:\n",
        "        print(f\"Video duration is less than the end time: {video_clip.duration} < {end_time}\")\n",
        "        return\n",
        "\n",
        "    print(f\"Start time: {start_time}, End time: {end_time}\")  # Debug print\n",
        "\n",
        "    # Handle exceptions during video processing\n",
        "    try:\n",
        "        video = CompositeVideoClip([video_clip.subclip(start_time,end_time)])\n",
        "        print(f\"Writing video fragment to: {fragment_video_path}\")  # Debug print\n",
        "        video.write_videofile(fragment_video_path)\n",
        "        print(\"Video fragment saved.\")  # Debug print\n",
        "    except Exception as e:\n",
        "        print(f\"Error processing video: {e}\")\n",
        "\n",
        "\n",
        "def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling=\"uniform\", return_msg = False):\n",
        "    decord.bridge.set_bridge(\"torch\")\n",
        "    vr = VideoReader(uri=video_path, height=height, width=width)\n",
        "\n",
        "    vlen = len(vr)\n",
        "    start, end = 0, vlen\n",
        "\n",
        "    n_frms = min(n_frms, vlen)\n",
        "\n",
        "    if sampling == \"uniform\":\n",
        "        indices = np.arange(start, end, vlen / n_frms).astype(int).tolist()\n",
        "    elif sampling == \"headtail\":\n",
        "        indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2))\n",
        "        indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2))\n",
        "        indices = indices_h + indices_t\n",
        "    else:\n",
        "        raise NotImplementedError\n",
        "\n",
        "    # get_batch -> T, H, W, C\n",
        "    temp_frms = vr.get_batch(indices)\n",
        "    tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms\n",
        "    frms = tensor_frms.permute(3, 0, 1, 2).float()  # (C, T, H, W)\n",
        "\n",
        "    if not return_msg:\n",
        "        return frms\n",
        "\n",
        "    fps = float(vr.get_avg_fps())\n",
        "    sec = \", \".join([str(round(f / fps, 1)) for f in indices])\n",
        "    # the leading and trailing spaces in the message below are intentional\n",
        "    msg = f\"The video contains {len(indices)} frames sampled at {sec} seconds. \"\n",
        "    return frms, msg\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "def parse_video_fragment(video_path, video_length, n_stage, n_samples):\n",
        "    start_time = n_stage * video_length / n_samples\n",
        "    end_time = (n_stage + 1) * video_length / n_samples\n",
        "    # Ensure end_time does not exceed video_length\n",
        "    end_time = min(end_time, video_length)\n",
        "    print(f'Start time: {start_time}, End time: {end_time}')\n",
        "    output_path = f'src/video_fragment/output_{n_stage}.mp4'\n",
        "    clip = VideoFileClip(video_path).subclip(start_time, end_time)\n",
        "    clip.write_videofile(output_path, audio=False)\n",
        "    return output_path\n",
        "\n",
        "class Chat:\n",
        "    def clear_long_memory(self):\n",
        "          self.model.long_memory_buffer = []\n",
        "\n",
        "\n",
        "    def __init__(self, model, vis_processor, device='cuda:0'):\n",
        "        self.device = device\n",
        "        self.output_text = \" \"\n",
        "        self.model = model\n",
        "        self.vis_processor = vis_processor\n",
        "        self.image_vis_processor = Blip2ImageEvalProcessor()\n",
        "        stop_words_ids = [torch.tensor([835]).to(self.device),\n",
        "                          torch.tensor([2277, 29937]).to(self.device)]  # '###' can be encoded in two different ways.\n",
        "        self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])\n",
        "\n",
        "    def get_context_emb(self, input_text, msg, img_list):\n",
        "\n",
        "        prompt_1 = \"You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.###Human: <Video><ImageHere></Video>\"\n",
        "        prompt_2 = input_text\n",
        "        prompt_3 = \"###Assistant:\"\n",
        "\n",
        "        prompt = prompt_1 + \" \" + prompt_2 + prompt_3\n",
        "\n",
        "        prompt_segs = prompt.split('<ImageHere>')\n",
        "        assert len(prompt_segs) == len(img_list) + 1, \"Unmatched numbers of image placeholders and images.\"\n",
        "        seg_tokens = [\n",
        "            self.model.llama_tokenizer(\n",
        "                seg, return_tensors=\"pt\", add_special_tokens=i == 0).to(self.device).input_ids\n",
        "            # only add bos to the first seg\n",
        "            for i, seg in enumerate(prompt_segs)\n",
        "        ]\n",
        "        seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens]\n",
        "\n",
        "        mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]\n",
        "        mixed_embs = torch.cat(mixed_embs, dim=1)\n",
        "        return mixed_embs\n",
        "\n",
        "    def gradio_answer(self,chatbot, chat_state):\n",
        "\n",
        "        # chatbot[-1][1] = llm_message\n",
        "        # print(chat_state.get_prompt())\n",
        "        print(chat_state)\n",
        "        import pdb;pdb.set_trace()\n",
        "        return gr.update(value=self.output_text, interactive=False),None\n",
        "\n",
        "    def answer(self, img_list, input_text, msg, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,\n",
        "            repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000):\n",
        "        embs = self.get_context_emb(input_text, msg, img_list)\n",
        "\n",
        "        current_max_len = embs.shape[1] + max_new_tokens\n",
        "        if current_max_len - max_length > 0:\n",
        "            print('Warning: The number of tokens in current conversation exceeds the max length. '\n",
        "                  'The model will not see the contexts outside the range.')\n",
        "        begin_idx = max(0, current_max_len - max_length)\n",
        "\n",
        "        embs = embs[:, begin_idx:]\n",
        "\n",
        "        outputs = self.model.llama_model.generate(\n",
        "            inputs_embeds=embs,\n",
        "            max_new_tokens=max_new_tokens,\n",
        "            stopping_criteria=self.stopping_criteria,\n",
        "            num_beams=num_beams,\n",
        "            do_sample=True,\n",
        "            min_length=min_length,\n",
        "            top_p=top_p,\n",
        "            repetition_penalty=repetition_penalty,\n",
        "            length_penalty=length_penalty,\n",
        "            temperature=temperature,\n",
        "        )\n",
        "\n",
        "        output_token = outputs[0]\n",
        "        if output_token[0] == 0:  # the model might output an unknown token <unk> at the beginning. remove it\n",
        "            output_token = output_token[1:]\n",
        "        if output_token[0] == 1:  # some users find that there is a start token <s> at the beginning. remove it\n",
        "            output_token = output_token[1:]\n",
        "        output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False)\n",
        "        output_text = output_text.split('###')[0]  # remove the stop sign '###'\n",
        "        output_text = output_text.split('Assistant:')[-1].strip()\n",
        "        return output_text, output_token.cpu().numpy()\n",
        "\n",
        "    def cal_frame(self, video_length, cur_min, cur_sec, middle_video):\n",
        "        per_frag_second = video_length / N_SAMPLES\n",
        "        if middle_video:\n",
        "            cur_seconds = cur_min * 60 + cur_sec\n",
        "            num_frames = int(cur_seconds / per_frag_second)\n",
        "            per_frame_second = per_frag_second/SHORT_MEMORY_Length\n",
        "            cur_frame = int((cur_seconds-per_frag_second*num_frames)/per_frame_second)\n",
        "            return num_frames, cur_frame\n",
        "        else:\n",
        "            cur_frame = 0\n",
        "            num_frames = int(video_length / per_frag_second)\n",
        "            return num_frames, cur_frame\n",
        "\n",
        "    def upload_video_without_audio(self, video_path, fragment_video_path, cur_min, cur_sec, cur_image, img_list, middle_video):\n",
        "        msg = \"\"\n",
        "        if isinstance(video_path, str):  # is a video path\n",
        "            ext = os.path.splitext(video_path)[-1].lower()\n",
        "            print(video_path)\n",
        "            video_length = video_duration(video_path)\n",
        "            num_frames, cur_frame = self.cal_frame(video_length, cur_min, cur_sec, middle_video)\n",
        "            if num_frames == 0:\n",
        "                video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=0, n_samples= N_SAMPLES)\n",
        "                video_fragment, msg = load_video(\n",
        "                    video_path=video_fragment,  # Use the actual video fragment path\n",
        "                    n_frms=MAX_INT,\n",
        "                    height=224,\n",
        "                    width=224,\n",
        "                    sampling =\"uniform\", return_msg = True\n",
        "                )\n",
        "                video_fragment = self.vis_processor.transform(video_fragment)\n",
        "                video_fragment = video_fragment.unsqueeze(0).to(self.device)\n",
        "\n",
        "                self.model.encode_short_memory_frame(video_fragment, cur_frame)\n",
        "            else:\n",
        "                for i in range(num_frames):\n",
        "                    print(i)\n",
        "                    video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=i, n_samples= N_SAMPLES)\n",
        "                    video_fragment, msg = load_video(\n",
        "                        video_path=video_fragment,  # Use the actual video fragment path\n",
        "                        n_frms=MAX_INT,\n",
        "                        height=224,\n",
        "                        width=224,\n",
        "                        sampling =\"uniform\", return_msg = True\n",
        "                    )\n",
        "                    video_fragment = self.vis_processor.transform(video_fragment)\n",
        "                    video_fragment = video_fragment.unsqueeze(0).to(self.device)\n",
        "\n",
        "                    if middle_video:\n",
        "                        self.model.encode_short_memory_frame(video_fragment, cur_frame)\n",
        "                    else:\n",
        "                        self.model.encode_short_memory_frame(video_fragment)\n",
        "        else:\n",
        "            raise NotImplementedError\n",
        "        video_emb, _ = self.model.encode_long_video(cur_image, middle_video)\n",
        "        img_list.append(video_emb)\n",
        "        return msg\n",
        "    def gener_infer(self, video_path, text_inputs, num_beams, temperature, libraries, minute, second):\n",
        "        \"\"\"Answer each prompt in ``text_inputs`` about the video at ``video_path``.\n",
        "\n",
        "        Grabs a snapshot frame (at the breakpoint position in \"Breakpoint\n",
        "        mode\", otherwise frame 0), encodes it together with the clip's visual\n",
        "        memory, then queries the language model once per prompt.  The last\n",
        "        reply is stored in ``self.output_text``.  ``minute``/``second`` may be\n",
        "        None and default to 0.\n",
        "        \"\"\"\n",
        "        fragment_video_path = \"src/video_fragment/output.mp4\"\n",
        "        cur_min = minute if minute is not None else 0\n",
        "        cur_sec = second if second is not None else 0\n",
        "\n",
        "        if libraries is not None:\n",
        "            cap = cv2.VideoCapture(video_path)\n",
        "\n",
        "            # Guard against an empty list before indexing libraries[0].\n",
        "            if libraries and libraries[0] == \"Breakpoint mode\":\n",
        "                fps_video = cap.get(cv2.CAP_PROP_FPS)\n",
        "                self.model.middle_video = True\n",
        "                self.model.question_minute = minute\n",
        "                self.model.question_second = second\n",
        "                # BUG FIX: the original used (60*minute + second) directly,\n",
        "                # which raises TypeError when either argument is None; use the\n",
        "                # None-safe cur_min/cur_sec computed above.\n",
        "                cur_fps = fps_video * (60*cur_min + cur_sec)\n",
        "            else:\n",
        "                cur_fps = 0\n",
        "                self.model.middle_video = False\n",
        "\n",
        "            cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps)\n",
        "            ret, frame = cap.read()\n",
        "            cap.release()  # BUG FIX: the original leaked the capture handle\n",
        "            if not ret:\n",
        "                # BUG FIX: cv2.imwrite would crash on a None frame.\n",
        "                raise RuntimeError(f\"Could not read a frame from {video_path}\")\n",
        "            temp_frame_path = 'src/output_frame/snapshot.jpg'\n",
        "\n",
        "            cv2.imwrite(temp_frame_path, frame)\n",
        "            raw_image = Image.open(temp_frame_path).convert('RGB')\n",
        "            image = self.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(self.device) # [1,3,1,224,224]\n",
        "            cur_image = self.model.encode_image(image)\n",
        "\n",
        "            img_list = []\n",
        "            msg = self.upload_video_without_audio(\n",
        "                video_path=video_path,\n",
        "                fragment_video_path=fragment_video_path,\n",
        "                cur_min=cur_min,\n",
        "                cur_sec=cur_sec,\n",
        "                cur_image = cur_image,\n",
        "                img_list=img_list,\n",
        "                middle_video = self.model.middle_video,\n",
        "                )\n",
        "\n",
        "            for text_input in text_inputs:\n",
        "                llm_message = self.answer(img_list=img_list,\n",
        "                                        input_text=text_input,\n",
        "                                        msg = msg,\n",
        "                                        num_beams=num_beams,\n",
        "                                        temperature=temperature,\n",
        "                                        max_new_tokens=300,\n",
        "                                        max_length=2000)[0]\n",
        "\n",
        "                self.output_text = llm_message\n",
        "                print(self.output_text)\n",
        "\n",
        "\n",
        "# Define the variable values inline here\n",
        "# NOTE(review): several of these (video_path, cur_min, cur_sec, question_list)\n",
        "# appear to be superseded by values defined further down in this cell —\n",
        "# confirm before relying on them.\n",
        "video_path = \"/content/MovieChat/src/examples/kt2.mp4\"\n",
        "output_directory = \"/content/MovieChat/src/outputs/\"\n",
        "cur_min = 2\n",
        "cur_sec = 15\n",
        "middle_video = False  # False => no breakpoint frame; whole-clip mode\n",
        "num_beams = 1  # beam width for LLM decoding (read as a global by process_video)\n",
        "temperature = 1.0  # sampling temperature for LLM decoding\n",
        "\n",
        "# Define your list of questions\n",
        "question_list = [\n",
        "    \"How old is the woman?\",\n",
        "    \"What is she doing?\",\n",
        "    # Add more questions as needed\n",
        "]\n",
        "\n",
        "class DummyArgs:\n",
        "    def __init__(self, cfg_path, options):\n",
        "        self.cfg_path = cfg_path\n",
        "        self.options = options\n",
        "\n",
        "args = DummyArgs(cfg_path=\"/content/MovieChat/eval_configs/MovieChat.yaml\", options=[\"model.frozen_llama_proj=False\"])\n",
        "\n",
        "# Create configuration\n",
        "config_seed = 42\n",
        "setup_seeds(config_seed)  # seed RNGs for reproducibility (setup_seeds defined earlier in the notebook)\n",
        "cfg = Config(args)\n",
        "\n",
        "# Create model and chat\n",
        "model_config = cfg.model_cfg\n",
        "model_config.device_8bit = 0  # Specify the GPU ID\n",
        "model_cls = registry.get_model_class(model_config.arch)\n",
        "model = model_cls.from_config(model_config).to('cuda:0')\n",
        "vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train\n",
        "vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)\n",
        "chat = Chat(model, vis_processor, device='cuda:0')\n",
        "# NOTE(review): these imports belong in the notebook's top import cell.\n",
        "import gc\n",
        "import pandas as pd\n",
        "\n",
        "# Define your list of questions\n",
        "# NOTE: questions_list (plural) is the list actually passed to process_video\n",
        "# in the driver loop below; question_list (singular, defined above) is unused there.\n",
        "questions_list = [\n",
        "    \"What is happening in this video? Please be detailed.\",\n",
        "    \"What is unique about this video? Please be detailed.\",\n",
        "    \"Who are the main characters or subjects in the video, and what is their relevance?\",\n",
        "    \"What emotions are being portrayed visually in the video, and how do these contribute to its appeal?\",\n",
        "    \"How does the video's pacing, editing, and visual effects contribute to its engagement?\",\n",
        "    \"What are the key visual elements (e.g., colors, backgrounds, effects) that make this video stand out?\",\n",
        "    \"Is there any specific visual cultural or trending context that makes the video particularly relevant?\",\n",
        "    \"What demographic is likely to find this video visually appealing, and why?\",\n",
        "    \"Does the video visually encourage viewer interaction (e.g., through text overlays, visual cues) and how does it do so?\",\n",
        "    \"How does the video visually connect with or reference other popular media or trends?\",\n",
        "    \"What visual marketing or promotional techniques are used in the video to increase its visibility?\",\n",
        "    \"Are there any controversial or provocative visual elements in the video, and how might they influence its popularity?\",\n",
        "    \"How does the video's length and visual structure contribute to retaining the viewer's attention?\",\n",
        "    \"Is there a clear visual call-to-action or message that encourages further engagement or sharing?\",\n",
        "]\n",
        "\n",
        "\n",
        "def process_video(video_path, question_list, chat, middle_video):\n",
        "    \"\"\"Ask every question in ``question_list`` about one video.\n",
        "\n",
        "    Grabs the frame at the video's midpoint, encodes it plus the clip's\n",
        "    visual memory via ``chat``, then queries the model per question.\n",
        "    Returns the list of answers, or [] if the video cannot be read.\n",
        "    NOTE(review): ``num_beams`` and ``temperature`` are read from module\n",
        "    globals defined earlier in the notebook.\n",
        "    \"\"\"\n",
        "    # Load video frame and encode image\n",
        "    cap = cv2.VideoCapture(video_path)\n",
        "    if not cap.isOpened():\n",
        "        print(\"Error: Video capture could not be opened.\")\n",
        "        return []\n",
        "    try:\n",
        "        fps_video = cap.get(cv2.CAP_PROP_FPS)\n",
        "        total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n",
        "        # BUG FIX: corrupt files can report 0 FPS, which crashed the original\n",
        "        # with ZeroDivisionError.\n",
        "        if not fps_video:\n",
        "            print(\"Error: Video reports zero FPS.\")\n",
        "            return []\n",
        "        video_duration_sec = total_frames / fps_video\n",
        "        cur_min = int(video_duration_sec / 2) // 60\n",
        "        cur_sec = int(video_duration_sec / 2) % 60\n",
        "        cur_fps = fps_video * (60*cur_min + cur_sec)\n",
        "        print(f\"Video FPS: {fps_video}\")\n",
        "        print(f\"Total frames: {total_frames}\")\n",
        "        print(f\"Video duration: {video_duration_sec} seconds\")\n",
        "        print(f\"Current minute: {cur_min}\")\n",
        "        print(f\"Current second: {cur_sec}\")\n",
        "        print(f\"Current FPS: {cur_fps}\")\n",
        "        cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps)\n",
        "        ret, frame = cap.read()\n",
        "        print(f\"Frame read successfully: {ret}\")\n",
        "        if not ret:\n",
        "            print(\"Error: Video frame could not be read.\")\n",
        "            return []\n",
        "    finally:\n",
        "        # BUG FIX: the original never released the capture handle.\n",
        "        cap.release()\n",
        "    temp_frame_path = 'src/output_frame/snapshot.jpg'\n",
        "    cv2.imwrite(temp_frame_path, frame)\n",
        "    raw_image = Image.open(temp_frame_path).convert('RGB')\n",
        "    image = chat.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(chat.device)\n",
        "    cur_image = chat.model.encode_image(image)\n",
        "\n",
        "    # Upload video frames without audio\n",
        "    img_list = []\n",
        "    middle_video = middle_video == 1  # coerce 1/True to a plain bool flag\n",
        "    # NOTE(review): a literal placeholder string is passed as the fragment\n",
        "    # path below — confirm upload_video_without_audio ignores it in this mode.\n",
        "    msg = chat.upload_video_without_audio(\n",
        "        video_path=video_path,\n",
        "        fragment_video_path='fragment_video_path',\n",
        "        cur_min=cur_min,\n",
        "        cur_sec=cur_sec,\n",
        "        cur_image=cur_image,\n",
        "        img_list=img_list,\n",
        "        middle_video=middle_video\n",
        "    )\n",
        "\n",
        "    # Generate inference for each question in the list\n",
        "    answers = []\n",
        "    for text_input in question_list:\n",
        "        try:\n",
        "            llm_message = chat.answer(\n",
        "                img_list=img_list,\n",
        "                input_text=text_input,\n",
        "                msg=msg,\n",
        "                num_beams=num_beams,\n",
        "                temperature=temperature,\n",
        "                max_new_tokens=300,\n",
        "                max_length=2000\n",
        "            )[0]\n",
        "            answers.append(llm_message)\n",
        "        except IndexError as e:\n",
        "            print(f\"Warning: Skipping question due to an IndexError: {e}\")\n",
        "            continue\n",
        "\n",
        "    return answers\n",
        "\n",
        "\n",
        "\n",
        "df = pd.read_csv('/content/transcribed.csv')\n",
        "df = df[:20]  # cap the batch at the first 20 videos\n",
        "\n",
        "# Iterate over the rows of the DataFrame\n",
        "for index, row in df.iterrows():\n",
        "    # Get the video filename for this row\n",
        "    video_filename = f\"/content/MovieChat/TikToks/{row['id']}.mp4\"\n",
        "    print(video_filename)\n",
        "\n",
        "    # Check if the video file exists and is not corrupt\n",
        "    if os.path.exists(video_filename):\n",
        "        cap = cv2.VideoCapture(video_filename)\n",
        "        readable = cap.isOpened()\n",
        "        # BUG FIX: release the probe handle right away; the original kept it\n",
        "        # open for the entire GPU-heavy processing of the video.\n",
        "        cap.release()\n",
        "        if readable:\n",
        "            # Run your script for this video and get the answers to the questions\n",
        "            answers = process_video(video_filename, questions_list, chat, middle_video)\n",
        "            print(f\"Answers:{answers}\")\n",
        "\n",
        "            # Clear the long memory buffer before processing the next video\n",
        "            chat.clear_long_memory()\n",
        "\n",
        "            for i, answer in enumerate(answers):\n",
        "                df.loc[index, f'Question {i+1}'] = questions_list[i]\n",
        "                df.loc[index, f'Answer {i+1}'] = answer\n",
        "        else:\n",
        "            print(f\"Video file {video_filename} is corrupt.\")\n",
        "    else:\n",
        "        print(f\"Video file {video_filename} does not exist.\")\n",
        "\n",
        "    gc.collect()\n",
        "\n",
        "# Print the final DataFrame\n",
        "print(df)\n",
        "df.to_csv(\"/content/Evaluated_Videos_Data.csv\")"
      ],
      "metadata": {
        "id": "vqQSM0qb-s4Z"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "import pandas as pd\n",
        "import openai\n",
        "from IPython.display import display, Markdown\n",
        "\n",
        "\n",
        "# SECURITY FIX: never hardcode an API key in a shared notebook; read it from\n",
        "# the environment and keep the placeholder only as a last-resort fallback.\n",
        "openai.api_key = os.environ.get(\"OPENAI_API_KEY\", \"Your openai api key\")\n",
        "df = pd.read_csv(\"/content/Evaluated_Videos_Data.csv\")\n",
        "def evaluate_dataframe(csv_file):\n",
        "    \"\"\"Run the per-row GPT evaluation over a DataFrame of video Q&A.\n",
        "\n",
        "    ``csv_file`` is (despite its historical name) the DataFrame to evaluate;\n",
        "    an 'Evaluation' column is added in place and the frame is returned.\n",
        "    NOTE(review): uses the pre-1.0 openai SDK (openai.ChatCompletion).\n",
        "    \"\"\"\n",
        "    # Function to evaluate a single row\n",
        "    def evaluate_row(row):\n",
        "        # Prepare the data for the GPT-3 model\n",
        "        batch_data = [\n",
        "            f\"Transcription: {row['transcription']}\",\n",
        "            # Add the questions and answers\n",
        "            *(f\"Question {i+1}: {row[f'Question {i+1}']}\\nAnswer {i+1}: {row[f'Answer {i+1}']}\" for i in range(14))\n",
        "        ]\n",
        "\n",
        "        # Call the GPT-3 model\n",
        "        response = openai.ChatCompletion.create(\n",
        "            model=\"gpt-3.5-turbo-16k\",\n",
        "            messages=[\n",
        "                                  {\n",
        "                      \"role\": \"system\",\n",
        "                      \"content\": \"\"\"You are a superhuman expert in viral marketing on TikTok, with unparalleled insights into content creation and audience engagement.\n",
        "                      Analyze the provided TikTok video transcripts, description, and metadata with precision and depth, focusing on:\n",
        "\n",
        "                        1. **Introduction**:\n",
        "                          - Objectives: A deep understanding of the factors that led this video to being successful and popular.\n",
        "                          - Videos Overview: Summarize the key aspects of the videos.\n",
        "                          - Scope and Methodology: Detail the approach and limitations.\n",
        "\n",
        "                        2. **Emotional Landscape**:\n",
        "                          - Emotional Spectrum: Identify and map the range of emotions.\n",
        "                          - Emotional Journey: Track the evolution of emotions.\n",
        "                          - Resonance and Impact: Assess how emotions resonate with the audience.\n",
        "                          - Questions: What techniques are used to evoke specific emotions?\n",
        "\n",
        "                        3. **Content and Thematic Depth**:\n",
        "                          - Themes and Symbols: Uncover underlying meanings.\n",
        "                          - Genre and Style: Classify and analyze the stylistic choices.\n",
        "                          - Storytelling Techniques: Explore narrative structures and devices.\n",
        "                          - Questions: How are central conflicts or tensions expressed?\n",
        "\n",
        "                        4. **Visual and Auditory Mastery**:\n",
        "                          - Visual Language: Analyze visual cues, composition, and effects.\n",
        "                          - Sound Design: Examine the use of music, sound effects, and silence.\n",
        "                          - Synchronization: Evaluate the harmony between visuals and audio.\n",
        "                          - Questions: What visual or auditory motifs recur?\n",
        "\n",
        "                        5. **Engagement Mechanics**:\n",
        "                          - Engagement Blueprint: Analyze how content promotes interaction.\n",
        "                          - Community Dynamics: Understand community formation and interaction.\n",
        "                          - Trend Mapping: Recognize trends and patterns in user engagement.\n",
        "                          - Questions: How does the content align with TikTok's unique culture?\n",
        "\n",
        "                        6. **Competitive Analysis** (Optional):\n",
        "                          - Benchmarking: Compare against similar viral content.\n",
        "                          - Differentiation: Identify unique selling points.\n",
        "                          - Questions: What sets this content apart from others in the same niche?\n",
        "\n",
        "                        7. **Actionable Strategy**:\n",
        "                          - Tailored Recommendations: Craft strategies for various content types.\n",
        "                          - Risk and Opportunities: Assess potential pitfalls and gains.\n",
        "                          - Tactical Roadmap: Provide a step-by-step implementation guide.\n",
        "\n",
        "                        8. **Future Forecast**:\n",
        "                          - Trend Prediction: Analyze potential future trends in TikTok content.\n",
        "                          - Innovation Scouting: Identify opportunities for groundbreaking content.\n",
        "\n",
        "                        9. **Conclusion and Reflection**:\n",
        "                          - Synthesis: Bring together the key findings.\n",
        "                          - Strategic Implications: Connect insights to broader goals.\n",
        "\n",
        "                      Your analysis must be insightful, innovative, and provide a roadmap for creating content that resonates with TikTok's diverse and dynamic audience.\"\"\"\n",
        "                    },\n",
        "                    {\n",
        "                      \"role\": \"user\",\n",
        "                      \"content\": \"Here are the TikTok transcriptions, questions and answers, and associated data for analysis: \\n\\n\" + \"\\n\\n\".join(batch_data)\n",
        "                    }\n",
        "            ],\n",
        "            max_tokens=500,\n",
        "            n=1,\n",
        "            stop=None,\n",
        "            temperature=0.7\n",
        "        )\n",
        "\n",
        "        # Extract the parsed transcription\n",
        "        if response is not None and 'choices' in response and len(response['choices']) > 0:\n",
        "            parsed_transcription = response['choices'][0]['message']['content'].strip()\n",
        "            print(parsed_transcription)\n",
        "            return parsed_transcription\n",
        "        else:\n",
        "            return None\n",
        "\n",
        "    # BUG FIX: the original ignored its argument and closed over the global\n",
        "    # ``df`` instead; operate on the DataFrame actually passed in.  (The\n",
        "    # visible caller passes that same frame, so behavior is unchanged.)\n",
        "    csv_file['Evaluation'] = csv_file.apply(evaluate_row, axis=1)\n",
        "\n",
        "    # Return the DataFrame with the evaluations\n",
        "    return csv_file\n",
        "\n",
        "def truncate_text(text, max_length=25000):\n",
        "    if len(text) > max_length:\n",
        "        text = text[:max_length]\n",
        "    return text\n",
        "\n",
        "def generate_final_report(df):\n",
        "    \"\"\"Synthesize all per-video evaluations into one markdown report.\n",
        "\n",
        "    Joins each row's id, Evaluation, and engagement counts into one prompt,\n",
        "    truncates it to fit the context window, and asks GPT-4 for a structured\n",
        "    report.  Returns the report text, or None if the response has no choices.\n",
        "    NOTE(review): uses the pre-1.0 openai SDK (openai.ChatCompletion).\n",
        "    \"\"\"\n",
        "    # Prepare the data for the GPT-3 model\n",
        "    evaluations = \"\\n\\n\".join(df.apply(lambda row: f\"Video ID: {row['id']}\\nEvaluation: {row['Evaluation']}\\nShare Count: {row['shareCount']}\\nPlay Count: {row['playCount']}\\nComment Count: {row['commentCount']}\", axis=1).tolist())\n",
        "    evaluations = truncate_text(evaluations)  # keep the prompt within the model's context window\n",
        "    # Call the GPT-3 model\n",
        "    response = openai.ChatCompletion.create(\n",
        "        model=\"gpt-4\",\n",
        "        messages=[\n",
        "            {\n",
        "                \"role\": \"system\",\n",
        "                \"content\": \"\"\"You are a superhuman expert in viral marketing on TikTok, with unparalleled insights into content creation and audience engagement.\n",
        "                Your task now is to synthesize all the evaluations into a final report that provides a comprehensive analysis of the TikTok videos,\n",
        "                 identifies key trends and insights, and offers actionable recommendations for creating viral content on TikTok. You do not provide generic advice, your advice\n",
        "                 is grounded in the learnings from the data provided, and you include specific references and examples whenever possible.\n",
        "                 Your report should be in beautiful markdown and structured as follows:\n",
        "\n",
        "                  1. Executive Summary: Provide a brief overview of the key findings and recommendations.\n",
        "                  2. Detailed Analysis: Discuss the evaluations in depth, highlighting important trends, insights, and examples.\n",
        "                  3. Recommendations: Based on the analysis, provide actionable recommendations for creating viral content on TikTok.\n",
        "                  4. Conclusion: Summarize the report and discuss the potential impact of the recommendations.\"\"\"\n",
        "            },\n",
        "            {\n",
        "                \"role\": \"user\",\n",
        "                \"content\": f\"\"\"Please provide an incredibly thorough, highly accurate, extremely useful report in valid markdown style.\n",
        "                Make use of any/all markdown style elements including titles, headings, subheadings, bullets and lists, lines, bold, italics, blockquotes, etc. you can to make it as readable and beautiful as possible.\n",
        "                Here are the evaluations for the final report: \\n\\n{evaluations}\"\"\"\n",
        "            }\n",
        "        ],\n",
        "        max_tokens=2500,\n",
        "        n=1,\n",
        "        stop=None,\n",
        "        temperature=0.4\n",
        "    )\n",
        "\n",
        "    # Extract the final report\n",
        "    if response is not None and 'choices' in response and len(response['choices']) > 0:\n",
        "        final_report = response['choices'][0]['message']['content'].strip()\n",
        "        return final_report\n",
        "    else:\n",
        "        return None\n",
        "\n",
        "# Evaluate every row, then synthesize the final report\n",
        "evaluated_df = evaluate_dataframe(df)\n",
        "\n",
        "# Generate the final report\n",
        "final_report = generate_final_report(evaluated_df)\n",
        "\n",
        "# BUG FIX: generate_final_report returns None when the API reply is empty;\n",
        "# f.write(None) / Markdown(None) would raise TypeError, so guard first.\n",
        "if final_report is None:\n",
        "    print(\"No report was generated (empty API response).\")\n",
        "else:\n",
        "    # Save the final report as a Markdown file\n",
        "    with open('/content/final_report.md', 'w') as f:\n",
        "        f.write(final_report)\n",
        "\n",
        "    # Display the final report in the notebook\n",
        "    display(Markdown(final_report))\n"
      ],
      "metadata": {
        "id": "FA4nUHX-cpPB"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "with open('final_report.md', 'w') as f:\n",
        "    f.write(final_report)"
      ],
      "metadata": {
        "id": "ZgQDpUyS0dMN"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}