{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "authorship_tag": "ABX9TyMh47M8EQKWJIaTSVNScDay",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ktynski/Marketing_Automations_Notebooks_With_GPT/blob/main/Automatic_Deep_TikTok_Insights_with_GPT_and_Whisper_Public.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "%pip install apify-client\n",
        "%pip install spleeter openai tensorflow\n",
        "%pip install typer click==8.0.4\n",
        "%pip install pydub"
      ],
      "metadata": {
        "id": "Cmt7oM1WaYNU"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "%pip install --upgrade httpx apify-client\n"
      ],
      "metadata": {
        "id": "hT8XEF2L8Nt4"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "d8CqsjVTWsdI"
      },
      "outputs": [],
      "source": [
        "import pandas as pd\n",
        "import concurrent.futures\n",
        "import threading\n",
        "from apify_client import ApifyClient\n",
        "import time\n",
        "\n",
        "APIFY_API_URL = 'https://api.apify.com/v2'\n",
        "ACTOR_NAME = 'mscraper/tiktok-search-autocomplete'\n",
        "APIFY_API_KEY = 'Your Apify API Key'\n",
        "\n",
        "client = ApifyClient(APIFY_API_KEY)\n",
        "\n",
        "# Collect rows in a plain list: DataFrame.append() was removed in pandas 2.0,\n",
        "# and list.append() under a lock is safe for the worker threads below.\n",
        "rows = []\n",
        "rows_lock = threading.Lock()\n",
        "\n",
        "def get_autocomplete(search):\n",
        "    \"\"\"Run the autocomplete actor for one term; record and return its suggestions.\"\"\"\n",
        "    run_input = {\n",
        "        \"proxy\": {\n",
        "            \"useApifyProxy\": True,\n",
        "            \"apifyProxyCountry\": \"US\",\n",
        "            \"apifyProxyGroups\": [\"RESIDENTIAL\"]\n",
        "        },\n",
        "        \"query\": [search]\n",
        "    }\n",
        "\n",
        "    # .call() blocks until the run finishes, so no status-polling loop is needed.\n",
        "    run = client.actor(ACTOR_NAME).call(run_input=run_input)\n",
        "    if run is None or run.get('status') != 'SUCCEEDED':\n",
        "        # Failed/aborted runs previously caused an endless poll loop; skip them.\n",
        "        return []\n",
        "\n",
        "    suggestions = []\n",
        "    for item in client.dataset(run[\"defaultDatasetId\"]).iterate_items():\n",
        "        with rows_lock:\n",
        "            rows.append({\"search_term\": search, \"suggestion\": item['content'], \"lang\": item['lang']})\n",
        "        suggestions.append(item['content'])\n",
        "\n",
        "    return suggestions\n",
        "\n",
        "def recursive_search(search_term, level=1, max_level=3):\n",
        "    \"\"\"Expand a search term by recursively searching each of its suggestions.\"\"\"\n",
        "    if level > max_level:\n",
        "        return\n",
        "\n",
        "    suggestions = get_autocomplete(search_term)\n",
        "\n",
        "    # max_workers bounds concurrency. (Acquiring and immediately releasing a\n",
        "    # semaphore around submit() would not: the permit is returned before the\n",
        "    # submitted task actually runs.)\n",
        "    with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:\n",
        "        futures = [\n",
        "            executor.submit(recursive_search, suggestion, level + 1, max_level)\n",
        "            for suggestion in suggestions\n",
        "        ]\n",
        "        for future in concurrent.futures.as_completed(futures):\n",
        "            future.result()\n",
        "\n",
        "recursive_search('Camping', max_level=2)\n",
        "df = pd.DataFrame(rows, columns=[\"search_term\", \"suggestion\", \"lang\"])\n",
        "df = df.drop_duplicates(subset='suggestion')\n",
        "\n",
        "print(df)\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Persist the deduplicated suggestions for download / later cells.\n",
        "df.to_csv('scraped_tiktok_searches.csv')\n",
        "\n",
        "fordl = df\n",
        "fordl"
      ],
      "metadata": {
        "id": "Vq_5MHQRkFCY"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import concurrent.futures\n",
        "import threading\n",
        "from apify_client import ApifyClient\n",
        "\n",
        "APIFY_API_URL = 'https://api.apify.com/v2'\n",
        "ACTOR_NAME = 'clockworks/tiktok-scraper'\n",
        "APIFY_API_KEY = 'Your Apify API Key'\n",
        "\n",
        "client = ApifyClient(APIFY_API_KEY)\n",
        "\n",
        "def run_actor(hashtag):\n",
        "    \"\"\"Scrape up to 25 TikTok videos for one hashtag and return the raw items.\"\"\"\n",
        "    run_input = {\n",
        "        \"hashtags\": [hashtag],\n",
        "        \"resultsPerPage\": 25,\n",
        "        \"scrapeEmptyChannelInfo\": False,\n",
        "        \"shouldDownloadVideos\": True,\n",
        "        \"shouldDownloadCovers\": False,\n",
        "        \"videoKvStoreIdOrName\": \"mytiktokvideos\",\n",
        "        \"proxyConfiguration\": { \"useApifyProxy\": True },\n",
        "    }\n",
        "\n",
        "    # .call() blocks until the actor run finishes.\n",
        "    run = client.actor(ACTOR_NAME).call(run_input=run_input)\n",
        "    results = []\n",
        "\n",
        "    for item in client.dataset(run[\"defaultDatasetId\"]).iterate_items():\n",
        "        item['hashtag'] = hashtag  # tag each item with the hashtag that produced it\n",
        "        results.append(item)\n",
        "\n",
        "    return results\n",
        "\n",
        "def concurrent_runs(df):\n",
        "    \"\"\"Scrape every suggestion in df, running at most 24 actors at a time.\"\"\"\n",
        "    hashtags = df['suggestion'].tolist()\n",
        "    all_results = []\n",
        "\n",
        "    # max_workers caps concurrency. A semaphore acquired and released around\n",
        "    # submit() alone would not, since the permit is freed before the task runs.\n",
        "    with concurrent.futures.ThreadPoolExecutor(max_workers=24) as executor:\n",
        "        futures = [executor.submit(run_actor, hashtag) for hashtag in hashtags]\n",
        "\n",
        "        for future in concurrent.futures.as_completed(futures):\n",
        "            all_results.extend(future.result())\n",
        "\n",
        "    return pd.DataFrame(all_results)\n",
        "\n",
        "# Drop rows with NaN values\n",
        "df = df.dropna()\n",
        "\n",
        "# Or replace NaNs with a placeholder value\n",
        "df = df.fillna('')\n",
        "\n",
        "# Limit to the first 50 suggestions to bound scraping cost, then run.\n",
        "df = df[:50]\n",
        "new_df = concurrent_runs(df)\n",
        "new_df.to_csv('tiktokscrape.csv')\n",
        "print(new_df)\n"
      ],
      "metadata": {
        "id": "sqy6_7QfiogS"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE(review): appears redundant with the save in the previous cell;\n",
        "# kept as a manual re-save checkpoint.\n",
        "new_df.to_csv('tiktokscrape.csv')"
      ],
      "metadata": {
        "id": "9ioQtAROol6-"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import concurrent.futures\n",
        "import openai\n",
        "import requests\n",
        "import pandas as pd\n",
        "import os\n",
        "import shutil\n",
        "from spleeter.separator import Separator\n",
        "from pydub import AudioSegment\n",
        "import ast\n",
        "import threading\n",
        "\n",
        "new_df = pd.read_csv('tiktokscrape.csv')\n",
        "df = new_df\n",
        "\n",
        "openai.api_key = \"Your OpenAI API Key\"\n",
        "\n",
        "# Create a separator with 2 stems (vocals and accompaniment).\n",
        "separator = Separator('spleeter:2stems')\n",
        "# Serialize separator use across worker threads via this lock.\n",
        "separator_lock = threading.Lock()\n",
        "\n",
        "def download_and_transcribe(row):\n",
        "    \"\"\"Download one video, isolate the vocals, and transcribe them with Whisper.\n",
        "\n",
        "    Returns (video_id, transcription_text). Temporary files are removed\n",
        "    on success; on failure the caller logs the exception per row.\n",
        "    \"\"\"\n",
        "    video_url = ast.literal_eval(row['mediaUrls'])[0]\n",
        "    filename = f\"/content/{row['id']}.mp4\"\n",
        "\n",
        "    # Download the video\n",
        "    with requests.get(video_url, stream=True) as r:\n",
        "        r.raise_for_status()\n",
        "        with open(filename, 'wb') as f:\n",
        "            for chunk in r.iter_content(chunk_size=8192):\n",
        "                f.write(chunk)\n",
        "\n",
        "    # Convert mp4 to mp3 for audio separation.\n",
        "    # BUG FIX: the input path was missing from this command, so ffmpeg\n",
        "    # previously had no input file and every conversion failed.\n",
        "    audio_filename = f\"/content/{row['id']}.mp3\"\n",
        "    os.system(f'ffmpeg -i {filename} -vn -ar 44100 -ac 2 -b:a 192k {audio_filename}')\n",
        "\n",
        "    # Separate the vocal from music\n",
        "    with separator_lock:\n",
        "        separator.separate_to_file(audio_filename, f'/content/{row[\"id\"]}')\n",
        "    os.remove(audio_filename)\n",
        "\n",
        "    # Convert wav to mp3 (spleeter writes <dest>/<input_name>/vocals.wav)\n",
        "    audio = AudioSegment.from_wav(f\"/content/{row['id']}/{row['id']}/vocals.wav\")\n",
        "    audio.export(f\"/content/{row['id']}/vocals.mp3\", format=\"mp3\")\n",
        "\n",
        "    # Transcribe the vocal\n",
        "    vocal_filename = f\"/content/{row['id']}/vocals.mp3\"\n",
        "    with open(vocal_filename, \"rb\") as vocal_file:\n",
        "        transcript = openai.Audio.translate(\"whisper-1\", vocal_file)\n",
        "\n",
        "    # Clean up the downloaded files\n",
        "    os.remove(filename)\n",
        "    shutil.rmtree(f\"/content/{row['id']}\")  # remove directory created by spleeter\n",
        "\n",
        "    return (row['id'], transcript[\"text\"])\n",
        "\n",
        "\n",
        "# Process rows concurrently; 4 workers keeps disk/CPU use in Colab reasonable.\n",
        "with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n",
        "    # Submit all tasks to the executor, keyed back to their DataFrame index.\n",
        "    future_to_row = {executor.submit(download_and_transcribe, row): idx for idx, row in df.iterrows()}\n",
        "\n",
        "    # Collect the results as they become available\n",
        "    for future in concurrent.futures.as_completed(future_to_row):\n",
        "        i = future_to_row[future]\n",
        "        try:\n",
        "            # renamed from `id` to avoid shadowing the builtin\n",
        "            video_id, transcription = future.result()\n",
        "            df.loc[i, 'transcription'] = transcription\n",
        "        except Exception as exc:\n",
        "            print(f'Row {i} generated an exception: {exc}')\n",
        "\n",
        "df.to_csv('transcribed.csv')"
      ],
      "metadata": {
        "id": "B9OxBhDpn1e-"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def gpt_parse_tiktok_transcriptions(df, chunk_size=10, retries=3):\n",
        "    \"\"\"Batch transcription rows through GPT and collect one analysis per chunk.\n",
        "\n",
        "    Writes the analyses to parsed_transcriptions.csv and returns them as a\n",
        "    one-column DataFrame. Each chunk is attempted up to `retries` times.\n",
        "    \"\"\"\n",
        "    # Create a container to store the parsed transcriptions\n",
        "    parsed_transcriptions = []\n",
        "\n",
        "    # Chunk the DataFrame\n",
        "    chunks = [df[i:i+chunk_size] for i in range(0, df.shape[0], chunk_size)]\n",
        "\n",
        "    for chunk in chunks:\n",
        "        # Prepare the data for the batch\n",
        "        batch_data = []\n",
        "\n",
        "        for _, row in chunk.iterrows():\n",
        "            # Unpack the row\n",
        "            hashtags, tiktok_transcription, created_time, sharecount, playcount, commentcount, mentions, effect_stickers, search_hashtags = row['hashtags'], row['transcription'], row['createTime'], row['shareCount'], row['playCount'], row['commentCount'], row['mentions'], row['effectStickers'], row['searchHashtag']\n",
        "\n",
        "            # Prepare the row data\n",
        "            row_data = f\"\"\"\n",
        "            Transcription: {tiktok_transcription}\n",
        "            Hashtags: {hashtags}\n",
        "            View Count: {playcount}\n",
        "            Share Count: {sharecount}\n",
        "            Comment Count: {commentcount}\n",
        "            Mentions: {mentions}\n",
        "            Effect Stickers: {effect_stickers}\n",
        "            Search Hashtags: {search_hashtags}\n",
        "            \"\"\"\n",
        "\n",
        "            batch_data.append(row_data)\n",
        "\n",
        "        for _ in range(retries):\n",
        "            # Provide GPT with the system message to instruct it on the task.\n",
        "            # BUG FIX: the retry loop previously gave no retries at all -- any\n",
        "            # API exception escaped and aborted the whole run. Now a failure\n",
        "            # consumes one retry attempt instead.\n",
        "            try:\n",
        "                response = openai.ChatCompletion.create(\n",
        "                model=\"gpt-3.5-turbo-16k\",\n",
        "                messages=[\n",
        "                        {\"role\": \"system\",\n",
        "                                \"content\": \"\"\"You're an AI with advanced capabilities in understanding and interpreting social media content. Your current task involves processing a batch of up to 100 transcriptions from TikTok videos, each with its associated hashtags, view counts, and comment counts.\n",
        "                                Your objective is to uncover valuable and actionable insights for a content marketer who aims to create impactful and resonant content on TikTok. Give specific examples from the data/transcripts to illustrate your points wherever possible.\n",
        "\n",
        "                                Pay close attention to these specific areas in the data provided. Do not provide generically true advice, your advice should be specific to the data you are provided with and provide specific examples from the data that illustrate your point:\n",
        "\n",
        "                                - Virality Factors: Identify unique characteristics of videos that went viral. What aspects of their content or engagement metrics could have contributed to their virality? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Community Engagement: Evaluate the elements of videos that have high community engagement. What strategies, themes or content types incite audience participation? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Story Arc: For high performing videos, what is the arc of the story like? Provide specific examples.\n",
        "\n",
        "                                - Audience Segmentation: Deduce potential audience segments based on their reactions to different videos. What content preferences does each audience segment seem to display? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Effective Calls to Action: Identify strategies that lead to effective calls to action within the TikTok community. What type of calls to action are generally successful? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Predicting Engagement: Develop an understanding of the factors that might lead to high engagement in the future. What emerging trends or behaviours do you anticipate? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Viewer Retention: Analyze the strategies used to retain viewer attention throughout a video. What elements contributed to viewers watching the videos till the end? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Emotional Engagement: Identify the types of content that elicit strong emotional responses given the data and transcripts. Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Brand Presence: Evaluate the balance between brand presence and audience enjoyment. How does the overt presence of a brand in the videos influence engagement? List all brands mentioned.\n",
        "\n",
        "                                - Participation in Trends: Understand the drivers behind user participation in challenges or trends. What factors encourage users to join in? List any challenges or trends found.\n",
        "\n",
        "                                - Authenticity: Analyze the role of authenticity in content reception. How important is it for the content to be perceived as genuine? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                - Sentiment Analysis: Determine what factors contributed to positive or negative sentiment towards a piece of content. What elements seem to sway sentiment in either direction? Give at least 5 examples to illustrate.\n",
        "\n",
        "                                Remember, each insight should be backed by the data provided with as many specific examples as possible, and focus on providing highly detailed analysis of the content provided. Your analysis should help them understand all of the important insights of the data.\"\"\"},\n",
        "                                {\"role\": \"user\", \"content\": f\"Here are the TikTok transcriptions and associated data for analysis: \\n\\n\" + \"\\n\\n\".join(batch_data)}\n",
        "\n",
        "\n",
        "                ],\n",
        "                max_tokens=4000,\n",
        "                n=1,\n",
        "                stop=None,\n",
        "                temperature=0.7\n",
        "            )\n",
        "            except Exception as exc:\n",
        "                print(f'OpenAI request failed, retrying: {exc}')\n",
        "                continue\n",
        "\n",
        "            # If the API call was successful, extract the parsed transcription and add it to the list\n",
        "            if response is not None and 'choices' in response and len(response['choices']) > 0:\n",
        "                parsed_transcription = response['choices'][0]['message']['content'].strip()\n",
        "                print(parsed_transcription)\n",
        "                parsed_transcriptions.append(parsed_transcription)\n",
        "                break\n",
        "\n",
        "    # Create a DataFrame from the list of parsed transcriptions\n",
        "    parsed_df = pd.DataFrame(parsed_transcriptions, columns=['parsed_transcription'])\n",
        "\n",
        "    # Save the parsed transcriptions to a CSV file\n",
        "    parsed_df.to_csv('parsed_transcriptions.csv', index=False)\n",
        "\n",
        "    return parsed_df\n",
        "\n",
        "\n",
        "df = pd.read_csv('transcribed.csv')\n",
        "\n",
        "# Run the GPT analysis on the transcriptions\n",
        "parsed_df = gpt_parse_tiktok_transcriptions(df, chunk_size=10, retries=3)\n",
        "\n",
        "# Print the parsed DataFrame\n",
        "print(parsed_df)\n"
      ],
      "metadata": {
        "id": "E-OnquvSy5VD"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def gpt_secondary_analysis_batched(df, chunk_size=4):\n",
        "    \"\"\"Summarize parsed transcriptions chunk by chunk, then produce one final readout.\n",
        "\n",
        "    Returns the final analysis string, or None when the final API call fails\n",
        "    or returns no choices.\n",
        "    \"\"\"\n",
        "    # Create a container to store the secondary analysis results\n",
        "    secondary_analysis_results = []\n",
        "\n",
        "    # Chunk the DataFrame\n",
        "    chunks = [df[i:i+chunk_size] for i in range(0, df.shape[0], chunk_size)]\n",
        "\n",
        "    for chunk in chunks:\n",
        "        aggregated_transcriptions = ' '.join(chunk['parsed_transcription'])\n",
        "\n",
        "        # A failed chunk should not abort the entire analysis; skip it instead.\n",
        "        try:\n",
        "            response = openai.ChatCompletion.create(\n",
        "                model=\"gpt-3.5-turbo-16k\",\n",
        "                messages=[\n",
        "                    {\"role\": \"system\",\n",
        "                     \"content\": \"Summarize the findings and provide a readout that includes all salient info. Always provide specific examples from the transcripts\"},\n",
        "                    {\"role\": \"user\", \"content\": f\"Provide the readout: {aggregated_transcriptions}.\"}\n",
        "                ],\n",
        "                max_tokens=1500,\n",
        "                n=1,\n",
        "                stop=None,\n",
        "                temperature=0.7\n",
        "            )\n",
        "        except Exception as exc:\n",
        "            print(f'Chunk summary failed, skipping chunk: {exc}')\n",
        "            continue\n",
        "\n",
        "        if response is not None and 'choices' in response and len(response['choices']) > 0:\n",
        "            secondary_analysis = response['choices'][0]['message']['content'].strip()\n",
        "            secondary_analysis_results.append(secondary_analysis)\n",
        "\n",
        "    # After analyzing each chunk, analyze all the results together\n",
        "    aggregated_results = ' '.join(secondary_analysis_results)\n",
        "\n",
        "    # NOTE(review): max_tokens=10000 assumes the prompt stays well under the\n",
        "    # model's 16k context window -- confirm for very large aggregated inputs.\n",
        "    try:\n",
        "        response = openai.ChatCompletion.create(\n",
        "            model=\"gpt-3.5-turbo-16k\",\n",
        "            messages=[\n",
        "                {\"role\": \"system\",\n",
        "                 \"content\": \"Simulate an award winning TikTok social media expert and consultant known for highly actionable and insightful analysis of tiktok niches. Please provide a long-form analysis and client deliverable that provides a readout that includes all salient info that would be useful or interesting. Try to provide up to ten specific examples for each.  Make this as in-depth and long-form as possible.\"},\n",
        "                {\"role\": \"user\", \"content\": f\"Analyze and provide the longform readout: {aggregated_results}.\"}\n",
        "            ],\n",
        "            max_tokens=10000,\n",
        "            n=1,\n",
        "            stop=None,\n",
        "            temperature=0.7\n",
        "        )\n",
        "    except Exception as exc:\n",
        "        print(f'Final analysis request failed: {exc}')\n",
        "        return None\n",
        "\n",
        "    if response is not None and 'choices' in response and len(response['choices']) > 0:\n",
        "        final_analysis = response['choices'][0]['message']['content'].strip()\n",
        "        return final_analysis\n",
        "\n",
        "    return None\n",
        "\n",
        "\n",
        "# Your DataFrame should be loaded or defined before this point, for example:\n",
        "df = pd.read_csv('parsed_transcriptions.csv')\n",
        "\n",
        "# Call the function\n",
        "final_eval = gpt_secondary_analysis_batched(df, chunk_size=6)\n",
        "\n",
        "# Save the final analysis to a text file\n",
        "with open('Final_TikTok_Analysis.txt', 'w') as f:\n",
        "    f.write(str(final_eval))\n"
      ],
      "metadata": {
        "id": "70QN4R9ixSb6"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}