{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "authorship_tag": "ABX9TyMZrrl/tb5OqQYABf8AQOKW",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ktynski/Marketing_Automations_Notebooks_With_GPT/blob/main/Large_Language_Model_Search_Optimization_(Public).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Pin openai<1: this notebook uses the legacy openai.ChatCompletion API,\n",
        "# which was removed in openai>=1.0. Use %pip (not !pip) so packages are\n",
        "# installed into the active kernel's environment.\n",
        "%pip install -q \"openai<1\"\n",
        "%pip install -q fuzzywuzzy\n",
        "%pip install -q retrying\n",
        "\n"
      ],
      "metadata": {
        "id": "ds0jS7vlerDz"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "dK07vK-4d4VN"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "\n",
        "import pandas as pd\n",
        "import openai\n",
        "from collections import Counter\n",
        "import concurrent.futures\n",
        "from retrying import retry\n",
        "from fuzzywuzzy import fuzz, process\n",
        "\n",
        "# Never hardcode credentials: prefer the OPENAI_API_KEY environment variable;\n",
        "# the placeholder fallback keeps the cell importable until the key is set.\n",
        "openai.api_key = os.environ.get(\"OPENAI_API_KEY\", \"Your OpenAI API Key\")\n",
        "\n",
        "# Global counter of ChatCompletion requests (printed for cost visibility).\n",
        "gpt_call_counter = 0\n",
        "\n",
        "\n",
        "\n",
        "def retry_if_exception(exception):\n",
        "    \"\"\"Retry predicate for @retry: every exception is considered retryable.\"\"\"\n",
        "    return True\n",
        "\n",
        "@retry(retry_on_exception=retry_if_exception, stop_max_attempt_number=3)\n",
        "def gpt_call(messages):\n",
        "    \"\"\"Send `messages` to gpt-3.5-turbo and return the raw ChatCompletion response.\n",
        "\n",
        "    Retries up to 3 times on any exception (see retry_if_exception). The global\n",
        "    `gpt_call_counter` is incremented on every attempt, so the printed total\n",
        "    includes retried calls.\n",
        "    \"\"\"\n",
        "    global gpt_call_counter\n",
        "    gpt_call_counter += 1\n",
        "    print(f\"GPT called {gpt_call_counter} times.\")\n",
        "    return openai.ChatCompletion.create(\n",
        "        model=\"gpt-3.5-turbo\",\n",
        "        messages=messages\n",
        "    )\n",
        "\n",
        "def generate_related_queries(topic, n):\n",
        "    \"\"\"Ask GPT for `n` high-purchase-intent questions/searches about `topic`.\n",
        "\n",
        "    Returns a list of non-empty query strings, one per line of the model reply.\n",
        "    \"\"\"\n",
        "    print(f\"Generating related queries for topic: '{topic}'\")\n",
        "    response = gpt_call([\n",
        "        {\"role\": \"system\", \"content\": \"You are all-knowing genius at understanding how people search. Your task is to generate specific and high-intent questions or searches related to a topic.\"},\n",
        "        {\"role\": \"user\", \"content\": f\"I need {n} distinct questions or keyword searches related to {topic}. They should reflect common and detailed inquiries that people might have about the topic that reflect high purchase intent. Do not print anything other than the questions and/or searches themselves. No intro or outro text should be included under any circumstances.\"}\n",
        "    ])\n",
        "    content = response['choices'][0]['message']['content']\n",
        "    # The model sometimes emits blank lines or stray whitespace between items;\n",
        "    # strip each line and drop empties so downstream queries are always real.\n",
        "    queries = [line.strip() for line in content.strip().split('\\n') if line.strip()]\n",
        "    return queries\n",
        "\n",
        "def extract_brands(response):\n",
        "    \"\"\"Ask GPT which brand names are mentioned in `response`.\n",
        "\n",
        "    Returns a list of brand strings, or [] when the model's reply cannot be\n",
        "    parsed as a Python list literal.\n",
        "    \"\"\"\n",
        "    import ast  # stdlib; local so the cell works without touching the imports block\n",
        "\n",
        "    print(f\"Extracting brands from response.\")\n",
        "    chat_response = gpt_call([\n",
        "        {\"role\": \"system\", \"content\": \"You are a helpful assistant. Your task is to identify specific brand names mentioned in a text.\"},\n",
        "        {\"role\": \"user\", \"content\": f\"Can you identify any specific brand names mentioned in the following text: \\\"{response}\\\"? Please respond with only a list of the brands in the following format: [\\\"Brand1\\\", \\\"Brand2\\\", \\\"Brand3\\\"].\"}\n",
        "    ])\n",
        "    # The model reply is untrusted input: ast.literal_eval parses literals only\n",
        "    # and cannot execute code, unlike eval(). It raises ValueError as well as\n",
        "    # SyntaxError on malformed input, so catch both.\n",
        "    try:\n",
        "        brands = ast.literal_eval(chat_response['choices'][0]['message']['content'])\n",
        "    except (ValueError, SyntaxError):\n",
        "        brands = []\n",
        "    # Guard against a well-formed but non-list literal (e.g. a bare string).\n",
        "    if not isinstance(brands, list):\n",
        "        brands = []\n",
        "    return brands\n",
        "\n",
        "def query_gpt(query, n):\n",
        "    \"\"\"Fetch `n` GPT answers for `query` concurrently and tabulate brand mentions.\n",
        "\n",
        "    Returns a DataFrame with one row per (response, brand) pair; responses with\n",
        "    no detected brand contribute a single 'N/A' row with zero mentions.\n",
        "    \"\"\"\n",
        "    print(f\"Querying GPT for query: '{query}'\")\n",
        "    columns = ['Query', 'Response', 'Brand', 'Mentions', 'Prominence']\n",
        "    records = []\n",
        "\n",
        "    # One identical message pair per requested response.\n",
        "    tasks = [({'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': f\"I need detailed information on this topic: \\\"{query}\\\". If relevant, please mention specific brand names.\"}) for _ in range(n)]\n",
        "\n",
        "    # Run the API calls concurrently; collect rows in a plain list and build the\n",
        "    # frame once at the end (appending to a DataFrame inside a loop is quadratic).\n",
        "    with concurrent.futures.ThreadPoolExecutor(max_workers=n) as executor:\n",
        "        futures = {executor.submit(gpt_call, task) for task in tasks}\n",
        "        for future in concurrent.futures.as_completed(futures):\n",
        "            response = future.result()['choices'][0]['message']['content'].strip()\n",
        "            brands = extract_brands(response)\n",
        "            mentions = Counter(brands)\n",
        "\n",
        "            if not brands:\n",
        "                records.append({'Query': query, 'Response': response, 'Brand': 'N/A', 'Mentions': 0, 'Prominence': 0})\n",
        "            else:\n",
        "                for brand, mention_count in mentions.items():\n",
        "                    print(f\"Found brand '{brand}' mentioned {mention_count} time(s) in response.\")\n",
        "                    # Prominence = 1 / (1-based word position of the brand in the\n",
        "                    # response); if the brand is not a standalone word, fall back\n",
        "                    # to 1 / len(response) (i.e. effectively negligible).\n",
        "                    words = response.lower().split()\n",
        "                    if brand.lower() in words:\n",
        "                        prominence = 1 / (words.index(brand.lower()) + 1)\n",
        "                    else:\n",
        "                        prominence = 1 / len(response)\n",
        "                    records.append({'Query': query, 'Response': response, 'Brand': brand, 'Mentions': mention_count, 'Prominence': prominence})\n",
        "    return pd.DataFrame(records, columns=columns)\n",
        "\n",
        "def analyze_brands(df):\n",
        "    \"\"\"Aggregate per-brand statistics and GPT context summaries from `df`.\n",
        "\n",
        "    For each brand (excluding 'N/A' rows) computes total mentions, mean\n",
        "    prominence, the distinct response contexts, and a GPT-written summary of\n",
        "    how the brand is discussed in each context. Returns one row per brand.\n",
        "    \"\"\"\n",
        "    print(\"Analyzing brands...\")\n",
        "    # Unique real brands; 'N/A' marks responses with no brand detected.\n",
        "    unique_brands = df[df['Brand'] != 'N/A']['Brand'].unique()\n",
        "\n",
        "    columns = ['Brand', 'Total Mentions', 'Average Prominence', 'Contexts', 'Context Summaries']\n",
        "    records = []\n",
        "\n",
        "    for brand in unique_brands:\n",
        "        print(f\"Analyzing brand: '{brand}'\")\n",
        "        brand_df = df[df['Brand'] == brand]\n",
        "\n",
        "        total_mentions = brand_df['Mentions'].sum()\n",
        "        avg_prominence = brand_df['Prominence'].mean()\n",
        "\n",
        "        # Distinct full responses in which this brand appeared.\n",
        "        contexts = brand_df['Response'].unique()\n",
        "\n",
        "        # Summarize how the brand is discussed in each context (one GPT call per\n",
        "        # distinct context -- this dominates the cost of the analysis).\n",
        "        context_summaries = []\n",
        "        for context in contexts:\n",
        "            chat_response = gpt_call([\n",
        "                {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
        "                {\"role\": \"user\", \"content\": f\"Given the following text, please provide a summary focusing on how the brand '{brand}' is being discussed: {context}\"}\n",
        "            ])\n",
        "            context_summaries.append(chat_response['choices'][0]['message']['content'])\n",
        "\n",
        "        # Accumulate rows and build the frame once, instead of concat-in-a-loop.\n",
        "        records.append({'Brand': brand, 'Total Mentions': total_mentions, 'Average Prominence': avg_prominence, 'Contexts': contexts, 'Context Summaries': context_summaries})\n",
        "\n",
        "    return pd.DataFrame(records, columns=columns)\n",
        "\n",
        "def generate_report(analysis_df, specific_brand):\n",
        "    \"\"\"Produce a natural-language GPT report on `specific_brand` from `analysis_df`.\n",
        "\n",
        "    Returns the report text, or a short notice when the brand never appeared.\n",
        "    \"\"\"\n",
        "    print(f\"Generating report for brand: '{specific_brand}'\")\n",
        "    specific_brand_df = analysis_df[analysis_df['Brand'] == specific_brand]\n",
        "    # Guard clause: nothing to report on if the brand was never mentioned.\n",
        "    if specific_brand_df.empty:\n",
        "        return f\"The brand {specific_brand} was not mentioned in the responses.\"\n",
        "\n",
        "    row = specific_brand_df.iloc[0]\n",
        "    chat_response = gpt_call([\n",
        "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
        "        {\"role\": \"user\", \"content\": f\"Please provide a detailed report on the brand '{specific_brand}', which was mentioned {row['Total Mentions']} times with an average prominence of {row['Average Prominence']}. Here are the contexts and their summaries: {row['Contexts']} {row['Context Summaries']}\"}\n",
        "    ])\n",
        "    return chat_response['choices'][0]['message']['content']\n",
        "\n",
        "\n",
        "def unify_similar_brands(df, threshold=90):\n",
        "    \"\"\"Collapse near-duplicate brand spellings onto one canonical name.\n",
        "\n",
        "    Brands whose fuzzy token-sort similarity is >= `threshold` are mapped onto\n",
        "    the first of them encountered, so variants like 'Hubspot'/'HubSpot' are\n",
        "    counted together. Modifies the 'Brand' column in place and returns `df`.\n",
        "    \"\"\"\n",
        "    brands = df['Brand'].unique()\n",
        "    canonical = {}\n",
        "    for brand in brands:\n",
        "        # Skip names already absorbed by an earlier canonical brand; otherwise a\n",
        "        # symmetric pair (A ~ B) would be re-mapped back and forth and the final\n",
        "        # spelling would depend on iteration order.\n",
        "        if brand in canonical:\n",
        "            continue\n",
        "        canonical[brand] = brand\n",
        "        matches = process.extract(brand, brands, limit=len(brands), scorer=fuzz.token_sort_ratio)\n",
        "        for match, score in matches:\n",
        "            if score >= threshold and match != brand and match not in canonical:\n",
        "                canonical[match] = brand\n",
        "    # Apply the whole mapping in a single vectorized pass.\n",
        "    df['Brand'] = df['Brand'].replace(canonical)\n",
        "    return df\n",
        "\n",
        "\n",
        "# Set your topic and the number of queries and responses\n",
        "# NOTE: total GPT usage grows with n_queries * n_responses (plus one extraction\n",
        "# call per response and one summary call per brand context) -- keep both small\n",
        "# while experimenting.\n",
        "topic = 'Content Marketing Agency'\n",
        "n_queries = 10\n",
        "n_responses = 2\n",
        "\n",
        "# Generate the queries\n",
        "queries = generate_related_queries(topic, n_queries)\n",
        "\n",
        "# Process each query, producing a DataFrame for each\n",
        "df_list = [query_gpt(query, n_responses) for query in queries]\n",
        "\n",
        "# Combine all the DataFrames into one\n",
        "final_df = pd.concat(df_list, ignore_index=True)\n",
        "\n",
        "# Unify similar brand names\n",
        "final_df = unify_similar_brands(final_df)\n",
        "\n",
        "# Specify the brand you want a detailed report for\n",
        "specific_brand = 'Fractl'\n",
        "\n",
        "# Perform the brand analysis\n",
        "analysis_df = analyze_brands(final_df)\n",
        "\n",
        "# Generate the report for the specific brand\n",
        "brand_report = generate_report(analysis_df, specific_brand)\n",
        "\n",
        "# Save final_df to an Excel file\n",
        "final_df.to_excel(\"final_dataframe.xlsx\", index=False)\n",
        "\n",
        "# Save analysis_df to an Excel file\n",
        "analysis_df.to_excel(\"analysis_dataframe.xlsx\", index=False)\n",
        "\n",
        "# Save the brand_report to a text file\n",
        "with open(\"brand_report.txt\", \"w\") as text_file:\n",
        "    text_file.write(brand_report)\n",
        "\n",
        "# Print the analysis DataFrame and the brand report\n",
        "print(analysis_df)\n",
        "print(brand_report)\n",
        "\n",
        "# Print total number of GPT calls\n",
        "print(f\"Total GPT calls made: {gpt_call_counter}\")\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Rich last-expression display of the generated report text\n",
        "brand_report"
      ],
      "metadata": {
        "id": "3s9wKWe9xEgK"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Rich display of the per-brand analysis table\n",
        "analysis_df"
      ],
      "metadata": {
        "id": "5yHN366BDurp"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Rich display of the full (query, response, brand) mentions table\n",
        "final_df"
      ],
      "metadata": {
        "id": "r_JR9uTCDwM3"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}