{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "authorship_tag": "ABX9TyMsecj/GSbTeUksdMCzJEG/",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ktynski/Marketing_Automations_Notebooks_With_GPT/blob/main/Automatic_Brand_or_Entity_News_Media_Monitoring_and_Analysis_by_Kristin_frac_tl_(public).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Install Required Libraries"
      ],
      "metadata": {
        "id": "5w5N_tH6dsI0"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "2ETZKO85Pgxx"
      },
      "outputs": [],
      "source": [
        "# Use %pip (not !pip) so packages install into the active kernel's environment.\n",
        "# Note: 'google-search-results' is the official SerpAPI client package;\n",
        "# the former 'google-search-results-serpapi' line duplicated it.\n",
        "%pip install -q sentence-transformers\n",
        "%pip install -q scikit-learn\n",
        "%pip install -q google-search-results\n",
        "%pip install -q newspaper3k\n",
        "%pip install -q openai\n",
        "%pip install -q yellowbrick"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Run the script below to generate a dataframe and CSV of the article analysis and clustering of the news articles found. Replace your keyword, SerpAPI key, and OpenAI API key."
      ],
      "metadata": {
        "id": "-OlCaaI3dbnu"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import concurrent.futures\n",
        "import json\n",
        "import os\n",
        "import time\n",
        "from collections import defaultdict\n",
        "from concurrent.futures import ThreadPoolExecutor\n",
        "\n",
        "import nltk\n",
        "import openai\n",
        "import pandas as pd\n",
        "import requests\n",
        "from newspaper import Article\n",
        "from sentence_transformers import SentenceTransformer\n",
        "from sklearn.cluster import KMeans\n",
        "from transformers import GPT2Tokenizer\n",
        "from yellowbrick.cluster import KElbowVisualizer\n",
        "\n",
        "nltk.download(\"punkt\")\n",
        "\n",
        "\n",
        "# Credentials: replace the placeholders with your own keys.\n",
        "# NOTE(review): prefer os.environ / getpass over hardcoding secrets in a\n",
        "# notebook -- rendered notebooks are frequently shared and committed.\n",
        "serpapi_key = 'Your SerpAPI Key'\n",
        "openai.api_key = 'Your OpenAI Api Key'\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "# Load the GPT-2 tokenizer (used only to budget prompt length in truncate_text).\n",
        "tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n",
        "\n",
        "def truncate_text(text, max_tokens):\n",
        "    \"\"\"\n",
        "    Clip `text` so that its GPT-2 tokenization is at most `max_tokens` tokens.\n",
        "\n",
        "    Args:\n",
        "        text (str): Raw input text.\n",
        "        max_tokens (int): Upper bound on the number of tokens kept.\n",
        "\n",
        "    Returns:\n",
        "        str: Text decoded back from the (possibly shortened) token sequence.\n",
        "    \"\"\"\n",
        "    # Slicing is a no-op when the encoding is already short enough, so no\n",
        "    # explicit length check is needed.\n",
        "    kept_ids = tokenizer.encode(text)[:max_tokens]\n",
        "    return tokenizer.decode(kept_ids)\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "def get_google_news_data(query, num_results=100):\n",
        "    \"\"\"\n",
        "    Query SerpAPI's Google News engine and return a list of article dicts.\n",
        "\n",
        "    Args:\n",
        "        query (str): Search keyword / brand name.\n",
        "        num_results (int): Maximum number of news results to request.\n",
        "\n",
        "    Returns:\n",
        "        list[dict]: Dicts with 'title', 'link', 'date' and 'source' keys\n",
        "        ('' for any field SerpAPI omitted); [] when nothing was found.\n",
        "    \"\"\"\n",
        "    params = {\n",
        "        \"api_key\": serpapi_key,\n",
        "        \"engine\": \"google\",\n",
        "        \"q\": query,\n",
        "        \"tbm\": \"nws\",\n",
        "        \"num\": num_results\n",
        "    }\n",
        "    response = requests.get('https://serpapi.com/search.json', params=params)\n",
        "    data = response.json()\n",
        "\n",
        "    if 'news_results' not in data:\n",
        "        print(\"No news results found.\")\n",
        "        return []\n",
        "\n",
        "    # SerpAPI sometimes omits fields such as 'date'; default to '' instead\n",
        "    # of letting a KeyError abort the whole run.\n",
        "    return [\n",
        "        {\n",
        "            'title': result.get('title', ''),\n",
        "            'link': result.get('link', ''),\n",
        "            'date': result.get('date', ''),\n",
        "            'source': result.get('source', '')\n",
        "        }\n",
        "        for result in data['news_results']\n",
        "    ]\n",
        "\n",
        "def fetch_articles(brand, num_results, num_clusters=30):\n",
        "    \"\"\"\n",
        "    Search Google News for `brand`, scrape the articles, embed and cluster them.\n",
        "\n",
        "    Args:\n",
        "        brand (str): Search keyword / brand name.\n",
        "        num_results (int): Number of news results to request.\n",
        "        num_clusters (int): Desired KMeans cluster count (capped at the\n",
        "            number of articles actually returned, since KMeans fails when\n",
        "            n_clusters > n_samples).\n",
        "\n",
        "    Returns:\n",
        "        pd.DataFrame: One row per article with metadata, full text and cluster id.\n",
        "    \"\"\"\n",
        "    articles = get_google_news_data(brand, num_results)\n",
        "    if not articles:\n",
        "        # Nothing to embed or cluster -- return an empty, well-formed frame.\n",
        "        return pd.DataFrame(columns=['brand', 'title', 'link', 'date', 'source', 'text', 'cluster'])\n",
        "\n",
        "    # generate_embeddings already scrapes every article; reuse those texts\n",
        "    # instead of downloading each page a second time (the original scraped twice).\n",
        "    embeddings, article_texts = generate_embeddings(articles)\n",
        "    clusters = cluster_articles(embeddings, min(num_clusters, len(articles)))\n",
        "\n",
        "    article_list = []\n",
        "    for i, article in enumerate(articles):\n",
        "        article_list.append({\n",
        "            'brand': brand,\n",
        "            'title': article['title'],\n",
        "            'link': article['link'],\n",
        "            'date': article['date'],\n",
        "            'source': article['source'],\n",
        "            'text': article_texts[i],\n",
        "            'cluster': clusters[i]\n",
        "        })\n",
        "    return pd.DataFrame(article_list)\n",
        "\n",
        "\n",
        "def generate_embeddings(articles):\n",
        "    \"\"\"Scrape each article's text and encode it with a sentence-transformer.\n",
        "\n",
        "    Returns an (embeddings, texts) pair, both aligned with `articles`.\n",
        "    \"\"\"\n",
        "    encoder = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2')\n",
        "    texts = []\n",
        "    for article in articles:\n",
        "        texts.append(scrape_article(article.get('link', '')))\n",
        "    return encoder.encode(texts), texts\n",
        "\n",
        "def cluster_articles(embeddings, num_clusters=30):\n",
        "    \"\"\"Group article embeddings into `num_clusters` KMeans clusters.\n",
        "\n",
        "    A fixed random_state makes the assignment reproducible across notebook\n",
        "    re-runs (KMeans initialisation is otherwise stochastic); n_init=10 pins\n",
        "    the historical sklearn default explicitly.\n",
        "    \"\"\"\n",
        "    kmeans = KMeans(n_clusters=num_clusters, n_init=10, random_state=42)\n",
        "    return kmeans.fit_predict(embeddings)\n",
        "\n",
        "def find_optimal_clusters(embeddings, max_clusters=30):\n",
        "    \"\"\"Estimate a good cluster count via the silhouette elbow method.\"\"\"\n",
        "    elbow = KElbowVisualizer(KMeans(), k=(2, max_clusters + 1), metric='silhouette')\n",
        "    elbow.fit(embeddings)\n",
        "    return elbow.elbow_value_\n",
        "\n",
        "def scrape_article(url):\n",
        "    \"\"\"Download and parse one article, returning 'title text' or '' on failure.\n",
        "\n",
        "    Failures (paywalls, timeouts, dead links) are logged and yield an empty\n",
        "    string so that one bad URL cannot abort the whole crawl. The original\n",
        "    fell through to concatenating the empty default fields instead.\n",
        "    \"\"\"\n",
        "    article = Article(url)\n",
        "    try:\n",
        "        article.download()\n",
        "        article.parse()\n",
        "    except Exception as e:\n",
        "        print(f\"Failed to Download Article: {e}\")\n",
        "        return \"\"\n",
        "    return article.title + \" \" + article.text\n",
        "\n",
        "def analyze_articles(df):\n",
        "    \"\"\"\n",
        "    Run a GPT analysis over every article in `df` and write the parsed\n",
        "    sections (main_themes, narratives, opinions, spokespersons, biases,\n",
        "    emotion) back into the frame.\n",
        "\n",
        "    Args:\n",
        "        df (pd.DataFrame): Must contain a 'text' column.\n",
        "\n",
        "    Returns:\n",
        "        pd.DataFrame: The same frame, mutated in place with the new columns.\n",
        "    \"\"\"\n",
        "    def build_prompt(article_text):\n",
        "        # Single place the prompt is built, so retries reuse the right one.\n",
        "        return (\n",
        "            f\"Please analyze this news article and provide a comprehensive summary based on the following categories. Please answer every part of the following:\\n\\n\"\n",
        "            f\"Main Themes: Identify the central topics discussed in the article.\\n\"\n",
        "            f\"Narratives: Describe any storylines or overarching messages present in the article.\\n\"\n",
        "            f\"Opinions: Mention the key viewpoints or perspectives expressed in the article, along with their sources (if mentioned).\\n\"\n",
        "            f\"Spokespersons: List any individuals or organizations mentioned as sources, along with their roles or affiliations.\\n\"\n",
        "            f\"Biases: Point out any potential biases in the article, whether it's through language, perspective, or focus.\\n\"\n",
        "            f\"Article Emotion: Determine the dominant emotion(s) conveyed by the article (e.g., positive, negative, neutral, etc.).\\n\\n\"\n",
        "            f\"Please provide your analysis in a well-structured and concise format. Use bullet points or numbered lists to make your response easier to read and understand.\\n\\n\"\n",
        "            f\"This is the News Article to evaluate. Only provide the requested data and nothing else before Main Themes: \\n\\n {article_text}\"\n",
        "        )\n",
        "\n",
        "    def submit(executor, prompt, temperature):\n",
        "        # Fire one completion request on the executor.\n",
        "        return executor.submit(\n",
        "            openai.Completion.create,\n",
        "            model=\"text-davinci-003\",\n",
        "            prompt=prompt,\n",
        "            max_tokens=1800,\n",
        "            n=1,\n",
        "            stop=None,\n",
        "            temperature=temperature,\n",
        "        )\n",
        "\n",
        "    with ThreadPoolExecutor() as executor:\n",
        "        futures = []\n",
        "        for i, row in df.iterrows():\n",
        "            time.sleep(2)  # crude client-side rate limiting between submissions\n",
        "            prompt = build_prompt(truncate_text(row[\"text\"], 2000))\n",
        "            # BUG FIX: keep each row's prompt with its future. The original\n",
        "            # retried every row using whatever prompt the loop last built.\n",
        "            futures.append((i, prompt, submit(executor, prompt, 0.7)))\n",
        "\n",
        "        for i, prompt, future in futures:\n",
        "            retries = 0\n",
        "            max_retries = 3\n",
        "            while retries < max_retries:\n",
        "                try:\n",
        "                    response = future.result()\n",
        "                    output_text = response.choices[0].text.strip()\n",
        "                    parsed_data = {}\n",
        "                    for item in output_text.split(\"\\n\\n\"):\n",
        "                        if \":\" not in item:\n",
        "                            continue  # skip stray paragraphs without a 'Key: value' shape\n",
        "                        key, value = item.split(\":\", 1)\n",
        "                        parsed_data[key.strip()] = value.strip()\n",
        "\n",
        "                    # Accept the response only when the anchor section is present.\n",
        "                    if \"Main Themes\" in parsed_data:\n",
        "                        df.loc[i, \"main_themes\"] = parsed_data.get(\"Main Themes\", \"\")\n",
        "                        df.loc[i, \"narratives\"] = parsed_data.get(\"Narratives\", \"\")\n",
        "                        df.loc[i, \"opinions\"] = parsed_data.get(\"Opinions\", \"\")\n",
        "                        df.loc[i, \"spokespersons\"] = parsed_data.get(\"Spokespersons\", \"\")\n",
        "                        df.loc[i, \"biases\"] = parsed_data.get(\"Biases\", \"\")\n",
        "                        df.loc[i, \"emotion\"] = parsed_data.get(\"Article Emotion\", \"\")\n",
        "                        break\n",
        "                    retries += 1\n",
        "                    print(f\"Incomplete data for row {i}, retrying {retries}/{max_retries}...\")\n",
        "                    # Retry at a lower temperature, as the original code did.\n",
        "                    future = submit(executor, prompt, 0.5)\n",
        "                except Exception as e:\n",
        "                    print(f\"failed: {e}\")\n",
        "                    break\n",
        "    return df\n",
        "\n",
        "# Run the full pipeline: search, scrape, embed, cluster, GPT-analyze, save.\n",
        "num_results = 10\n",
        "topic = \"OpenAI\"\n",
        "df = fetch_articles(topic, num_results, num_clusters=10)\n",
        "df = analyze_articles(df)\n",
        "df.to_csv(\"Media_Monitoring_Report.csv\")\n"
      ],
      "metadata": {
        "id": "4Pvjf3SaPhX9"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Print Dataframe"
      ],
      "metadata": {
        "id": "bwbHzv3LdzTC"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "df"
      ],
      "metadata": {
        "id": "szUOK5uoQqpd"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Run Media Monitoring Report on Data from the Last Step"
      ],
      "metadata": {
        "id": "9-_Rqessd315"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import openai\n",
        "\n",
        "\n",
        "def describe_clusters(df):\n",
        "    \"\"\"Ask GPT for a one-sentence theme summary of each cluster's titles.\n",
        "\n",
        "    Returns a {cluster_id: description} dict.\n",
        "    \"\"\"\n",
        "    descriptions = {}\n",
        "\n",
        "    for cluster_id in df['cluster'].unique():\n",
        "        cluster_titles = df.loc[df['cluster'] == cluster_id, 'title'].tolist()\n",
        "        joined_titles = \"\\n\".join(cluster_titles)\n",
        "\n",
        "        prompt = (\n",
        "            f\"Please provide a brief description of the main topic or theme for the following list of article titles:\\n\\n\"\n",
        "            f\"{joined_titles}\\n\\n\"\n",
        "            f\"Write a one-sentence description summarizing the main topic or theme.\"\n",
        "        )\n",
        "\n",
        "        response = openai.Completion.create(\n",
        "            model=\"text-davinci-003\",\n",
        "            prompt=prompt,\n",
        "            max_tokens=50,\n",
        "            n=1,\n",
        "            stop=None,\n",
        "            temperature=0.5,\n",
        "        )\n",
        "\n",
        "        descriptions[cluster_id] = response.choices[0].text.strip()\n",
        "\n",
        "    return descriptions\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "def generate_report(most_common, cluster_counts, emotion_by_cluster, spokesperson_counts, bias_counts):\n",
        "    \"\"\"Ask GPT-4 to turn the summary statistics into a narrative report.\n",
        "\n",
        "    All five arguments are plain dicts interpolated directly into the prompt.\n",
        "    Returns the report text as a string.\n",
        "    \"\"\"\n",
        "    prompt = (\n",
        "        f\"Please generate a media monitoring report based on the following summary data:\\n\\n\"\n",
        "        f\"Most Common Information:\\n{most_common}\\n\\n\"\n",
        "        f\"Article Distribution Among Clusters:\\n{cluster_counts}\\n\\n\"\n",
        "        f\"Article Emotions Distribution Across Clusters:\\n{emotion_by_cluster}\\n\\n\"\n",
        "        f\"Most Frequently Mentioned Spokespersons:\\n{spokesperson_counts}\\n\\n\"\n",
        "        f\"Most Biased Articles or Sources:\\n{bias_counts}\\n\\n\"\n",
        "        f\"Write a well-structured and concise report summarizing the key findings from the provided data.\"\n",
        "    )\n",
        "\n",
        "    system_role = \"Please simulate an expert at media analysis with a strong background in psychology and human behavior who is a world expert at PR\"\n",
        "    response = openai.ChatCompletion.create(\n",
        "        model=\"gpt-4\",\n",
        "        messages=[\n",
        "            {\"role\": \"system\", \"content\": system_role},\n",
        "            {\"role\": \"user\", \"content\": prompt},\n",
        "        ],\n",
        "        max_tokens=1024,\n",
        "        n=1,\n",
        "        stop=None,\n",
        "        temperature=0.5,\n",
        "    )\n",
        "\n",
        "    return response['choices'][0]['message']['content'].strip()\n",
        "\n",
        "def analyze_dataframe(df):\n",
        "    \"\"\"Summarize the analyzed articles, print a GPT report and cluster blurbs.\n",
        "\n",
        "    Expects the columns produced by analyze_articles ('main_themes',\n",
        "    'emotion', 'spokespersons', 'biases') plus 'cluster' and 'title'.\n",
        "    \"\"\"\n",
        "    # Distill the frame into the compact summary dicts the report prompt needs.\n",
        "    top_themes = df['main_themes'].value_counts().head(5).to_dict()\n",
        "    articles_per_cluster = df['cluster'].value_counts().to_dict()\n",
        "    emotions_per_cluster = df.groupby('cluster')['emotion'].value_counts().unstack(fill_value=0).to_dict()\n",
        "    top_spokespersons = df['spokespersons'].value_counts().head(5).to_dict()\n",
        "    top_biases = df['biases'].value_counts().head(5).to_dict()\n",
        "\n",
        "    report = generate_report(top_themes, articles_per_cluster, emotions_per_cluster, top_spokespersons, top_biases)\n",
        "    print(\"\\nGPT-Generated Media Monitoring Report:\")\n",
        "    print(report)\n",
        "\n",
        "    cluster_descriptions = describe_clusters(df)\n",
        "    print(\"\\nCluster Descriptions:\")\n",
        "    for cluster_id, description in cluster_descriptions.items():\n",
        "        print(f\"Cluster {cluster_id}: {description}\")\n",
        "\n",
        "# Load the DataFrame produced by the monitoring step above.\n",
        "df = pd.read_csv(\"Media_Monitoring_Report.csv\")\n",
        "# Drop articles that were returned more than once (same URL). The original\n",
        "# deduplicated on a nonexistent 'Hotels' column (leftover from another\n",
        "# notebook) and did so BEFORE loading the CSV, which raised a KeyError.\n",
        "df = df.drop_duplicates(subset='link')\n",
        "analyze_dataframe(df)\n"
      ],
      "metadata": {
        "id": "H_JSf-qGZ4Lv"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}