{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "faf9e9c3",
   "metadata": {},
   "source": [
    "## Solr Re Ranker  \n",
    "This notebook shows you how to create a Solr connection, query a Solr instance, and apply a re-ranker model to the results."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84f61108",
   "metadata": {},
   "source": [
    "### Import pre-required libs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b85577f3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"time\":\"2023-07-31 16:22:53,253\", \"name\": \"faiss.loader\", \"level\": \"INFO\", \"message\": \"Loading faiss with AVX2 support.\"}\n",
      "{\"time\":\"2023-07-31 16:22:53,426\", \"name\": \"faiss.loader\", \"level\": \"INFO\", \"message\": \"Successfully loaded faiss with AVX2 support.\"}\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import re\n",
    "import requests\n",
    "import os\n",
    "from bs4 import BeautifulSoup\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "from IPython.display import display, HTML\n",
    "from primeqa.components.reranker.colbert_reranker import ColBERTReranker"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c930cf69",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 1. Please provide ColBERT Re-ranker model path</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "87092dd8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run ColBERT Reranker\n",
    "# NOTE(review): hardcoded absolute local path -- replace with the location of\n",
    "# your own DrDecr/ColBERT re-ranker checkpoint before running.\n",
    "model_name_or_path = \"/Users/abhilashamangal/Documents/Hanzo/PrimeQA/article/DrDecr.dnn\"\n",
    "# Read the LLM API token from the environment (never hardcode credentials).\n",
    "llmToken = os.getenv('LLM_TOKEN')\n",
    "# Reduce TensorFlow C++ log noise (1 = filter out INFO messages).\n",
    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6a99effa",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 2. Skip unwanted chars from documents</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "1807647a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def skip_unwanted_characters(document, keyword):\n",
    "    \"\"\"Return the text that follows the last line containing keyword.\n",
    "\n",
    "    If the keyword never occurs, the whole document is returned instead.\n",
    "    Each kept line is stripped, and the final result is stripped too.\n",
    "    \"\"\"\n",
    "    lines = document.split('\\n')\n",
    "    last_occurrence = -1\n",
    "    for index, line in enumerate(lines):\n",
    "        if keyword in line:\n",
    "            last_occurrence = index\n",
    "    if last_occurrence == -1:\n",
    "        return document.strip()\n",
    "    trailing = lines[last_occurrence + 1:]\n",
    "    return '\\n'.join(line.strip() for line in trailing).strip()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "57384a42",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 3. Remove HTML tags from documents</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "id": "3768cf46",
   "metadata": {},
   "outputs": [],
   "source": [
    "def pre_processingtext(text_data):\n",
    "    \"\"\"Strip templating markup, HTML tags, and Markdown noise from text.\n",
    "\n",
    "    Raw strings are used for every regex to avoid invalid-escape warnings;\n",
    "    the original third substitution could only match the empty string\n",
    "    (a no-op) and has been removed. Output is unchanged.\n",
    "    \"\"\"\n",
    "    replaced = re.sub(r'\\{{ .*?\\}}', '', text_data)  # {{ ... }} template blocks\n",
    "    replaced = re.sub(r'\\{: .*?\\}', '', replaced)  # {: ... } attribute lists\n",
    "    replaced = re.sub(r'\\(.*?\\)|\\[.*?\\] |\\{.*?\\}', '', replaced)  # bracketed asides\n",
    "    replaced = re.sub(r'</?div[^>]*>', '', replaced)\n",
    "    replaced = re.sub(r'</?p[^>]*>', '', replaced)\n",
    "    replaced = re.sub(r'</?a[^>]*>', '', replaced)\n",
    "    # NOTE(review): 'h*', 'em*' and 'img*' allow zero repeats, so these\n",
    "    # patterns also match other tags (e.g. <strong>); kept to preserve behavior.\n",
    "    replaced = re.sub(r'</?h*[^>]*>', '', replaced)\n",
    "    replaced = re.sub(r'</?em*[^>]*>', '', replaced)\n",
    "    replaced = re.sub(r'</?img*[^>]*>', '', replaced)\n",
    "    replaced = re.sub(r'&amp;', '', replaced)  # drops the entity entirely\n",
    "    replaced = re.sub(r'</?href*>', '', replaced)\n",
    "    replaced = re.sub(r'\\s+', ' ', replaced)  # collapse runs of whitespace\n",
    "    # Literal cleanups, applied in the original order.\n",
    "    for token in ('}', '##', '###', '#', '*', '<strong>', '</strong>',\n",
    "                  '<ul>', '</ul>', '<li>', '</li>', '<ol>', '</ol>'):\n",
    "        replaced = replaced.replace(token, '')\n",
    "    return replaced\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "49405b22",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 4. Use the method below to retrieve documents from Solr for a given question</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 125,
   "id": "305f70d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrieve at most this many documents from Solr.\n",
    "max_num_documents = 10\n",
    "\n",
    "def solr_reteriver(question):\n",
    "    \"\"\"Query the Solr 'superknowa' collection and return cleaned hits.\n",
    "\n",
    "    Each returned list item has the shape\n",
    "    {'document': {'rank', 'document_id', 'text', 'url'}}.\n",
    "    \"\"\"\n",
    "    question = question.replace('?', '')\n",
    "    # Let requests URL-encode the query instead of concatenating it into the URL.\n",
    "    response = requests.get(\n",
    "        'http://150.239.171.68:8983/solr/superknowa/select',\n",
    "        params={'q': question, 'q.op': 'AND', 'wt': 'json'},\n",
    "    )\n",
    "    response.raise_for_status()  # surface HTTP errors early\n",
    "    query_result = response.json()\n",
    "    print(query_result['response']['numFound'], 'documents found.')\n",
    "    results_list = []\n",
    "    # Respect the configured cap instead of the hardcoded literal 10.\n",
    "    docs = query_result['response']['docs'][:max_num_documents]\n",
    "    for i, doc in enumerate(docs):\n",
    "        # Drop non-ASCII characters, then strip markup/boilerplate.\n",
    "        text = doc['content'][0].encode('ascii', 'ignore').decode()\n",
    "        text = skip_unwanted_characters(text, '{: shortdesc} ')\n",
    "        pattern = r'\\{\\s*:\\s*[\\w#-]+\\s*\\}|\\{\\s*:\\s*\\w+\\s*\\}|\\n\\s*\\n'\n",
    "        text = re.sub(pattern, '', text)\n",
    "        text = pre_processingtext(text)\n",
    "        results_list.append({\n",
    "            'document': {\n",
    "                'rank': i,\n",
    "                'document_id': doc['id'][0],\n",
    "                'text': text[:4000],\n",
    "                'url': doc['url'][0].replace(' ', ''),\n",
    "            },\n",
    "        })\n",
    "        print('=' * 70)\n",
    "    print(f'QUERY: {question}')\n",
    "    return results_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 127,
   "id": "236037c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "82 documents found.\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "QUERY: what is ibm cloud pak for data\n",
      "[{'document': {'rank': 0, 'document_id': ' ', 'text': 'It used to be that the developer and operations roles were fully separated. The developer received the business requirements, they came up with a design and implemented it. A dedicated QA team wrote tests, and validated that the new application, capability, or change worked as expected, and then the code or application was handed over to the operations team to run it. In most organizations, these roles have now merged into a set of development practices that are commonly referred to as DevOps.\\nTo learn more about what is SRE, read this Red Hat article.\\nAt the same time, DevOps practices brought an expanded role for the operations teams, typically referred to now as a Site Reliability Engineering, or SRE. Red Hat defines the DevOps practice of SRE like this SRE takes the tasks that have historically been done by operations teams, often manually, and instead gives them to engineers or ops teams who use software and automation to solve problems and manage production systems.\\nNow, the SRE role, can either be a dedicated engineering position, or frequently is one that the developers take on, either full or part time. The goal of SRE is to create a natural improvement cycle, which one gets by linking the application development and operations portions. The applications can be created such that they are easier to manage and observe, while the insights from monitoring the application, can in turn identify inefficiencies, defects, and potential improvements.\\nIn this article, we will focus on how Cloud Pak for Watson AIOps can help an SRE  improve and automate their applications. 
To reiterate, though, while we will use the term SRE throughout this article, SRE also applies to IT Operations engineers and application developers who are supporting their products.\\nAn SRE focuses on reacting to problems encountered by the application, but also on crafting ways to improve the time to repair the application , or even better on avoiding these problems in the first place.\\nSo, what is important to an SRE? When a problem occurs, they need to have a holistic view of what is happening to their application. This means they should have a real-time view of the state of all their components, understand what is relevant in their logs or metrics, and know whether any of their monitors, such as synthetic consumption monitors show any problems. However, they do not want to be inundated with lots of different alerts going off, but rather they want to know where to focus their time and attention. After a problem has been confirmed, they would like guidance on possible solutions, and automation to execute them. Ideally though, this would be available to them before a problem becomes an outage.\\nHistorically, two factors stood in the way here: the sheer volume of data and the fact that a lot of that data was in an unstructured format. As long as there have been applications, there have been simple monitoring tools. For example, an operator would set an alert on the memory consumption of an application, and when it breached a given amount, an alert would go off. Apart from being a blunt instrument , this approach might have been practical when the operator was monitoring a handful of monolithic applications.\\nNow that SREs are monitoring thousands of microservices, this approach is no longer practical. Secondly, with the advent of AI, we now have a way to unlock all the value of unstructured  data. 
For example, many problems show early warnings in your logs, long before an application starts showing problems , but the problems dont have an impact yet and thus they wont fail the main application flow.\\nAs such, some of the features that an AIOps solution like Cloud Pak for Watson AIOps that support SREs are:No code changes are required. Your applications and your site reliability processes already output a lot of helpful information. Its time to take advantage of that data.\\nBring all your disparate data sources together for a holistic view across all parts of your e', 'url': 'https://developer.ibm.com/middleware/v1/contents /articles/improving-and-automating-your-ops-with-cp4waiops\\n'}}, {'document': {'rank': 1, 'document_id': ' ', 'text': 'Palantir for IBM Cloud Pak for Data enables building no-/low-code line of business applications using data, machine learning, and optimization from IBM Cloud Pak for Data. Ontology managers can define business-oriented data models integrating data from IBM Cloud Pak for Data. Application builders can use Palantir tools to create applications using these data models. Additionally, applications can integrate machine learning models from IBM Cloud Pak for Data to infuse predictions, as well as decision optimization result data from IBM Cloud Pak for Data to determine optimized actions based on data and predictions.\\nThis blog post explains how to create AI-infused apps using Palantir ontology and application building tools together with IBM Cloud Pak for Data model deployments and data and AI catalog. 
It also outlines the underlying integration architecture.\\nIBM Cloud Pak for Data as the data and AI foundation\\nIBM Cloud Pak for Data together with Palantir provide integrated capabilities to:Collect, transform, and integrate data from many sources\\nOrganize data to be ready for use in projects and applications\\nAnalyze data to gain insights and create AI models\\nInfuse AI insights such as predictions and optimization via APIs where needed\\nBuild applications using no-/low-code app builders, integrating data and AI on multiple clouds while leveraging Red Hat OpenShift as the underlying platform.Applications built with Palantir for IBM Cloud Pak for Data by application builders -- using no-/low-code tools -- can use data, predictions, and optimization result data from IBM Cloud Pak for Data, helping business users achieve smarter business outcomes by taking optimized actions.Data engineers can create data services in IBM Cloud Pak for Data such as Db2, Db2 Warehouse, Postgres, etc. to collect data and can build a catalog of data assets available for data scientists and application builders to use. Where needed, they can use DataStage flows or other tools to transform data from multiple sources and use data virtualization services.\\nData scientists can collaborate in projects, add data sets from the catalog or from other data sources, analyze data, gain insights, and train machine learning models or define decision optimization models. To train models, they may use Python code in JupyterLab using their favorite machine learning framework, SPSS Modeler flows, or AutoAI, as shown in the following image.Models can be saved and deployed to spaces, as shown in the image below, to make them available for AI infusion into business processes and applications. 
The deployed model can then be called via the model deployment REST API.Building data and AI applications with Palantir for IBM Cloud Pak for Data\\nApplication builders can build rich no-/low-code applications using the Palantir app builder tools available through a new Palantir card on the IBM Cloud Pak for Data home page.From here, ontology managers can navigate to the Palantir UI to define and manage Palantir ontologies, integrating data from IBM Cloud Pak for Data. Application builders can navigate to the Palantir UI to build apps using ontologies and connecting machine learning models from IBM Cloud Pak for Data to integrate predictions into applications. Once in the Palantir UI, they can integrate AI models from IBM Cloud Pak for Data into Palantir apps  and can integrate data from IBM Cloud Pak for Data into a Palantir ontology .To enable Palantir applications, a business-oriented ontology needs to first be defined using Palantir ontology management, which integrates with the data sets from the data and AI catalog in IBM Cloud Pak for Data. From the ontology management UI, users can search the IBM Cloud Pak for Data catalog for data assets to use and can then drill down into the columns or object attributes of the data set to map these to business objects defined in the Palantir ontology.The underlying data behind the data assets is then synchronized from the referenced data source into ', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/data-and-ai-applications-with-palantir-for-ibm-cloud-pak-for-data\\n'}}, {'document': {'rank': 2, 'document_id': ' ', 'text': 'Todays cloud-native and microservices-based architectures rely on a complex infrastructure that is made up of various hardware and software components. 
This complexity arises because of the number of applications, the variety of hardware and software in the infrastructure, the volume of data, and the large number of business processes that are part of network and IT operations.\\nThis increasingly complex infrastructure makes it difficult to troubleshoot and resolve issues quickly. Troubleshooting and root-cause analysis are harder with the explosion of data available from all the individual microservices. Closed-loop automation systems help transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes in a highly automated framework.\\nClosed-loop automation systems enable companies to transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes within a continuous highly automated framework. Closed-loop automation helps solve many problems before they even become issues.Read more about Cloud Pak for Watson AIOps.One common use case for closed-loop automation is traffic flow optimization. By implementing a closed-loop automation system, teams can automatically correct issues like network anomalies within the provisioned network infrastructure. At the heart of a closed-loop automation system for the traffic flow optimization use case are the components of IBM Cloud Pak for Watson AIOps, which is at the heart of IBMs AIOps platform.\\nWhat is closed-loop automation?\\nA simple closed-loop implementation detects issues that could happen in the future. The appropriate data is analyzed by various predictive models, which then make a recommendation on the change to be made to the orchestration layer, which implements the change.\\nIn complex cases, closed-loop automation combines the predictive insights information with additional AI systems to determine a resolution. 
The AI system is trained to resolve these issues and is integrated with a robotics automation system  to automate the resolution process. If the AI system determines it has a high confidence that the suggested resolution is correct, it will invoke the orchestration engine to implement the solution automatically. If not, a trouble ticket is generated, and an engineer works to resolve the issue.\\nThe following image provides an overview of a closed-loop automation system that addresses issues of varying complexity.Closed-loop automation enables these key capabilities:Anomaly detection. Anomaly detection uses large, real-time, time-series data to analyze networks applications, database metrics, operating systems, and so on. This gives anomaly detection the capability to identify patterns and anomalies, and raise awareness toward predictive actions.\\nIntelligent alerts. In a general operations environment, multiple connected components can raise alerts all related to the same failure event. These add to the overall load and volume of operations teams. However, 20 percent of overall alert volume is false-positive. Closed-loop automation uses machine learning models to create the patterns for the series of alerts so that those can be bound to causes and known actions, and then be corrected accordingly.\\nPredictive planning. Organizations can use machine learning algorithms to predict how application and network behaviors are dependent on seasonality and other factors to ensure that appropriate corrective actions are taken, thereby permitting systems to perform optimally.\\nRoot-cause analysis. Closed-loop automation leverages data to intelligently identify all anomalies in the service path and use AI to map it to find the most likely cause for a particular incident. 
It makes use of various AI algorithms to ensure the accuracy of root-cause identifications and implements the required remediation steps.IT Ops teams apply AI and ', 'url': 'https://developer.ibm.com/middleware/v1/contents /articles/an-introduction-to-closed-loop-automation\\n'}}, {'document': {'rank': 3, 'document_id': ' ', 'text': \"In this blog, I will answer the following general questions about AIOps:What is AIOps?\\nWhat does a company need to utilize AIOps?\\nHow do I train AI?\\nWhat is no-code AI?\\nWhy is this important?What is AIOps?\\nGartners definition of AIOps: AIOps combines big data and machine learning to automate IT operations  processes, including event correlation, anomaly detection, and causality determination.\\nITOps teams are beginning to explore how automation can improve business outcomes through scalable artificial intelligence . Gartner found that 10 times more business leaders will rely on AIOps platforms for automated insights in the next three years. Without it, IT organizations can experience a lack of observability, become overwhelmed from manually managing data, and end up focusing on infrastructure rather than an application-centric approach. The ITOps teams of tomorrow will experience full visualization and observability of their IT environments with insights derived from various tools that focus on critical applications to support business performance.\\nWhat does a company need to be able to use AIOps?\\nTo start, ITOps organizations can determine what the company needs in business performance. By applying AI, what does the company intend to do to improve outcomes? 
Next, companies will need to consider if the organization has historical and real-time data organized in a way that can begin to train models, and if so, is it understandable.\\nHow do I train AI?\\nIf a company wants to use AIOps they need to establish a baseline performance, which requires that they have an understanding of their historical data by measuring the performance of their steady state using real-time data. When something bad happens to a system, like an application outage, the company can more efficiently pinpoint the issue by using insights gathered from historical and real-time data. This insightful information can help find ways to triage events or outages as efficiently as possible.\\nWhat is no-code AI?\\nCompanies don't always have data scientists, data engineers, or data centric teams to help. What if we could train AI without the assistance of data scientists, data engineers, and data centric teams?\\nNo-code AI simply means using no-code in automation training. A company can use its history and what it is doing today as a baseline to improve its tomorrow without developing code to train the AI.\\nWhy is this important?\\nAs an IT admin juggling multiple sources of data and resolving incidents manually, you need tools to help you resolve incidents faster.\\nTime is money. Battling aggressive timelines, ITOps teams need to identify solutions quickly and have those solutions work correctly when theyre set in motion. Lets unpack how this is achieved with Cloud Pak for Watson AIOps.\\nSo, what is the Cloud Pak for Watson AIOps?\\nCloud Pak for Watson AIOps provides an application-centric data and intelligence platform powering automation for application, incident, cost, and security  risk management with trusted and explainable AI.\\nFigure 1: Bringing DevSecOps together with AI and automationThe goal of using AIOps is to focus on business outcomes. 
To do this, a company must leverage its data without creating gates of entry by requiring a dedicated data scientist, data engineer, or a data-centric team to get up and running. Cloud Pak for Watson AIOps provides a comprehensive understanding of business applications baked in to help provide insights and intelligence derived from not just operational data such as logs or events but enhanced organizational insights as well.\\nLearn more about Cloud Pak for Watson AIOps can evolve your ITOps organization.  And, explore blogs, articles, tutorials, and code patterns on the Cloud Pak for Watson AIOps hub page on IBM Developer.\", 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/evolving-itops-with-aiops-with-no-code-ai-training\\n'}}, {'document': {'rank': 4, 'document_id': ' ', 'text': \"TensorFlow is an end-to-end open source machine learning platform that makes it easier to build and deploy machine learning models. A TensorFlow application uses a structure known as a data flow graph. By default in TensorFlow version 1.0, every graph had to be run within a TensorFlow session, which only allowed for the entire graph to be run all at once, and made it hard to debug the computation graph. The only way to get around this default and be able to debug the code was to use Eager Execution.\\nEager Execution is a flexible machine learning platform for research and experimentation that provides:An intuitive interface so that the code can be structured naturally and use Python data structures. 
Small models and small data can be quickly iterated.\\nEasier debugging by providing the ability to call operations directly to inspect code line by line and test changes.\\nA natural control flow using a Python control flow instead of a graph control flow, which simplifies the specification of dynamic models.With TensorFlow 2.x, Eager Execution is enabled by default, and allows TensorFlow code to be run and evaluated line by line.\\nLearning objectives\\nThis tutorial looks at the impact of Eager Execution and the benefits of having it enabled by default in TensorFlow 2.x. You'll use a Jupyter Notebook to observe the behavior of TensorFlow when Eager Execution is both disabled and enabled. You'll learn how to:Run a Jupyter Notebook using IBM Watson Studio on IBM Cloud Pak for Data as a Service\\nDisable and enable Eager Execution\\nUnderstand the benefits of Eager ExecutionPrerequisites\\nThe following prerequisites are required to follow the tutorial:An IBM Cloud Account\\nIBM Cloud Pak for DataEstimated time\\nIt should take you approximately 30 minutes to complete the tutorial.\\nStepsSet up IBM Cloud Pak for Data as a Service\\nCreate a new Project and import the notebook\\nRead through the notebook\\nRun the first half of the notebook\\nRestart the kernel\\nRun the second half of the notebookSet up IBM Cloud Pak for Data as a ServiceOpen a browser, and log in to IBM Cloud with your IBM Cloud credentials.Type Watson Studio in the search bar at the top. If you already have an instance of Watson Studio, it should be visible. If so, click it. If not, click Watson Studio under Catalog Results to create a new service instance.Select the type of plan to create if you are creating a new service instance. A Lite  plan should suffice for this tutorial). Click Create.Click Get Started on the landing page for the service instance. 
This should take you to the landing page for IBM Cloud Pak for Data as a Service.Click your avatar in the upper-right corner, then click Profile and settings under your name.Switch to the Services tab. You should see the Watson Studio service instance listed under Your Cloud Pak for Data services.\\n You can also associate other services such as Watson Knowledge Catalog and Watson Machine Learning with your IBM Cloud Pak for Data as a Service account. These are listed under Try our available services.\\n In the example shown here, a Watson Knowledge Catalog service instance already exists in the IBM Cloud account, so it's automatically associated with the IBM Cloud Pak for Data as a Service account. To add any other service , click Add within the tile for the service under Try our available services.Select the type of plan to create , and click Create.After the service instance is created, you are returned to the IBM Cloud Pak for Data as a Service instance. You should see that the service is now associated with your IBM Cloud Pak for Data as a Service account.Create a new project and import the notebookNavigate to the hamburger menu  on the left, and choose View all projects. After the screen loads, click New + or New project + to create a new project.Select Create an empty project.Provide a name for the project. You must associate an IBM Cloud Object Storage instance with your project. If you already have an IBM Cloud Object Storage service in\", 'url': 'https://developer.ibm.com/middleware/v1/contents /tutorials/enable-eager-execution-in-tensorflow\\n'}}, {'document': {'rank': 5, 'document_id': ' ', 'text': 'Netezza has always been synonymous with speed and simplicity. 
Netezza Performance Server for IBM Cloud Pak for Data is the next-generation advanced data warehouse and analytics platform available both on-premises and on cloud.\\nTo understand why Netezza Performance Server for IBM Cloud Pak for Data is important for application developers, it is first important to understand the journey to AI and how to get there. Many developers want to infuse AI into the companies they work for, but dont really know how. IBM Cloud Pak for Data is a complete Data and AI platform that modernizes how businesses collect, organize, and analyze data to infuse AI throughout their organizations. If you look under the hood of IBM Cloud Pak for Data, you will see that it is built with the streamlined hybrid cloud foundation of Red Hat OpenShift. This solution supports multicloud environments, such as Amazon Web Services , Google Cloud, IBM Cloud, and private cloud deployments.\\nThe Netezza Performance Server part of IBM Cloud Pak for Data is responsible for the \"collect\" piece of the data lifecycle. Netezza Performance Server can take data from many sources and store current and historical data in an enterprise data warehouse so it can be used for reporting, analysis, and better decision-making. What makes the Netezza Performance Server so powerful is the fact that it can process huge amounts of data and run large jobs that can return results in seconds, rather than hours or days. Netezza has always been known for speed and simplicity, so the fact that the new generation of Netezza Performance Server is built onto the same engine means that you dont need to waste all your time on migration to the new platform, especially if you are coming form an older Netezza form factor. It is a simple nz_migrate command, then just point your applications to the new server. It doesn\\'t get much easier than that.\\nSo what does this mean for application developers? 
Having everything you need in your journey to AI and all in one platform means that you dont need waste your time putting all the pieces together. Netezza Performance Server for IBM Cloud Pak for Data is an all-in-one Data and AI platform that lets you perform data science and machine learning with data volumes scaling into the petabytes.\\nNetezza Performance Server on IBM Cloud Pak for Data System or Netezza on Cloud\\nNetezza Performance Server for IBM Cloud Pak for Data comes in two form factors: It is available as part of a hyper-converged system that includes all hardware and software needed to get up and running quickly; and the other is Netezza available on IBM Cloud and AWS, with more clouds to come. This gives you the flexibility to run this on-premises with all the needed hardware, software, storage, compute, and networking in a single system. If you choose to run Netezza on cloud, you are getting a cloud-native deployment of the Netezza Performance Server database engine deployed to a public cloud data center of your choice.\\nNative in-database analytics and geospatial capabilities\\nNetezza Performance Server comes with advanced in-database analytics capabilities that can be used to act on the data stored in Netezza Performance Server. This package that used to be called Netezza In-Database Analytics is now called the Netezza Performance Server Analytics package and can be installed after you have the Netezza Performance Server up and running. The Netezza Performance Server Analytics package comprises a set of cartridges, each of which covers a different area of analytics. There are are analytics packages for:In-database analytics\\nSpatial\\nSpatial ESRI\\nMatrix\\nMapReduceThere are also some special geospatial capabilities available in Netezza Performance Server in order to process data needed for this particular type of use case. 
All of these capabilities come in handy when you want pre-packaged and powerful analytics capabilities to work on the data inside the database.\\nWith IBM Watson Studio and machine lear', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/get-acquainted-with-netezza-performance-server\\n'}}, {'document': {'rank': 6, 'document_id': 'o', 'text': \"\\nAdding Cloud PaksIBM Cloud Paks&trade; are containerized, licensed IBM middleware and open source software components that you can use to modernize, move, and build cloud-native business applications in hybrid and multicloud deployments. By running exclusively on  and Red Hat Enterprise Linux, Cloud Paks are built atop a secure stack and maintain consistency in deployment and behavior across cloud providers. You have greater flexibility to run and manage your workloads securely where you need them: on-premises, off-premises, in a backup provider, and in .Overview of Cloud Pak offeringsYou can deploy the entire set of Cloud Paks to manage your full-stack cloud apps, data, integration, automation, and management across  cloud providers.Cloud Pak for ApplicationsCloud Pak for DataCloud Pak for IntegrationCloud Pak for SecurityCloud Pak for ManagementAdding IBM Cloud PaksIBM Cloud Paks are containerized, licensed IBM middleware and open source software components as part of your hybrid cloud solution. IBM Cloud Paks run exclusively on  clusters, not community Kubernetes clusters.Before you begin:\\n Verify that your account administrator set up your  account with the Cloud Pak entitlement.\\n Make sure that you have the required permissions to create a cluster. 
These permissions include the following:\\n     The IAM Administrator platform access role for .\\n     The IAM Administrator platform access role for .\\n     The IAM Viewer platform access role for the resource group if you create the cluster in a resource group other than default.\\n     The appropriate infrastructure permissions, such as an API key with the Super User role for classic infrastructure.\\nTo add a Cloud Pak from the  catalog:Add your Cloud Pak entitlement from IBM Passport Advantage to your  cluster.For new clusters: Create a cluster with the --entitlement cloud_pak option. When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. You can optionally specify the worker node operating system . After your cluster is created, you are not charged the  license fee for the entitled worker nodes in the default worker pool. If you want to use a different worker pool for your Cloud Pak, follow the steps for existing clusters.\\nFor existing clusters or worker pools other than default: Create a worker pool with the --entitlement cloud_pak option. When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. After creation, your worker pool does not charge you the  license fee for your entitled worker nodes.Do not exceed your entitlement. Keep in mind that your OpenShift Container Platform entitlements can be used with other cloud providers or in other environments. To avoid billing issues later, make sure that you use only what you are entitled to use. For example, you might have an entitlement for the OCP licenses for two worker nodes of 4 CPU and 16 GB memory, and you create this worker pool with two worker nodes of 4 CPU and 16 GB memory. 
You used your entire entitlement, and you can't use the same entitlement for other worker pools, cloud providers, or environments.In the  catalog, in the Software tab, under Offering Type, check Cloud Paks.Select the Cloud Pak that you want to deploy, and follow the installation instructions. Each Cloud Pak requires an entitlement from IBM Passport Advantage, and has its own configuration settings. For more information, view the About tab and Cloud Pak documentation.Now you can run your Cloud Pak on your  cluster!\\nAssigning a Cloud Pak entitlement to your  accountTo deploy a Cloud Pak to your  cluster, your entitlement to the Cloud Pak must be assigned to your  account.Verify that your Cloud Pak entitlement is in your Container software library. If you don't see the entitlement, the entitlement might be owned by a different user. Verify the user, and if you still have issues, c\", 'url': '\"https://github.com/ibm-cloud-docs/openshift_integrations_cloud_paks.md\"'}}, {'document': {'rank': 7, 'document_id': ' ', 'text': '\"Awww, come on guys, it\\'s so simple. Maybe you need a refresher course. ... It\\'s all ball bearings nowadays.\" - Fletch, 1985 movie\\nSadly, it is not all about ball bearings nowadays. It\\'s all about containers. If you heard about containers, but are not sure what they are, you\\'ve come to the right place. This blog post addresses the following questions:Why should I care about containers?\\nWhat are containers?\\nAre containers the same as microservices?\\nWhat is an example of a microservices application?\\nWhat is a Docker container?\\nWhat are container orchestration and Kubernetes?\\nWhat is the difference between containers and virtual machine images?\\nHow can I get started with containers?\\nHow can IBM Cloud Paks help?\\nWhere can I run my containers?\\nWhat is Red Hat OpenShift on IBM Cloud?Why should I care about containers?\\nAcross organizations, there is a spectrum of container adoption. 
Many people are just learning about containers. Some companies are further along in their journey. If you\\'re at all considering containerization, it\\'s time to join the fun where you can see real business results:Faster time to market: New applications and services are what keep your competitive edge. Organizations are able to speed up delivery of new services with development and operational agility.\\nDeployment velocity: Move quicker from development to deployment. Containerization breaks down barriers for DevOps teams to accelerate deployment times and frequency.\\nIT infrastructure reduction: Reduce your costs by increasing your application workload density, getting better utilization of your server compute density, and reducing software licensing costs.\\nIT operational efficiency: Gain more operational efficiency by streamlining and automating the management of diverse applications and infrastructure into a single operating model.\\nGain freedom of choice: Package, ship, and run applications on any public or private cloud.Next steps: Learn what the true benefits of moving to containers are.What are containers?\\nThe best analogy for understanding containers is a shipping container. That\\'s why the majority of all container articles and blog posts show a photo of a shipping container. We\\'re sure you\\'ve seen the transport of those big steel shipping containers.  The shipping industry standardized on a consistent size container. Now, the same container can move from a ship to a train to a truck without unloading the cargo. The container contents do not matter.\\nJust like a shipping container, a software container is a standardized package of software. Everything needed for the software to run is inside the container. 
The software code, runtime, system tools, system libraries, and settings are all inside a single container.\\nAre containers the same thing as microservices?\\nOnce you start diving into containers, it\\'s impossible to avoid reading about microservices.  Microservices is an architectural style. A microservices architecture structures an application by using as a collection of loosely coupled services, which deliver specific business capabilities. Containers help make it happen.\\nWhat is an example of a microservices application?\\nMore than ten years ago, Netflix was one of the first companies to begin using containers extensively. They rewrote the applications that ran their entire video service by using a microservices architecture. In 2017, Netflix estimated that it employed around 700 microservices to control each of the many functions that make up its service. Let\\'s look at a few :Video selection: A microservice, in a container, provides your phone, tablet, computer, or TV with the video file to play and at a video quality based on your internet speed.\\nViewing history: One microservice remembers what shows you watch.\\nProgram recommendations: A microservice takes a look at your viewing history and uses analytics to recommend movies.\\nMain menu: One microservice provides the names and images of these movies shown on your main menu.\\nBilling: Another microser', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/what-are-containers-and-why-do-you-need-them\\n'}}, {'document': {'rank': 8, 'document_id': ' ', 'text': 'Archived contentArchive date: 2023-02-09This content is no longer being updated or maintained. The content is provided as is. Given the rapid evolution of technology, some content, steps, or illustrations may have changed.Generic JDBC enables a variety of connections to different data sources. 
A generic JDBC connection offers the option to connect to a data source using a different driver from what is pre-built in IBM Cloud Pak for Data, and provides additional properties and support for customized use cases.\\nLearning objectives\\nThe purpose of this tutorial is to demonstrate how to create a generic JDBC connector in IBM Cloud Pak for Data.\\nIn this tutorial, you will learn how to: Check that you have the Administrator role\\nImport a JDBC JAR file to IBM Cloud Pak for Data\\nCreate a generic JDBC platform connection\\nUse the created generic JDBC connection in a projectPrerequisites\\nYou will need IBM Cloud Pak for Data platform software on-prem.\\nNote: The generic JDBC connection is supported by IBM Cloud Pak for Data; at the time of this writing, it is not supported by IBM Cloud Pak for Data as a Service.\\nWhen using the generic JDBC connector, please ensure that a JDBC driver exists for the data source you would like to connect to.\\nAbout the data\\nFor this tutorial, we create an SAP HANA data connection. Please substitute details pertaining to the data connection and its containing data with your own.\\nEstimated time\\nCompleting this tutorial should take about 10 minutes.\\nSteps\\nStep 1. Navigate to IBM Cloud Pak for Data and ensure that you have Administrator accessTo upload your JDBC JAR, you must have Administer Platform permissions. You can ensure that you have this role by clicking on your profile on the upper-right corner of IBM Cloud Pak for Data, then clicking Profile &gt; Settings.Under Roles, check that you have the Administrator role with Administer Platform under enabled permissions. If you are unable to obtain the Administrator role, you need to ask an administrator to perform Step 2. Upload your JDBC JAR to IBM Cloud Pak for Data.Step 2. 
Upload JDBC JAR to IBM Cloud Pak for DataTo upload a JDBC JAR to IBM Cloud Pak for Data, navigate to Data &gt; Platform connections.With Administer Platform permissions, you should see the JDBC drivers tab.Drag and drop the JDBC driver JAR file into the box on the left side of the page. You should see your JAR listed, and after clicking Upload, your JAR file should be listed under Existing files on the right side of the page. Your JAR file is now available for use in IBM Cloud Pak for Data.Step 3. Creating a Generic JDBC platform connection\\nIn IBM Cloud Pak for Data, you can create a platform-level or project-level connection. A platform-level connection enables use of the connector across the platform vs a project-level connection, which can only be used in a project.To create an IBM Cloud Pak for Data platform connection, click on Data &gt; Platform Connections.Click on the New connection button.From the Add Connection page, click on the Generic JDBC connector, then the Select button, which launches the Create connection page.Enter the desired name and description  for your generic JDBC SAP HANA connector. The JAR URL drop-down will display all existing JDBC JARs available for use in IBM Cloud Pak for Data. Select the JAR required for a connection.Next, enter the required fields for JDBC URL and the JDBC class driver.Enter the username and password for the connection.Click on Test Connection to see if you can successfully connect.Once the test connection is verified, click Create to create the connection. The created SAP HANA  connection should display under Platform Connections ready for use in IBM Cloud Pak for Data.Step 4. Using the created generic JDBC connector in a projectNavigate to the desired project in IBM Cloud Pak for Data. 
As you can see, this project has no data assets.Click on Add to project and choose Connection from the asset type options.Click on the From platform tab to ', 'url': 'https://developer.ibm.com/middleware/v1/contents /tutorials/using-generic-jdbc-connector-on-cloud-pak-for-data\\n'}}, {'document': {'rank': 9, 'document_id': ' ', 'text': 'This blog post is the first of a three-part series authored by software developers and architects at IBM and Cloudera. This first post focuses on integration points of the recently announced joint offering: Cloudera Data Platform for IBM Cloud Pak for Data. The second post will look at how Cloudera Data Platform was installed on IBM Cloud using Ansible. And the third post will focus on lessons learned from installing, maintaining, and verifying the connectivity of the two platforms. Lets get started!\\nIn this post we will be outlining the main integration points between Cloudera Data Platform and IBM Cloud Pak for Data, and explaining how the two distinct data and AI platforms can communicate with each other. Integrating two platforms is made easy with capabilities available out of the box for both IBM Cloud Pak for Data and Cloudera Data Platform. Establishing a connection between the two is just a few clicks away.Architecture diagram showing Cloudera Data Plaform for Cloud Pak for Data\\nIn our view, there are three key points to integrating Cloudera Data Platform and IBM Cloud Pak for Data; all other services piggyback on one of these:Apache Knox Gateway \\nExecution Engine for Apache Hadoop \\nDb2 Big SQL Read on for more information about how each integration point works. For a demonstration on how to use data from Hive and Db2 check out the video below where we join the data using Data Virtualization and then display it with IBM Cognos Analytics check out the video below.Apache Knox Gateway\\nTo truly be secure, a Hadoop cluster needs Kerberos. 
However, Kerberos requires a client-side library and complex client-side configuration. This is where the Apache Knox Gateway  comes in. By encapsulating Kerberos, Knox eliminates the need for client software or client configuration and, thus, simplifies the access model. Knox integrates with identity management and SSO systems, such as Active Directory and LDAP, to allow identities from these systems to be used for access to Cloudera clusters.Knox dashboard showing the list of supported services\\nCloudera services such as Impala, Hive, and HDFS can be configured with Knox, allowing JDBC connections to easily be created in IBM Cloud Pak for Data.Creating a JDBC connection to Impala via KnoxList of connections on IBM Cloud Pak for Data\\nExecution Engine for Apache Hadoop\\nThe Execution Engine for Apache Hadoop service is installed on both IBM Cloud Pak for Data and on the worker nodes of a Cloudera Data Platform deployment. Execution Engine for Hadoop allows users to:Browse remote Hadoop data  through platform-level connections\\nCleanse and shape remote Hadoop data  with Data Refinery\\nRun a Jupyter notebook session on the remote Hadoop system\\nAccess Hadoop systems with basic utilities from RStudio and Jupyter notebooksAfter installing and configuring the services on IBM Cloud Pak for Data and Cloudera Data Platform, you can create platform-level connections to HDFS, Impala, and Hive.Execution Engine for Hadoop connection options\\nOnce a connection has been established, data from HDFS, Impala, or Hive can be browsed and imported.Browsing through an HDFS connection made via Execution Engine for Hadoop\\nData residing in HDFS, Impala or Hive can be cleaned and modified through Data Refinery on IBM Cloud Pak for Data.Data Refinery allows for operations to be run on data\\nThe Hadoop Execution Engine also allows for Jupyter notebook sessions to connect to a remote Hadoop system.Jupyter notebook connecting to a remote HDFS\\nDb2 Big SQL\\nThe Db2 Big SQL service 
is installed on IBM Cloud Pak for Data and is configured to communicate with a Cloudera Data Platform deployment. Db2 Big SQL allows users to:Query data stored on Hadoop services such as HDFS and Hive\\nQuery large amounts of data residing in a secured  or unsecured Hadoop-based platformOnce Big SQL is configured, you can choose what data to synchronize into tables. Once in a table, you can save the data to a project, run queries against it, or browse t', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/a-deep-dive-integrating-cloudera-data-platform-and-ibm-cloud-pak-for-data\\n'}}]\n"
     ]
    }
   ],
   "source": [
    "results_list=solr_reteriver(\"what is ibm cloud pak for data\")\n",
    "print(results_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c2d557fd",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 5. By using below method applying Re-Ranker model on documnets which is return by solr</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 128,
   "id": "0d1ade0a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def solr_reranker(question, max_reranked_documents = 10):\n",
    "\n",
    "    reranker = ColBERTReranker(model=model_name_or_path)\n",
    "    reranker.load()\n",
    "    \n",
    "    results_list = solr_reteriver(question)\n",
    "    if len(results_list) >0:\n",
    "        reranked_results = reranker.predict(queries= [question], documents = [results_list], max_num_documents=max_reranked_documents)\n",
    "\n",
    "        print(reranked_results)\n",
    "\n",
    "        reranked_results_to_display = [result['document'] for result in reranked_results[0]]\n",
    "        df = pd.DataFrame.from_records(reranked_results_to_display, columns=['rank','document_id','text','url'])\n",
    "        print('======================================================================')\n",
    "        print(f'QUERY: {question}')\n",
    "        display( HTML(df.to_html()) )\n",
    "        return df['text'][0] , df['url'][0]\n",
    "    else:\n",
    "        return \"0 documents found\" , \"None\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "706593c9",
   "metadata": {
    "scrolled": false
   },
   "source": [
    "<span style=\"color:blueviolet\">Step 6. format string before send to LLM model</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "id": "904d9e25",
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_string(doc):\n",
    "    doc = doc.encode(\"ascii\", \"ignore\")\n",
    "    string_decode = doc.decode()\n",
    "    cleantext = BeautifulSoup(string_decode, \"lxml\").text\n",
    "    perfecttext = \" \".join(cleantext.split())\n",
    "    perfecttext = re.sub(' +', ' ', perfecttext).strip('\"')\n",
    "    #perfecttext = perfecttext[0:4000]\n",
    "    return perfecttext"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fdae385e",
   "metadata": {},
   "source": [
    "<span style=\"color:blueviolet\">Step 7. By using below code we will  call LLM model</span>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "8c8d4584",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_llm_request(question):\n",
    "    \n",
    "    wd_result,url = solr_reranker(question)\n",
    "    if '0 documents found' not in wd_result:\n",
    "        combined_input = \"Answer the question based only on the context below. \" + \\\n",
    "            \"Context: \"  + format_string(wd_result) + \\\n",
    "            \" Question: \" + question\n",
    "        print(\"INPUT PROMPT: \", combined_input)\n",
    "\n",
    "        headers = {\n",
    "            'Content-Type': 'application/json',\n",
    "            'Authorization': llmToken,\n",
    "        }\n",
    "\n",
    "        json_data = {\n",
    "            'model_id': 'bigscience/bloom',\n",
    "            'inputs':  [combined_input],        \n",
    "                'parameters': {\n",
    "                # \"stream\": \"true\",\n",
    "                'temperature': 0.5,\n",
    "                'max_new_tokens': 200,\n",
    "            },\n",
    "        }\n",
    "\n",
    "        ## Demo LINK of llm \n",
    "        response = requests.post('https://llm-api.res.demo.ibm.com/v1/generate', headers=headers, json=json_data)\n",
    "        json_response = json.loads(response.content.decode(\"utf-8\"))\n",
    "        result = json_response['results'][0]['generated_text'].split(\"Answer:\")\n",
    "        if len(result) > 1:\n",
    "            print(\"LLM Output: \", result[1])\n",
    "            return result[1],url\n",
    "        else:\n",
    "            print(\"LLM Output: \", result[0])\n",
    "            return result[0],url\n",
    "    else:\n",
    "        return \"0 documents found\" , \"None\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 132,
   "id": "79f47f3b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    question = \"what is ibm cloud pak for data\"\n",
    "    # print(\"-------- Final answer ---------------\")\n",
    "    answer ,url = process_llm_request(question)\n",
    "    print(\"FINAL ANSWER: \", answer)\n",
    "    print(\"URL: \", url)\n",
    "    return answer , url"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "id": "7fdffb10",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[May 23, 01:19:20] #>>>>> at ColBERT name (model type) : /Users/abhilashamangal/Documents/Hanzo/PrimeQA/article/DrDecr.dnn\n",
      "[May 23, 01:19:20] #>>>>> at BaseColBERT name (model type) : /Users/abhilashamangal/Documents/Hanzo/PrimeQA/article/DrDecr.dnn\n",
      "[May 23, 01:19:23] factory model type: xlm-roberta-base\n",
      "[May 23, 01:19:35] get query model type: xlm-roberta-base\n",
      "[May 23, 01:19:39] get doc model type: xlm-roberta-base\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/cuda/amp/grad_scaler.py:115: UserWarning: torch.cuda.amp.GradScaler is enabled, but CUDA is not available.  Disabling.\n",
      "  warnings.warn(\"torch.cuda.amp.GradScaler is enabled, but CUDA is not available.  Disabling.\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "82 documents found.\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "======================================================================\n",
      "QUERY: what is ibm cloud pak for data\n",
      "[May 23, 01:19:46] #> XMLR QueryTokenizer.tensorize(batch_text[0], batch_background[0], bsize) ==\n",
      "[May 23, 01:19:46] #> Input: $ what is ibm cloud pak for data, \t\t True, \t\t None\n",
      "[May 23, 01:19:46] #> Output IDs: torch.Size([32]), tensor([    0,  9748,  2367,    83,     6,  2566,    39, 76746,  2522,   100,\n",
      "         2053,     2,     1,     1,     1,     1,     1,     1,     1,     1,\n",
      "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1,\n",
      "            1,     1])\n",
      "[May 23, 01:19:46] #> Output Mask: torch.Size([32]), tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "        0, 0, 0, 0, 0, 0, 0, 0])\n",
      "[May 23, 01:19:46] #>>>> colbert query ==\n",
      "[May 23, 01:19:46] #>>>>> input_ids: torch.Size([32]), tensor([    0,  9748,  2367,    83,     6,  2566,    39, 76746,  2522,   100,\n",
      "         2053,     2,     1,     1,     1,     1,     1,     1,     1,     1,\n",
      "            1,     1,     1,     1,     1,     1,     1,     1,     1,     1,\n",
      "            1,     1])\n",
      "[May 23, 01:19:46] #>>>> before linear query ==\n",
      "[May 23, 01:19:46] #>>>>> Q: torch.Size([32, 768]), tensor([[-0.1827,  0.2638,  0.1374,  ..., -0.1888, -0.1479,  0.3810],\n",
      "        [-0.7300,  0.0428,  0.4594,  ...,  0.0581,  0.0912,  0.2706],\n",
      "        [-0.7543,  0.0286,  0.5249,  ...,  0.2002, -0.0464,  0.2305],\n",
      "        ...,\n",
      "        [-0.5047,  0.3461,  0.2351,  ..., -0.0880, -0.4726,  0.8083],\n",
      "        [-0.5047,  0.3461,  0.2351,  ..., -0.0880, -0.4726,  0.8083],\n",
      "        [-0.5047,  0.3461,  0.2351,  ..., -0.0880, -0.4726,  0.8083]])\n",
      "[May 23, 01:19:46] #>>>>> self.linear query : Parameter containing:\n",
      "tensor([[-0.0286,  0.0017, -0.0202,  ..., -0.0262,  0.0210,  0.0006],\n",
      "        [-0.0102,  0.0121, -0.0111,  ..., -0.0362, -0.0165, -0.0012],\n",
      "        [-0.0047, -0.0172, -0.0054,  ..., -0.0069, -0.0194, -0.0193],\n",
      "        ...,\n",
      "        [-0.0286,  0.0231,  0.0004,  ...,  0.0373, -0.0045,  0.0125],\n",
      "        [ 0.0051,  0.0023,  0.0212,  ..., -0.0254,  0.0034,  0.0206],\n",
      "        [-0.0068,  0.0256, -0.0263,  ...,  0.0200,  0.0125, -0.0149]],\n",
      "       requires_grad=True)\n",
      "[May 23, 01:19:46] #>>>> colbert query ==\n",
      "[May 23, 01:19:46] #>>>>> Q: torch.Size([32, 128]), tensor([[ 0.3722,  0.3336,  0.4162,  ...,  0.4678,  0.2300, -0.3741],\n",
      "        [ 0.8487,  0.8140,  1.3944,  ...,  1.1479,  0.5574, -0.8432],\n",
      "        [ 0.7710,  0.7533,  1.5775,  ...,  1.4433,  0.6473, -0.9788],\n",
      "        ...,\n",
      "        [ 1.1156,  0.5901,  1.6189,  ...,  0.7307,  0.4164, -0.9748],\n",
      "        [ 1.1156,  0.5901,  1.6189,  ...,  0.7307,  0.4164, -0.9748],\n",
      "        [ 1.1156,  0.5901,  1.6189,  ...,  0.7307,  0.4164, -0.9748]])\n",
      "[May 23, 01:19:46] #> XLMR DocTokenizer.tensorize(batch_text[0], batch_background[0], bsize) ==\n",
      "[May 23, 01:19:46] #> Input: $ It used to be that the developer and operations roles were fully separated. The developer received the business requirements, they came up with a design and implemented it. A dedicated QA team wrote tests, and validated that the new application, capability, or change worked as expected, and then the code or application was handed over to the operations team to run it. In most organizations, these roles have now merged into a set of development practices that are commonly referred to as DevOps.\n",
      "To learn more about what is SRE, read this Red Hat article.\n",
      "At the same time, DevOps practices brought an expanded role for the operations teams, typically referred to now as a Site Reliability Engineering, or SRE. Red Hat defines the DevOps practice of SRE like this SRE takes the tasks that have historically been done by operations teams, often manually, and instead gives them to engineers or ops teams who use software and automation to solve problems and manage production systems.\n",
      "Now, the SRE role, can either be a dedicated engineering position, or frequently is one that the developers take on, either full or part time. The goal of SRE is to create a natural improvement cycle, which one gets by linking the application development and operations portions. The applications can be created such that they are easier to manage and observe, while the insights from monitoring the application, can in turn identify inefficiencies, defects, and potential improvements.\n",
      "In this article, we will focus on how Cloud Pak for Watson AIOps can help an SRE  improve and automate their applications. To reiterate, though, while we will use the term SRE throughout this article, SRE also applies to IT Operations engineers and application developers who are supporting their products.\n",
      "An SRE focuses on reacting to problems encountered by the application, but also on crafting ways to improve the time to repair the application , or even better on avoiding these problems in the first place.\n",
      "So, what is important to an SRE? When a problem occurs, they need to have a holistic view of what is happening to their application. This means they should have a real-time view of the state of all their components, understand what is relevant in their logs or metrics, and know whether any of their monitors, such as synthetic consumption monitors show any problems. However, they do not want to be inundated with lots of different alerts going off, but rather they want to know where to focus their time and attention. After a problem has been confirmed, they would like guidance on possible solutions, and automation to execute them. Ideally though, this would be available to them before a problem becomes an outage.\n",
      "Historically, two factors stood in the way here: the sheer volume of data and the fact that a lot of that data was in an unstructured format. As long as there have been applications, there have been simple monitoring tools. For example, an operator would set an alert on the memory consumption of an application, and when it breached a given amount, an alert would go off. Apart from being a blunt instrument , this approach might have been practical when the operator was monitoring a handful of monolithic applications.\n",
      "Now that SREs are monitoring thousands of microservices, this approach is no longer practical. Secondly, with the advent of AI, we now have a way to unlock all the value of unstructured  data. For example, many problems show early warnings in your logs, long before an application starts showing problems , but the problems dont have an impact yet and thus they wont fail the main application flow.\n",
      "As such, some of the features that an AIOps solution like Cloud Pak for Watson AIOps that support SREs are:No code changes are required. Your applications and your site reliability processes already output a lot of helpful information. Its time to take advantage of that data.\n",
      "Bring all your disparate data sources together for a holistic view across all parts of your e, \t\t None\n",
      "[May 23, 01:19:46] #> Output IDs: torch.Size([180]), tensor([     0,   9749,   1650,  11814,     47,    186,    450,     70, 106001,\n",
      "           136,  41018,      7,  31486,      7,   3542,  89554,  84797,     71,\n",
      "             5,    581, 106001,  75204,     70,   8063,  96679,      4,   1836,\n",
      "         21449,   1257,    678,     10,   4331,    136,  29479,    297,    442,\n",
      "             5,     62, 171332,      6,  72898,   7175,  54397, 109921,      4,\n",
      "           136,  44622,   3674,    450,     70,   3525,  38415,      4,   3540,\n",
      "         41159,      4,    707,  15549,  79786,    237,  84751,      4,    136,\n",
      "          7068,     70,  18151,    707,  38415,    509,   3535,    297,    645,\n",
      "            47,     70,  41018,      7,   7175,     47,  11675,    442,      5,\n",
      "           360,   2684,  53702,      7,      4,   6097,  31486,      7,    765,\n",
      "          5036,  42564,     71,   3934,     10,   5423,    111,  34754,  41361,\n",
      "             7,    450,    621,  39210,    538,  15005,   2822,     47,    237,\n",
      "         40317,  39029,      7,      5,    717,  30698,   1286,   1672,   2367,\n",
      "            83,    159,  11766,      4,  12301,    903,   6096,  17354,   5582,\n",
      "             5,   1913,     70,   5701,   1733,      4,  40317,  39029,      7,\n",
      "         41361,      7,  91048,    142,  71062,    297,  31486,    100,     70,\n",
      "         41018,      7,  87199,      4, 205794,  15005,   2822,     47,   5036,\n",
      "           237,     10,  20897,    853,    150,  41159, 123470,      4,    707,\n",
      "           159,  11766,      5,   6096,  17354,  61924,      7,     70,  40317,\n",
      "         39029,      7,  41361,    111,    159,  11766,   1884,    903,      2])\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[May 23, 01:19:46] #> Output Mask: torch.Size([180]), tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n",
      "[May 23, 01:19:46] #>>>> colbert doc ==\n",
      "[May 23, 01:19:46] #>>>>> input_ids: torch.Size([180]), tensor([     0,   9749,   1650,  11814,     47,    186,    450,     70, 106001,\n",
      "           136,  41018,      7,  31486,      7,   3542,  89554,  84797,     71,\n",
      "             5,    581, 106001,  75204,     70,   8063,  96679,      4,   1836,\n",
      "         21449,   1257,    678,     10,   4331,    136,  29479,    297,    442,\n",
      "             5,     62, 171332,      6,  72898,   7175,  54397, 109921,      4,\n",
      "           136,  44622,   3674,    450,     70,   3525,  38415,      4,   3540,\n",
      "         41159,      4,    707,  15549,  79786,    237,  84751,      4,    136,\n",
      "          7068,     70,  18151,    707,  38415,    509,   3535,    297,    645,\n",
      "            47,     70,  41018,      7,   7175,     47,  11675,    442,      5,\n",
      "           360,   2684,  53702,      7,      4,   6097,  31486,      7,    765,\n",
      "          5036,  42564,     71,   3934,     10,   5423,    111,  34754,  41361,\n",
      "             7,    450,    621,  39210,    538,  15005,   2822,     47,    237,\n",
      "         40317,  39029,      7,      5,    717,  30698,   1286,   1672,   2367,\n",
      "            83,    159,  11766,      4,  12301,    903,   6096,  17354,   5582,\n",
      "             5,   1913,     70,   5701,   1733,      4,  40317,  39029,      7,\n",
      "         41361,      7,  91048,    142,  71062,    297,  31486,    100,     70,\n",
      "         41018,      7,  87199,      4, 205794,  15005,   2822,     47,   5036,\n",
      "           237,     10,  20897,    853,    150,  41159, 123470,      4,    707,\n",
      "           159,  11766,      5,   6096,  17354,  61924,      7,     70,  40317,\n",
      "         39029,      7,  41361,    111,    159,  11766,   1884,    903,      2])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/autocast_mode.py:162: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling\n",
      "  warnings.warn('User provided device_type of \\'cuda\\', but CUDA is not available. Disabling')\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[May 23, 01:19:49] #>>>> before linear doc ==\n",
      "[May 23, 01:19:49] #>>>>> D: torch.Size([180, 768]), tensor([[ 0.0963,  0.1672,  0.2844,  ..., -0.0999,  0.1511,  0.2975],\n",
      "        [-0.2177, -0.1156,  0.1032,  ..., -0.2990,  0.0331,  0.4527],\n",
      "        [-0.1852, -0.0389,  0.1935,  ...,  0.1389,  0.0033,  0.4150],\n",
      "        ...,\n",
      "        [-0.0796, -0.1400,  0.2745,  ...,  0.6648,  0.1962,  0.6829],\n",
      "        [-0.1573, -0.2491,  0.2658,  ...,  0.3211,  0.2732,  0.9019],\n",
      "        [ 0.0965,  0.1657,  0.2779,  ..., -0.1104,  0.1406,  0.3077]])\n",
      "[May 23, 01:19:49] #>>>>> self.linear doc : Parameter containing:\n",
      "tensor([[-0.0286,  0.0017, -0.0202,  ..., -0.0262,  0.0210,  0.0006],\n",
      "        [-0.0102,  0.0121, -0.0111,  ..., -0.0362, -0.0165, -0.0012],\n",
      "        [-0.0047, -0.0172, -0.0054,  ..., -0.0069, -0.0194, -0.0193],\n",
      "        ...,\n",
      "        [-0.0286,  0.0231,  0.0004,  ...,  0.0373, -0.0045,  0.0125],\n",
      "        [ 0.0051,  0.0023,  0.0212,  ..., -0.0254,  0.0034,  0.0206],\n",
      "        [-0.0068,  0.0256, -0.0263,  ...,  0.0200,  0.0125, -0.0149]],\n",
      "       requires_grad=True)\n",
      "[May 23, 01:19:49] #>>>> colbert doc ==\n",
      "[May 23, 01:19:49] #>>>>> D: torch.Size([180, 128]), tensor([[-0.0686,  0.0506,  0.3864,  ...,  0.2547,  0.1527, -0.2208],\n",
      "        [ 0.4730,  0.6280,  1.1563,  ...,  0.5557,  0.3734, -0.5029],\n",
      "        [ 0.1218,  0.9693,  1.0564,  ...,  0.6269, -0.0534, -0.3580],\n",
      "        ...,\n",
      "        [-0.3793,  0.6370,  1.3091,  ...,  0.5273, -0.2304, -0.1520],\n",
      "        [-0.4451,  0.3568,  1.3110,  ...,  0.6241, -0.0677, -0.5109],\n",
      "        [-0.0641,  0.0526,  0.3908,  ...,  0.2554,  0.1589, -0.2212]])\n",
      "[[{'document': {'rank': 7, 'document_id': ' ', 'text': '\"Awww, come on guys, it\\'s so simple. Maybe you need a refresher course. ... It\\'s all ball bearings nowadays.\" - Fletch, 1985 movie\\nSadly, it is not all about ball bearings nowadays. It\\'s all about containers. If you heard about containers, but are not sure what they are, you\\'ve come to the right place. This blog post addresses the following questions:Why should I care about containers?\\nWhat are containers?\\nAre containers the same as microservices?\\nWhat is an example of a microservices application?\\nWhat is a Docker container?\\nWhat are container orchestration and Kubernetes?\\nWhat is the difference between containers and virtual machine images?\\nHow can I get started with containers?\\nHow can IBM Cloud Paks help?\\nWhere can I run my containers?\\nWhat is Red Hat OpenShift on IBM Cloud?Why should I care about containers?\\nAcross organizations, there is a spectrum of container adoption. Many people are just learning about containers. Some companies are further along in their journey. If you\\'re at all considering containerization, it\\'s time to join the fun where you can see real business results:Faster time to market: New applications and services are what keep your competitive edge. Organizations are able to speed up delivery of new services with development and operational agility.\\nDeployment velocity: Move quicker from development to deployment. 
Containerization breaks down barriers for DevOps teams to accelerate deployment times and frequency.\\nIT infrastructure reduction: Reduce your costs by increasing your application workload density, getting better utilization of your server compute density, and reducing software licensing costs.\\nIT operational efficiency: Gain more operational efficiency by streamlining and automating the management of diverse applications and infrastructure into a single operating model.\\nGain freedom of choice: Package, ship, and run applications on any public or private cloud.Next steps: Learn what the true benefits of moving to containers are.What are containers?\\nThe best analogy for understanding containers is a shipping container. That\\'s why the majority of all container articles and blog posts show a photo of a shipping container. We\\'re sure you\\'ve seen the transport of those big steel shipping containers.  The shipping industry standardized on a consistent size container. Now, the same container can move from a ship to a train to a truck without unloading the cargo. The container contents do not matter.\\nJust like a shipping container, a software container is a standardized package of software. Everything needed for the software to run is inside the container. The software code, runtime, system tools, system libraries, and settings are all inside a single container.\\nAre containers the same thing as microservices?\\nOnce you start diving into containers, it\\'s impossible to avoid reading about microservices.  Microservices is an architectural style. A microservices architecture structures an application by using as a collection of loosely coupled services, which deliver specific business capabilities. Containers help make it happen.\\nWhat is an example of a microservices application?\\nMore than ten years ago, Netflix was one of the first companies to begin using containers extensively. 
They rewrote the applications that ran their entire video service by using a microservices architecture. In 2017, Netflix estimated that it employed around 700 microservices to control each of the many functions that make up its service. Let\\'s look at a few :Video selection: A microservice, in a container, provides your phone, tablet, computer, or TV with the video file to play and at a video quality based on your internet speed.\\nViewing history: One microservice remembers what shows you watch.\\nProgram recommendations: A microservice takes a look at your viewing history and uses analytics to recommend movies.\\nMain menu: One microservice provides the names and images of these movies shown on your main menu.\\nBilling: Another microser', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/what-are-containers-and-why-do-you-need-them\\n'}, 'score': 22.73632049560547}, {'document': {'rank': 1, 'document_id': ' ', 'text': 'Palantir for IBM Cloud Pak for Data enables building no-/low-code line of business applications using data, machine learning, and optimization from IBM Cloud Pak for Data. Ontology managers can define business-oriented data models integrating data from IBM Cloud Pak for Data. Application builders can use Palantir tools to create applications using these data models. Additionally, applications can integrate machine learning models from IBM Cloud Pak for Data to infuse predictions, as well as decision optimization result data from IBM Cloud Pak for Data to determine optimized actions based on data and predictions.\\nThis blog post explains how to create AI-infused apps using Palantir ontology and application building tools together with IBM Cloud Pak for Data model deployments and data and AI catalog. 
It also outlines the underlying integration architecture.\\nIBM Cloud Pak for Data as the data and AI foundation\\nIBM Cloud Pak for Data together with Palantir provide integrated capabilities to:Collect, transform, and integrate data from many sources\\nOrganize data to be ready for use in projects and applications\\nAnalyze data to gain insights and create AI models\\nInfuse AI insights such as predictions and optimization via APIs where needed\\nBuild applications using no-/low-code app builders, integrating data and AI on multiple clouds while leveraging Red Hat OpenShift as the underlying platform.Applications built with Palantir for IBM Cloud Pak for Data by application builders -- using no-/low-code tools -- can use data, predictions, and optimization result data from IBM Cloud Pak for Data, helping business users achieve smarter business outcomes by taking optimized actions.Data engineers can create data services in IBM Cloud Pak for Data such as Db2, Db2 Warehouse, Postgres, etc. to collect data and can build a catalog of data assets available for data scientists and application builders to use. Where needed, they can use DataStage flows or other tools to transform data from multiple sources and use data virtualization services.\\nData scientists can collaborate in projects, add data sets from the catalog or from other data sources, analyze data, gain insights, and train machine learning models or define decision optimization models. To train models, they may use Python code in JupyterLab using their favorite machine learning framework, SPSS Modeler flows, or AutoAI, as shown in the following image.Models can be saved and deployed to spaces, as shown in the image below, to make them available for AI infusion into business processes and applications. 
The deployed model can then be called via the model deployment REST API.Building data and AI applications with Palantir for IBM Cloud Pak for Data\\nApplication builders can build rich no-/low-code applications using the Palantir app builder tools available through a new Palantir card on the IBM Cloud Pak for Data home page.From here, ontology managers can navigate to the Palantir UI to define and manage Palantir ontologies, integrating data from IBM Cloud Pak for Data. Application builders can navigate to the Palantir UI to build apps using ontologies and connecting machine learning models from IBM Cloud Pak for Data to integrate predictions into applications. Once in the Palantir UI, they can integrate AI models from IBM Cloud Pak for Data into Palantir apps  and can integrate data from IBM Cloud Pak for Data into a Palantir ontology .To enable Palantir applications, a business-oriented ontology needs to first be defined using Palantir ontology management, which integrates with the data sets from the data and AI catalog in IBM Cloud Pak for Data. From the ontology management UI, users can search the IBM Cloud Pak for Data catalog for data assets to use and can then drill down into the columns or object attributes of the data set to map these to business objects defined in the Palantir ontology.The underlying data behind the data assets is then synchronized from the referenced data source into ', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/data-and-ai-applications-with-palantir-for-ibm-cloud-pak-for-data\\n'}, 'score': 22.560094833374023}, {'document': {'rank': 8, 'document_id': ' ', 'text': 'Archived contentArchive date: 2023-02-09This content is no longer being updated or maintained. The content is provided as is. Given the rapid evolution of technology, some content, steps, or illustrations may have changed.Generic JDBC enables a variety of connections to different data sources. 
A generic JDBC connection offers the option to connect to a data source using a different driver from what is pre-built in IBM Cloud Pak for Data, and provides additional properties and support for customized use cases.\\nLearning objectives\\nThe purpose of this tutorial is to demonstrate how to create a generic JDBC connector in IBM Cloud Pak for Data.\\nIn this tutorial, you will learn how to: Check that you have the Administrator role\\nImport a JDBC JAR file to IBM Cloud Pak for Data\\nCreate a generic JDBC platform connection\\nUse the created generic JDBC connection in a projectPrerequisites\\nYou will need IBM Cloud Pak for Data platform software on-prem.\\nNote: The generic JDBC connection is supported by IBM Cloud Pak for Data; at the time of this writing, it is not supported by IBM Cloud Pak for Data as a Service.\\nWhen using the generic JDBC connector, please ensure that a JDBC driver exists for the data source you would like to connect to.\\nAbout the data\\nFor this tutorial, we create an SAP HANA data connection. Please substitute details pertaining to the data connection and its containing data with your own.\\nEstimated time\\nCompleting this tutorial should take about 10 minutes.\\nSteps\\nStep 1. Navigate to IBM Cloud Pak for Data and ensure that you have Administrator accessTo upload your JDBC JAR, you must have Administer Platform permissions. You can ensure that you have this role by clicking on your profile on the upper-right corner of IBM Cloud Pak for Data, then clicking Profile &gt; Settings.Under Roles, check that you have the Administrator role with Administer Platform under enabled permissions. If you are unable to obtain the Administrator role, you need to ask an administrator to perform Step 2. Upload your JDBC JAR to IBM Cloud Pak for Data.Step 2. 
Upload JDBC JAR to IBM Cloud Pak for DataTo upload a JDBC JAR to IBM Cloud Pak for Data, navigate to Data &gt; Platform connections.With Administer Platform permissions, you should see the JDBC drivers tab.Drag and drop the JDBC driver JAR file into the box on the left side of the page. You should see your JAR listed, and after clicking Upload, your JAR file should be listed under Existing files on the right side of the page. Your JAR file is now available for use in IBM Cloud Pak for Data.Step 3. Creating a Generic JDBC platform connection\\nIn IBM Cloud Pak for Data, you can create a platform-level or project-level connection. A platform-level connection enables use of the connector across the platform vs a project-level connection, which can only be used in a project.To create an IBM Cloud Pak for Data platform connection, click on Data &gt; Platform Connections.Click on the New connection button.From the Add Connection page, click on the Generic JDBC connector, then the Select button, which launches the Create connection page.Enter the desired name and description  for your generic JDBC SAP HANA connector. The JAR URL drop-down will display all existing JDBC JARs available for use in IBM Cloud Pak for Data. Select the JAR required for a connection.Next, enter the required fields for JDBC URL and the JDBC class driver.Enter the username and password for the connection.Click on Test Connection to see if you can successfully connect.Once the test connection is verified, click Create to create the connection. The created SAP HANA  connection should display under Platform Connections ready for use in IBM Cloud Pak for Data.Step 4. Using the created generic JDBC connector in a projectNavigate to the desired project in IBM Cloud Pak for Data. 
As you can see, this project has no data assets.Click on Add to project and choose Connection from the asset type options.Click on the From platform tab to ', 'url': 'https://developer.ibm.com/middleware/v1/contents /tutorials/using-generic-jdbc-connector-on-cloud-pak-for-data\\n'}, 'score': 22.475357055664062}, {'document': {'rank': 6, 'document_id': 'o', 'text': \"\\nAdding Cloud PaksIBM Cloud Paks&trade; are containerized, licensed IBM middleware and open source software components that you can use to modernize, move, and build cloud-native business applications in hybrid and multicloud deployments. By running exclusively on  and Red Hat Enterprise Linux, Cloud Paks are built atop a secure stack and maintain consistency in deployment and behavior across cloud providers. You have greater flexibility to run and manage your workloads securely where you need them: on-premises, off-premises, in a backup provider, and in .Overview of Cloud Pak offeringsYou can deploy the entire set of Cloud Paks to manage your full-stack cloud apps, data, integration, automation, and management across  cloud providers.Cloud Pak for ApplicationsCloud Pak for DataCloud Pak for IntegrationCloud Pak for SecurityCloud Pak for ManagementAdding IBM Cloud PaksIBM Cloud Paks are containerized, licensed IBM middleware and open source software components as part of your hybrid cloud solution. IBM Cloud Paks run exclusively on  clusters, not community Kubernetes clusters.Before you begin:\\n Verify that your account administrator set up your  account with the Cloud Pak entitlement.\\n Make sure that you have the required permissions to create a cluster. 
These permissions include the following:\\n     The IAM Administrator platform access role for .\\n     The IAM Administrator platform access role for .\\n     The IAM Viewer platform access role for the resource group if you create the cluster in a resource group other than default.\\n     The appropriate infrastructure permissions, such as an API key with the Super User role for classic infrastructure.\\nTo add a Cloud Pak from the  catalog:Add your Cloud Pak entitlement from IBM Passport Advantage to your  cluster.For new clusters: Create a cluster with the --entitlement cloud_pak option. When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. You can optionally specify the worker node operating system . After your cluster is created, you are not charged the  license fee for the entitled worker nodes in the default worker pool. If you want to use a different worker pool for your Cloud Pak, follow the steps for existing clusters.\\nFor existing clusters or worker pools other than default: Create a worker pool with the --entitlement cloud_pak option. When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. After creation, your worker pool does not charge you the  license fee for your entitled worker nodes.Do not exceed your entitlement. Keep in mind that your OpenShift Container Platform entitlements can be used with other cloud providers or in other environments. To avoid billing issues later, make sure that you use only what you are entitled to use. For example, you might have an entitlement for the OCP licenses for two worker nodes of 4 CPU and 16 GB memory, and you create this worker pool with two worker nodes of 4 CPU and 16 GB memory. 
You used your entire entitlement, and you can't use the same entitlement for other worker pools, cloud providers, or environments.In the  catalog, in the Software tab, under Offering Type, check Cloud Paks.Select the Cloud Pak that you want to deploy, and follow the installation instructions. Each Cloud Pak requires an entitlement from IBM Passport Advantage, and has its own configuration settings. For more information, view the About tab and Cloud Pak documentation.Now you can run your Cloud Pak on your  cluster!\\nAssigning a Cloud Pak entitlement to your  accountTo deploy a Cloud Pak to your  cluster, your entitlement to the Cloud Pak must be assigned to your  account.Verify that your Cloud Pak entitlement is in your Container software library. If you don't see the entitlement, the entitlement might be owned by a different user. Verify the user, and if you still have issues, c\", 'url': '\"https://github.com/ibm-cloud-docs/openshift_integrations_cloud_paks.md\"'}, 'score': 22.443798065185547}, {'document': {'rank': 9, 'document_id': ' ', 'text': 'This blog post is the first of a three-part series authored by software developers and architects at IBM and Cloudera. This first post focuses on integration points of the recently announced joint offering: Cloudera Data Platform for IBM Cloud Pak for Data. The second post will look at how Cloudera Data Platform was installed on IBM Cloud using Ansible. And the third post will focus on lessons learned from installing, maintaining, and verifying the connectivity of the two platforms. Lets get started!\\nIn this post we will be outlining the main integration points between Cloudera Data Platform and IBM Cloud Pak for Data, and explaining how the two distinct data and AI platforms can communicate with each other. Integrating two platforms is made easy with capabilities available out of the box for both IBM Cloud Pak for Data and Cloudera Data Platform. 
Establishing a connection between the two is just a few clicks away.Architecture diagram showing Cloudera Data Plaform for Cloud Pak for Data\\nIn our view, there are three key points to integrating Cloudera Data Platform and IBM Cloud Pak for Data; all other services piggyback on one of these:Apache Knox Gateway \\nExecution Engine for Apache Hadoop \\nDb2 Big SQL Read on for more information about how each integration point works. For a demonstration on how to use data from Hive and Db2 check out the video below where we join the data using Data Virtualization and then display it with IBM Cognos Analytics check out the video below.Apache Knox Gateway\\nTo truly be secure, a Hadoop cluster needs Kerberos. However, Kerberos requires a client-side library and complex client-side configuration. This is where the Apache Knox Gateway  comes in. By encapsulating Kerberos, Knox eliminates the need for client software or client configuration and, thus, simplifies the access model. Knox integrates with identity management and SSO systems, such as Active Directory and LDAP, to allow identities from these systems to be used for access to Cloudera clusters.Knox dashboard showing the list of supported services\\nCloudera services such as Impala, Hive, and HDFS can be configured with Knox, allowing JDBC connections to easily be created in IBM Cloud Pak for Data.Creating a JDBC connection to Impala via KnoxList of connections on IBM Cloud Pak for Data\\nExecution Engine for Apache Hadoop\\nThe Execution Engine for Apache Hadoop service is installed on both IBM Cloud Pak for Data and on the worker nodes of a Cloudera Data Platform deployment. 
Execution Engine for Hadoop allows users to:Browse remote Hadoop data  through platform-level connections\\nCleanse and shape remote Hadoop data  with Data Refinery\\nRun a Jupyter notebook session on the remote Hadoop system\\nAccess Hadoop systems with basic utilities from RStudio and Jupyter notebooksAfter installing and configuring the services on IBM Cloud Pak for Data and Cloudera Data Platform, you can create platform-level connections to HDFS, Impala, and Hive.Execution Engine for Hadoop connection options\\nOnce a connection has been established, data from HDFS, Impala, or Hive can be browsed and imported.Browsing through an HDFS connection made via Execution Engine for Hadoop\\nData residing in HDFS, Impala or Hive can be cleaned and modified through Data Refinery on IBM Cloud Pak for Data.Data Refinery allows for operations to be run on data\\nThe Hadoop Execution Engine also allows for Jupyter notebook sessions to connect to a remote Hadoop system.Jupyter notebook connecting to a remote HDFS\\nDb2 Big SQL\\nThe Db2 Big SQL service is installed on IBM Cloud Pak for Data and is configured to communicate with a Cloudera Data Platform deployment. Db2 Big SQL allows users to:Query data stored on Hadoop services such as HDFS and Hive\\nQuery large amounts of data residing in a secured  or unsecured Hadoop-based platformOnce Big SQL is configured, you can choose what data to synchronize into tables. Once in a table, you can save the data to a project, run queries against it, or browse t', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/a-deep-dive-integrating-cloudera-data-platform-and-ibm-cloud-pak-for-data\\n'}, 'score': 22.428131103515625}, {'document': {'rank': 5, 'document_id': ' ', 'text': 'Netezza has always been synonymous with speed and simplicity. 
Netezza Performance Server for IBM Cloud Pak for Data is the next-generation advanced data warehouse and analytics platform available both on-premises and on cloud.\\nTo understand why Netezza Performance Server for IBM Cloud Pak for Data is important for application developers, it is first important to understand the journey to AI and how to get there. Many developers want to infuse AI into the companies they work for, but dont really know how. IBM Cloud Pak for Data is a complete Data and AI platform that modernizes how businesses collect, organize, and analyze data to infuse AI throughout their organizations. If you look under the hood of IBM Cloud Pak for Data, you will see that it is built with the streamlined hybrid cloud foundation of Red Hat OpenShift. This solution supports multicloud environments, such as Amazon Web Services , Google Cloud, IBM Cloud, and private cloud deployments.\\nThe Netezza Performance Server part of IBM Cloud Pak for Data is responsible for the \"collect\" piece of the data lifecycle. Netezza Performance Server can take data from many sources and store current and historical data in an enterprise data warehouse so it can be used for reporting, analysis, and better decision-making. What makes the Netezza Performance Server so powerful is the fact that it can process huge amounts of data and run large jobs that can return results in seconds, rather than hours or days. Netezza has always been known for speed and simplicity, so the fact that the new generation of Netezza Performance Server is built onto the same engine means that you dont need to waste all your time on migration to the new platform, especially if you are coming form an older Netezza form factor. It is a simple nz_migrate command, then just point your applications to the new server. It doesn\\'t get much easier than that.\\nSo what does this mean for application developers? 
Having everything you need in your journey to AI and all in one platform means that you dont need waste your time putting all the pieces together. Netezza Performance Server for IBM Cloud Pak for Data is an all-in-one Data and AI platform that lets you perform data science and machine learning with data volumes scaling into the petabytes.\\nNetezza Performance Server on IBM Cloud Pak for Data System or Netezza on Cloud\\nNetezza Performance Server for IBM Cloud Pak for Data comes in two form factors: It is available as part of a hyper-converged system that includes all hardware and software needed to get up and running quickly; and the other is Netezza available on IBM Cloud and AWS, with more clouds to come. This gives you the flexibility to run this on-premises with all the needed hardware, software, storage, compute, and networking in a single system. If you choose to run Netezza on cloud, you are getting a cloud-native deployment of the Netezza Performance Server database engine deployed to a public cloud data center of your choice.\\nNative in-database analytics and geospatial capabilities\\nNetezza Performance Server comes with advanced in-database analytics capabilities that can be used to act on the data stored in Netezza Performance Server. This package that used to be called Netezza In-Database Analytics is now called the Netezza Performance Server Analytics package and can be installed after you have the Netezza Performance Server up and running. The Netezza Performance Server Analytics package comprises a set of cartridges, each of which covers a different area of analytics. There are are analytics packages for:In-database analytics\\nSpatial\\nSpatial ESRI\\nMatrix\\nMapReduceThere are also some special geospatial capabilities available in Netezza Performance Server in order to process data needed for this particular type of use case. 
All of these capabilities come in handy when you want pre-packaged and powerful analytics capabilities to work on the data inside the database.\\nWith IBM Watson Studio and machine lear', 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/get-acquainted-with-netezza-performance-server\\n'}, 'score': 21.696094512939453}, {'document': {'rank': 2, 'document_id': ' ', 'text': 'Todays cloud-native and microservices-based architectures rely on a complex infrastructure that is made up of various hardware and software components. This complexity arises because of the number of applications, the variety of hardware and software in the infrastructure, the volume of data, and the large number of business processes that are part of network and IT operations.\\nThis increasingly complex infrastructure makes it difficult to troubleshoot and resolve issues quickly. Troubleshooting and root-cause analysis are harder with the explosion of data available from all the individual microservices. Closed-loop automation systems help transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes in a highly automated framework.\\nClosed-loop automation systems enable companies to transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes within a continuous highly automated framework. Closed-loop automation helps solve many problems before they even become issues.Read more about Cloud Pak for Watson AIOps.One common use case for closed-loop automation is traffic flow optimization. By implementing a closed-loop automation system, teams can automatically correct issues like network anomalies within the provisioned network infrastructure. 
At the heart of a closed-loop automation system for the traffic flow optimization use case are the components of IBM Cloud Pak for Watson AIOps, which is at the heart of IBMs AIOps platform.\\nWhat is closed-loop automation?\\nA simple closed-loop implementation detects issues that could happen in the future. The appropriate data is analyzed by various predictive models, which then make a recommendation on the change to be made to the orchestration layer, which implements the change.\\nIn complex cases, closed-loop automation combines the predictive insights information with additional AI systems to determine a resolution. The AI system is trained to resolve these issues and is integrated with a robotics automation system  to automate the resolution process. If the AI system determines it has a high confidence that the suggested resolution is correct, it will invoke the orchestration engine to implement the solution automatically. If not, a trouble ticket is generated, and an engineer works to resolve the issue.\\nThe following image provides an overview of a closed-loop automation system that addresses issues of varying complexity.Closed-loop automation enables these key capabilities:Anomaly detection. Anomaly detection uses large, real-time, time-series data to analyze networks applications, database metrics, operating systems, and so on. This gives anomaly detection the capability to identify patterns and anomalies, and raise awareness toward predictive actions.\\nIntelligent alerts. In a general operations environment, multiple connected components can raise alerts all related to the same failure event. These add to the overall load and volume of operations teams. However, 20 percent of overall alert volume is false-positive. Closed-loop automation uses machine learning models to create the patterns for the series of alerts so that those can be bound to causes and known actions, and then be corrected accordingly.\\nPredictive planning. 
Organizations can use machine learning algorithms to predict how application and network behaviors are dependent on seasonality and other factors to ensure that appropriate corrective actions are taken, thereby permitting systems to perform optimally.\\nRoot-cause analysis. Closed-loop automation leverages data to intelligently identify all anomalies in the service path and use AI to map it to find the most likely cause for a particular incident. It makes use of various AI algorithms to ensure the accuracy of root-cause identifications and implements the required remediation steps.IT Ops teams apply AI and ', 'url': 'https://developer.ibm.com/middleware/v1/contents /articles/an-introduction-to-closed-loop-automation\\n'}, 'score': 19.22018051147461}, {'document': {'rank': 0, 'document_id': ' ', 'text': 'It used to be that the developer and operations roles were fully separated. The developer received the business requirements, they came up with a design and implemented it. A dedicated QA team wrote tests, and validated that the new application, capability, or change worked as expected, and then the code or application was handed over to the operations team to run it. In most organizations, these roles have now merged into a set of development practices that are commonly referred to as DevOps.\\nTo learn more about what is SRE, read this Red Hat article.\\nAt the same time, DevOps practices brought an expanded role for the operations teams, typically referred to now as a Site Reliability Engineering, or SRE. Red Hat defines the DevOps practice of SRE like this SRE takes the tasks that have historically been done by operations teams, often manually, and instead gives them to engineers or ops teams who use software and automation to solve problems and manage production systems.\\nNow, the SRE role, can either be a dedicated engineering position, or frequently is one that the developers take on, either full or part time. 
The goal of SRE is to create a natural improvement cycle, which one gets by linking the application development and operations portions. The applications can be created such that they are easier to manage and observe, while the insights from monitoring the application, can in turn identify inefficiencies, defects, and potential improvements.\\nIn this article, we will focus on how Cloud Pak for Watson AIOps can help an SRE  improve and automate their applications. To reiterate, though, while we will use the term SRE throughout this article, SRE also applies to IT Operations engineers and application developers who are supporting their products.\\nAn SRE focuses on reacting to problems encountered by the application, but also on crafting ways to improve the time to repair the application , or even better on avoiding these problems in the first place.\\nSo, what is important to an SRE? When a problem occurs, they need to have a holistic view of what is happening to their application. This means they should have a real-time view of the state of all their components, understand what is relevant in their logs or metrics, and know whether any of their monitors, such as synthetic consumption monitors show any problems. However, they do not want to be inundated with lots of different alerts going off, but rather they want to know where to focus their time and attention. After a problem has been confirmed, they would like guidance on possible solutions, and automation to execute them. Ideally though, this would be available to them before a problem becomes an outage.\\nHistorically, two factors stood in the way here: the sheer volume of data and the fact that a lot of that data was in an unstructured format. As long as there have been applications, there have been simple monitoring tools. For example, an operator would set an alert on the memory consumption of an application, and when it breached a given amount, an alert would go off. 
Apart from being a blunt instrument , this approach might have been practical when the operator was monitoring a handful of monolithic applications.\\nNow that SREs are monitoring thousands of microservices, this approach is no longer practical. Secondly, with the advent of AI, we now have a way to unlock all the value of unstructured  data. For example, many problems show early warnings in your logs, long before an application starts showing problems , but the problems dont have an impact yet and thus they wont fail the main application flow.\\nAs such, some of the features that an AIOps solution like Cloud Pak for Watson AIOps that support SREs are:No code changes are required. Your applications and your site reliability processes already output a lot of helpful information. Its time to take advantage of that data.\\nBring all your disparate data sources together for a holistic view across all parts of your e', 'url': 'https://developer.ibm.com/middleware/v1/contents /articles/improving-and-automating-your-ops-with-cp4waiops\\n'}, 'score': 17.130687713623047}, {'document': {'rank': 3, 'document_id': ' ', 'text': \"In this blog, I will answer the following general questions about AIOps:What is AIOps?\\nWhat does a company need to utilize AIOps?\\nHow do I train AI?\\nWhat is no-code AI?\\nWhy is this important?What is AIOps?\\nGartners definition of AIOps: AIOps combines big data and machine learning to automate IT operations  processes, including event correlation, anomaly detection, and causality determination.\\nITOps teams are beginning to explore how automation can improve business outcomes through scalable artificial intelligence . Gartner found that 10 times more business leaders will rely on AIOps platforms for automated insights in the next three years. Without it, IT organizations can experience a lack of observability, become overwhelmed from manually managing data, and end up focusing on infrastructure rather than an application-centric approach. 
The ITOps teams of tomorrow will experience full visualization and observability of their IT environments with insights derived from various tools that focus on critical applications to support business performance.\\nWhat does a company need to be able to use AIOps?\\nTo start, ITOps organizations can determine what the company needs in business performance. By applying AI, what does the company intend to do to improve outcomes? Next, companies will need to consider if the organization has historical and real-time data organized in a way that can begin to train models, and if so, is it understandable.\\nHow do I train AI?\\nIf a company wants to use AIOps they need to establish a baseline performance, which requires that they have an understanding of their historical data by measuring the performance of their steady state using real-time data. When something bad happens to a system, like an application outage, the company can more efficiently pinpoint the issue by using insights gathered from historical and real-time data. This insightful information can help find ways to triage events or outages as efficiently as possible.\\nWhat is no-code AI?\\nCompanies don't always have data scientists, data engineers, or data centric teams to help. What if we could train AI without the assistance of data scientists, data engineers, and data centric teams?\\nNo-code AI simply means using no-code in automation training. A company can use its history and what it is doing today as a baseline to improve its tomorrow without developing code to train the AI.\\nWhy is this important?\\nAs an IT admin juggling multiple sources of data and resolving incidents manually, you need tools to help you resolve incidents faster.\\nTime is money. Battling aggressive timelines, ITOps teams need to identify solutions quickly and have those solutions work correctly when theyre set in motion. 
Lets unpack how this is achieved with Cloud Pak for Watson AIOps.\\nSo, what is the Cloud Pak for Watson AIOps?\\nCloud Pak for Watson AIOps provides an application-centric data and intelligence platform powering automation for application, incident, cost, and security  risk management with trusted and explainable AI.\\nFigure 1: Bringing DevSecOps together with AI and automationThe goal of using AIOps is to focus on business outcomes. To do this, a company must leverage its data without creating gates of entry by requiring a dedicated data scientist, data engineer, or a data-centric team to get up and running. Cloud Pak for Watson AIOps provides a comprehensive understanding of business applications baked in to help provide insights and intelligence derived from not just operational data such as logs or events but enhanced organizational insights as well.\\nLearn more about Cloud Pak for Watson AIOps can evolve your ITOps organization.  And, explore blogs, articles, tutorials, and code patterns on the Cloud Pak for Watson AIOps hub page on IBM Developer.\", 'url': 'https://developer.ibm.com/middleware/v1/contents /blogs/evolving-itops-with-aiops-with-no-code-ai-training\\n'}, 'score': 17.018108367919922}, {'document': {'rank': 4, 'document_id': ' ', 'text': \"TensorFlow is an end-to-end open source machine learning platform that makes it easier to build and deploy machine learning models. A TensorFlow application uses a structure known as a data flow graph. By default in TensorFlow version 1.0, every graph had to be run within a TensorFlow session, which only allowed for the entire graph to be run all at once, and made it hard to debug the computation graph. The only way to get around this default and be able to debug the code was to use Eager Execution.\\nEager Execution is a flexible machine learning platform for research and experimentation that provides:An intuitive interface so that the code can be structured naturally and use Python data structures. 
Small models and small data can be quickly iterated.\\nEasier debugging by providing the ability to call operations directly to inspect code line by line and test changes.\\nA natural control flow using a Python control flow instead of a graph control flow, which simplifies the specification of dynamic models.With TensorFlow 2.x, Eager Execution is enabled by default, and allows TensorFlow code to be run and evaluated line by line.\\nLearning objectives\\nThis tutorial looks at the impact of Eager Execution and the benefits of having it enabled by default in TensorFlow 2.x. You'll use a Jupyter Notebook to observe the behavior of TensorFlow when Eager Execution is both disabled and enabled. You'll learn how to:Run a Jupyter Notebook using IBM Watson Studio on IBM Cloud Pak for Data as a Service\\nDisable and enable Eager Execution\\nUnderstand the benefits of Eager ExecutionPrerequisites\\nThe following prerequisites are required to follow the tutorial:An IBM Cloud Account\\nIBM Cloud Pak for DataEstimated time\\nIt should take you approximately 30 minutes to complete the tutorial.\\nStepsSet up IBM Cloud Pak for Data as a Service\\nCreate a new Project and import the notebook\\nRead through the notebook\\nRun the first half of the notebook\\nRestart the kernel\\nRun the second half of the notebookSet up IBM Cloud Pak for Data as a ServiceOpen a browser, and log in to IBM Cloud with your IBM Cloud credentials.Type Watson Studio in the search bar at the top. If you already have an instance of Watson Studio, it should be visible. If so, click it. If not, click Watson Studio under Catalog Results to create a new service instance.Select the type of plan to create if you are creating a new service instance. A Lite  plan should suffice for this tutorial). Click Create.Click Get Started on the landing page for the service instance. 
This should take you to the landing page for IBM Cloud Pak for Data as a Service.Click your avatar in the upper-right corner, then click Profile and settings under your name.Switch to the Services tab. You should see the Watson Studio service instance listed under Your Cloud Pak for Data services.\\n You can also associate other services such as Watson Knowledge Catalog and Watson Machine Learning with your IBM Cloud Pak for Data as a Service account. These are listed under Try our available services.\\n In the example shown here, a Watson Knowledge Catalog service instance already exists in the IBM Cloud account, so it's automatically associated with the IBM Cloud Pak for Data as a Service account. To add any other service , click Add within the tile for the service under Try our available services.Select the type of plan to create , and click Create.After the service instance is created, you are returned to the IBM Cloud Pak for Data as a Service instance. You should see that the service is now associated with your IBM Cloud Pak for Data as a Service account.Create a new project and import the notebookNavigate to the hamburger menu  on the left, and choose View all projects. After the screen loads, click New + or New project + to create a new project.Select Create an empty project.Provide a name for the project. You must associate an IBM Cloud Object Storage instance with your project. If you already have an IBM Cloud Object Storage service in\", 'url': 'https://developer.ibm.com/middleware/v1/contents /tutorials/enable-eager-execution-in-tensorflow\\n'}, 'score': 16.26887321472168}]]\n",
      "======================================================================\n",
      "QUERY: what is ibm cloud pak for data\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>rank</th>\n",
       "      <th>document_id</th>\n",
       "      <th>text</th>\n",
       "      <th>url</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>7</td>\n",
       "      <td></td>\n",
       "      <td>\"Awww, come on guys, it's so simple. Maybe you need a refresher course. ... It's all ball bearings nowadays.\" - Fletch, 1985 movie\\nSadly, it is not all about ball bearings nowadays. It's all about containers. If you heard about containers, but are not sure what they are, you've come to the right place. This blog post addresses the following questions:Why should I care about containers?\\nWhat are containers?\\nAre containers the same as microservices?\\nWhat is an example of a microservices application?\\nWhat is a Docker container?\\nWhat are container orchestration and Kubernetes?\\nWhat is the difference between containers and virtual machine images?\\nHow can I get started with containers?\\nHow can IBM Cloud Paks help?\\nWhere can I run my containers?\\nWhat is Red Hat OpenShift on IBM Cloud?Why should I care about containers?\\nAcross organizations, there is a spectrum of container adoption. Many people are just learning about containers. Some companies are further along in their journey. If you're at all considering containerization, it's time to join the fun where you can see real business results:Faster time to market: New applications and services are what keep your competitive edge. Organizations are able to speed up delivery of new services with development and operational agility.\\nDeployment velocity: Move quicker from development to deployment. 
Containerization breaks down barriers for DevOps teams to accelerate deployment times and frequency.\\nIT infrastructure reduction: Reduce your costs by increasing your application workload density, getting better utilization of your server compute density, and reducing software licensing costs.\\nIT operational efficiency: Gain more operational efficiency by streamlining and automating the management of diverse applications and infrastructure into a single operating model.\\nGain freedom of choice: Package, ship, and run applications on any public or private cloud.Next steps: Learn what the true benefits of moving to containers are.What are containers?\\nThe best analogy for understanding containers is a shipping container. That's why the majority of all container articles and blog posts show a photo of a shipping container. We're sure you've seen the transport of those big steel shipping containers.  The shipping industry standardized on a consistent size container. Now, the same container can move from a ship to a train to a truck without unloading the cargo. The container contents do not matter.\\nJust like a shipping container, a software container is a standardized package of software. Everything needed for the software to run is inside the container. The software code, runtime, system tools, system libraries, and settings are all inside a single container.\\nAre containers the same thing as microservices?\\nOnce you start diving into containers, it's impossible to avoid reading about microservices.  Microservices is an architectural style. A microservices architecture structures an application by using as a collection of loosely coupled services, which deliver specific business capabilities. Containers help make it happen.\\nWhat is an example of a microservices application?\\nMore than ten years ago, Netflix was one of the first companies to begin using containers extensively. 
They rewrote the applications that ran their entire video service by using a microservices architecture. In 2017, Netflix estimated that it employed around 700 microservices to control each of the many functions that make up its service. Let's look at a few :Video selection: A microservice, in a container, provides your phone, tablet, computer, or TV with the video file to play and at a video quality based on your internet speed.\\nViewing history: One microservice remembers what shows you watch.\\nProgram recommendations: A microservice takes a look at your viewing history and uses analytics to recommend movies.\\nMain menu: One microservice provides the names and images of these movies shown on your main menu.\\nBilling: Another microser</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents /blogs/what-are-containers-and-why-do-you-need-them\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "      <td>Palantir for IBM Cloud Pak for Data enables building no-/low-code line of business applications using data, machine learning, and optimization from IBM Cloud Pak for Data. Ontology managers can define business-oriented data models integrating data from IBM Cloud Pak for Data. Application builders can use Palantir tools to create applications using these data models. Additionally, applications can integrate machine learning models from IBM Cloud Pak for Data to infuse predictions, as well as decision optimization result data from IBM Cloud Pak for Data to determine optimized actions based on data and predictions.\\nThis blog post explains how to create AI-infused apps using Palantir ontology and application building tools together with IBM Cloud Pak for Data model deployments and data and AI catalog. It also outlines the underlying integration architecture.\\nIBM Cloud Pak for Data as the data and AI foundation\\nIBM Cloud Pak for Data together with Palantir provide integrated capabilities to:Collect, transform, and integrate data from many sources\\nOrganize data to be ready for use in projects and applications\\nAnalyze data to gain insights and create AI models\\nInfuse AI insights such as predictions and optimization via APIs where needed\\nBuild applications using no-/low-code app builders, integrating data and AI on multiple clouds while leveraging Red Hat OpenShift as the underlying platform.Applications built with Palantir for IBM Cloud Pak for Data by application builders -- using no-/low-code tools -- can use data, predictions, and optimization result data from IBM Cloud Pak for Data, helping business users achieve smarter business outcomes by taking optimized actions.Data engineers can create data services in IBM Cloud Pak for Data such as Db2, Db2 Warehouse, Postgres, etc. to collect data and can build a catalog of data assets available for data scientists and application builders to use. 
Where needed, they can use DataStage flows or other tools to transform data from multiple sources and use data virtualization services.\\nData scientists can collaborate in projects, add data sets from the catalog or from other data sources, analyze data, gain insights, and train machine learning models or define decision optimization models. To train models, they may use Python code in JupyterLab using their favorite machine learning framework, SPSS Modeler flows, or AutoAI, as shown in the following image.Models can be saved and deployed to spaces, as shown in the image below, to make them available for AI infusion into business processes and applications. The deployed model can then be called via the model deployment REST API.Building data and AI applications with Palantir for IBM Cloud Pak for Data\\nApplication builders can build rich no-/low-code applications using the Palantir app builder tools available through a new Palantir card on the IBM Cloud Pak for Data home page.From here, ontology managers can navigate to the Palantir UI to define and manage Palantir ontologies, integrating data from IBM Cloud Pak for Data. Application builders can navigate to the Palantir UI to build apps using ontologies and connecting machine learning models from IBM Cloud Pak for Data to integrate predictions into applications. Once in the Palantir UI, they can integrate AI models from IBM Cloud Pak for Data into Palantir apps  and can integrate data from IBM Cloud Pak for Data into a Palantir ontology .To enable Palantir applications, a business-oriented ontology needs to first be defined using Palantir ontology management, which integrates with the data sets from the data and AI catalog in IBM Cloud Pak for Data. 
From the ontology management UI, users can search the IBM Cloud Pak for Data catalog for data assets to use and can then drill down into the columns or object attributes of the data set to map these to business objects defined in the Palantir ontology.The underlying data behind the data assets is then synchronized from the referenced data source into</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents /blogs/data-and-ai-applications-with-palantir-for-ibm-cloud-pak-for-data\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>8</td>\n",
       "      <td></td>\n",
       "      <td>Archived contentArchive date: 2023-02-09This content is no longer being updated or maintained. The content is provided as is. Given the rapid evolution of technology, some content, steps, or illustrations may have changed.Generic JDBC enables a variety of connections to different data sources. A generic JDBC connection offers the option to connect to a data source using a different driver from what is pre-built in IBM Cloud Pak for Data, and provides additional properties and support for customized use cases.\\nLearning objectives\\nThe purpose of this tutorial is to demonstrate how to create a generic JDBC connector in IBM Cloud Pak for Data.\\nIn this tutorial, you will learn how to: Check that you have the Administrator role\\nImport a JDBC JAR file to IBM Cloud Pak for Data\\nCreate a generic JDBC platform connection\\nUse the created generic JDBC connection in a projectPrerequisites\\nYou will need IBM Cloud Pak for Data platform software on-prem.\\nNote: The generic JDBC connection is supported by IBM Cloud Pak for Data; at the time of this writing, it is not supported by IBM Cloud Pak for Data as a Service.\\nWhen using the generic JDBC connector, please ensure that a JDBC driver exists for the data source you would like to connect to.\\nAbout the data\\nFor this tutorial, we create an SAP HANA data connection. Please substitute details pertaining to the data connection and its containing data with your own.\\nEstimated time\\nCompleting this tutorial should take about 10 minutes.\\nSteps\\nStep 1. Navigate to IBM Cloud Pak for Data and ensure that you have Administrator accessTo upload your JDBC JAR, you must have Administer Platform permissions. You can ensure that you have this role by clicking on your profile on the upper-right corner of IBM Cloud Pak for Data, then clicking Profile &amp;gt; Settings.Under Roles, check that you have the Administrator role with Administer Platform under enabled permissions. 
If you are unable to obtain the Administrator role, you need to ask an administrator to perform Step 2. Upload your JDBC JAR to IBM Cloud Pak for Data.Step 2. Upload JDBC JAR to IBM Cloud Pak for DataTo upload a JDBC JAR to IBM Cloud Pak for Data, navigate to Data &amp;gt; Platform connections.With Administer Platform permissions, you should see the JDBC drivers tab.Drag and drop the JDBC driver JAR file into the box on the left side of the page. You should see your JAR listed, and after clicking Upload, your JAR file should be listed under Existing files on the right side of the page. Your JAR file is now available for use in IBM Cloud Pak for Data.Step 3. Creating a Generic JDBC platform connection\\nIn IBM Cloud Pak for Data, you can create a platform-level or project-level connection. A platform-level connection enables use of the connector across the platform vs a project-level connection, which can only be used in a project.To create an IBM Cloud Pak for Data platform connection, click on Data &amp;gt; Platform Connections.Click on the New connection button.From the Add Connection page, click on the Generic JDBC connector, then the Select button, which launches the Create connection page.Enter the desired name and description  for your generic JDBC SAP HANA connector. The JAR URL drop-down will display all existing JDBC JARs available for use in IBM Cloud Pak for Data. Select the JAR required for a connection.Next, enter the required fields for JDBC URL and the JDBC class driver.Enter the username and password for the connection.Click on Test Connection to see if you can successfully connect.Once the test connection is verified, click Create to create the connection. The created SAP HANA  connection should display under Platform Connections ready for use in IBM Cloud Pak for Data.Step 4. Using the created generic JDBC connector in a projectNavigate to the desired project in IBM Cloud Pak for Data. 
As you can see, this project has no data assets.Click on Add to project and choose Connection from the asset type options.Click on the From platform tab to</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents /tutorials/using-generic-jdbc-connector-on-cloud-pak-for-data\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>6</td>\n",
       "      <td>o</td>\n",
       "      <td>\\nAdding Cloud PaksIBM Cloud Paks&amp;trade; are containerized, licensed IBM middleware and open source software components that you can use to modernize, move, and build cloud-native business applications in hybrid and multicloud deployments. By running exclusively on  and Red Hat Enterprise Linux, Cloud Paks are built atop a secure stack and maintain consistency in deployment and behavior across cloud providers. You have greater flexibility to run and manage your workloads securely where you need them: on-premises, off-premises, in a backup provider, and in .Overview of Cloud Pak offeringsYou can deploy the entire set of Cloud Paks to manage your full-stack cloud apps, data, integration, automation, and management across  cloud providers.Cloud Pak for ApplicationsCloud Pak for DataCloud Pak for IntegrationCloud Pak for SecurityCloud Pak for ManagementAdding IBM Cloud PaksIBM Cloud Paks are containerized, licensed IBM middleware and open source software components as part of your hybrid cloud solution. IBM Cloud Paks run exclusively on  clusters, not community Kubernetes clusters.Before you begin:\\n Verify that your account administrator set up your  account with the Cloud Pak entitlement.\\n Make sure that you have the required permissions to create a cluster. These permissions include the following:\\n     The IAM Administrator platform access role for .\\n     The IAM Administrator platform access role for .\\n     The IAM Viewer platform access role for the resource group if you create the cluster in a resource group other than default.\\n     The appropriate infrastructure permissions, such as an API key with the Super User role for classic infrastructure.\\nTo add a Cloud Pak from the  catalog:Add your Cloud Pak entitlement from IBM Passport Advantage to your  cluster.For new clusters: Create a cluster with the --entitlement cloud_pak option. 
When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. You can optionally specify the worker node operating system . After your cluster is created, you are not charged the  license fee for the entitled worker nodes in the default worker pool. If you want to use a different worker pool for your Cloud Pak, follow the steps for existing clusters.\\nFor existing clusters or worker pools other than default: Create a worker pool with the --entitlement cloud_pak option. When you specify the number of workers  and flavor , make sure to specify only the number and size of worker nodes that you are entitled to use. After creation, your worker pool does not charge you the  license fee for your entitled worker nodes.Do not exceed your entitlement. Keep in mind that your OpenShift Container Platform entitlements can be used with other cloud providers or in other environments. To avoid billing issues later, make sure that you use only what you are entitled to use. For example, you might have an entitlement for the OCP licenses for two worker nodes of 4 CPU and 16 GB memory, and you create this worker pool with two worker nodes of 4 CPU and 16 GB memory. You used your entire entitlement, and you can't use the same entitlement for other worker pools, cloud providers, or environments.In the  catalog, in the Software tab, under Offering Type, check Cloud Paks.Select the Cloud Pak that you want to deploy, and follow the installation instructions. Each Cloud Pak requires an entitlement from IBM Passport Advantage, and has its own configuration settings. 
For more information, view the About tab and Cloud Pak documentation.Now you can run your Cloud Pak on your  cluster!\\nAssigning a Cloud Pak entitlement to your  accountTo deploy a Cloud Pak to your  cluster, your entitlement to the Cloud Pak must be assigned to your  account.Verify that your Cloud Pak entitlement is in your Container software library. If you don't see the entitlement, the entitlement might be owned by a different user. Verify the user, and if you still have issues, c</td>\n",
       "      <td>\"https://github.com/ibm-cloud-docs/openshift_integrations_cloud_paks.md\"</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>9</td>\n",
       "      <td></td>\n",
       "      <td>This blog post is the first of a three-part series authored by software developers and architects at IBM and Cloudera. This first post focuses on integration points of the recently announced joint offering: Cloudera Data Platform for IBM Cloud Pak for Data. The second post will look at how Cloudera Data Platform was installed on IBM Cloud using Ansible. And the third post will focus on lessons learned from installing, maintaining, and verifying the connectivity of the two platforms. Lets get started!\\nIn this post we will be outlining the main integration points between Cloudera Data Platform and IBM Cloud Pak for Data, and explaining how the two distinct data and AI platforms can communicate with each other. Integrating two platforms is made easy with capabilities available out of the box for both IBM Cloud Pak for Data and Cloudera Data Platform. Establishing a connection between the two is just a few clicks away.Architecture diagram showing Cloudera Data Plaform for Cloud Pak for Data\\nIn our view, there are three key points to integrating Cloudera Data Platform and IBM Cloud Pak for Data; all other services piggyback on one of these:Apache Knox Gateway \\nExecution Engine for Apache Hadoop \\nDb2 Big SQL Read on for more information about how each integration point works. For a demonstration on how to use data from Hive and Db2 check out the video below where we join the data using Data Virtualization and then display it with IBM Cognos Analytics check out the video below.Apache Knox Gateway\\nTo truly be secure, a Hadoop cluster needs Kerberos. However, Kerberos requires a client-side library and complex client-side configuration. This is where the Apache Knox Gateway  comes in. By encapsulating Kerberos, Knox eliminates the need for client software or client configuration and, thus, simplifies the access model. 
Knox integrates with identity management and SSO systems, such as Active Directory and LDAP, to allow identities from these systems to be used for access to Cloudera clusters.Knox dashboard showing the list of supported services\\nCloudera services such as Impala, Hive, and HDFS can be configured with Knox, allowing JDBC connections to easily be created in IBM Cloud Pak for Data.Creating a JDBC connection to Impala via KnoxList of connections on IBM Cloud Pak for Data\\nExecution Engine for Apache Hadoop\\nThe Execution Engine for Apache Hadoop service is installed on both IBM Cloud Pak for Data and on the worker nodes of a Cloudera Data Platform deployment. Execution Engine for Hadoop allows users to:Browse remote Hadoop data  through platform-level connections\\nCleanse and shape remote Hadoop data  with Data Refinery\\nRun a Jupyter notebook session on the remote Hadoop system\\nAccess Hadoop systems with basic utilities from RStudio and Jupyter notebooksAfter installing and configuring the services on IBM Cloud Pak for Data and Cloudera Data Platform, you can create platform-level connections to HDFS, Impala, and Hive.Execution Engine for Hadoop connection options\\nOnce a connection has been established, data from HDFS, Impala, or Hive can be browsed and imported.Browsing through an HDFS connection made via Execution Engine for Hadoop\\nData residing in HDFS, Impala or Hive can be cleaned and modified through Data Refinery on IBM Cloud Pak for Data.Data Refinery allows for operations to be run on data\\nThe Hadoop Execution Engine also allows for Jupyter notebook sessions to connect to a remote Hadoop system.Jupyter notebook connecting to a remote HDFS\\nDb2 Big SQL\\nThe Db2 Big SQL service is installed on IBM Cloud Pak for Data and is configured to communicate with a Cloudera Data Platform deployment. 
Db2 Big SQL allows users to:Query data stored on Hadoop services such as HDFS and Hive\\nQuery large amounts of data residing in a secured  or unsecured Hadoop-based platformOnce Big SQL is configured, you can choose what data to synchronize into tables. Once in a table, you can save the data to a project, run queries against it, or browse t</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/blogs/a-deep-dive-integrating-cloudera-data-platform-and-ibm-cloud-pak-for-data\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>5</td>\n",
       "      <td></td>\n",
       "      <td>Netezza has always been synonymous with speed and simplicity. Netezza Performance Server for IBM Cloud Pak for Data is the next-generation advanced data warehouse and analytics platform available both on-premises and on cloud.\\nTo understand why Netezza Performance Server for IBM Cloud Pak for Data is important for application developers, it is first important to understand the journey to AI and how to get there. Many developers want to infuse AI into the companies they work for, but dont really know how. IBM Cloud Pak for Data is a complete Data and AI platform that modernizes how businesses collect, organize, and analyze data to infuse AI throughout their organizations. If you look under the hood of IBM Cloud Pak for Data, you will see that it is built with the streamlined hybrid cloud foundation of Red Hat OpenShift. This solution supports multicloud environments, such as Amazon Web Services , Google Cloud, IBM Cloud, and private cloud deployments.\\nThe Netezza Performance Server part of IBM Cloud Pak for Data is responsible for the \"collect\" piece of the data lifecycle. Netezza Performance Server can take data from many sources and store current and historical data in an enterprise data warehouse so it can be used for reporting, analysis, and better decision-making. What makes the Netezza Performance Server so powerful is the fact that it can process huge amounts of data and run large jobs that can return results in seconds, rather than hours or days. Netezza has always been known for speed and simplicity, so the fact that the new generation of Netezza Performance Server is built onto the same engine means that you dont need to waste all your time on migration to the new platform, especially if you are coming form an older Netezza form factor. It is a simple nz_migrate command, then just point your applications to the new server. It doesn't get much easier than that.\\nSo what does this mean for application developers? 
Having everything you need in your journey to AI and all in one platform means that you dont need waste your time putting all the pieces together. Netezza Performance Server for IBM Cloud Pak for Data is an all-in-one Data and AI platform that lets you perform data science and machine learning with data volumes scaling into the petabytes.\\nNetezza Performance Server on IBM Cloud Pak for Data System or Netezza on Cloud\\nNetezza Performance Server for IBM Cloud Pak for Data comes in two form factors: It is available as part of a hyper-converged system that includes all hardware and software needed to get up and running quickly; and the other is Netezza available on IBM Cloud and AWS, with more clouds to come. This gives you the flexibility to run this on-premises with all the needed hardware, software, storage, compute, and networking in a single system. If you choose to run Netezza on cloud, you are getting a cloud-native deployment of the Netezza Performance Server database engine deployed to a public cloud data center of your choice.\\nNative in-database analytics and geospatial capabilities\\nNetezza Performance Server comes with advanced in-database analytics capabilities that can be used to act on the data stored in Netezza Performance Server. This package that used to be called Netezza In-Database Analytics is now called the Netezza Performance Server Analytics package and can be installed after you have the Netezza Performance Server up and running. The Netezza Performance Server Analytics package comprises a set of cartridges, each of which covers a different area of analytics. There are are analytics packages for:In-database analytics\\nSpatial\\nSpatial ESRI\\nMatrix\\nMapReduceThere are also some special geospatial capabilities available in Netezza Performance Server in order to process data needed for this particular type of use case. 
All of these capabilities come in handy when you want pre-packaged and powerful analytics capabilities to work on the data inside the database.\\nWith IBM Watson Studio and machine lear</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/blogs/get-acquainted-with-netezza-performance-server\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>2</td>\n",
       "      <td></td>\n",
       "      <td>Todays cloud-native and microservices-based architectures rely on a complex infrastructure that is made up of various hardware and software components. This complexity arises because of the number of applications, the variety of hardware and software in the infrastructure, the volume of data, and the large number of business processes that are part of network and IT operations.\\nThis increasingly complex infrastructure makes it difficult to troubleshoot and resolve issues quickly. Troubleshooting and root-cause analysis are harder with the explosion of data available from all the individual microservices. Closed-loop automation systems help transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes in a highly automated framework.\\nClosed-loop automation systems enable companies to transform network and IT operations by using AI-driven automation to detect anomalies, determine resolution, and implement the required changes within a continuous highly automated framework. Closed-loop automation helps solve many problems before they even become issues.Read more about Cloud Pak for Watson AIOps.One common use case for closed-loop automation is traffic flow optimization. By implementing a closed-loop automation system, teams can automatically correct issues like network anomalies within the provisioned network infrastructure. At the heart of a closed-loop automation system for the traffic flow optimization use case are the components of IBM Cloud Pak for Watson AIOps, which is at the heart of IBMs AIOps platform.\\nWhat is closed-loop automation?\\nA simple closed-loop implementation detects issues that could happen in the future. 
The appropriate data is analyzed by various predictive models, which then make a recommendation on the change to be made to the orchestration layer, which implements the change.\\nIn complex cases, closed-loop automation combines the predictive insights information with additional AI systems to determine a resolution. The AI system is trained to resolve these issues and is integrated with a robotics automation system  to automate the resolution process. If the AI system determines it has a high confidence that the suggested resolution is correct, it will invoke the orchestration engine to implement the solution automatically. If not, a trouble ticket is generated, and an engineer works to resolve the issue.\\nThe following image provides an overview of a closed-loop automation system that addresses issues of varying complexity.Closed-loop automation enables these key capabilities:Anomaly detection. Anomaly detection uses large, real-time, time-series data to analyze networks applications, database metrics, operating systems, and so on. This gives anomaly detection the capability to identify patterns and anomalies, and raise awareness toward predictive actions.\\nIntelligent alerts. In a general operations environment, multiple connected components can raise alerts all related to the same failure event. These add to the overall load and volume of operations teams. However, 20 percent of overall alert volume is false-positive. Closed-loop automation uses machine learning models to create the patterns for the series of alerts so that those can be bound to causes and known actions, and then be corrected accordingly.\\nPredictive planning. Organizations can use machine learning algorithms to predict how application and network behaviors are dependent on seasonality and other factors to ensure that appropriate corrective actions are taken, thereby permitting systems to perform optimally.\\nRoot-cause analysis. 
Closed-loop automation leverages data to intelligently identify all anomalies in the service path and use AI to map it to find the most likely cause for a particular incident. It makes use of various AI algorithms to ensure the accuracy of root-cause identifications and implements the required remediation steps.IT Ops teams apply AI and</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/articles/an-introduction-to-closed-loop-automation\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>0</td>\n",
       "      <td></td>\n",
       "      <td>It used to be that the developer and operations roles were fully separated. The developer received the business requirements, they came up with a design and implemented it. A dedicated QA team wrote tests, and validated that the new application, capability, or change worked as expected, and then the code or application was handed over to the operations team to run it. In most organizations, these roles have now merged into a set of development practices that are commonly referred to as DevOps.\\nTo learn more about what is SRE, read this Red Hat article.\\nAt the same time, DevOps practices brought an expanded role for the operations teams, typically referred to now as a Site Reliability Engineering, or SRE. Red Hat defines the DevOps practice of SRE like this SRE takes the tasks that have historically been done by operations teams, often manually, and instead gives them to engineers or ops teams who use software and automation to solve problems and manage production systems.\\nNow, the SRE role, can either be a dedicated engineering position, or frequently is one that the developers take on, either full or part time. The goal of SRE is to create a natural improvement cycle, which one gets by linking the application development and operations portions. The applications can be created such that they are easier to manage and observe, while the insights from monitoring the application, can in turn identify inefficiencies, defects, and potential improvements.\\nIn this article, we will focus on how Cloud Pak for Watson AIOps can help an SRE  improve and automate their applications. 
To reiterate, though, while we will use the term SRE throughout this article, SRE also applies to IT Operations engineers and application developers who are supporting their products.\\nAn SRE focuses on reacting to problems encountered by the application, but also on crafting ways to improve the time to repair the application , or even better on avoiding these problems in the first place.\\nSo, what is important to an SRE? When a problem occurs, they need to have a holistic view of what is happening to their application. This means they should have a real-time view of the state of all their components, understand what is relevant in their logs or metrics, and know whether any of their monitors, such as synthetic consumption monitors show any problems. However, they do not want to be inundated with lots of different alerts going off, but rather they want to know where to focus their time and attention. After a problem has been confirmed, they would like guidance on possible solutions, and automation to execute them. Ideally though, this would be available to them before a problem becomes an outage.\\nHistorically, two factors stood in the way here: the sheer volume of data and the fact that a lot of that data was in an unstructured format. As long as there have been applications, there have been simple monitoring tools. For example, an operator would set an alert on the memory consumption of an application, and when it breached a given amount, an alert would go off. Apart from being a blunt instrument , this approach might have been practical when the operator was monitoring a handful of monolithic applications.\\nNow that SREs are monitoring thousands of microservices, this approach is no longer practical. Secondly, with the advent of AI, we now have a way to unlock all the value of unstructured  data. 
For example, many problems show early warnings in your logs, long before an application starts showing problems , but the problems dont have an impact yet and thus they wont fail the main application flow.\\nAs such, some of the features that an AIOps solution like Cloud Pak for Watson AIOps that support SREs are:No code changes are required. Your applications and your site reliability processes already output a lot of helpful information. Its time to take advantage of that data.\\nBring all your disparate data sources together for a holistic view across all parts of your e</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/articles/improving-and-automating-your-ops-with-cp4waiops\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>3</td>\n",
       "      <td></td>\n",
       "      <td>In this blog, I will answer the following general questions about AIOps:What is AIOps?\\nWhat does a company need to utilize AIOps?\\nHow do I train AI?\\nWhat is no-code AI?\\nWhy is this important?What is AIOps?\\nGartners definition of AIOps: AIOps combines big data and machine learning to automate IT operations  processes, including event correlation, anomaly detection, and causality determination.\\nITOps teams are beginning to explore how automation can improve business outcomes through scalable artificial intelligence . Gartner found that 10 times more business leaders will rely on AIOps platforms for automated insights in the next three years. Without it, IT organizations can experience a lack of observability, become overwhelmed from manually managing data, and end up focusing on infrastructure rather than an application-centric approach. The ITOps teams of tomorrow will experience full visualization and observability of their IT environments with insights derived from various tools that focus on critical applications to support business performance.\\nWhat does a company need to be able to use AIOps?\\nTo start, ITOps organizations can determine what the company needs in business performance. By applying AI, what does the company intend to do to improve outcomes? Next, companies will need to consider if the organization has historical and real-time data organized in a way that can begin to train models, and if so, is it understandable.\\nHow do I train AI?\\nIf a company wants to use AIOps they need to establish a baseline performance, which requires that they have an understanding of their historical data by measuring the performance of their steady state using real-time data. When something bad happens to a system, like an application outage, the company can more efficiently pinpoint the issue by using insights gathered from historical and real-time data. 
This insightful information can help find ways to triage events or outages as efficiently as possible.\\nWhat is no-code AI?\\nCompanies don't always have data scientists, data engineers, or data centric teams to help. What if we could train AI without the assistance of data scientists, data engineers, and data centric teams?\\nNo-code AI simply means using no-code in automation training. A company can use its history and what it is doing today as a baseline to improve its tomorrow without developing code to train the AI.\\nWhy is this important?\\nAs an IT admin juggling multiple sources of data and resolving incidents manually, you need tools to help you resolve incidents faster.\\nTime is money. Battling aggressive timelines, ITOps teams need to identify solutions quickly and have those solutions work correctly when theyre set in motion. Lets unpack how this is achieved with Cloud Pak for Watson AIOps.\\nSo, what is the Cloud Pak for Watson AIOps?\\nCloud Pak for Watson AIOps provides an application-centric data and intelligence platform powering automation for application, incident, cost, and security  risk management with trusted and explainable AI.\\nFigure 1: Bringing DevSecOps together with AI and automationThe goal of using AIOps is to focus on business outcomes. To do this, a company must leverage its data without creating gates of entry by requiring a dedicated data scientist, data engineer, or a data-centric team to get up and running. Cloud Pak for Watson AIOps provides a comprehensive understanding of business applications baked in to help provide insights and intelligence derived from not just operational data such as logs or events but enhanced organizational insights as well.\\nLearn more about Cloud Pak for Watson AIOps can evolve your ITOps organization.  And, explore blogs, articles, tutorials, and code patterns on the Cloud Pak for Watson AIOps hub page on IBM Developer.</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/blogs/evolving-itops-with-aiops-with-no-code-ai-training\\n</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>4</td>\n",
       "      <td></td>\n",
       "      <td>TensorFlow is an end-to-end open source machine learning platform that makes it easier to build and deploy machine learning models. A TensorFlow application uses a structure known as a data flow graph. By default in TensorFlow version 1.0, every graph had to be run within a TensorFlow session, which only allowed for the entire graph to be run all at once, and made it hard to debug the computation graph. The only way to get around this default and be able to debug the code was to use Eager Execution.\\nEager Execution is a flexible machine learning platform for research and experimentation that provides:An intuitive interface so that the code can be structured naturally and use Python data structures. Small models and small data can be quickly iterated.\\nEasier debugging by providing the ability to call operations directly to inspect code line by line and test changes.\\nA natural control flow using a Python control flow instead of a graph control flow, which simplifies the specification of dynamic models.With TensorFlow 2.x, Eager Execution is enabled by default, and allows TensorFlow code to be run and evaluated line by line.\\nLearning objectives\\nThis tutorial looks at the impact of Eager Execution and the benefits of having it enabled by default in TensorFlow 2.x. You'll use a Jupyter Notebook to observe the behavior of TensorFlow when Eager Execution is both disabled and enabled. 
You'll learn how to:Run a Jupyter Notebook using IBM Watson Studio on IBM Cloud Pak for Data as a Service\\nDisable and enable Eager Execution\\nUnderstand the benefits of Eager ExecutionPrerequisites\\nThe following prerequisites are required to follow the tutorial:An IBM Cloud Account\\nIBM Cloud Pak for DataEstimated time\\nIt should take you approximately 30 minutes to complete the tutorial.\\nStepsSet up IBM Cloud Pak for Data as a Service\\nCreate a new Project and import the notebook\\nRead through the notebook\\nRun the first half of the notebook\\nRestart the kernel\\nRun the second half of the notebookSet up IBM Cloud Pak for Data as a ServiceOpen a browser, and log in to IBM Cloud with your IBM Cloud credentials.Type Watson Studio in the search bar at the top. If you already have an instance of Watson Studio, it should be visible. If so, click it. If not, click Watson Studio under Catalog Results to create a new service instance.Select the type of plan to create if you are creating a new service instance. A Lite  plan should suffice for this tutorial). Click Create.Click Get Started on the landing page for the service instance. This should take you to the landing page for IBM Cloud Pak for Data as a Service.Click your avatar in the upper-right corner, then click Profile and settings under your name.Switch to the Services tab. You should see the Watson Studio service instance listed under Your Cloud Pak for Data services.\\n You can also associate other services such as Watson Knowledge Catalog and Watson Machine Learning with your IBM Cloud Pak for Data as a Service account. These are listed under Try our available services.\\n In the example shown here, a Watson Knowledge Catalog service instance already exists in the IBM Cloud account, so it's automatically associated with the IBM Cloud Pak for Data as a Service account. 
To add any other service , click Add within the tile for the service under Try our available services.Select the type of plan to create , and click Create.After the service instance is created, you are returned to the IBM Cloud Pak for Data as a Service instance. You should see that the service is now associated with your IBM Cloud Pak for Data as a Service account.Create a new project and import the notebookNavigate to the hamburger menu  on the left, and choose View all projects. After the screen loads, click New + or New project + to create a new project.Select Create an empty project.Provide a name for the project. You must associate an IBM Cloud Object Storage instance with your project. If you already have an IBM Cloud Object Storage service in</td>\n",
       "      <td>https://developer.ibm.com/middleware/v1/contents/tutorials/enable-eager-execution-in-tensorflow\\n</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INPUT PROMPT:  Answer the question based only on the context below. Context: Awww, come on guys, it's so simple. Maybe you need a refresher course. ... It's all ball bearings nowadays.\" - Fletch, 1985 movie Sadly, it is not all about ball bearings nowadays. It's all about containers. If you heard about containers, but are not sure what they are, you've come to the right place. This blog post addresses the following questions:Why should I care about containers? What are containers? Are containers the same as microservices? What is an example of a microservices application? What is a Docker container? What are container orchestration and Kubernetes? What is the difference between containers and virtual machine images? How can I get started with containers? How can IBM Cloud Paks help? Where can I run my containers? What is Red Hat OpenShift on IBM Cloud?Why should I care about containers? Across organizations, there is a spectrum of container adoption. Many people are just learning about containers. Some companies are further along in their journey. If you're at all considering containerization, it's time to join the fun where you can see real business results:Faster time to market: New applications and services are what keep your competitive edge. Organizations are able to speed up delivery of new services with development and operational agility. Deployment velocity: Move quicker from development to deployment. Containerization breaks down barriers for DevOps teams to accelerate deployment times and frequency. IT infrastructure reduction: Reduce your costs by increasing your application workload density, getting better utilization of your server compute density, and reducing software licensing costs. IT operational efficiency: Gain more operational efficiency by streamlining and automating the management of diverse applications and infrastructure into a single operating model. 
Gain freedom of choice: Package, ship, and run applications on any public or private cloud.Next steps: Learn what the true benefits of moving to containers are.What are containers? The best analogy for understanding containers is a shipping container. That's why the majority of all container articles and blog posts show a photo of a shipping container. We're sure you've seen the transport of those big steel shipping containers. The shipping industry standardized on a consistent size container. Now, the same container can move from a ship to a train to a truck without unloading the cargo. The container contents do not matter. Just like a shipping container, a software container is a standardized package of software. Everything needed for the software to run is inside the container. The software code, runtime, system tools, system libraries, and settings are all inside a single container. Are containers the same thing as microservices? Once you start diving into containers, it's impossible to avoid reading about microservices. Microservices is an architectural style. A microservices architecture structures an application by using as a collection of loosely coupled services, which deliver specific business capabilities. Containers help make it happen. What is an example of a microservices application? More than ten years ago, Netflix was one of the first companies to begin using containers extensively. They rewrote the applications that ran their entire video service by using a microservices architecture. In 2017, Netflix estimated that it employed around 700 microservices to control each of the many functions that make up its service. Let's look at a few :Video selection: A microservice, in a container, provides your phone, tablet, computer, or TV with the video file to play and at a video quality based on your internet speed. Viewing history: One microservice remembers what shows you watch. 
Program recommendations: A microservice takes a look at your viewing history and uses analytics to recommend movies. Main menu: One microservice provides the names and images of these movies shown on your main menu. Billing: Another microser Question: what is ibm cloud pak for data\n",
      "BAM Output:   IBM Cloud Pak for Data is a purpose-built, integrated platform that provides the foundation for modernizing data and analytics workloads. It is a fully open, extensible, and integrated platform that brings together the best of IBM and Red Hat technologies, including Red Hat OpenShift on IBM Cloud, IBM Cloud Pak for Data, IBM Cloud Pak for Integration, and IBM Cloud Pak for Security. IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance. IBM Cloud Pak for Data is an open and integrated platform that brings together the best of IBM and Red Hat technologies. It includes the following capabilities:IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance.\n",
      "FINAL ANSWER:   IBM Cloud Pak for Data is a purpose-built, integrated platform that provides the foundation for modernizing data and analytics workloads. It is a fully open, extensible, and integrated platform that brings together the best of IBM and Red Hat technologies, including Red Hat OpenShift on IBM Cloud, IBM Cloud Pak for Data, IBM Cloud Pak for Integration, and IBM Cloud Pak for Security. IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance. IBM Cloud Pak for Data is an open and integrated platform that brings together the best of IBM and Red Hat technologies. It includes the following capabilities:IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance.\n",
      "URL:  https://developer.ibm.com/middleware/v1/contents /blogs/what-are-containers-and-why-do-you-need-them\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(' IBM Cloud Pak for Data is a purpose-built, integrated platform that provides the foundation for modernizing data and analytics workloads. It is a fully open, extensible, and integrated platform that brings together the best of IBM and Red Hat technologies, including Red Hat OpenShift on IBM Cloud, IBM Cloud Pak for Data, IBM Cloud Pak for Integration, and IBM Cloud Pak for Security. IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance. IBM Cloud Pak for Data is an open and integrated platform that brings together the best of IBM and Red Hat technologies. It includes the following capabilities:IBM Cloud Pak for Data is a platform for modernizing data and analytics. You can use it to manage the data lifecycle, from data ingestion to data storage to data processing to data visualization to data governance.',\n",
       " 'https://developer.ibm.com/middleware/v1/contents /blogs/what-are-containers-and-why-do-you-need-them\\n')"
      ]
     },
     "execution_count": 133,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
