<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <meta charset="utf-8">
  
  
  <title>Spark | Hexo</title>
  <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
  <meta name="description" content="Spark 实验目的 安装spark并熟悉pyspark 的RDD算子操作  实验过程 ## 一、Spark local模式安装  1.1安装scala123456789101112cp ~&#x2F;big_data_tools&#x2F;scala-2.12.8.tgz &#x2F;apps&#x2F;tar zxvf &#x2F;apps&#x2F;scala-2.12.8.tgz -C &#x2F;apps&#x2F;mv &#x2F;apps&#x2F;scala-2.12.8&#x2F; &#x2F;a">
<meta property="og:type" content="article">
<meta property="og:title" content="Spark">
<meta property="og:url" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/index.html">
<meta property="og:site_name" content="Hexo">
<meta property="og:description" content="Spark 实验目的 安装spark并熟悉pyspark 的RDD算子操作  实验过程 ## 一、Spark local模式安装  1.1安装scala123456789101112cp ~&#x2F;big_data_tools&#x2F;scala-2.12.8.tgz &#x2F;apps&#x2F;tar zxvf &#x2F;apps&#x2F;scala-2.12.8.tgz -C &#x2F;apps&#x2F;mv &#x2F;apps&#x2F;scala-2.12.8&#x2F; &#x2F;a">
<meta property="og:locale" content="en_US">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606273331055.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606273762884.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606273853343.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606273953600.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606274086471.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606274323080.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606274937357.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275049400.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275258091.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275364676.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275518216.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275645182.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275686989.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606275841446.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606276390750.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606276422023.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606281704800.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606284378614.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606284436182.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606284817174.png">
<meta property="og:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606284875720.png">
<meta property="article:published_time" content="2020-12-18T01:22:05.246Z">
<meta property="article:modified_time" content="2020-12-18T01:22:05.246Z">
<meta property="article:author" content="John Doe">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/spark%E5%AE%89%E8%A3%85.assets/1606273331055.png">
  
    <link rel="alternate" href="/atom.xml" title="Hexo" type="application/atom+xml">
  
  
    <link rel="shortcut icon" href="/favicon.png">
  
  
    
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/typeface-source-code-pro@0.0.71/index.min.css">

  
  
<link rel="stylesheet" href="/css/style.css">

  
    
<link rel="stylesheet" href="/fancybox/jquery.fancybox.min.css">

  
<meta name="generator" content="Hexo 5.3.0"></head>

<body>
  <div id="container">
    <div id="wrap">
      <header id="header">
  <div id="banner"></div>
  <div id="header-outer" class="outer">
    <div id="header-title" class="inner">
      <h1 id="logo-wrap">
        <a href="/" id="logo">Hexo</a>
      </h1>
      
    </div>
    <div id="header-inner" class="inner">
      <nav id="main-nav">
        <a id="main-nav-toggle" class="nav-icon"></a>
        
          <a class="main-nav-link" href="/">Home</a>
        
          <a class="main-nav-link" href="/archives">Archives</a>
        
      </nav>
      <nav id="sub-nav">
        
          <a id="nav-rss-link" class="nav-icon" href="/atom.xml" title="RSS Feed"></a>
        
        <a id="nav-search-btn" class="nav-icon" title="Search"></a>
      </nav>
      <div id="search-form-wrap">
        <form action="//google.com/search" method="get" accept-charset="UTF-8" class="search-form"><input type="search" name="q" class="search-form-input" placeholder="Search"><button type="submit" class="search-form-submit">&#xF002;</button><input type="hidden" name="sitesearch" value="http://example.com"></form>
      </div>
    </div>
  </div>
</header>

      <div class="outer">
        <section id="main"><article id="post-spark安装" class="h-entry article article-type-post" itemprop="blogPost" itemscope itemtype="https://schema.org/BlogPosting">
  <div class="article-meta">
    <a href="/2020/12/18/spark%E5%AE%89%E8%A3%85/" class="article-date">
  <time class="dt-published" datetime="2020-12-18T01:22:05.246Z" itemprop="datePublished">2020-12-18</time>
</a>
    
  </div>
  <div class="article-inner">
    
    
      <header class="article-header">
        
  
    <h1 class="p-name article-title" itemprop="headline name">
      Spark
    </h1>
  

      </header>
    
    <div class="e-content article-entry" itemprop="articleBody">
      
        <h1>Spark</h1>
<h2>实验目的</h2>
<p>安装spark并熟悉pyspark 的RDD算子操作</p>

<h2>实验过程</h2>
<h2 id="一、Spark-local模式安装"><a href="#一、Spark-local模式安装" class="headerlink" title="一、Spark local模式安装"></a>一、Spark local模式安装</h2>

<h4 id="1-1安装scala"><a href="#1-1安装scala" class="headerlink" title="1.1安装scala"></a>1.1安装scala</h4><figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line">cp ~/big_data_tools/scala-2.12.8.tgz /apps/</span><br><span class="line">tar zxvf /apps/scala-2.12.8.tgz -C /apps/</span><br><span class="line">mv /apps/scala-2.12.8/ /apps/scala</span><br><span class="line"><span class="meta">#</span><span class="bash"> 删除压缩包</span></span><br><span class="line">rm /apps/scala-2.12.8.tgz</span><br><span class="line"><span class="meta">#</span><span class="bash"> 添加环境变量</span></span><br><span class="line">vim ~/.bashrc</span><br><span class="line"><span class="meta">#</span><span class="bash"> Scala</span></span><br><span class="line">export SCALA_HOME=/apps/scala</span><br><span class="line">export PATH=$SCALA_HOME/bin:$PATH</span><br><span class="line"><span class="meta">#</span><span class="bash"> 使配置生效</span></span><br><span class="line">source ~/.bashrc</span><br></pre></td></tr></table></figure>
<h4 id="1-2安装spark"><a href="#1-2安装spark" class="headerlink" title="1.2安装spark"></a>1.2安装spark</h4><figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line">cp ~/big_data_tools/spark-2.4.3-bin-hadoop2.7.tgz /apps</span><br><span class="line">tar zxvf /apps/spark-2.4.3-bin-hadoop2.7.tgz -C /apps/</span><br><span class="line">mv /apps/spark-2.4.3-bin-hadoop2.7/ /apps/spark</span><br><span class="line"><span class="meta">#</span><span class="bash"> 删除压缩包</span></span><br><span class="line">rm /apps/spark-2.4.3-bin-hadoop2.7.tgz</span><br><span class="line"><span class="meta">#</span><span class="bash"> 添加环境变量</span></span><br><span class="line">vim ~/.bashrc</span><br><span class="line"><span class="meta">#</span><span class="bash"> Spark</span></span><br><span class="line">export SPARK_HOME=/apps/spark</span><br><span class="line">export PATH=$SPARK_HOME/bin:$PATH</span><br><span class="line"><span class="meta">#</span><span class="bash"> 使配置生效</span></span><br><span class="line">source ~/.bashrc </span><br></pre></td></tr></table></figure>
<p> 不需要对 spark 进行任何配置，就可以启动 spark-shell 进行任务处理了。 在终端中执行 </p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606273331055.png" alt="1606273331055"></p>
<p>查看当前运行模式</p>
<figure class="highlight"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">scala&gt; sc.master</span><br><span class="line">res1: String = local[*]</span><br></pre></td></tr></table></figure>
<p>用单机的多个线程来模拟Spark分布式计算。</p>
<h4 id="1-3执行测试"><a href="#1-3执行测试" class="headerlink" title="1.3执行测试"></a>1.3执行测试</h4><p> 在 Spark Shell 中，使用 Scala 加载 Spark 安装目录下文件 README.md 并转变为 RDD。 </p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">val</span> rdd = sc.textFile(<span class="string">&quot;/apps/spark/README.md&quot;</span>)</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606273762884.png" alt="1606273762884"></p>
<p> 对 RDD 进行算子操作，统计文件的行数。 </p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">rdd.count()</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606273853343.png" alt="1606273853343"></p>
<p>退出</p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">:quit</span><br></pre></td></tr></table></figure>
<h4 id="1-4启动pyspark"><a href="#1-4启动pyspark" class="headerlink" title="1.4启动pyspark"></a>1.4启动pyspark</h4><p> 在终端中执行，指定运行spark的python版本</p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">PYSPARK_PYTHON=python3 pyspark</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606273953600.png" alt="1606273953600"></p>
<p> 在 Spark Shell 中，使用 Python 加载 Spark 安装目录下文件 README.md 并转变为 RDD </p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">&gt;&gt;&gt; rdd = sc.textFile(<span class="string">&quot;file:/apps/spark/README.md&quot;</span>)</span><br><span class="line">&gt;&gt;&gt; rdd.count()</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606274086471.png" alt="1606274086471"></p>
<p> 到此 Spark Local 模式已经安装完成. </p>
<h2 id="二、伪分布式安装"><a href="#二、伪分布式安装" class="headerlink" title="二、伪分布式安装"></a>二、伪分布式安装</h2><p> 安装伪分布式，还需要对配置文件做一些修改。进入配置文件目录/apps/spark/conf</p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">cd /apps/spark/conf</span><br></pre></td></tr></table></figure>
<p> 将 slaves.template 重命名为 slaves  </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">mv slaves.template slaves</span><br></pre></td></tr></table></figure>
<p> 之前只有一个节点，保持原样就可以了 </p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606274323080.png" alt="1606274323080"></p>
<p> 将 spark-env.sh.template 重命名 spark-env.sh </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">mv spark-env.sh.template spark-env.sh</span><br></pre></td></tr></table></figure>
<p> 在 spark-env.sh 中添加如下内容 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">HADOOP_CONF_DIR=/apps/hadoop/etc/hadoop</span><br><span class="line">JAVA_HOME=/apps/java</span><br><span class="line">SPARK_MASTER_IP=ubuntu</span><br><span class="line">SPARK_MASTER_PORT=7077</span><br><span class="line">SPARK_MASTER_WEBUI_PORT=8080</span><br><span class="line">SPARK_WORKER_CORES=1</span><br><span class="line">SPARK_WORKER_MEMORY=1g</span><br><span class="line">SPARK_WORKER_PORT=7078</span><br><span class="line">SPARK_WORKER_WEBUI_PORT=8081</span><br><span class="line">SPARK_EXECUTOR_INSTANCES=1</span><br></pre></td></tr></table></figure>
<p> 说明：需要配置 JAVA_HOME 以及 HADOOP 配置文件所在的目录 HADOOP_CONF_DIR。SPARK_MASTER_IP、SPARK_MASTER_PORT、 SPARK_MASTER_WEBUI_PORT，分别指 spark 集群中，master 节点的 ip 地址、端口 号、提供的 web 接口的端口。SPARK_WORKER_CORES、SPARK_WORKER_MEMORY 分布为 worker 节点的内核数、内存大小。此处根据自己机器情况调整配置项参数，比如 ip 地址改为自己的主机名。 </p>
<p> 配置传递给 spark 应用程序的默认属性 将 spark-defaults.conf.template 重命名 spark-defaults.conf </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">mv spark-defaults.conf.template spark-defaults.conf</span><br></pre></td></tr></table></figure>
<p> 在其中添加如下内容 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">spark.master                        spark://ubuntu:7077</span><br><span class="line">spark.eventLog.enabled              true</span><br><span class="line">spark.eventLog.dir                  hdfs://localhost:9000/spark/eventLog</span><br><span class="line">spark.serializer                    org.apache.spark.serializer.KryoSerializer</span><br><span class="line">spark.driver.memory                 1g</span><br><span class="line">spark.jars.packages                 Azure:mmlspark:0.12</span><br></pre></td></tr></table></figure>
<p> MMLSpark 是微软开源的用于 Spark 的深度学习库，为 Apache Spark 提供了大量深度 学习和数据科学工具，包括将 Spark Machine Learning 管道与 Microsoft Cognitive Toolkit(CNTK)和 OpenCV 进行无缝集成，使您能够快速创建功能强大，高度可扩展的大 型图像和文本数据集分析预测模型。</p>
<p> eventLog 用来存放日志，需要手动创建 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">start-all.sh</span><br><span class="line">hadoop fs -mkdir -p /spark/eventLog</span><br></pre></td></tr></table></figure>
<p> spark-defaults.conf 文件不配置的话，运行演示示例的任务不会显示在 web 界面中。 </p>
<h4 id="2-1启动spark"><a href="#2-1启动spark" class="headerlink" title="2.1启动spark"></a>2.1启动spark</h4><figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">/apps/spark/sbin/start-all.sh</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606274937357.png" alt="1606274937357"></p>
<p> 可以看到 Spark 创建了 Master 和 Worker 两个进程 </p>
<h4 id="运行演示实例"><a href="#运行演示实例" class="headerlink" title="运行演示实例"></a>运行演示实例</h4><figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">/apps/spark/bin/run-example SparkPi</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275049400.png" alt="1606275049400"></p>
<p> 日志信息很多，很难找到输出结果，下面对日志进行设置。 </p>
<h4 id="2-2设置日志"><a href="#2-2设置日志" class="headerlink" title="2.2设置日志"></a>2.2设置日志</h4><p> 上面运行过程中，由于 Log4j 的日志输出级别为 INFO 级别，所以会在屏幕上输出很多的 日志信息，造成很难定位程序的输出结果。可以通过修改日志级别进行解决。</p>
<p> 切换目录到/apps/spark/sbin 目录下，停止 Spark。  </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">/apps/spark/sbin/stop-all.sh</span><br></pre></td></tr></table></figure>
<p> 再切换目录到/apps/spark/conf 目录下，将目录下 log4j.properties.template 重命名为 log4j.properties </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">mv log4j.properties.template log4j.properties</span><br><span class="line">vim log4j.properties</span><br></pre></td></tr></table></figure>
<p> 第 19 行修改 log4j.rootCategory 的值为 WARN </p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275258091.png" alt="1606275258091"></p>
<p> 启动 Spark，再次运行演示实例，可以很容易找到结果。 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">/apps/spark/sbin/start-all.sh</span><br><span class="line">/apps/spark/bin/run-example SparkPi</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275364676.png" alt="1606275364676"></p>
<h4 id="使用pyspark统计HDFS上的文件的行数"><a href="#使用pyspark统计HDFS上的文件的行数" class="headerlink" title="使用pyspark统计HDFS上的文件的行数"></a>使用pyspark统计HDFS上的文件的行数</h4><p> 在 HDFS 上新建目录/input/spark 并上传文件 README.md 到该目录 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">hadoop fs -mkdir /input/spark/</span><br><span class="line">hadoop fs -put /apps/spark/README.md /input/spark/</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275518216.png" alt="1606275518216"></p>
<h4 id="启动-pyspark"><a href="#启动-pyspark" class="headerlink" title="启动 pyspark"></a>启动 pyspark</h4><figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">PYSPARK_PYTHON=python3 pyspark</span><br></pre></td></tr></table></figure>
<p> 使用 python 加载 HDFS 上的 README.md 文件，并转变为 RDD </p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">rdd = sc.textFile(<span class="string">&quot;hdfs://localhost:9000/input/spark/README.md&quot;</span>)</span><br><span class="line">rdd.count()</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275645182.png" alt="1606275645182"></p>
<p> 查看当前运行模式 </p>
<figure class="highlight scala"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">sc.master</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275686989.png" alt="1606275686989"></p>
<p> 因为我们在 spark-defaults.conf 中对主节点进行了设置，所以这里显示的运行模式不再 是 local。 </p>
<h2 id="三、Web界面"><a href="#三、Web界面" class="headerlink" title="三、Web界面"></a>三、Web界面</h2><p> 可以看到只有一个 worker，我们运行的例子显示在已完成的列表里 </p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606275841446.png" alt="1606275841446"></p>
<h2 id="四、Jupyter-notebook环境搭建"><a href="#四、Jupyter-notebook环境搭建" class="headerlink" title="四、Jupyter notebook环境搭建"></a>四、Jupyter notebook环境搭建</h2><p>安装jupyter notebook</p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">sudo apt install jupyter-notebook</span><br></pre></td></tr></table></figure>
<p> 新建工作目录~/work_pyspark </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">mkdir ~/work_pyspark</span><br></pre></td></tr></table></figure>
<p> 进入目录，执行以下命令在 jupyter notebook 中运行 spark </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=&#x27;notebook&#x27; \</span><br><span class="line">PYSPARK_PYTHON=python3 pyspark</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606276390750.png" alt="1606276390750"></p>
<p> 为方便起见，可以将下面的环境变量添加到~/.bashrc 中 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">export PYSPARK_DRIVER_PYTHON=jupyter</span><br><span class="line">export PYSPARK_DRIVER_PYTHON_OPTS=&#x27;notebook&#x27;</span><br><span class="line">export PYSPARK_PYTHON=python3</span><br></pre></td></tr></table></figure>
<p>使配置生效</p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">source ~/.bashrc</span><br></pre></td></tr></table></figure>
<p> 这样在终端中执行 pyspark，就默认在 jupyter notebook 中运行 spark。 </p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606276422023.png" alt="1606276422023"></p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606281704800.png" alt="1606281704800"></p>
<p> 新建一个工作目录 </p>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">mkdir ~/pyspark-workspace</span><br></pre></td></tr></table></figure>
<h2 id="五、配置jupyter-notebook"><a href="#五、配置jupyter-notebook" class="headerlink" title="五、配置jupyter notebook"></a>五、配置jupyter notebook</h2><p>由于虚拟机内部使用图形界面体验感不够好,所以配置以下jupyter服务器,使宿主机可以直接访问notebook</p>
<ul>
<li>生成配置文件</li>
</ul>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">jupyter notebook --generate-config</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606284378614.png" alt="1606284378614"></p>
<ul>
<li>启动python3 shell,设置密码,并拷贝输出的sha1一行</li>
</ul>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">python3</span><br><span class="line"><span class="meta">&gt;</span><span class="bash">&gt;&gt; from notebook.auth import passwd</span></span><br><span class="line"><span class="meta">&gt;</span><span class="bash">&gt;&gt; passwd()</span></span><br><span class="line">Enter password: </span><br><span class="line">Verify password: </span><br><span class="line">&#x27;sha1:f81168ee5979:773a40f2f37625b9dd22f6626ed1dfe9300adba3&#x27;</span><br><span class="line"><span class="meta">&gt;</span><span class="bash">&gt;&gt; <span class="built_in">exit</span>()</span></span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606284436182.png" alt="1606284436182"></p>
<ul>
<li>编辑 /home/chen/.jupyter/jupyter_notebook_config.py,添加如下</li>
</ul>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">c.NotebookApp.ip = &quot;10.0.0.135&quot; # 不知道为什么这里填127.0.0.1不可以,10.0.0.135是NAT模式的IP</span><br><span class="line">c.NotebookApp.password=u&quot;sha1:f81168ee5979:773a40f2f37625b9dd22f6626ed1dfe9300adba3&quot;</span><br><span class="line">c.NotebookApp.open_browser = False</span><br><span class="line">c.NotebookApp.port=8888</span><br></pre></td></tr></table></figure>
<ul>
<li>在工作空间启动pyspark</li>
</ul>
<figure class="highlight shell"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">cd ~/pyspark-workspace</span><br><span class="line">pyspark</span><br></pre></td></tr></table></figure>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606284817174.png" alt="1606284817174"></p>
<p><img src="spark%E5%AE%89%E8%A3%85.assets/1606284875720.png" alt="1606284875720"></p>
<h2 id="六、pyspark-RDD"><a href="#六、pyspark-RDD" class="headerlink" title="六、pyspark RDD"></a>六、pyspark RDD</h2><p>RDD算子</p>
<ul>
<li>Transformation<ul>
<li>Transformation是通过转换从一个或多个RDD生成新的RDD，该操作是lazy的，当调用action算则，才会发起job</li>
<li>典型算子：map .  flatMap  .filter  .  reduceByKey  等</li>
</ul>
</li>
<li>Action<ul>
<li>当代码调用该类型算子时，立即启动job。</li>
<li>典型算子：take count  saveAsTextFile等</li>
</ul>
</li>
</ul>
<p>Transformation</p>
<table>
<thead>
<tr>
<th>Transformation API</th>
<th>说明</th>
</tr>
</thead>
<tbody><tr>
<td>filter(func)</td>
<td>筛选出满足函数Func的元素，并返回一个新的数据集</td>
</tr>
<tr>
<td>map(func)</td>
<td>将每个元素传递到函数func中，并将结果返回为一个新的数据集</td>
</tr>
<tr>
<td>flatMap(func)</td>
<td>与map()相似，但每个输入元素都可以映射到0或多个输出结果</td>
</tr>
<tr>
<td>groupByKey()</td>
<td>应用于(K,V)键值对的数据集时，返回一个新的(K,Iterable<V>形式的数据集)</td>
</tr>
<tr>
<td>reduceByKey(func)</td>
<td>应用于(K,V)键值对的数据集时，返回一个新的(K,V)形式的数据集，其中的每个值是将每个key传递到函数func中进行聚合。</td>
</tr>
</tbody></table>
<p>Action</p>
<table>
<thead>
<tr>
<th>Action API</th>
<th>说明</th>
</tr>
</thead>
<tbody><tr>
<td>count()</td>
<td>返回数据集中的元素的个数</td>
</tr>
<tr>
<td>collect()</td>
<td>以数组的形式返回数据集中的所有元素</td>
</tr>
<tr>
<td>first()</td>
<td>返回数据集中的第一个元素</td>
</tr>
<tr>
<td>take(n)</td>
<td>以数组的形式返回数据集中的前n个元素</td>
</tr>
<tr>
<td>reduce(func)</td>
<td>通过函数Func(输入两个参数并返回一个值)聚合数据集中的元素</td>
</tr>
<tr>
<td>foreach(func)</td>
<td>将数据集中的每个元素传递到函数func中运行</td>
</tr>
</tbody></table>
<h2 id="七、Pyspark-RDD"><a href="#七、Pyspark-RDD" class="headerlink" title="七、Pyspark RDD"></a>七、Pyspark RDD</h2><h3 id="1-查看pyspark的版本号"><a href="#1-查看pyspark的版本号" class="headerlink" title="1.查看pyspark的版本号"></a>1.查看pyspark的版本号</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">print(<span class="string">&quot;pyspark version:&quot;</span>+<span class="built_in">str</span>(sc.version))</span><br></pre></td></tr></table></figure>
<pre><code>pyspark version:2.4.3</code></pre>
<h3 id="2-使用parallelize创建RDD"><a href="#2-使用parallelize创建RDD" class="headerlink" title="2.使用parallelize创建RDD"></a>2.使用parallelize创建RDD</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">print(x.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]</code></pre>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&quot;apple&quot;</span>,<span class="string">&quot;orange&quot;</span>,<span class="string">&quot;banana&quot;</span>])</span><br><span class="line">print(x.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;apple&#39;, &#39;orange&#39;, &#39;banana&#39;]</code></pre>
<h2 id="3-map"><a href="#3-map" class="headerlink" title="3. map"></a>3. map</h2><p>map(f,preservesPartitioning=False):对RDD中每个元素进行f函数里面的操作，返回一个新的RDD。preservesPartitioning表示是否保留父RDD的分区信息</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.<span class="built_in">map</span>(<span class="keyword">lambda</span> x:(x,x**<span class="number">2</span>))</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
[(1, 1), (2, 4), (3, 9)]</code></pre>
<h2 id="4-flatMap"><a href="#4-flatMap" class="headerlink" title="4.flatMap"></a>4.flatMap</h2><p>flatMap(f, preservesPartitioning=False)：对 RDD 中每个元素进行 f 函数里面的操作，返回一个扁平化结果的新 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">3</span>])</span><br><span class="line">y = x.flatMap(<span class="keyword">lambda</span> x:(x,x*<span class="number">100</span>,x**<span class="number">2</span>))</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 3]
[1, 100, 1, 3, 300, 9, 3, 300, 9]</code></pre>
<h2 id="5-mapPartitions"><a href="#5-mapPartitions" class="headerlink" title="5.mapPartitions"></a>5.mapPartitions</h2><p>mapPartitions(f, preservesPartitioning=False)：对 RDD 中每个分区里面的全部元素进行自定义 f 函数操作，返回一个新 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>],<span class="number">2</span>)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">iterator</span>):</span><span class="keyword">yield</span> <span class="built_in">sum</span>(iterator)</span><br><span class="line">    </span><br><span class="line">y = x.mapPartitions(f)</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[1], [2, 3]]
[[1], [5]]</code></pre>
<h2 id="6-mapPartitionsWithIndex"><a href="#6-mapPartitionsWithIndex" class="headerlink" title="6.mapPartitionsWithIndex"></a>6.mapPartitionsWithIndex</h2><p>mapPartitionsWithIndex(f, preservesPartitioning=False)：对 RDD 中每个分区里面的全部元素进行自定义 f 函数操作，并跟踪每个分区索引</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>],<span class="number">2</span>)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">partitionIndex,iterator</span>):</span><span class="keyword">yield</span> (partitionIndex,<span class="built_in">sum</span>(iterator))</span><br><span class="line">    </span><br><span class="line">y = x.mapPartitionsWithIndex(f)</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[1], [2, 3]]
[[(0, 1)], [(1, 5)]]</code></pre>
<h2 id="7-分区数量"><a href="#7-分区数量" class="headerlink" title="7.分区数量"></a>7.分区数量</h2><p>getNumPartitions():返回分区的数量</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>],<span class="number">2</span>)</span><br><span class="line">y = x.getNumPartitions()</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[[1], [2, 3]]
2</code></pre>
<h2 id="8-filter"><a href="#8-filter" class="headerlink" title="8.filter"></a>8.filter</h2><p>filter(f)：对 RDD 中的元素进行过滤，返回一个满足过滤条件的新 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.<span class="built_in">filter</span>(<span class="keyword">lambda</span> x:x%<span class="number">2</span>==<span class="number">1</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
[1, 3]</code></pre>
<h2 id="9-distinct"><a href="#9-distinct" class="headerlink" title="9.distinct"></a>9.distinct</h2><p>distinct(numPartitions=None)：对 RDD 中的元素进行去重，返回去重后的新 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&quot;A&quot;</span>,<span class="string">&quot;B&quot;</span>,<span class="string">&quot;A&quot;</span>,<span class="string">&quot;C&quot;</span>])</span><br><span class="line">y = x.distinct()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;B&#39;, &#39;A&#39;, &#39;C&#39;]
[&#39;C&#39;, &#39;A&#39;, &#39;B&#39;]</code></pre>
<h2 id="10-sample"><a href="#10-sample" class="headerlink" title="10.sample"></a>10.sample</h2><p>sample(withReplacement, fraction, seed=None)：对 RDD 进行抽样操作。</p>
<ul>
<li>withReplacement：是否有放回</li>
<li>fraction：抽取的比率</li>
<li>seed：随机生成的种子</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize(<span class="built_in">range</span>(<span class="number">7</span>))</span><br><span class="line"><span class="comment"># call &#x27;sample&#x27; 5 times</span></span><br><span class="line">ylist = [x.sample(withReplacement=<span class="literal">False</span>, fraction=<span class="number">0.5</span>) <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="number">5</span>)]</span><br><span class="line">print(<span class="string">&#x27;x = &#x27;</span> + <span class="built_in">str</span>(x.collect()))</span><br><span class="line"><span class="keyword">for</span> cnt,y <span class="keyword">in</span> <span class="built_in">zip</span>(<span class="built_in">range</span>(<span class="built_in">len</span>(ylist)), ylist):</span><br><span class="line">    print(<span class="string">&#x27;sample:&#x27;</span> + <span class="built_in">str</span>(cnt) + <span class="string">&#x27; y = &#x27;</span> + <span class="built_in">str</span>(y.collect()))</span><br></pre></td></tr></table></figure>
<pre><code>x = [0, 1, 2, 3, 4, 5, 6]
sample:0 y = [1, 2, 5, 6]
sample:1 y = [0, 1, 2, 6]
sample:2 y = [1, 2, 3, 4]
sample:3 y = [2, 4, 6]
sample:4 y = [0, 1, 3]</code></pre>
<h2 id="11-takeSample"><a href="#11-takeSample" class="headerlink" title="11.takeSample"></a>11.takeSample</h2><p>takeSample(withReplacement, num, seed=None)：对 RDD 中元素进行抽样，返回抽样后 num 个元素。</p>
<ul>
<li>withReplacement：是否有放回</li>
<li>seed：随机种子数</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize(<span class="built_in">range</span>(<span class="number">7</span>))</span><br><span class="line"><span class="comment"># call &#x27;sample&#x27; 5 times</span></span><br><span class="line">ylist = [x.takeSample(withReplacement=<span class="literal">False</span>, num=<span class="number">3</span>) <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="number">5</span>)]</span><br><span class="line">print(<span class="string">&#x27;x = &#x27;</span> + <span class="built_in">str</span>(x.collect()))</span><br><span class="line"><span class="keyword">for</span> cnt,y <span class="keyword">in</span> <span class="built_in">zip</span>(<span class="built_in">range</span>(<span class="built_in">len</span>(ylist)), ylist):</span><br><span class="line">    print(<span class="string">&#x27;sample:&#x27;</span> + <span class="built_in">str</span>(cnt) + <span class="string">&#x27; y = &#x27;</span> + <span class="built_in">str</span>(y))</span><br></pre></td></tr></table></figure>
<pre><code>x = [0, 1, 2, 3, 4, 5, 6]
sample:0 y = [6, 2, 3]
sample:1 y = [3, 1, 2]
sample:2 y = [1, 4, 3]
sample:3 y = [0, 2, 4]
sample:4 y = [2, 5, 0]</code></pre>
<h2 id="12-union"><a href="#12-union" class="headerlink" title="12.union"></a>12.union</h2><p>union(other)：将自身 RDD 与其它 RDD 进行合并操作，返回一个新的 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;B&#x27;</span>])</span><br><span class="line">y = sc.parallelize([<span class="string">&#x27;D&#x27;</span>,<span class="string">&#x27;C&#x27;</span>,<span class="string">&#x27;A&#x27;</span>])</span><br><span class="line">z = x.union(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;A&#39;, &#39;B&#39;]
[&#39;D&#39;, &#39;C&#39;, &#39;A&#39;]
[&#39;A&#39;, &#39;A&#39;, &#39;B&#39;, &#39;D&#39;, &#39;C&#39;, &#39;A&#39;]</code></pre>
<h2 id="13-intersection"><a href="#13-intersection" class="headerlink" title="13.intersection"></a>13.intersection</h2><p>intersection：对自身 RDD 与其它 RDD 取交集</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;B&#x27;</span>])</span><br><span class="line">y = sc.parallelize([<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;C&#x27;</span>,<span class="string">&#x27;D&#x27;</span>])</span><br><span class="line">z = x.intersection(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;A&#39;, &#39;B&#39;]
[&#39;A&#39;, &#39;C&#39;, &#39;D&#39;]
[&#39;A&#39;]</code></pre>
<h2 id="14-sortByKey"><a href="#14-sortByKey" class="headerlink" title="14.sortByKey"></a>14.sortByKey</h2><p>sortByKey(ascending=True, numPartitions=None, keyfunc)：对 RDD 按 key 值或对 key 操作<br>的自定义 keyfunc 函数进行排序，默认为升序，numPartitions：分区的数目。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;C&#x27;</span>,<span class="number">3</span>)])</span><br><span class="line">y = x.sortByKey()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;A&#39;, 2), (&#39;C&#39;, 3)]
[(&#39;A&#39;, 2), (&#39;B&#39;, 1), (&#39;C&#39;, 3)]</code></pre>
<h2 id="15-sortBy"><a href="#15-sortBy" class="headerlink" title="15.sortBy"></a>15.sortBy</h2><p>sortBy(keyfunc, ascending=True, numPartitions=None)：按自定义的 keyfunc 函数对 RDD 中<br>元素进行排序，默认为升序</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;Cat&#x27;</span>,<span class="string">&#x27;Apple&#x27;</span>,<span class="string">&#x27;Bat&#x27;</span>])</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">keyGen</span>(<span class="params">val</span>):</span> <span class="keyword">return</span> val[<span class="number">0</span>]</span><br><span class="line">y = x.sortBy(keyGen)</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;Apple&#39;, &#39;Bat&#39;, &#39;Cat&#39;]</code></pre>
<h2 id="16-glom"><a href="#16-glom" class="headerlink" title="16.glom"></a>16.glom</h2><p>glom()：创建一个新的 RDD，通过合并每个分区里面的全部元素到一个列表中</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;C&#x27;</span>,<span class="string">&#x27;B&#x27;</span>,<span class="string">&#x27;A&#x27;</span>], <span class="number">2</span>)</span><br><span class="line">y = x.glom()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;C&#39;, &#39;B&#39;, &#39;A&#39;]
[[&#39;C&#39;], [&#39;B&#39;, &#39;A&#39;]]</code></pre>
<h2 id="17-cartesian"><a href="#17-cartesian" class="headerlink" title="17.cartesian"></a>17.cartesian</h2><p>cartesian(other)：将 RDD 与其它 RDD 进行笛卡尔积，返回 &lt;key,value&gt; 类型的 RDD，其中 key为自身的元素，value 为其它 RDD 的元素。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;B&#x27;</span>])</span><br><span class="line">y = sc.parallelize([<span class="string">&#x27;C&#x27;</span>,<span class="string">&#x27;D&#x27;</span>])</span><br><span class="line">z = x.cartesian(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;B&#39;]
[&#39;C&#39;, &#39;D&#39;]
[(&#39;A&#39;, &#39;C&#39;), (&#39;A&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;C&#39;), (&#39;B&#39;, &#39;D&#39;)]</code></pre>
<h2 id="18-groupBy"><a href="#18-groupBy" class="headerlink" title="18.groupBy"></a>18.groupBy</h2><p>groupBy(f, numPartitions=None, partitionFunc)：对 RDD 中每个元素按照满足自定义的 f 函数为条件进行分组，返回一个新的 &lt;key,value&gt; 类型的 RDD，其中 key 为 f 函数的返回值。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.groupBy(<span class="keyword">lambda</span> x: <span class="string">&#x27;A&#x27;</span> <span class="keyword">if</span> (x%<span class="number">2</span> == <span class="number">1</span>) <span class="keyword">else</span> <span class="string">&#x27;B&#x27;</span> )</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print([(j[<span class="number">0</span>],[i <span class="keyword">for</span> i <span class="keyword">in</span> j[<span class="number">1</span>]]) <span class="keyword">for</span> j <span class="keyword">in</span> y.collect()])</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
[(&#39;A&#39;, &lt;pyspark.resultiterable.ResultIterable object at 0x7f380801f2e8&gt;), (&#39;B&#39;, &lt;pyspark.resultiterable.ResultIterable object at 0x7f380801ff98&gt;)]
[(&#39;A&#39;, [1, 3]), (&#39;B&#39;, [2])]</code></pre>
<h2 id="19-pipe"><a href="#19-pipe" class="headerlink" title="19.pipe"></a>19.pipe</h2><p>pipe(command, env=None, checkCode=False)：对 RDD 元素进行管道操作，将返回 shell 命<br>令的处理结果，形成一个新的 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;A&#x27;</span>, <span class="string">&#x27;Ba&#x27;</span>, <span class="string">&#x27;C&#x27;</span>, <span class="string">&#x27;AD&#x27;</span>])</span><br><span class="line">y = x.pipe(<span class="string">&#x27;grep &quot;A&quot;&#x27;</span>) </span><br><span class="line">y1 = x.pipe(<span class="string">&#x27;grep -i &quot;a&quot;&#x27;</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(y1.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;Ba&#39;, &#39;C&#39;, &#39;AD&#39;]
[&#39;A&#39;, &#39;AD&#39;]
[&#39;A&#39;, &#39;Ba&#39;, &#39;AD&#39;]</code></pre>
<h2 id="20-foreach"><a href="#20-foreach" class="headerlink" title="20.foreach"></a>20.foreach</h2><p>foreach(f)：对 RDD 中每个元素进行自定义 f 函数输出操作</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">el</span>):</span></span><br><span class="line">    <span class="string">&#x27;&#x27;&#x27;side effect: append the current RDD elements to a file&#x27;&#x27;&#x27;</span></span><br><span class="line">    f1 = <span class="built_in">open</span>(<span class="string">&quot;/home/chen/foreachExample.txt&quot;</span>, <span class="string">&#x27;a+&#x27;</span>)</span><br><span class="line">    print(el,file=f1)</span><br><span class="line"></span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># first clear the file contents</span></span><br><span class="line"><span class="built_in">open</span>(<span class="string">&#x27;/home/chen/foreachExample.txt&#x27;</span>, <span class="string">&#x27;w&#x27;</span>).close()</span><br><span class="line">y = x.foreach(f) <span class="comment"># writes into foreachExample.txt</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y) <span class="comment"># foreach returns &#x27;None&#x27;</span></span><br><span class="line"><span class="comment"># print the contents of foreachExample.txt</span></span><br><span class="line"><span class="keyword">with</span> <span class="built_in">open</span>(<span class="string">&quot;/home/chen/foreachExample.txt&quot;</span>, <span class="string">&quot;r&quot;</span>) <span class="keyword">as</span> foreachExample:</span><br><span class="line">    <span class="built_in">print</span> (foreachExample.read())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
None
1
2
3</code></pre>
<h2 id="21-foreachPartition"><a href="#21-foreachPartition" class="headerlink" title="21.foreachPartition"></a>21.foreachPartition</h2><p>foreachPartition(f)：对 RDD 每个分区中元素进行自定义 f 函数操作。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>,<span class="number">4</span>,<span class="number">5</span>,<span class="number">6</span>],<span class="number">5</span>)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">parition</span>):</span></span><br><span class="line">    <span class="string">&#x27;&#x27;&#x27;side effect: append the current RDD partition contents to a file&#x27;&#x27;&#x27;</span></span><br><span class="line">    f1=<span class="built_in">open</span>(<span class="string">&quot;/home/chen/foreachPartitionExample.txt&quot;</span>, <span class="string">&#x27;a+&#x27;</span>)</span><br><span class="line">    print([el <span class="keyword">for</span> el <span class="keyword">in</span> parition],file=f1)</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">open</span>(<span class="string">&#x27;/home/chen/foreachPartitionExample.txt&#x27;</span>, <span class="string">&#x27;w&#x27;</span>).close()</span><br><span class="line">y = x.foreachPartition(f) <span class="comment"># writes into foreachExample.txt</span></span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y) <span class="comment"># foreach returns &#x27;None&#x27;</span></span><br><span class="line"><span class="comment"># print the contents of foreachExample.txt</span></span><br><span class="line"><span class="keyword">with</span> <span class="built_in">open</span>(<span class="string">&quot;/home/chen/foreachPartitionExample.txt&quot;</span>, <span class="string">&quot;r&quot;</span>) <span class="keyword">as</span> foreachExample:</span><br><span class="line">    <span class="built_in">print</span> (foreachExample.read())</span><br></pre></td></tr></table></figure>
<pre><code>[[1], [2], [3], [4], [5, 6]]
None
[1]
[2]
[3]
[4]
[5, 6]</code></pre>
<h2 id="22-reduce"><a href="#22-reduce" class="headerlink" title="22.reduce"></a>22.reduce</h2><p>reduce(f)：使用指定的二元运算符，对 RDD 中每个元素进行 reduce 操作</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.reduce(<span class="keyword">lambda</span> x, y: x + y) <span class="comment"># computes a cumulative sum</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
6</code></pre>
<h2 id="23-fold"><a href="#23-fold" class="headerlink" title="23.fold"></a>23.fold</h2><p>fold(zeroValue, op)：对 RDD 中每个元素进行聚合操作。使用一个函数和零值，先对每个分区的<br>元素进行聚合，然后对全部分区进行聚合。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>],<span class="number">2</span>)</span><br><span class="line">neutral_zero_value = <span class="number">0</span> <span class="comment"># 0 for sum, 1 for multiplication</span></span><br><span class="line">y = x.fold(neutral_zero_value,<span class="keyword">lambda</span> x, y: x + y) <span class="comment"># computes cumulative sum</span></span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[[1], [2, 3]]
6</code></pre>
<h2 id="24-aggregate"><a href="#24-aggregate" class="headerlink" title="24.aggregate"></a>24.aggregate</h2><p>aggregate(zeroValue, seqOp, combOp)：使用一个合并函数和一个零值，先对每个分区按合并<br>函数进行聚合，然后将全部分区进行聚合。- seqOp: 每个分区执行的聚合函数, 对 rdd 中按分区每<br>个元素 y 执行此函数, x 为上一次的执行结果, 首次计算时使用默认值 zeroValue</p>
<ul>
<li>combOp: 对每个分区的结果执行的聚合函数, 执行此函数时, 每个分区的计算结果 y 执行此函<br>数, x 为上一次的执行结果, 首次计算时使用默认值 zeroValue</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">2</span>,<span class="number">3</span>,<span class="number">4</span>])</span><br><span class="line">neutral_zero_value = (<span class="number">0</span>,<span class="number">1</span>) <span class="comment"># sum: x = x+0, product: x = 1*x</span></span><br><span class="line">seqOp = (<span class="keyword">lambda</span> x, y: (x[<span class="number">0</span>] + y, x[<span class="number">1</span>] * y))</span><br><span class="line">combOp = (<span class="keyword">lambda</span> x, y: (x[<span class="number">0</span>] + y[<span class="number">0</span>], x[<span class="number">1</span>] * y[<span class="number">1</span>]))</span><br><span class="line">y = x.aggregate(neutral_zero_value,seqOp,combOp) <span class="comment"># computes (cumulative sum,cumulative product)</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[2, 3, 4]
(9, 24)</code></pre>
<h2 id="25-max"><a href="#25-max" class="headerlink" title="25.max"></a>25.max</h2><p>max(key=None)：找寻 RDD 中最大的一项，参数 key: 一个函数用于生成比较的关键条件</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.<span class="built_in">max</span>()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
3</code></pre>
<h2 id="26-min"><a href="#26-min" class="headerlink" title="26.min"></a>26.min</h2><p>min(key=None)：找寻 RDD 中最小的一项，参数 key: 一个函数用于生成比较的关键条件</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.<span class="built_in">min</span>()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
1</code></pre>
<h2 id="27-sum"><a href="#27-sum" class="headerlink" title="27.sum"></a>27.sum</h2><p>sum()：对 RDD 中所有元素进行累加求和</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.<span class="built_in">sum</span>()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
6</code></pre>
<h2 id="28-count"><a href="#28-count" class="headerlink" title="28.count"></a>28.count</h2><p>count()：计算 RDD 中元素的个数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.count()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
3</code></pre>
<h2 id="29-histogram"><a href="#29-histogram" class="headerlink" title="29.histogram"></a>29.histogram</h2><p>histogram(buckets)：使用提供的桶计算直方图。例如 [1,10,20,50] 意思是桶 [1,10) [10,20) [20,50]</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.histogram(buckets = <span class="number">2</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
([1, 2, 3], [2, 3])</code></pre>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.histogram([<span class="number">0</span>,<span class="number">0.5</span>,<span class="number">1</span>,<span class="number">1.5</span>,<span class="number">2</span>,<span class="number">2.5</span>,<span class="number">3</span>,<span class="number">3.5</span>])</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
([0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5], [0, 0, 2, 0, 1, 0, 2])</code></pre>
<h2 id="30-mean"><a href="#30-mean" class="headerlink" title="30.mean"></a>30.mean</h2><p>mean()：计算 RDD 中元素的平均值</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.mean()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
2.0</code></pre>
<h2 id="31-variance"><a href="#31-variance" class="headerlink" title="31.variance"></a>31.variance</h2><p>variance()：计算 RDD 中所有元素的方差</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.variance() <span class="comment"># divides by N</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
0.6666666666666666</code></pre>
<h2 id="32-stdev"><a href="#32-stdev" class="headerlink" title="32.stdev"></a>32.stdev</h2><p>stdev()：计算 RDD 中所有元素的标准差</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.stdev() <span class="comment"># divides by N</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
0.816496580927726</code></pre>
<h2 id="33-sampleStdev"><a href="#33-sampleStdev" class="headerlink" title="33.sampleStdev"></a>33.sampleStdev</h2><p>sampleStdev()：计算 RDD 中所有元素的样本标准差</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.sampleStdev() <span class="comment"># divides by N-1</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
1.0</code></pre>
<h2 id="34-sampleVariance"><a href="#34-sampleVariance" class="headerlink" title="34.sampleVariance"></a>34.sampleVariance</h2><p>sampleVariance()：计算 RDD 中所有元素的样本方差</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">2</span>])</span><br><span class="line">y = x.sampleVariance() <span class="comment"># divides by N-1</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 2]
1.0</code></pre>
<h2 id="35-countByValue"><a href="#35-countByValue" class="headerlink" title="35.countByValue"></a>35.countByValue</h2><p>countByValue()：对 RDD 中每个元素进行计数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&quot;A&quot;</span>,<span class="string">&quot;C&quot;</span>,<span class="string">&quot;A&quot;</span>,<span class="string">&quot;B&quot;</span>,<span class="string">&quot;C&quot;</span>])</span><br><span class="line">y = x.countByValue()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;A&#39;, &#39;C&#39;, &#39;A&#39;, &#39;B&#39;, &#39;C&#39;]
defaultdict(&lt;class &#39;int&#39;&gt;, &#123;&#39;A&#39;: 2, &#39;C&#39;: 2, &#39;B&#39;: 1&#125;)</code></pre>
<h2 id="36-top"><a href="#36-top" class="headerlink" title="36.top"></a>36.top</h2><p>top(num, key=None)：对 RDD 中元素按降序或自定义 key 函数进行排序，输出排序后的前 num<br>个元素</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.top(num = <span class="number">3</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
[3, 3, 2]</code></pre>
<h2 id="37-takeOrdered"><a href="#37-takeOrdered" class="headerlink" title="37.takeOrdered"></a>37.takeOrdered</h2><p>takeOrdered(num, key=None)：对 RDD 中元素按升序或自定义 key 函数进行排序，输出排序<br>后的前 num 个元素。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.takeOrdered(num = <span class="number">3</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
[1, 1, 2]</code></pre>
<h2 id="38-take"><a href="#38-take" class="headerlink" title="38.take"></a>38.take</h2><p>take(num)：输出 RDD 中前 num 个元素</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.take(num = <span class="number">3</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
[1, 3, 1]</code></pre>
<h2 id="39-first"><a href="#39-first" class="headerlink" title="39.first"></a>39.first</h2><p>first()：输出 RDD 中第一个元素</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">3</span>,<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.first()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[1, 3, 1, 2, 3]
1</code></pre>
<h2 id="40-collectAsMap"><a href="#40-collectAsMap" class="headerlink" title="40.collectAsMap"></a>40.collectAsMap</h2><p>collectAsMap()：对 RDD 的每个元素进行遍历，返回一个键值对类型的字典。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>)])</span><br><span class="line">y = x.collectAsMap()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 3), (&#39;A&#39;, 1), (&#39;B&#39;, 2)]
&#123;&#39;C&#39;: 3, &#39;A&#39;: 1, &#39;B&#39;: 2&#125;</code></pre>
<h2 id="41-keys"><a href="#41-keys" class="headerlink" title="41.keys"></a>41.keys</h2><p>keys()：对 &lt;key,value&gt; 类型 RDD 进行操作，返回 RDD 每个元素的 key 值。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>)])</span><br><span class="line">y = x.keys()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 3), (&#39;A&#39;, 1), (&#39;B&#39;, 2)]
[&#39;C&#39;, &#39;A&#39;, &#39;B&#39;]</code></pre>
<h2 id="42-values"><a href="#42-values" class="headerlink" title="42.values"></a>42.values</h2><p>values()：对 &lt;key,value&gt; 类型的 RDD 进行操作，返回 RDD 每个元素的 value 值。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>)])</span><br><span class="line">y = x.values()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 3), (&#39;A&#39;, 1), (&#39;B&#39;, 2)]
[3, 1, 2]</code></pre>
<h2 id="43-reduceByKey"><a href="#43-reduceByKey" class="headerlink" title="43.reduceByKey"></a>43.reduceByKey</h2><p>reduceByKey(func, numPartitions=None, partitionFunc)：对 pairRDD 中的 key 先进行 group<br>by 操作，然后对聚合后的 value 数据进行自定义 f 函数操作，返回一个新的 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">y = x.reduceByKey(<span class="keyword">lambda</span> agg, obj: agg + obj)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4), (&#39;A&#39;, 5)]
[(&#39;B&#39;, 3), (&#39;A&#39;, 12)]</code></pre>
<h2 id="44-countByKey"><a href="#44-countByKey" class="headerlink" title="44.countByKey"></a>44.countByKey</h2><p>countByKey()：对 key 相同的所有元素进行计数，返回值为一个字典</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">y = x.countByKey()</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y)</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4), (&#39;A&#39;, 5)]
defaultdict(&lt;class &#39;int&#39;&gt;, &#123;&#39;B&#39;: 2, &#39;A&#39;: 3&#125;)</code></pre>
<h2 id="45-join"><a href="#45-join" class="headerlink" title="45.join"></a>45.join</h2><p>join(other, numPartitions=None)：对 RDD 上的每个元素与其它 RDD 进行 join 操作，返回一<br>个 (k, (v1, v2)) 类型的新 RDD，其中 (k, v1) 在自身 RDD，(k, v2) 在其它 RDD。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">8</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">7</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">z = x.join(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, 3), (&#39;A&#39;, 2), (&#39;A&#39;, 1)]
[(&#39;A&#39;, 8), (&#39;B&#39;, 7), (&#39;A&#39;, 6), (&#39;D&#39;, 5)]
[(&#39;B&#39;, (3, 7)), (&#39;A&#39;, (2, 8)), (&#39;A&#39;, (2, 6)), (&#39;A&#39;, (1, 8)), (&#39;A&#39;, (1, 6))]</code></pre>
<h2 id="46-leftOuterJoin"><a href="#46-leftOuterJoin" class="headerlink" title="46.leftOuterJoin"></a>46.leftOuterJoin</h2><p>leftOuterJoin(other, numPartitions=None)：执行自身 RDD 与其他 RDD 的 left outer join 操作，例如自身 RDD 每个元素为 &lt;k,v&gt;，其他 RDD 每个元素为 &lt;k,w&gt;，返回新的 RDD 中包含全<br>部的 pairs(k, (v, w)) 或者 pair(k, (v, None))。numPartitions：进行 Hash 分区的数量</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">8</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">7</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">z = x.leftOuterJoin(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, 3), (&#39;A&#39;, 2), (&#39;A&#39;, 1)]
[(&#39;A&#39;, 8), (&#39;B&#39;, 7), (&#39;A&#39;, 6), (&#39;D&#39;, 5)]
[(&#39;B&#39;, (3, 7)), (&#39;A&#39;, (2, 8)), (&#39;A&#39;, (2, 6)), (&#39;A&#39;, (1, 8)), (&#39;A&#39;, (1, 6)), (&#39;C&#39;, (4, None))]</code></pre>
<h2 id="47-rightOuterJoin"><a href="#47-rightOuterJoin" class="headerlink" title="47.rightOuterJoin"></a>47.rightOuterJoin</h2><p>rightOuterJoin：执行自身 RDD 与其他 RDD 的 right outer join 操作，例如自身 RDD 每个元<br>素为 &lt;k,v&gt;，其他 RDD 每个元素为 &lt;k,w&gt;，返回新的 RDD 中包含全部的 pairs(k, (v, w)) 或者<br>pair(k, (None, w))。numPartitions：进行 Hash 分区的数量</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">8</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">7</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">z = x.rightOuterJoin(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, 3), (&#39;A&#39;, 2), (&#39;A&#39;, 1)]
[(&#39;A&#39;, 8), (&#39;B&#39;, 7), (&#39;A&#39;, 6), (&#39;D&#39;, 5)]
[(&#39;B&#39;, (3, 7)), (&#39;A&#39;, (2, 8)), (&#39;A&#39;, (2, 6)), (&#39;A&#39;, (1, 8)), (&#39;A&#39;, (1, 6)), (&#39;D&#39;, (None, 5))]</code></pre>
<h2 id="48-partitionBy"><a href="#48-partitionBy" class="headerlink" title="48.partitionBy"></a>48.partitionBy</h2><p>partitionBy(numPartitions, partitionFunc)：对 RDD 进行分区。numPartitions：分区的数目，<br>partitionFunc：自定义分区函数，partitionFunc(k) % numPartitions 的值为新分区的索引 index</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># partitionBy Example1</span></span><br><span class="line">x = sc.parallelize([(<span class="number">0</span>,<span class="number">1</span>),(<span class="number">1</span>,<span class="number">2</span>),(<span class="number">2</span>,<span class="number">3</span>)],<span class="number">2</span>)</span><br><span class="line">y = x.partitionBy(numPartitions = <span class="number">3</span>, partitionFunc = <span class="keyword">lambda</span> x: x) <span class="comment"># only key is passed to partitionFunc</span></span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[(0, 1)], [(1, 2), (2, 3)]]
[[(0, 1)], [(1, 2)], [(2, 3)]]</code></pre>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># partitionBy Example2</span></span><br><span class="line">x = sc.parallelize([(<span class="string">&quot;hadoop&quot;</span>,<span class="number">1</span>),(<span class="string">&quot;spark&quot;</span>,<span class="number">2</span>),(<span class="string">&quot;python&quot;</span>,<span class="number">3</span>),(<span class="string">&quot;C&quot;</span>,<span class="number">4</span>)],<span class="number">2</span>)</span><br><span class="line">y = x.partitionBy(numPartitions = <span class="number">3</span>, partitionFunc = <span class="keyword">lambda</span> x: <span class="built_in">len</span>(x)) <span class="comment"># only key is passed to partitionFunc</span></span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[[(&#39;hadoop&#39;, 1), (&#39;spark&#39;, 2)], [(&#39;python&#39;, 3), (&#39;C&#39;, 4)]]
[[(&#39;hadoop&#39;, 1), (&#39;python&#39;, 3)], [(&#39;C&#39;, 4)], [(&#39;spark&#39;, 2)]]</code></pre>
<h2 id="49-combineByKey"><a href="#49-combineByKey" class="headerlink" title="49.combineByKey"></a>49.combineByKey</h2><p>combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions=None,<br>partitionFunc)：泛型函数使用一个自定义的聚合函数，去合并 RDD 中每个 key 相同的元素，具<br>体为转换 RDD[(K, V)] 形成一个新的 RDD[(K, C)]，其中 C 是一个合并类型。createCombiner：创<br>建一个 V 到 C 的函数，mergeValue：将一个 V 形成一个 C，mergeCombiners：将 C 的集合进<br>行合并。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">createCombiner = (<span class="keyword">lambda</span> el: [(el,el**<span class="number">2</span>)])</span><br><span class="line">mergeVal = (<span class="keyword">lambda</span> aggregated, el: aggregated + [(el,el**<span class="number">2</span>)]) <span class="comment"># append to aggregated</span></span><br><span class="line">mergeComb = (<span class="keyword">lambda</span> agg1,agg2: agg1 + agg2 ) <span class="comment"># append agg1 with agg2</span></span><br><span class="line">y = x.combineByKey(createCombiner,mergeVal,mergeComb)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4), (&#39;A&#39;, 5)]
[(&#39;B&#39;, [(1, 1), (2, 4)]), (&#39;A&#39;, [(3, 9), (4, 16), (5, 25)])]</code></pre>
<h2 id="50-aggregateByKey"><a href="#50-aggregateByKey" class="headerlink" title="50.aggregateByKey"></a>50.aggregateByKey</h2><p>aggregateByKey(zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc)：聚<br>合每个键的值, 使用组合函数和一个零值，函数返回一个不同类型的 rdd。seqFunc：是对一个分区<br>里每个键的值聚合，combFunc：是对分区间每个键的聚合结果进行聚合。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">zeroValue = [] <span class="comment"># empty list is &#x27;zero value&#x27; for append operation</span></span><br><span class="line">mergeVal = (<span class="keyword">lambda</span> aggregated, el: aggregated + [(el,el**<span class="number">2</span>)])</span><br><span class="line">mergeComb = (<span class="keyword">lambda</span> agg1,agg2: agg1 + agg2 )</span><br><span class="line">y = x.aggregateByKey(zeroValue,mergeVal,mergeComb)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4), (&#39;A&#39;, 5)]
[(&#39;B&#39;, [(1, 1), (2, 4)]), (&#39;A&#39;, [(3, 9), (4, 16), (5, 25)])]</code></pre>
<h2 id="51-foldByKey"><a href="#51-foldByKey" class="headerlink" title="51.foldByKey"></a>51.foldByKey</h2><p>foldByKey(zeroValue, func, numPartitions=None, partitionFunc)：使用一个组合函数 func 与<br>一个零值，对 key 相同的 value 值进行聚合</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">zeroValue = <span class="number">1</span> <span class="comment"># one is &#x27;zero value&#x27; for multiplication</span></span><br><span class="line">y = x.foldByKey(zeroValue,<span class="keyword">lambda</span> agg,x: agg*x ) <span class="comment"># computes cumulative product within each key</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4), (&#39;A&#39;, 5)]
[(&#39;B&#39;, 2), (&#39;A&#39;, 60)]</code></pre>
<h2 id="52-groupByKey"><a href="#52-groupByKey" class="headerlink" title="52.groupByKey"></a>52.groupByKey</h2><p>groupByKey(numPartitions=None, partitionFunc)：对 RDD 里 key 相同的元素进行分组，分<br>组结果形成一个序列，最后返回一个新的 &lt;key,value&gt; 类型的 RDD。numPartitions：进行 Hash<br>分区的分区数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,<span class="number">5</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">y = x.groupByKey()</span><br><span class="line">print(x.collect())</span><br><span class="line">print([(j[<span class="number">0</span>],[i <span class="keyword">for</span> i <span class="keyword">in</span> j[<span class="number">1</span>]]) <span class="keyword">for</span> j <span class="keyword">in</span> y.collect()])</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;B&#39;, 5), (&#39;B&#39;, 4), (&#39;A&#39;, 3), (&#39;A&#39;, 2), (&#39;A&#39;, 1)]
[(&#39;B&#39;, [5, 4]), (&#39;A&#39;, [3, 2, 1])]</code></pre>
<h2 id="53-flatMapValues"><a href="#53-flatMapValues" class="headerlink" title="53.flatMapValues"></a>53.flatMapValues</h2><p>flatMapValues(f)：使用一个 flatMap 函数，对 &lt;key,value&gt; 类型 RDD 中 key 相同的 value 值进行操作，返回<br>一个新的 RDD。新 RDD 的 key 值不变，只改变了 value 值，还保留了原始 RDD 的分区。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,(<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>)),(<span class="string">&#x27;B&#x27;</span>,(<span class="number">4</span>,<span class="number">5</span>))])</span><br><span class="line">y = x.flatMapValues(<span class="keyword">lambda</span> x: [i**<span class="number">2</span> <span class="keyword">for</span> i <span class="keyword">in</span> x]) <span class="comment"># function is applied to entire value, then result is flattened</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;A&#39;, (1, 2, 3)), (&#39;B&#39;, (4, 5))]
[(&#39;A&#39;, 1), (&#39;A&#39;, 4), (&#39;A&#39;, 9), (&#39;B&#39;, 16), (&#39;B&#39;, 25)]</code></pre>
<h2 id="54-mapValues"><a href="#54-mapValues" class="headerlink" title="54.mapValues"></a>54.mapValues</h2><p>mapValues：对键值对 &lt;key,value&gt; 中的 value 部分执行函数里面的操作，返回 &lt;key,value&gt; 键<br>值对形式的新 RDD</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,(<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>)),(<span class="string">&#x27;B&#x27;</span>,(<span class="number">4</span>,<span class="number">5</span>))])</span><br><span class="line">y = x.mapValues(<span class="keyword">lambda</span> x: [i**<span class="number">2</span> <span class="keyword">for</span> i <span class="keyword">in</span> x]) <span class="comment"># function is applied to entire value</span></span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;A&#39;, (1, 2, 3)), (&#39;B&#39;, (4, 5))]
[(&#39;A&#39;, [1, 4, 9]), (&#39;B&#39;, [16, 25])]</code></pre>
<h2 id="55-cogroup"><a href="#55-cogroup" class="headerlink" title="55.cogroup"></a>55.cogroup</h2><p>cogroup(other)：对两个 RDD 数据集按 key 相同的数据进行 group by，并对 key 值相同的数据<br>中每个 RDD 的 value 进行单独 group by</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,(<span class="number">3</span>,<span class="number">3</span>)),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,(<span class="number">1</span>,<span class="number">1</span>))])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">8</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">7</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;D&#x27;</span>,(<span class="number">5</span>,<span class="number">5</span>))])</span><br><span class="line">z = x.cogroup(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line"><span class="keyword">for</span> key,val <span class="keyword">in</span> <span class="built_in">list</span>(z.collect()):</span><br><span class="line">    print(key, [<span class="built_in">list</span>(i) <span class="keyword">for</span> i <span class="keyword">in</span> val])</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, (3, 3)), (&#39;A&#39;, 2), (&#39;A&#39;, (1, 1))]
[(&#39;A&#39;, 8), (&#39;B&#39;, 7), (&#39;A&#39;, 6), (&#39;D&#39;, (5, 5))]
B [[(3, 3)], [7]]
A [[2, (1, 1)], [8, 6]]
C [[4], []]
D [[], [(5, 5)]]</code></pre>
<h2 id="56-groupWith"><a href="#56-groupWith" class="headerlink" title="56.groupWith"></a>56.groupWith</h2><p>groupWith(other, *others)：类似于 cogroup 操作，但支持多个 RDD。返回类型为 RDD。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,(<span class="number">3</span>,<span class="number">3</span>)),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,(<span class="number">1</span>,<span class="number">1</span>))])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;B&#x27;</span>,(<span class="number">7</span>,<span class="number">7</span>)),(<span class="string">&#x27;A&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;D&#x27;</span>,(<span class="number">5</span>,<span class="number">5</span>))])</span><br><span class="line">z = sc.parallelize([(<span class="string">&#x27;D&#x27;</span>,<span class="number">9</span>),(<span class="string">&#x27;B&#x27;</span>,(<span class="number">8</span>,<span class="number">8</span>))])</span><br><span class="line">a = x.groupWith(y,z)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br><span class="line">print(<span class="string">&quot;Result:&quot;</span>)</span><br><span class="line"><span class="keyword">for</span> key,val <span class="keyword">in</span> <span class="built_in">list</span>(a.collect()):</span><br><span class="line">    print(key, [<span class="built_in">list</span>(i) <span class="keyword">for</span> i <span class="keyword">in</span> val])</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, (3, 3)), (&#39;A&#39;, 2), (&#39;A&#39;, (1, 1))]
[(&#39;B&#39;, (7, 7)), (&#39;A&#39;, 6), (&#39;D&#39;, (5, 5))]
[(&#39;D&#39;, 9), (&#39;B&#39;, (8, 8))]
Result:
C [[4], [], []]
A [[2, (1, 1)], [6], []]
B [[(3, 3)], [(7, 7)], [(8, 8)]]
D [[], [(5, 5)], [9]]</code></pre>
<h2 id="57-sampleByKey"><a href="#57-sampleByKey" class="headerlink" title="57.sampleByKey"></a>57.sampleByKey</h2><p>sampleByKey(withReplacement, fractions, seed=None)：以 key 值对元素进行抽样，返回一个<br>新 RDD，withReplacement：表示是否有放回，True 表示有放回，fractions：key 值的抽样率，<br>seed：随机种子</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;C&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>)])</span><br><span class="line">y = x.sampleByKey(withReplacement=<span class="literal">False</span>, fractions=&#123;<span class="string">&#x27;A&#x27;</span>:<span class="number">0.5</span>, <span class="string">&#x27;B&#x27;</span>:<span class="number">1</span>, <span class="string">&#x27;C&#x27;</span>:<span class="number">0.2</span>&#125;)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;A&#39;, 1), (&#39;B&#39;, 2), (&#39;C&#39;, 3), (&#39;B&#39;, 4), (&#39;A&#39;, 5)]
[(&#39;B&#39;, 2), (&#39;C&#39;, 3), (&#39;B&#39;, 4), (&#39;A&#39;, 5)]</code></pre>
<h2 id="58-subtractByKey"><a href="#58-subtractByKey" class="headerlink" title="58.subtractByKey"></a>58.subtractByKey</h2><p>subtractByKey(other, numPartitions=None)：按 key 值对 RDD 进行扣除操作，返回自身<br>&lt;key,value&gt; 类型 RDD 不匹配其他 RDD 中 key 的部分</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">1</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">4</span>)])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;A&#x27;</span>,<span class="number">5</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">6</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">7</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">8</span>)])</span><br><span class="line">z = x.subtractByKey(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 1), (&#39;B&#39;, 2), (&#39;A&#39;, 3), (&#39;A&#39;, 4)]
[(&#39;A&#39;, 5), (&#39;D&#39;, 6), (&#39;A&#39;, 7), (&#39;D&#39;, 8)]
[(&#39;B&#39;, 2), (&#39;C&#39;, 1)]</code></pre>
<h2 id="59-subtract"><a href="#59-subtract" class="headerlink" title="59.subtract"></a>59.subtract</h2><p>subtract(other, numPartitions=None)：返回自身 RDD 中不匹配其他 RDD 的部分</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">4</span>),(<span class="string">&#x27;B&#x27;</span>,<span class="number">3</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">y = sc.parallelize([(<span class="string">&#x27;C&#x27;</span>,<span class="number">8</span>),(<span class="string">&#x27;A&#x27;</span>,<span class="number">2</span>),(<span class="string">&#x27;D&#x27;</span>,<span class="number">1</span>)])</span><br><span class="line">z = x.subtract(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[(&#39;C&#39;, 4), (&#39;B&#39;, 3), (&#39;A&#39;, 2), (&#39;A&#39;, 1)]
[(&#39;C&#39;, 8), (&#39;A&#39;, 2), (&#39;D&#39;, 1)]
[(&#39;C&#39;, 4), (&#39;A&#39;, 1), (&#39;B&#39;, 3)]</code></pre>
<h2 id="60-keyBy"><a href="#60-keyBy" class="headerlink" title="60.keyBy"></a>60.keyBy</h2><p>keyBy(f)：使用自定义函数 f，创建每个元素为元组类型的新 RDD，f 函数的返回值作为 key，RDD<br>的元素值作为 value。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>])</span><br><span class="line">y = x.keyBy(<span class="keyword">lambda</span> x: x**<span class="number">2</span>)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[1, 2, 3]
[(1, 1), (4, 2), (9, 3)]</code></pre>
<h2 id="61-repartition"><a href="#61-repartition" class="headerlink" title="61.repartition"></a>61.repartition</h2><p>repartition(numPartitions)：对 RDD 进行分区。numPartitions：为分区数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>,<span class="number">4</span>,<span class="number">5</span>],<span class="number">2</span>)</span><br><span class="line">y = x.repartition(numPartitions=<span class="number">3</span>)</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[1, 2], [3, 4, 5]]
[[], [1, 2], [3, 4, 5]]</code></pre>
<h2 id="62-coalesce"><a href="#62-coalesce" class="headerlink" title="62.coalesce"></a>62.coalesce</h2><p>coalesce(numPartitions, shuffle=False)：对 RDD 进行分区，将分区数减少到 numPartitions。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="number">1</span>,<span class="number">2</span>,<span class="number">3</span>,<span class="number">4</span>,<span class="number">5</span>],<span class="number">2</span>)</span><br><span class="line">y = x.coalesce(numPartitions=<span class="number">1</span>)</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.glom().collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[1, 2], [3, 4, 5]]
[[1, 2, 3, 4, 5]]</code></pre>
<h2 id="63-zip"><a href="#63-zip" class="headerlink" title="63.zip"></a>63.zip</h2><p>zip(other)：将 RDD 与其它 RDD 进行 zip 操作，返回 &lt;key,value&gt; 类型的新 RDD，其中 key 为<br>自身 RDD 的元素值，value 为其它 RDD 的元素值</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;B&#x27;</span>,<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;A&#x27;</span>])</span><br><span class="line"><span class="comment"># zip expects x and y to have same #partitions and #elements/partition</span></span><br><span class="line">y = x.<span class="built_in">map</span>(<span class="keyword">lambda</span> x: <span class="built_in">ord</span>(x))</span><br><span class="line">z = x.<span class="built_in">zip</span>(y)</span><br><span class="line">print(x.collect())</span><br><span class="line">print(y.collect())</span><br><span class="line">print(z.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[&#39;B&#39;, &#39;A&#39;, &#39;A&#39;]
[66, 65, 65]
[(&#39;B&#39;, 66), (&#39;A&#39;, 65), (&#39;A&#39;, 65)]</code></pre>
<h2 id="64-zipWithIndex"><a href="#64-zipWithIndex" class="headerlink" title="64.zipWithIndex"></a>64.zipWithIndex</h2><p>zipWithIndex()：对 RDD 进行 zip 操作，返回新的 RDD 中，每个元素包含原 RDD 的元素值还有<br>对应的索引。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;B&#x27;</span>,<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;A&#x27;</span>],<span class="number">2</span>)</span><br><span class="line">y = x.zipWithIndex()</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.collect())</span><br></pre></td></tr></table></figure>
<pre><code>[[&#39;B&#39;], [&#39;A&#39;, &#39;A&#39;]]
[(&#39;B&#39;, 0), (&#39;A&#39;, 1), (&#39;A&#39;, 2)]</code></pre>
<h2 id="65-zipWithUniqueId"><a href="#65-zipWithUniqueId" class="headerlink" title="65. zipWithUniqueId"></a>65. zipWithUniqueId</h2><p>zipWithUniqueId()：对 RDD 进行 zip 操作，返回 &lt;key,value&gt; 类型新 RDD，key 为原 RDD 的<br>元素值，value 为从 0 开始的唯一 id</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">x = sc.parallelize([<span class="string">&#x27;B&#x27;</span>,<span class="string">&#x27;A&#x27;</span>,<span class="string">&#x27;A&#x27;</span>],<span class="number">2</span>)</span><br><span class="line">y = x.zipWithUniqueId()</span><br><span class="line">print(x.glom().collect())</span><br><span class="line">print(y.collect())</span><br><span class="line"></span><br></pre></td></tr></table></figure>
<pre><code>[[&#39;B&#39;], [&#39;A&#39;, &#39;A&#39;]]
[(&#39;B&#39;, 0), (&#39;A&#39;, 1), (&#39;A&#39;, 3)]</code></pre>
<h2 id="66-WordCount"><a href="#66-WordCount" class="headerlink" title="66.WordCount"></a>66.WordCount</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">## 从hdfs加载文件</span></span><br><span class="line">textFile = sc.textFile(<span class="string">&quot;hdfs://localhost:9000/input/wordcount/testfile&quot;</span>)</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">## 以空格为分界符分词</span></span><br><span class="line">stringRDD = textFile.flatMap(<span class="keyword">lambda</span> line:line.split(<span class="string">&quot; &quot;</span>))</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">## map执行的是将每个词都变为（词，1）这样的二元组，</span></span><br><span class="line"><span class="comment">## reduceByKey执行的是将key相同的二元组的value相加</span></span><br><span class="line">countsRDD = stringRDD.<span class="built_in">map</span>(<span class="keyword">lambda</span> word:(word,<span class="number">1</span>)).reduceByKey(<span class="keyword">lambda</span> x,y:x+y)</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">countsRDD.collect()</span><br></pre></td></tr></table></figure>

<pre><code>[(&#39;hadoop&#39;, 1), (&#39;world&#39;, 1), (&#39;python&#39;, 1), (&#39;hello&#39;, 4), (&#39;spark&#39;, 1)]</code></pre>

      
    </div>
    <footer class="article-footer">
      <a data-url="http://example.com/2020/12/18/spark%E5%AE%89%E8%A3%85/" data-id="ckitlg0sw00044cn6c8cwf37i" data-title="Spark" class="article-share-link">Share</a>
      
      
      
    </footer>
  </div>
  
    
<nav id="article-nav">
  
    <a href="/2020/12/18/Hadoop%E7%A8%8B%E5%BA%8F%E5%BC%80%E5%8F%91/" id="article-nav-newer" class="article-nav-link-wrap">
      <strong class="article-nav-caption">Newer</strong>
      <div class="article-nav-title">
        
          Hadoop 程序开发
        
      </div>
    </a>
  
  
    <a href="/2020/12/18/%E6%88%91%E7%9A%84%E7%AC%AC%E4%B8%80%E7%AF%87%E5%8D%9A%E5%AE%A2/" id="article-nav-older" class="article-nav-link-wrap">
      <strong class="article-nav-caption">Older</strong>
      <div class="article-nav-title">我的第一篇博客</div>
    </a>
  
</nav>

  
</article>


</section>
        
          <aside id="sidebar">
  
    

  
    

  
    
  
    
  <div class="widget-wrap">
    <h3 class="widget-title">Archives</h3>
    <div class="widget">
      <ul class="archive-list"><li class="archive-list-item"><a class="archive-list-link" href="/archives/2020/12/">December 2020</a></li></ul>
    </div>
  </div>


  
    
  <div class="widget-wrap">
    <h3 class="widget-title">Recent Posts</h3>
    <div class="widget">
      <ul>
        
          <li>
            <a href="/2020/12/18/hive%E9%AB%98%E7%BA%A7/">Hive高级用法</a>
          </li>
        
          <li>
            <a href="/2020/12/18/Hadoop%E7%A8%8B%E5%BA%8F%E5%BC%80%E5%8F%91/">Hadoop 程序开发</a>
          </li>
        
          <li>
            <a href="/2020/12/18/spark%E5%AE%89%E8%A3%85/">Spark</a>
          </li>
        
          <li>
            <a href="/2020/12/18/%E6%88%91%E7%9A%84%E7%AC%AC%E4%B8%80%E7%AF%87%E5%8D%9A%E5%AE%A2/">我的第一篇博客</a>
          </li>
        
          <li>
            <a href="/2020/12/18/hello-world/">Hello World</a>
          </li>
        
      </ul>
    </div>
  </div>

  
</aside>
        
      </div>
      <footer id="footer">
  
  <div class="outer">
    <div id="footer-info" class="inner">
      
      &copy; 2020 John Doe<br>
      Powered by <a href="https://hexo.io/" target="_blank">Hexo</a>
    </div>
  </div>
</footer>

    </div>
    <nav id="mobile-nav">
  
    <a href="/" class="mobile-nav-link">Home</a>
  
    <a href="/archives" class="mobile-nav-link">Archives</a>
  
</nav>
    


<script src="/js/jquery-3.4.1.min.js"></script>



  
<script src="/fancybox/jquery.fancybox.min.js"></script>




<script src="/js/script.js"></script>





  </div>
</body>
</html>