

<!DOCTYPE html>
<html lang="zh-CN" data-default-color-scheme="auto">



<head>
  <meta charset="UTF-8">
  <link rel="apple-touch-icon" sizes="76x76" href="/img/fluid.png">
  <link rel="icon" href="/img/fluid.png">
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, shrink-to-fit=no">
  <meta http-equiv="x-ua-compatible" content="ie=edge">
  
  <meta name="theme-color" content="#2f4154">
  <meta name="author" content="John Doe">
  <meta name="keywords" content="">
  
    <meta name="description" content="@[toc] 选择题总结 (89个题)第1章 大数据技术概述 (10个题) ‎1.1 大数据技术及其代表性的软件种类很多，不同的技术有其不同应用场景，都对应着不同的大数据计算模式，请问软件产品Pregel主要应用于以下哪种计算模式？  B.图计算 1234A.流计算B.图计算C.查询分析计算D.批处理计算     1.2 Hadoop生态系统中用于构建数据仓库并允许用户输入SQL语句进行查询的功能">
<meta property="og:type" content="article">
<meta property="og:title" content="Spark Scala版本 _ 选择题汇总">
<meta property="og:url" content="http://example.com/2022/01/03/Spark%20Scala%E7%89%88%E6%9C%AC%20_%20%E9%80%89%E6%8B%A9%E9%A2%98%E6%B1%87%E6%80%BB/index.html">
<meta property="og:site_name" content="Hexo">
<meta property="og:description" content="@[toc] 选择题总结 (89个题)第1章 大数据技术概述 (10个题) ‎1.1 大数据技术及其代表性的软件种类很多，不同的技术有其不同应用场景，都对应着不同的大数据计算模式，请问软件产品Pregel主要应用于以下哪种计算模式？  B.图计算 1234A.流计算B.图计算C.查询分析计算D.批处理计算     1.2 Hadoop生态系统中用于构建数据仓库并允许用户输入SQL语句进行查询的功能">
<meta property="og:locale" content="zh_CN">
<meta property="article:published_time" content="2022-01-02T16:45:21.000Z">
<meta property="article:modified_time" content="2022-08-22T15:48:14.959Z">
<meta property="article:author" content="John Doe">
<meta property="article:tag" content="Spark">
<meta name="twitter:card" content="summary_large_image">
  
  
  
  <title>Spark Scala版本 _ 选择题汇总 - Hexo</title>

  <link rel="stylesheet" href="https://lib.baomitu.com/twitter-bootstrap/4.6.1/css/bootstrap.min.css">

  <link rel="stylesheet" href="https://lib.baomitu.com/github-markdown-css/4.0.0/github-markdown.min.css">

  <link rel="stylesheet" href="https://lib.baomitu.com/hint.css/2.7.0/hint.min.css">

  <link rel="stylesheet" href="https://lib.baomitu.com/fancybox/3.5.7/jquery.fancybox.min.css">



<!-- 主题依赖的图标库，不要自行修改 -->
<!-- Do not modify the link that theme dependent icons -->

<link rel="stylesheet" href="https://at.alicdn.com/t/font_1749284_hj8rtnfg7um.css">



<link rel="stylesheet" href="https://at.alicdn.com/t/font_1736178_lbnruvf0jn.css">


<link rel="stylesheet" href="/css/main.css">


  <link id="highlight-css" rel="stylesheet" href="/css/highlight.css">

  <link id="highlight-css-dark" rel="stylesheet" href="/css/highlight-dark.css">
  




  <script id="fluid-configs">
    var Fluid = window.Fluid || {};
    Fluid.ctx = Object.assign({}, Fluid.ctx)
    var CONFIG = {"hostname":"example.com","root":"/","version":"1.9.2","typing":{"enable":true,"typeSpeed":70,"cursorChar":"_","loop":false,"scope":[]},"anchorjs":{"enable":true,"element":"h1,h2,h3,h4,h5,h6","placement":"left","visible":"hover","icon":""},"progressbar":{"enable":true,"height_px":3,"color":"#29d","options":{"showSpinner":false,"trickleSpeed":100}},"code_language":{"enable":true,"default":"TEXT"},"copy_btn":true,"image_caption":{"enable":true},"image_zoom":{"enable":true,"img_url_replace":["",""]},"toc":{"enable":true,"placement":"right","headingSelector":"h1,h2,h3,h4,h5,h6","collapseDepth":0},"lazyload":{"enable":true,"loading_img":"/img/loading.gif","onlypost":false,"offset_factor":2},"web_analytics":{"enable":false,"follow_dnt":true,"baidu":null,"google":null,"gtag":null,"tencent":{"sid":null,"cid":null},"woyaola":null,"cnzz":null,"leancloud":{"app_id":null,"app_key":null,"server_url":null,"path":"window.location.pathname","ignore_local":false}},"search_path":"/local-search.xml"};

    if (CONFIG.web_analytics.follow_dnt) {
      var dntVal = navigator.doNotTrack || window.doNotTrack || navigator.msDoNotTrack;
      Fluid.ctx.dnt = dntVal && (dntVal.startsWith('1') || dntVal.startsWith('yes') || dntVal.startsWith('on'));
    }
  </script>
  <script src="/js/utils.js"></script>
  <script src="/js/color-schema.js"></script>
  


  
<meta name="generator" content="Hexo 6.2.0"></head>


<body>
  

  <header>
    

<div class="header-inner" style="height: 70vh;">
  <nav id="navbar" class="navbar fixed-top  navbar-expand-lg navbar-dark scrolling-navbar">
  <div class="container">
    <a class="navbar-brand" href="/">
      <strong>尤 Ni&#39;s Blog</strong>
    </a>

    <button id="navbar-toggler-btn" class="navbar-toggler" type="button" data-toggle="collapse"
            data-target="#navbarSupportedContent"
            aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
      <div class="animated-icon"><span></span><span></span><span></span></div>
    </button>

    <!-- Collapsible content -->
    <div class="collapse navbar-collapse" id="navbarSupportedContent">
      <ul class="navbar-nav ml-auto text-center">
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/">
                <i class="iconfont icon-home-fill"></i>
                首页
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/archives/">
                <i class="iconfont icon-archive-fill"></i>
                归档
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/categories/">
                <i class="iconfont icon-category-fill"></i>
                分类
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/tags/">
                <i class="iconfont icon-tags-fill"></i>
                标签
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/about/">
                <i class="iconfont icon-user-fill"></i>
                关于
              </a>
            </li>
          
        
        
          <li class="nav-item" id="search-btn">
            <a class="nav-link" target="_self" href="javascript:;" data-toggle="modal" data-target="#modalSearch" aria-label="Search">
              &nbsp;<i class="iconfont icon-search"></i>&nbsp;
            </a>
          </li>
          
        
        
          <li class="nav-item" id="color-toggle-btn">
            <a class="nav-link" target="_self" href="javascript:;" aria-label="Color Toggle">&nbsp;<i
                class="iconfont icon-dark" id="color-toggle-icon"></i>&nbsp;</a>
          </li>
        
      </ul>
    </div>
  </div>
</nav>

  

<div id="banner" class="banner" parallax="true"
     style="background: url('/img/default.png') no-repeat center center; background-size: cover;">
  <div class="full-bg-img">
    <div class="mask flex-center" style="background-color: rgba(0, 0, 0, 0.3)">
      <div class="banner-text text-center fade-in-up">
        <div class="h2">
          
            <span id="subtitle" data-typed-text="Spark Scala版本 _ 选择题汇总"></span>
          
        </div>

        
          
  <div class="mt-3">
    
      <span class="post-meta mr-2">
        <i class="iconfont icon-author" aria-hidden="true"></i>
        John Doe
      </span>
    
    
      <span class="post-meta">
        <i class="iconfont icon-date-fill" aria-hidden="true"></i>
        <time datetime="2022-01-03T00:45">
          2022年1月3日 凌晨
        </time>
      </span>
    
  </div>

  <div class="mt-1">
    
      <span class="post-meta mr-2">
        <i class="iconfont icon-chart"></i>
        
          13k 字
        
      </span>
    

    
      <span class="post-meta mr-2">
        <i class="iconfont icon-clock-fill"></i>
        
        
        
          111 分钟
        
      </span>
    

    
    
  </div>


        
      </div>

      
    </div>
  </div>
</div>

</div>

  </header>

  <main>
    
      

<div class="container-fluid nopadding-x">
  <div class="row nomargin-x">
    <div class="side-col d-none d-lg-block col-lg-2">
      

    </div>

    <div class="col-lg-8 nopadding-x-md">
      <div class="container nopadding-x-md" id="board-ctn">
        <div id="board">
          <article class="post-content mx-auto">
            <!-- SEO header -->
            <h1 style="display: none">Spark Scala版本 _ 选择题汇总</h1>
            
              <p class="note note-info">
                
                  
                    本文最后更新于：1 小时前
                  
                
              </p>
            
            
              <div class="markdown-body">
                
                <p>@[toc]</p>
<h1 id="选择题总结-89个题"><a href="#选择题总结-89个题" class="headerlink" title="选择题总结 (89个题)"></a>选择题总结 (89个题)</h1><h1 id="第1章-大数据技术概述-10个题"><a href="#第1章-大数据技术概述-10个题" class="headerlink" title="第1章 大数据技术概述 (10个题)"></a>第1章 大数据技术概述 (10个题)</h1><hr>
<p><em><strong>‎1.1 大数据技术及其代表性的软件种类很多，不同的技术有其不同应用场景，都对应着不同的大数据计算模式，请问软件产品Pregel主要应用于以下哪种计算模式？</strong></em>  <code>B.图计算</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.流计算<br><span class="hljs-selector-tag">B</span>.图计算<br>C.查询分析计算<br>D.批处理计算<br></code></pre></td></tr></table></figure>




<p><em><strong>1.2 Hadoop生态系统中用于构建数据仓库并允许用户输入SQL语句进行查询的功能组件是?</strong></em>  <code>C. Hive</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Flume</span><br>B<span class="hljs-selector-class">.Pregel</span><br>C<span class="hljs-selector-class">.Hive</span><br>D.Spark<br></code></pre></td></tr></table></figure>
<p><em><strong>1.3 ‏Hadoop的生态系统组件之一Sqoop的功能是?</strong></em>  <code>D.交换数据</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.负责集群资源调度管理的组件<br><span class="hljs-selector-tag">B</span>.用来存储非结构化和半结构化的松散数据<br>C.提供高可靠性、高可用、分布式的海量日志采集<br>D.用来在Hadoop和关系数据库之间的交换数据，改进数据的互操作性<br></code></pre></td></tr></table></figure>
<p><em><strong>1.4 以下哪一项不是Hadoop的缺点？</strong></em> <code>B.分布存储到多台机器</code></p>
<figure class="highlight mathematica"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mathematica"><span class="hljs-variable">A</span><span class="hljs-operator">.</span>计算延迟高<br><span class="hljs-variable">B</span><span class="hljs-operator">.</span>数据文件被分布存储到多台机器上<br><span class="hljs-built_in">C</span><span class="hljs-operator">.</span>磁盘<span class="hljs-built_in">I</span><span class="hljs-operator">/</span><span class="hljs-built_in">O</span>开销大<br><span class="hljs-built_in">D</span><span class="hljs-operator">.</span>计算表达能力有限<br></code></pre></td></tr></table></figure>
<p><em><strong>1.5 用户在使用HDFS时，仍然可以像普通文件系统那样用文件名去访问文件，以下哪个选项是正确的访问方式？</strong></em> <code>D. 三短一长选最长</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‎<span class="hljs-selector-tag">A</span>.把文件名发送给名称节点，根据文件名直接在名称节点上获取数据<br><span class="hljs-selector-tag">B</span>.把文件名发送给数据节点，根据文件名直接在数据节点上获取数据<br>C.以上说法都不对<br>D.把文件名发送给名称节点，根据文件名在名称节点上找到数据块的实际存储信息，客户端再到数据节点上获取数据<br></code></pre></td></tr></table></figure>
<p><em><strong>1.6 目前学术界和业界比较认可的关于大数据的四个特点是?</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‍<span class="hljs-selector-tag">A</span>.数据类型多<br><span class="hljs-selector-tag">B</span>.价值密度低<br>C.数据量大<br>D.处理速度快<br></code></pre></td></tr></table></figure>
<p><em><strong>1.7 Hadoop两大核心组成部分是什么？</strong></em> <code>CD </code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">​<span class="hljs-selector-tag">A</span>.资源调度管理框架YARN<br><span class="hljs-selector-tag">B</span>.分布式协作服务Zookeeper<br>C.分布式计算框架MapReduce<br>D.分布式文件系统HDFS<br></code></pre></td></tr></table></figure>
<p><em><strong>1.8.‏YARN是负责集群资源调度管理的组件。不同的计算框架统一运行在YARN框架之上，具有哪些优点：</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‏<span class="hljs-selector-tag">A</span>.计算资源按需伸缩<br><span class="hljs-selector-tag">B</span>.大大降低了运维成本<br>C.不同负载应用混搭，集群利用率高<br>D.共享底层存储，避免数据跨集群迁移<br></code></pre></td></tr></table></figure>
<p><em><strong>1.9 关于Hadoop生态系统中HBase与其它部分的关系，以下说法正确的有：</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‍<span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.HBase</span>利用MapReduce来处理HBase中的海量数据，实现高性能计算<br><span class="hljs-selector-tag">B</span>.利用Pig和Hive为HBase提供了高层语言支持<br>C.使用HDFS作为高可靠的底层存储，利用廉价集群提供海量数据存储能力<br>D.使用Sqoop为HBase提供了高效便捷的RDBMS数据导入功能<br></code></pre></td></tr></table></figure>
<p><em><strong>1.10.Spark的设计遵循“一个软件栈满足不同应用场景”的理念，逐渐形成了一套完整的生态系统，可以支持以下哪些操作计算：</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‏<span class="hljs-selector-tag">A</span>.流式计算（Spark Streaming）<br><span class="hljs-selector-tag">B</span><span class="hljs-selector-class">.SQL</span>即席查询（Spark SQL）<br>C.图计算（GraphX）<br>D.机器学习（MLlib）<br></code></pre></td></tr></table></figure>
<h1 id="第2章-Scala-语言基础-20个题"><a href="#第2章-Scala-语言基础-20个题" class="headerlink" title="第2章 Scala 语言基础 (20个题)"></a>第2章 Scala 语言基础 (20个题)</h1><hr>
<p><em><strong>2.1 下面输出与其他不一致的是？</strong></em> <code> D</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>print(<span class="hljs-string">&quot;Hello World\n&quot;</span>)<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>println(<span class="hljs-string">&quot;Hello World&quot;</span>)<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>printf(<span class="hljs-string">&quot;Hello %s&quot;</span>, <span class="hljs-string">&quot;World\n&quot;</span>)<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span><span class="hljs-keyword">val</span> w = <span class="hljs-string">&quot;World&quot;</span> ; println(<span class="hljs-string">&quot;Hello $w&quot;</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>2.2 有关操作符优先级的描述不正确的是？</strong></em> <code>A</code></p>
<figure class="highlight mathematica"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mathematica"><span class="hljs-variable">A</span><span class="hljs-operator">.+</span>的优先级高于！<br><span class="hljs-variable">B</span><span class="hljs-operator">.%</span>的优先级高于<span class="hljs-operator">+</span><br><span class="hljs-built_in">C</span><span class="hljs-operator">.&gt;</span>的优先级高于<span class="hljs-operator">&amp;</span><br><span class="hljs-built_in">D</span><span class="hljs-operator">.*=</span>的优先级低于<span class="hljs-operator">+</span><br></code></pre></td></tr></table></figure>
<p><em><strong>2.3 对集合(Set)进行操作”Set(2, 0, 1) + 1 + 1 - 1”之后的结果为？</strong></em> <code>C</code></p>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.以上均不正确<br><span class="hljs-attribute">B</span>.Set(<span class="hljs-number">2</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>)<br><span class="hljs-attribute">C</span>.Set(<span class="hljs-number">2</span>, <span class="hljs-number">0</span>)<br><span class="hljs-attribute">D</span>.Set(<span class="hljs-number">2</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>2.4 以下关于闭包描述错误的是？</strong></em>  <code>D</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml">A.对于def mul<span class="hljs-constructor">By(<span class="hljs-params">factor</span>: Double)</span> =<span class="hljs-function"> (<span class="hljs-params">x</span>: D<span class="hljs-params">ouble</span>) =&gt;</span> factor<span class="hljs-operator"> * </span>x; <span class="hljs-keyword">val</span> triple = mul<span class="hljs-constructor">By(3)</span>;,函数triple是一个闭包<br>B.闭包是一个函数，其返回值依赖于声明在函数包部的一个或多个变量<br>C.通常来讲，可以将闭包看作是可以访问一个函数里面局部变量的另一个函数<br>D.对于def mul<span class="hljs-constructor">By(<span class="hljs-params">factor</span>: Double)</span> =<span class="hljs-function"> (<span class="hljs-params">x</span>: D<span class="hljs-params">ouble</span>) =&gt;</span> <span class="hljs-number">3</span><span class="hljs-operator"> * </span>x; <span class="hljs-keyword">val</span> triple = mul<span class="hljs-constructor">By(3)</span>;,函数triple是一个闭包<br></code></pre></td></tr></table></figure>
<p><em><strong>2.5 对于以下代码描述有误的是？</strong></em>  <code>C</code></p>
<figure class="highlight kotlin"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><code class="hljs kotlin"><span class="hljs-keyword">val</span> <span class="hljs-keyword">data</span> = Map(<span class="hljs-number">1</span> -&gt; <span class="hljs-string">&quot;One&quot;</span>, <span class="hljs-number">2</span> -&gt; <span class="hljs-string">&quot;Two&quot;</span>)<br>‏<span class="hljs-keyword">val</span> res = <span class="hljs-keyword">for</span>((k, v) &lt;- <span class="hljs-keyword">data</span>; <span class="hljs-keyword">if</span>(k &gt; <span class="hljs-number">1</span>)) yield v<br></code></pre></td></tr></table></figure>
<figure class="highlight mathematica"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mathematica"><span class="hljs-variable">A</span><span class="hljs-operator">.</span>其中的<span class="hljs-variable">if</span><span class="hljs-punctuation">(</span><span class="hljs-variable">k</span> <span class="hljs-operator">&gt;</span> <span class="hljs-number">1</span><span class="hljs-punctuation">)</span>是一个守卫表达式<br><span class="hljs-variable">B</span><span class="hljs-operator">.</span>运行后<span class="hljs-variable">res</span>的结果为<span class="hljs-built_in">List</span><span class="hljs-punctuation">(</span><span class="hljs-string">&quot;Two&quot;</span><span class="hljs-punctuation">)</span><br><span class="hljs-built_in">C</span><span class="hljs-operator">.</span>运行后<span class="hljs-variable">res</span>的结果为<span class="hljs-built_in">List</span><span class="hljs-punctuation">(</span><span class="hljs-string">&quot;One&quot;</span><span class="hljs-operator">,</span> <span class="hljs-string">&quot;Two&quot;</span><span class="hljs-punctuation">)</span><br><span class="hljs-built_in">D</span><span class="hljs-operator">.</span>对映射<span class="hljs-variable">data</span>中的每一个<span class="hljs-punctuation">(</span>键，值<span class="hljs-punctuation">)</span>对，<span class="hljs-variable">k</span>被绑定对键，而<span class="hljs-variable">v</span>则被绑定到值<br></code></pre></td></tr></table></figure>

<p><em><strong>2.6‍ Scala中，下面的哪个类定义是不正确的？</strong></em> <code>B</code></p>
<figure class="highlight ruby"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs ruby">A.<span class="hljs-keyword">class</span> <span class="hljs-title class_">Counter</span>&#123;<span class="hljs-keyword">def</span> <span class="hljs-title function_">counter</span> = “counter”&#125;<br>B.<span class="hljs-keyword">class</span> <span class="hljs-title class_">Counter</span>&#123;var <span class="hljs-symbol">counter:</span>String&#125;<br>C.<span class="hljs-keyword">class</span> <span class="hljs-title class_">Counter</span>&#123;<span class="hljs-keyword">def</span> <span class="hljs-title function_">counter</span> () &#123;&#125;&#125;<br>D.<span class="hljs-keyword">class</span> <span class="hljs-title class_">Counter</span>&#123;val counter = “counter”&#125;<br></code></pre></td></tr></table></figure>

<p><em><strong>2.7 以下关于类和单例对象的对比说法正确的是？</strong></em> <code>A</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.单例对象不可以带参数，而类可以<br><span class="hljs-selector-tag">B</span>.单例对象不可以定义方法，而类可以<br>C.单例对象不可以定义私有属性，而类可以<br>D.单例对象不可以继承，而类可以<br></code></pre></td></tr></table></figure>
<p><em><strong>2.8 Scala语言中，关于List的定义，不正确的是？</strong></em> <code>B</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.val</span> list = <span class="hljs-built_in">List</span>(<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>)<br>B<span class="hljs-selector-class">.val</span> list = List <span class="hljs-selector-attr">[String]</span>(<span class="hljs-string">&#x27;A&#x27;</span>,<span class="hljs-string">&#x27;B&#x27;</span>,<span class="hljs-string">&#x27;C&#x27;</span>)<br>C<span class="hljs-selector-class">.val</span> list = List <span class="hljs-selector-attr">[Int]</span>(<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>)<br>D<span class="hljs-selector-class">.val</span> list = List <span class="hljs-selector-attr">[String]</span>()<br></code></pre></td></tr></table></figure>

<p><em><strong>2.9‏ 对于Map(“book” -&gt; 5, “pen” -&gt; 2).map(m &#x3D;&gt; m._1 -&gt; m._2 * 2)的结果，下面哪个是正确的？</strong></em> <code>A</code></p>
<figure class="highlight livescript"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs livescript">‍A.<span class="hljs-built_in">Map</span>(<span class="hljs-string">&quot;book&quot;</span> -&gt; <span class="hljs-number">10</span>, <span class="hljs-string">&quot;pen&quot;</span> -&gt; <span class="hljs-number">4</span>)<br>B.<span class="hljs-built_in">Map</span>(<span class="hljs-string">&quot;bookbook&quot;</span> -&gt; <span class="hljs-number">10</span>, <span class="hljs-string">&quot;penpen&quot;</span> -&gt; <span class="hljs-number">4</span>)<br>C.<span class="hljs-built_in">Map</span>(<span class="hljs-string">&quot;book&quot;</span> -&gt; <span class="hljs-number">5</span>, <span class="hljs-string">&quot;pen&quot;</span> -&gt; <span class="hljs-number">2</span> ,<span class="hljs-string">&quot;book&quot;</span> -&gt; <span class="hljs-number">5</span>, <span class="hljs-string">&quot;pen&quot;</span> -&gt; <span class="hljs-number">2</span>)<br>D.<span class="hljs-built_in">Map</span>(<span class="hljs-string">&quot;bookbook&quot;</span> -&gt; <span class="hljs-number">5</span>, <span class="hljs-string">&quot;penpen&quot;</span> -&gt; <span class="hljs-number">2</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>2.10‌ 表达式for(i &lt;- 1 to 3; j &lt;- 1 to 3; if i !&#x3D; j ) {print((10 * i + j));print(“ “)}输出结果正确的是？</strong></em> <code>D</code></p>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.<span class="hljs-number">11</span> <span class="hljs-number">12</span> <span class="hljs-number">21</span> <span class="hljs-number">22</span> <span class="hljs-number">31</span> <span class="hljs-number">32</span><br><span class="hljs-attribute">B</span>.<span class="hljs-number">11</span> <span class="hljs-number">13</span> <span class="hljs-number">21</span> <span class="hljs-number">23</span> <span class="hljs-number">31</span> <span class="hljs-number">33</span><br><span class="hljs-attribute">C</span>.<span class="hljs-number">11</span> <span class="hljs-number">12</span> <span class="hljs-number">13</span> <span class="hljs-number">21</span> <span class="hljs-number">22</span> <span class="hljs-number">23</span> <span class="hljs-number">31</span> <span class="hljs-number">32</span> <span class="hljs-number">33</span><br><span class="hljs-attribute">D</span>.<span class="hljs-number">12</span> <span class="hljs-number">13</span> <span class="hljs-number">21</span> <span class="hljs-number">23</span> <span class="hljs-number">31</span> <span class="hljs-number">32</span><br></code></pre></td></tr></table></figure>
<p><em><strong>2.11 ‎以下哪些选项属于Scala的基本特性?</strong></em> <code> ABCD</code></p>
<figure class="highlight mipsasm"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mipsasm">A.是一门类<span class="hljs-keyword">Java的多范式语言</span><br><span class="hljs-keyword"></span><span class="hljs-keyword">B.是一门函数式语言，支持高阶函数，允许嵌套多层函数，并支持柯里化（Currying）</span><br><span class="hljs-keyword"></span>C.运行于<span class="hljs-keyword">Java虚拟机（JVM）之上，并且兼容现有的Java程序</span><br><span class="hljs-keyword"></span>D.是一门纯粹的面向对象的语言<br></code></pre></td></tr></table></figure>
<p><em><strong>2.12 关于主构造器，以下说法正确的是？</strong></em> <code>ABD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‏<span class="hljs-selector-tag">A</span>.主构造器的参数可以直接放在类名后<br><span class="hljs-selector-tag">B</span>.主构造器中可以使用默认参数<br>C.主构造器在每个类都可以定义多个<br>D.主构造器会执行类定义中的所有语句<br></code></pre></td></tr></table></figure>
<p><em><strong>2.13 Scala里的函数是“头等公民”，以下哪些说法是正确的？</strong></em> <code>ACD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.将函数赋值给变量<br><span class="hljs-selector-tag">B</span>.以上说法都不正确<br>C.将函数作为其他函数的返回值<br>D.将函数作为参数传递给其他函数<br></code></pre></td></tr></table></figure>
<p><em><strong>2.14 以下关于特质的说法正确的是？</strong></em> <code>ABC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.类可以实现任意数量的特质<br><span class="hljs-selector-tag">B</span>.特质可以要求实现它们的类具备特定的字段、方法或超类<br>C.当将多个特质叠加在一起时，顺序很重要，其方法先被执行的特质排在更后面<br>D.与Java接口(Interface)相同，Scala特质不可以提供方法和字段的实现<br></code></pre></td></tr></table></figure>
<p><em><strong>2.15 对于元组val t &#x3D; (1, 3.14, “Fred”)说法正确的是？</strong></em> <code>BCD</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>t_1 等于 <span class="hljs-number">1</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>t._0无法访问，会抛出异常<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>t 的类型为 Tuple3<span class="hljs-literal">[I<span class="hljs-identifier">nt</span>, D<span class="hljs-identifier">ouble</span>, <span class="hljs-identifier">java</span>.<span class="hljs-identifier">lang</span>.S<span class="hljs-identifier">tring</span>]</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span><span class="hljs-keyword">val</span> (first, second, _) = t <span class="hljs-comment">// second 等于 3.14</span><br></code></pre></td></tr></table></figure>
<p><em><strong>2.16 Scala 中，类和它的伴生对象说法正确的是？</strong></em> <code>BC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.类和它的伴生对象可以有不同的名称<br><span class="hljs-selector-tag">B</span>.类和它的伴生对象定义在同一个文件中<br>C.类和它的伴生对象可以互相访问私有特性<br>D.类有静态方法，伴生对象没有静态方法<br></code></pre></td></tr></table></figure>

<p><em><strong>2.17 关于数组val a &#x3D; Array(1,2,3)下列说法正确的是？</strong></em> <code>ABC</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml">‍<span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span><span class="hljs-keyword">val</span> b = <span class="hljs-keyword">for</span>(elem &lt;- a <span class="hljs-keyword">if</span> elem % <span class="hljs-number">2</span><span class="hljs-operator"> == </span><span class="hljs-number">0</span>) yield <span class="hljs-number">2</span><span class="hljs-operator"> * </span>elem <span class="hljs-comment">// b 等于 Array(4)</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span><span class="hljs-keyword">val</span> b = <span class="hljs-keyword">for</span>(elem &lt;- a) yield <span class="hljs-number">2</span><span class="hljs-operator"> * </span>elem <span class="hljs-comment">// b 等于 Array(2,4,6)</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span><span class="hljs-keyword">val</span> b = a.map(_*<span class="hljs-number">2</span>) <span class="hljs-comment">// b 等于 Array(2,4,6)</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span><span class="hljs-keyword">val</span> b = <span class="hljs-number">2</span><span class="hljs-operator"> * </span>a <span class="hljs-comment">// b 等于 Array(2,4,6)</span><br></code></pre></td></tr></table></figure>

<p><em><strong>2.18‎ 以下关于Scala各种数据结构的说法正确的是?</strong></em>  <code>ABC</code></p>
<figure class="highlight mathematica"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mathematica"><span class="hljs-variable">A</span><span class="hljs-operator">.</span>集合<span class="hljs-punctuation">(</span><span class="hljs-built_in">Set</span><span class="hljs-punctuation">)</span>是不重复元素的容器<br><span class="hljs-variable">B</span><span class="hljs-operator">.</span>列表<span class="hljs-punctuation">(</span><span class="hljs-built_in">List</span><span class="hljs-punctuation">)</span>一旦被定义<span class="hljs-operator">,</span>其值就不能改变<br><span class="hljs-built_in">C</span><span class="hljs-operator">.</span>迭代器<span class="hljs-punctuation">(</span><span class="hljs-variable">Iterator</span><span class="hljs-punctuation">)</span>是一种提供了按顺序访问容器元素的数据结构<br><span class="hljs-built_in">D</span><span class="hljs-operator">.</span>映射<span class="hljs-punctuation">(</span><span class="hljs-built_in">Map</span><span class="hljs-punctuation">)</span>是一系列键值对的容器<span class="hljs-operator">,</span>在一个映射中<span class="hljs-operator">,</span>键是唯一的<span class="hljs-operator">,</span>值也是唯一的<br></code></pre></td></tr></table></figure>
<p><em><strong>2.19 val books &#x3D; List(&quot;Hadoop&quot;,&quot;Hive&quot;,&quot;Mapreduce&quot;),以下哪些操作能将字符串全部变成大写？</strong></em>  <code>BCD</code></p>
<figure class="highlight mipsasm"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mipsasm">‎A.for (<span class="hljs-keyword">book </span>&lt;-<span class="hljs-keyword">books; </span>c&lt;-<span class="hljs-keyword">book) </span>yield c.toUpperCase<br><span class="hljs-keyword">B.books.map(s </span>=&gt; s.toUpperCase)<br>C.for (<span class="hljs-keyword">book&lt;-books) </span>yield <span class="hljs-keyword">book.toUpperCase</span><br><span class="hljs-keyword"></span>D.<span class="hljs-keyword">books.map(_.toUpperCase)</span><br></code></pre></td></tr></table></figure>

<p><em><strong>2.20 在Scala中，关于Nothing，null，Null，Option，Some，None的说法正确的是？</strong></em> <code>ABCD</code></p>
<figure class="highlight pgsql"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs pgsql">‍A.<span class="hljs-keyword">Null</span>是所有引用类型的子类，其唯一的实例是<span class="hljs-keyword">null</span><br>B.<span class="hljs-keyword">null</span>表示一个空对象，可以赋值给任何引用类型<br>C.类<span class="hljs-keyword">Option</span>是一个抽象类，有一个具体子类<span class="hljs-keyword">Some</span> 和一个对象<span class="hljs-keyword">None</span>，分别表示有值和无值的情况<br>D.<span class="hljs-keyword">Nothing</span> 是所有其他类型的子类，没有实例，主要用于异常处理函数的返回类型<br></code></pre></td></tr></table></figure>
<h1 id="第3章-Spark的设计与运行原理-10个题"><a href="#第3章-Spark的设计与运行原理-10个题" class="headerlink" title="第3章 Spark的设计与运行原理 (10个题)"></a>第3章 Spark的设计与运行原理 (10个题)</h1><hr>
<p><em><strong>3.1 ‎以下是Spark的主要特点的有?</strong></em>   <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.运行速度快<br><span class="hljs-selector-tag">B</span>.容易使用，简洁的API设计有助于用户轻松构建并行程序<br>C.通用性，Spark提供了完整而强大的技术栈<br>D.运行模式多样<br></code></pre></td></tr></table></figure>
<p><em><strong>3.2 Spark的运行架构包括哪些？</strong></em> <code>ABCD</code></p>
<figure class="highlight crmsh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs crmsh">A.集群资源管理器（Cluster Manager）<br>B.执行进程（Executor）<br>C.Worker <span class="hljs-keyword">Node</span><br><span class="hljs-title">D</span>.任务控制节点Driver Program<br></code></pre></td></tr></table></figure>

<p><em><strong>3.‎3 关于RDD之间的依赖分为窄依赖和宽依赖，以下说法正确的是？</strong></em> <code>AC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.存在一个父RDD的一个分区对应一个子RDD的多个分区，则为宽依赖<br><span class="hljs-selector-tag">B</span>.存在一个父RDD的多个分区对应一个子RDD的一个分区，则为宽依赖<br>C.存在一个父RDD的一个分区只被一个子RDD的一个分区所使用，则为窄依赖<br>D.存在一个父RDD的一个分区被一个子RDD的多个分区所使用，则为窄依赖<br></code></pre></td></tr></table></figure>

<p><em><strong>3.4 Spark可以采用几种不同的部署方式，以下正确的部署方式有？</strong></em> <code>ABCD</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Local</span><br>B<span class="hljs-selector-class">.Standalone</span><br>C<span class="hljs-selector-class">.Spark</span> on Mesos<br>D<span class="hljs-selector-class">.Spark</span> on YARN<br></code></pre></td></tr></table></figure>
<p><em><strong>3.5 ​目前的大数据处理典型应用场景可分为哪几个类型?</strong></em> <code>ABD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.复杂的批量数据处理<br><span class="hljs-selector-tag">B</span>.基于历史数据的交互式查询<br>C.大数据的分布式计算<br>D.基于实时数据流的数据处理<br></code></pre></td></tr></table></figure>
<p><em><strong>3.6 以下哪个不是Spark的组件?</strong></em>  <code>D</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Spark</span> Streaming<br>B<span class="hljs-selector-class">.MLlib</span><br>C<span class="hljs-selector-class">.GraphX</span><br>D.Flink<br></code></pre></td></tr></table></figure>

<p><em><strong>3.7 下面哪个不是 RDD 的特点 ?</strong></em>   <code>C</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.可分区<br><span class="hljs-selector-tag">B</span>.可序列化<br>C.可修改<br>D.可持久化<br></code></pre></td></tr></table></figure>
<p><em><strong>3.8 Task是Executor上的工作单元，运行于下面哪个组件上？</strong></em> <code>C</code></p>
<figure class="highlight crmsh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs crmsh">A.Driver Program<br>B.Spark <span class="hljs-keyword">Master</span><br><span class="hljs-title">C</span>.Worker <span class="hljs-keyword">Node</span><br><span class="hljs-title">D</span>.Cluster Manager<br></code></pre></td></tr></table></figure>

<p><em><strong>3.9 下面哪个操作肯定是宽依赖？</strong></em> <code>C</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>map<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>filter<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>reduceByKey<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>union<br></code></pre></td></tr></table></figure>

<p><em><strong>3.10 以下选项中哪些是Spark的优点？</strong></em> <code>AC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‍<span class="hljs-selector-tag">A</span>.具有高效的容错性<br><span class="hljs-selector-tag">B</span>.利用进程模型<br>C.可以将中间结果持久化到内存<br>D.表达能力有限<br></code></pre></td></tr></table></figure>

<h1 id="第4章-Spark环境搭建和使用方法-10个题"><a href="#第4章-Spark环境搭建和使用方法-10个题" class="headerlink" title="第4章 Spark环境搭建和使用方法 (10个题)"></a>第4章 Spark环境搭建和使用方法 (10个题)</h1><hr>
<p><em><strong>4.1​ Spark部署模式有哪几种?</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.Local</span>模式（单机模式）<br><span class="hljs-selector-tag">B</span><span class="hljs-selector-class">.Standalone</span>模式 <br>C<span class="hljs-selector-class">.YARN</span>模式<br>D<span class="hljs-selector-class">.Mesos</span>模式<br></code></pre></td></tr></table></figure>
<p><em><strong>4.2‏ 关于Hadoop和Spark的相互关系，以下说法正确的是？</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.Hadoop</span>和Spark可以相互协作<br><span class="hljs-selector-tag">B</span><span class="hljs-selector-class">.Hadoop</span>负责数据的存储和管理<br>C<span class="hljs-selector-class">.Spark</span>负责数据的计算<br>D<span class="hljs-selector-class">.Spark</span>要操作Hadoop中的数据，需要先启动HDFS<br></code></pre></td></tr></table></figure>
<p><em><strong>4.3 判断HDFS是否启动成功，可以通过哪个命令？</strong></em>  <code>C</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml">‎<span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>hdfs<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>spark<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>jps<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>start-dfs<br></code></pre></td></tr></table></figure>
<p><em><strong>4.4 ‏HDFS若启动成功，系统会列出以下哪些进程？</strong></em> <code>ACD</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.NameNode</span><br>B<span class="hljs-selector-class">.HDFS</span><br>C<span class="hljs-selector-class">.DataNode</span><br>D.SecondaryNameNode<br></code></pre></td></tr></table></figure>
<p><em><strong>4.5 spark-shell在启动时，&lt;master-url&gt;采用<code>local[*]</code>时，它的含义是？</strong></em> <code>B</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.使用任意个线程来本地化运行Spark<br><span class="hljs-selector-tag">B</span>.使用与逻辑CPU个数相同数量的线程来本地化运行Spark<br>C.使用与逻辑CPU个数相同数量的进程来本地化运行Spark<br>D.使用单个线程来本地化运行Spark<br></code></pre></td></tr></table></figure>
<p><em><strong>4.6‎ spark-shell在启动时，采用yarn-client模式时，以下说法正确的是？</strong></em> <code>AC</code></p>
<figure class="highlight arduino"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs arduino">A.当用户提交了作业之后，不能关掉<span class="hljs-built_in">Client</span><br>B.当用户提交了作业之后，就可以关掉<span class="hljs-built_in">Client</span><br>C.该模式适合运行交互类型的作业<br>D.该模式不适合运行交互类型的作业<br></code></pre></td></tr></table></figure>

<p><em><strong>4.7 spark-shell在启动时，采用yarn-cluster模式时，以下说法正确的是？</strong></em>  <code>BD</code></p>
<figure class="highlight arduino"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs arduino">A.当用户提交了作业之后，不能关掉<span class="hljs-built_in">Client</span><br>B.当用户提交了作业之后，就可以关掉<span class="hljs-built_in">Client</span><br>C.该模式适合运行交互类型的作业<br>D.该模式不适合运行交互类型的作业<br></code></pre></td></tr></table></figure>
<p><em><strong>4.8‍ 开发Spark独立应用程序的基本步骤通常有哪些?</strong></em>   <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‌<span class="hljs-selector-tag">A</span>.安装编译打包工具，如sbt，Maven<br><span class="hljs-selector-tag">B</span>.编写Spark应用程序代码<br>C.编译打包<br>D.通过spark-submit运行程序<br></code></pre></td></tr></table></figure>
<p><em><strong>4.9 下面描述正确的是：</strong></em> <code>C</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.Hadoop</span>和Spark不能部署在同一个集群中<br><span class="hljs-selector-tag">B</span><span class="hljs-selector-class">.Hadoop</span>只包含了存储组件，不包含计算组件<br>C<span class="hljs-selector-class">.Spark</span>是一个分布式计算框架，可以和Hadoop组合使用<br>D<span class="hljs-selector-class">.Spark</span>和Hadoop是竞争关系，二者不能组合使用<br></code></pre></td></tr></table></figure>

<p><em><strong>4.10‍ 集群上运行Spark应用程序的方法步骤有哪些?</strong></em> <code>ABCD</code></p>
<figure class="highlight crmsh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs crmsh">A.启动Hadoop集群<br>B.启动Spark的<span class="hljs-literal">Master</span>节点和所有<span class="hljs-literal">Slave</span>节点<br>C.在集群中运行应用程序JAR包<br>D.查看集群信息以获得应用程序运行的相关信息<br></code></pre></td></tr></table></figure>

<h1 id="第5章-RDD编程-10个题"><a href="#第5章-RDD编程-10个题" class="headerlink" title="第5章 RDD编程 (10个题)"></a>第5章 RDD编程 (10个题)</h1><hr>
<p><em><strong>5.1 以下操作中，哪个不是Spark RDD编程中的操作</strong></em> <code>A</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml">‍<span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>get<span class="hljs-constructor">LastOne()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>filter<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>reduce<span class="hljs-constructor">ByKey(<span class="hljs-params">func</span>)</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>reduce<span class="hljs-literal">()</span><br></code></pre></td></tr></table></figure>

<p><em><strong>5.2 下述语句执行的结果是</strong></em>  <code>A</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><code class="hljs stylus">‏val rdd=sc<span class="hljs-selector-class">.parallelize</span>(<span class="hljs-built_in">Array</span>(<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>,<span class="hljs-number">4</span>,<span class="hljs-number">5</span>))<br>rdd<span class="hljs-selector-class">.take</span>(<span class="hljs-number">3</span>)<br></code></pre></td></tr></table></figure>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.Array(<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>)<br><span class="hljs-attribute">B</span>.Array(<span class="hljs-number">2</span>,<span class="hljs-number">3</span>,<span class="hljs-number">4</span>)<br><span class="hljs-attribute">C</span>.<span class="hljs-number">3</span><br><span class="hljs-attribute">D</span>.<span class="hljs-number">6</span><br></code></pre></td></tr></table></figure>

<p><em><strong>5.3 有一个键值对RDD，名称为pairRDD，它包含4个元素，分别是(&quot;Hadoop&quot;,1)、(&quot;Spark&quot;,1)、(&quot;Hive&quot;,1)和(&quot;Spark&quot;,1),则pairRDD.reduceByKey((a,b)&#x3D;&gt;a+b)执行结果得到的RDD，它里面包含的元素是</strong></em> <code>A</code></p>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.(“Hadoop”,<span class="hljs-number">1</span>),(“Spark”,<span class="hljs-number">2</span>),(“Hive”,<span class="hljs-number">1</span>)<br><span class="hljs-attribute">B</span>.(“Hadoop”,<span class="hljs-number">2</span>),(“Spark”,<span class="hljs-number">1</span>),(“Hive”,<span class="hljs-number">1</span>)<br><span class="hljs-attribute">C</span>.(“Hadoop”,<span class="hljs-number">2</span>),(“Spark”,<span class="hljs-number">2</span>),(“Hive”,<span class="hljs-number">2</span>)<br><span class="hljs-attribute">D</span>.(“Hadoop”,<span class="hljs-number">1</span>),(“Spark”,<span class="hljs-number">2</span>),(“Hive”,<span class="hljs-number">2</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>5.4 ‌下述语句的执行结果wordCountsWithGroup中包含的元素是</strong></em> <code>A</code></p>
<figure class="highlight arcade"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><code class="hljs arcade">val  words = <span class="hljs-built_in">Array</span>(<span class="hljs-string">&quot;one&quot;</span>, <span class="hljs-string">&quot;two&quot;</span>, <span class="hljs-string">&quot;two&quot;</span>, <span class="hljs-string">&quot;three&quot;</span>, <span class="hljs-string">&quot;three&quot;</span>, <span class="hljs-string">&quot;three&quot;</span>) <br>‌val  wordPairsRDD = sc.parallelize(words).<span class="hljs-built_in">map</span>(<span class="hljs-function"><span class="hljs-params">word</span> =&gt;</span> (word, <span class="hljs-number">1</span>))<br>‌val  wordCountsWithGroup = wordPairsRDD. groupByKey().<span class="hljs-built_in">map</span>(<span class="hljs-function"><span class="hljs-params">t</span> =&gt;</span> (t._1, t._2.<span class="hljs-built_in">sum</span>))<br></code></pre></td></tr></table></figure>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.(“one”,<span class="hljs-number">1</span>),(“two”,<span class="hljs-number">2</span>),(“three”,<span class="hljs-number">3</span>)<br><span class="hljs-attribute">B</span>.(“one”,<span class="hljs-number">1</span>),(“two”,<span class="hljs-number">2</span>),(“three”,<span class="hljs-number">1</span>)<br><span class="hljs-attribute">C</span>.(“one”,<span class="hljs-number">3</span>),(“two”,<span class="hljs-number">2</span>),(“three”,<span class="hljs-number">1</span>)<br><span class="hljs-attribute">D</span>.(“one”,<span class="hljs-number">1</span>),(“two”,<span class="hljs-number">1</span>),(“three”,<span class="hljs-number">1</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>5.5 有一个键值对RDD，名称为pairRDD，包含4个元素，分别是(&quot;Hadoop&quot;,1)、(&quot;Spark&quot;,1)、(&quot;Hive&quot;,1)和(&quot;Spark&quot;,1)，则pairRDD.mapValues(x &#x3D;&gt; x+1)操作得到的RDD中所包含的元素是</strong></em> <code>C</code></p>
<figure class="highlight apache"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs apache"><span class="hljs-attribute">A</span>.<span class="hljs-number">1</span>,<span class="hljs-number">1</span>,<span class="hljs-number">1</span>,<span class="hljs-number">1</span><br><span class="hljs-attribute">B</span>.<span class="hljs-number">2</span>,<span class="hljs-number">2</span>,<span class="hljs-number">2</span>,<span class="hljs-number">2</span><br><span class="hljs-attribute">C</span>.(<span class="hljs-string">&quot;Hadoop&quot;</span>,<span class="hljs-number">2</span>)、(<span class="hljs-string">&quot;Spark&quot;</span>,<span class="hljs-number">2</span>)、(<span class="hljs-string">&quot;Hive&quot;</span>,<span class="hljs-number">2</span>)和(<span class="hljs-string">&quot;Spark&quot;</span>,<span class="hljs-number">2</span>)<br><span class="hljs-attribute">D</span>. (<span class="hljs-string">&quot;Hadoop&quot;</span>,<span class="hljs-number">1</span>)、(<span class="hljs-string">&quot;Spark&quot;</span>,<span class="hljs-number">1</span>)、(<span class="hljs-string">&quot;Hive&quot;</span>,<span class="hljs-number">1</span>)和(<span class="hljs-string">&quot;Spark&quot;</span>,<span class="hljs-number">1</span>)<br></code></pre></td></tr></table></figure>
<p><em><strong>5.6 RDD操作包括哪两种类型</strong></em> <code>AC</code></p>
<figure class="highlight mathematica"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mathematica"><span class="hljs-variable">A</span><span class="hljs-operator">.</span>行动（<span class="hljs-variable">Action</span>）<br><span class="hljs-variable">B</span><span class="hljs-operator">.</span>分组（<span class="hljs-built_in">GroupBy</span>）<br><span class="hljs-built_in">C</span><span class="hljs-operator">.</span>转换（<span class="hljs-variable">Transformation</span>）<br><span class="hljs-built_in">D</span><span class="hljs-operator">.</span>连接（<span class="hljs-built_in">Join</span>）<br></code></pre></td></tr></table></figure>
<p><em><strong>5.7 ‏以下操作中，哪些是转换（Transformation）操作</strong></em> <code>AB</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>filter<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>reduce<span class="hljs-constructor">ByKey(<span class="hljs-params">func</span>)</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>first<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>count<span class="hljs-literal">()</span><br></code></pre></td></tr></table></figure>

<p><em><strong>5.8 以下操作中，哪些是行动（Action）操作</strong></em>  <code>AB</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>reduce<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>collect<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>group<span class="hljs-constructor">ByKey()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>map<span class="hljs-literal">()</span><br></code></pre></td></tr></table></figure>
<p><em><strong>5.9 ‏以下关于RDD的持久化的描述，正确的是</strong></em> <code>ABCD</code></p>
<figure class="highlight scss"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs scss"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.persist</span>(MEMORY_ONLY)：表示将RDD作为反序列化的对象存储于JVM中，如果内存不足，就要按照LRU原则替换缓存中的内容<br><span class="hljs-selector-tag">B</span>.通过持久化（缓存）机制可以避免重复计算的开销<br>C<span class="hljs-selector-class">.persist</span>(MEMORY_AND_DISK)：表示将RDD作为反序列化的对象存储在JVM中，如果内存不足，超出的分区将会被存放在硬盘上<br>D.使用<span class="hljs-built_in">cache</span>()方法时，会调用<span class="hljs-built_in">persist</span>(MEMORY_ONLY)<br></code></pre></td></tr></table></figure>


<p><em><strong>5.10 ‎关于RDD分区的作用，下面描述正确的是</strong></em>  <code>BC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.增加时间开销<br><span class="hljs-selector-tag">B</span>.增加并行度<br>C.减少通信开销<br>D.减少并行度<br></code></pre></td></tr></table></figure>

<h1 id="第6章-Spark-SQL-10个题"><a href="#第6章-Spark-SQL-10个题" class="headerlink" title="第6章 Spark SQL (10个题)"></a>第6章 Spark SQL (10个题)</h1><hr>
<p><em><strong>6.1 关于Shark，下面描述正确的是：</strong></em> <code>C</code></p>
<figure class="highlight mipsasm"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs mipsasm">A.<span class="hljs-keyword">Shark提供了类似Pig的功能</span><br><span class="hljs-keyword"></span><span class="hljs-keyword">B.Shark把SQL语句转换成MapReduce作业</span><br><span class="hljs-keyword"></span>C.<span class="hljs-keyword">Shark重用了Hive中的HiveQL解析、逻辑执行计划翻译、执行计划优化等逻辑</span><br><span class="hljs-keyword"></span>D.<span class="hljs-keyword">Shark的性能比Hive差很多</span><br></code></pre></td></tr></table></figure>
<p><em><strong>6.2‎ 下面关于Spark SQL架构的描述错误的是：</strong></em> <code>D</code></p>
<figure class="highlight sql"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs sql">A.在Shark原有的架构上重写了逻辑执行计划的优化部分，解决了Shark存在的问题<br>B.Spark <span class="hljs-keyword">SQL</span>在Hive兼容层面仅依赖HiveQL解析和Hive元数据<br>C.Spark <span class="hljs-keyword">SQL</span>执行计划生成和优化都由Catalyst（函数式关系查询优化框架）负责<br>D.Spark <span class="hljs-keyword">SQL</span>执行计划生成和优化需要依赖Hive来完成<br></code></pre></td></tr></table></figure>
<p><em><strong>6.3 要把一个DataFrame保存到people.json文件中，下面语句哪个是正确的：</strong></em> <code>A</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.df</span><span class="hljs-selector-class">.write</span><span class="hljs-selector-class">.json</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>B. df<span class="hljs-selector-class">.json</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>C<span class="hljs-selector-class">.df</span><span class="hljs-selector-class">.write</span><span class="hljs-selector-class">.format</span>(<span class="hljs-string">&quot;csv&quot;</span>)<span class="hljs-selector-class">.save</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>D<span class="hljs-selector-class">.df</span><span class="hljs-selector-class">.write</span><span class="hljs-selector-class">.csv</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br></code></pre></td></tr></table></figure>

<p><em><strong>6.4 以下操作中，哪个不是DataFrame的常用操作：</strong></em> <code>D</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>print<span class="hljs-constructor">Schema()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>select<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">C</span>.</span></span>filter<span class="hljs-literal">()</span><br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>sendto<span class="hljs-literal">()</span><br></code></pre></td></tr></table></figure>

<p><em><strong>6.5‍ Shark的设计导致了两个问题：</strong></em> <code>AC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.执行计划优化完全依赖于Hive，不方便添加新的优化策略<br><span class="hljs-selector-tag">B</span>.执行计划优化不依赖于Hive，方便添加新的优化策略<br>C<span class="hljs-selector-class">.Spark</span>是线程级并行，而MapReduce是进程级并行，因此，Spark在兼容Hive的实现上存在线程安全问题，导致Shark不得不使用另外一套独立维护的、打了补丁的Hive源码分支<br>D<span class="hljs-selector-class">.Spark</span>是进程级并行，而MapReduce是线程级并行，因此，Spark在兼容Hive的实现上存在线程安全问题，导致Shark不得不使用另外一套独立维护的、打了补丁的Hive源码分支<br></code></pre></td></tr></table></figure>

<p><em><strong>6.6 ‏下面关于为什么推出Spark SQL的原因的描述正确的是：</strong></em> <code>AB</code>  </p>
<figure class="highlight sql"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs sql">‍A.Spark <span class="hljs-keyword">SQL</span>可以提供DataFrame API，可以对内部和外部各种数据源执行各种关系操作<br>B.可以支持大量的数据源和数据分析算法，组合使用Spark <span class="hljs-keyword">SQL</span>和Spark MLlib，可以融合传统关系数据库的结构化数据管理能力和机器学习算法的数据处理能力<br>C.Spark <span class="hljs-keyword">SQL</span>无法对各种不同的数据源进行整合<br>D.Spark <span class="hljs-keyword">SQL</span>无法融合结构化数据管理能力和机器学习算法的数据处理能力<br></code></pre></td></tr></table></figure>

<p><em><strong>6.7 下面关于DataFrame的描述正确的是：</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.DataFrame</span>的推出，让Spark具备了处理大规模结构化数据的能力<br><span class="hljs-selector-tag">B</span><span class="hljs-selector-class">.DataFrame</span>比原有的RDD转化方式更加简单易用，而且获得了更高的计算性能<br>C<span class="hljs-selector-class">.Spark</span>能够轻松实现从MySQL到DataFrame的转化，并且支持SQL查询<br>D<span class="hljs-selector-class">.DataFrame</span>是一种以RDD为基础的分布式数据集，提供了详细的结构信息<br></code></pre></td></tr></table></figure>
<p><em><strong>6.8‌ 要读取people.json文件生成DataFrame，可以使用下面哪些命令：</strong></em> <code> AC</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.spark</span><span class="hljs-selector-class">.read</span><span class="hljs-selector-class">.json</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>B<span class="hljs-selector-class">.spark</span><span class="hljs-selector-class">.read</span><span class="hljs-selector-class">.parquet</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>C<span class="hljs-selector-class">.spark</span><span class="hljs-selector-class">.read</span><span class="hljs-selector-class">.format</span>(<span class="hljs-string">&quot;json&quot;</span>)<span class="hljs-selector-class">.load</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br>D<span class="hljs-selector-class">.spark</span><span class="hljs-selector-class">.read</span><span class="hljs-selector-class">.format</span>(<span class="hljs-string">&quot;csv&quot;</span>)<span class="hljs-selector-class">.load</span>(<span class="hljs-string">&quot;people.json&quot;</span>)<br></code></pre></td></tr></table></figure>

<p><em><strong>6.9 从RDD转换得到DataFrame包含两种典型方法，分别是：</strong></em> <code>AB</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.利用反射机制推断RDD模式<br><span class="hljs-selector-tag">B</span>.使用编程方式定义RDD模式<br>C.利用投影机制推断RDD模式<br>D.利用互联机制推断RDD模式<br></code></pre></td></tr></table></figure>

<p><em><strong>6.10 使用编程方式定义RDD模式时，主要包括哪三个步骤：</strong></em>  <code>ABD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‍<span class="hljs-selector-tag">A</span>.制作“表头”<br><span class="hljs-selector-tag">B</span>.制作“表中的记录”<br>C.制作映射表<br>D.把“表头”和“表中的记录”拼装在一起<br></code></pre></td></tr></table></figure>

<h1 id="第7章-Spark-Streaming-11个题"><a href="#第7章-Spark-Streaming-11个题" class="headerlink" title="第7章 Spark Streaming (11个题)"></a>第7章 Spark Streaming (11个题)</h1><hr>
<p><em><strong>7.1 以下流计算框架中，哪个不是开源的：</strong></em> <code>A</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.IBM</span> StreamBase<br>B<span class="hljs-selector-class">.Twitter</span> Storm<br>C.Yahoo! S4<br>D<span class="hljs-selector-class">.Spark</span> Streaming<br></code></pre></td></tr></table></figure>

<p><em><strong>7.2 ‎下面关于Spark Streaming的描述错误的是：</strong></em> <code>D</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Spark</span> Streaming的基本原理是将实时输入数据流以时间片为单位进行拆分，然后采用Spark引擎以类似批处理的方式处理每个时间片数据<br>B<span class="hljs-selector-class">.Spark</span> Streaming最主要的抽象是DStream（Discretized Stream，离散化数据流），表示连续不断的数据流<br>C<span class="hljs-selector-class">.Spark</span> Streaming可整合多种输入数据源，如Kafka、Flume、HDFS，甚至是普通的TCP套接字<br>D<span class="hljs-selector-class">.Spark</span> Streaming的数据抽象是DataFrame<br></code></pre></td></tr></table></figure>

<p><em><strong>7.3 ​下面关于Spark Streaming和Storm的描述正确的是：</strong></em> <code>A</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Spark</span> Streaming无法实现毫秒级的流计算，而Storm可以实现毫秒级响应<br>B<span class="hljs-selector-class">.Spark</span> Streaming可以实现毫秒级的流计算，而Storm无法实现毫秒级响应<br>C<span class="hljs-selector-class">.Spark</span> Streaming和Storm都可以实现毫秒级的流计算<br>D<span class="hljs-selector-class">.Spark</span> Streaming和Storm都无法实现毫秒级的流计算<br></code></pre></td></tr></table></figure>
<p><em><strong>7.4 ‏下面描述错误的是：</strong></em> <code>D</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.在RDD编程中需要生成一个SparkContext对象<br><span class="hljs-selector-tag">B</span>.在Spark SQL编程中需要生成一个SparkSession对象<br>C.运行一个Spark Streaming程序，就需要首先生成一个StreamingContext对象<br>D.在Spark SQL编程中需要生成一个StreamingContext对象<br></code></pre></td></tr></table></figure>

<p><em><strong>7.5 下面不属于Spark Streaming基本输入源的是：</strong></em> <code>D</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.文件流<br><span class="hljs-selector-tag">B</span>.套接字流<br>C<span class="hljs-selector-class">.RDD</span>队列流<br>D.双向数据流<br></code></pre></td></tr></table></figure>

<p><em><strong>7.6 以下关于流数据特征的描述，哪些是正确的：</strong></em> <code>ABCD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‍<span class="hljs-selector-tag">A</span>.数据快速持续到达，潜在大小也许是无穷无尽的<br><span class="hljs-selector-tag">B</span>.数据来源众多，格式复杂<br>C.数据量大，但是不十分关注存储，一旦流数据中的某个元素经过处理，要么被丢弃，要么被归档存储<br>D.数据顺序颠倒，或者不完整，系统无法控制将要处理的新到达的数据元素的顺序<br></code></pre></td></tr></table></figure>

<p><em><strong>7.7 流计算处理流程一般包括哪三个阶段：</strong></em> <code>ABD</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.数据实时采集<br><span class="hljs-selector-tag">B</span>.数据实时计算<br>C.数据汇总分析<br>D.实时查询服务<br></code></pre></td></tr></table></figure>

<p><em><strong>7.8 ‎以下产品哪些属于日志采集组件：</strong></em> <code>AC</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">A<span class="hljs-selector-class">.Scribe</span><br>B<span class="hljs-selector-class">.GraphX</span><br>C<span class="hljs-selector-class">.Flume</span><br>D.MySQL<br></code></pre></td></tr></table></figure>

<p><em><strong>7.9 流处理系统与传统的数据处理系统的不同之处在于：</strong></em> <code>ABC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.流处理系统处理的是实时的数据，而传统的数据处理系统处理的是预先存储好的静态数据<br><span class="hljs-selector-tag">B</span>.用户通过流处理系统获取的是实时结果，而通过传统的数据处理系统获取的是过去某一时刻的结果<br>C.流处理系统无需用户主动发出查询，实时查询服务可以主动将实时结果推送给用户<br>D.流处理系统处理的是历史的数据，而传统的数据处理系统处理的是实时的数据<br></code></pre></td></tr></table></figure>
<p><em><strong>7.10‌ 编写Spark Streaming程序的基本步骤包括：</strong></em> <code>ABCD</code></p>
<figure class="highlight scss"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs scss"><span class="hljs-selector-tag">A</span>.通过创建输入DStream（<span class="hljs-selector-tag">Input</span> Dstream）来定义输入源<br><span class="hljs-selector-tag">B</span>.通过对DStream应用转换操作和输出操作来定义流计算<br>C.调用StreamingContext对象的<span class="hljs-built_in">start</span>()方法来开始接收数据和处理流程<br>D.调用StreamingContext对象的<span class="hljs-built_in">awaitTermination</span>()方法来等待流计算进程结束<br></code></pre></td></tr></table></figure>

<p><em><strong>7.11 DStream有状态转换操作包括哪两种：</strong></em>  <code>CD</code></p>
<figure class="highlight reasonml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs reasonml"><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">A</span>.</span></span>update操作<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">B</span>.</span></span>reduceByKey操作<br>C.滑动窗口转换操作<br><span class="hljs-module-access"><span class="hljs-module"><span class="hljs-identifier">D</span>.</span></span>updateStateByKey操作<br></code></pre></td></tr></table></figure>

<h1 id="第8章-Spark-MLlib-8个题"><a href="#第8章-Spark-MLlib-8个题" class="headerlink" title="第8章 Spark MLlib (8个题)"></a>第8章 Spark MLlib (8个题)</h1><hr>
<p><em><strong>8.1 下面论述中错误的是：</strong></em> <code>A</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.机器学习和人工智能是不存在关联关系的两个独立领域<br><span class="hljs-selector-tag">B</span>.机器学习强调三个关键词：算法、经验、性能<br>C.推荐系统、金融反欺诈、语音识别、自然语言处理和机器翻译、模式识别、智能控制等领域，都用到了机器学习的知识<br>D.机器学习可以看作是一门人工智能的科学，该领域的主要研究对象是人工智能<br></code></pre></td></tr></table></figure>

<p><em><strong>8.2‌ 下面关于机器学习处理过程的描述，错误的是：</strong></em> <code>D</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css">‌<span class="hljs-selector-tag">A</span>.在数据的基础上，通过算法构建出模型并对模型进行评估<br><span class="hljs-selector-tag">B</span>.评估的性能如果达到要求，就用该模型来测试其他的数据<br>C.评估的性能如果达不到要求，就要调整算法来重新建立模型，再次进行评估<br>D.通过算法构建出的模型不需要评估就可以用于其他数据的测试<br></code></pre></td></tr></table></figure>
<p><em><strong>8.3 ​下面关于机器学习流水线(PipeLine)的描述，错误的是：</strong></em>  <code>D</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.流水线将多个工作流阶段（转换器和评估器）连接在一起，形成机器学习的工作流，并获得结果输出<br><span class="hljs-selector-tag">B</span>.要构建一个机器学习流水线，首先需要定义流水线中的各个PipelineStage<br>C<span class="hljs-selector-class">.PipelineStage</span>称为工作流阶段，包括转换器和评估器，比如指标提取和转换模型训练等<br>D.流水线构建好以后，就是一个转换器（Transformer）<br></code></pre></td></tr></table></figure>

<p><em><strong>8.4 下面关于评估器（Estimator）的描述错误的是：</strong></em> <code>C</code></p>
<figure class="highlight scss"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs scss"><span class="hljs-selector-tag">A</span>.评估器是学习算法或在训练数据上的训练方法的概念抽象<br><span class="hljs-selector-tag">B</span>.在机器学习流水线里，评估器通常是被用来操作 DataFrame数据并生成一个转换器<br>C.评估器实现了方法<span class="hljs-built_in">transfrom</span>()，它接受一个DataFrame并产生一个转换器<br>D.评估器实现了方法<span class="hljs-built_in">fit</span>()，它接受一个DataFrame并产生一个转换器<br></code></pre></td></tr></table></figure>
<p><em><strong>8.5 下面关于转换器（Transformer）的描述错误的是：</strong></em> <code>B</code></p>
<figure class="highlight scss"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs scss"><span class="hljs-selector-tag">A</span>.转换器是一种可以将一个DataFrame转换为另一个DataFrame的算法<br><span class="hljs-selector-tag">B</span>.技术上，转换器实现了一个方法<span class="hljs-built_in">fit</span>()，它通过附加一个或多个列，将一个DataFrame转换为另一个DataFrame<br>C.一个模型就是一个转换器，它把一个不包含预测标签的测试数据集DataFrame打上标签，转化成另一个包含预测标签的 DataFrame<br>D.技术上，转换器实现了一个方法<span class="hljs-attribute">transform</span>()，它通过附加一个或多个列，将一个DataFrame转换为另一个DataFrame<br></code></pre></td></tr></table></figure>
<p><em><strong>8.6 下面的论述中，正确的是：</strong></em> <code>AB</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span>.传统的机器学习算法，由于技术和单机存储的限制，大多只能在少量数据上使用<br><span class="hljs-selector-tag">B</span>.利用MapReduce框架在全量数据上进行机器学习，这在一定程度上解决了统计随机性的问题，提高了机器学习的精度<br>C<span class="hljs-selector-class">.MapReduce</span>可以高效支持迭代计算<br>D<span class="hljs-selector-class">.Spark</span>无法高效支持迭代计算<br></code></pre></td></tr></table></figure>
<p><em><strong>8.7 下面关于Spark MLlib库的描述正确的是：</strong></em> <code>AC</code></p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs stylus">‍A.MLlib库从<span class="hljs-number">1.2</span>版本以后分为两个包：spark.mllib和spark<span class="hljs-selector-class">.ml</span><br>B<span class="hljs-selector-class">.spark</span>.mllib包含基于DataFrame的原始算法API<br>C<span class="hljs-selector-class">.spark</span>.mllib包含基于RDD的原始算法API<br>D<span class="hljs-selector-class">.spark</span>.ml则提供了基于RDD的、高层次的API<br></code></pre></td></tr></table></figure>

<p><em><strong>8.8下面论述中正确的是：</strong></em> <code>ABC</code></p>
<figure class="highlight css"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><code class="hljs css"><span class="hljs-selector-tag">A</span><span class="hljs-selector-class">.DataFrame</span>可容纳各种数据类型，与RDD数据集相比，它包含了模式（schema）信息，类似于传统数据库中的二维表格<br><span class="hljs-selector-tag">B</span>.流水线用DataFrame来存储源数据<br>C.转换器（Transformer）是一种可以将一个DataFrame转换为另一个DataFrame的算法<br>D.评估器（Estimator）是一种可以将一个DataFrame转换为另一个DataFrame的算法<br></code></pre></td></tr></table></figure>


                
              </div>
            
            <hr/>
            <div>
              <div class="post-metas my-3">
  
    <div class="post-meta mr-3 d-flex align-items-center">
      <i class="iconfont icon-category"></i>
      

<span class="category-chains">
  
  
    
      <span class="category-chain">
        
  <a href="/categories/%E5%A4%A7%E6%95%B0%E6%8D%AE/" class="category-chain-item">大数据</a>
  
  

      </span>
    
  
</span>

    </div>
  
  
    <div class="post-meta">
      <i class="iconfont icon-tags"></i>
      
        <a href="/tags/Spark/">#Spark</a>
      
    </div>
  
</div>


              
  

  <div class="license-box my-3">
    <div class="license-title">
      <div>Spark Scala版本 _ 选择题汇总</div>
      <div>http://example.com/2022/01/03/Spark Scala版本 _ 选择题汇总/</div>
    </div>
    <div class="license-meta">
      
        <div class="license-meta-item">
          <div>作者</div>
          <div>John Doe</div>
        </div>
      
      
        <div class="license-meta-item license-meta-date">
          <div>发布于</div>
          <div>2022年1月3日</div>
        </div>
      
      
      <div class="license-meta-item">
        <div>许可协议</div>
        <div>
          
            
            
              <a target="_blank" href="https://creativecommons.org/licenses/by/4.0/">
              <span class="hint--top hint--rounded" aria-label="BY - 署名">
                <i class="iconfont icon-by"></i>
              </span>
              </a>
            
          
        </div>
      </div>
    </div>
    <div class="license-icon iconfont"></div>
  </div>



              
                <div class="post-prevnext my-3">
                  <article class="post-prev col-6">
                    
                    
                      <a href="/2022/01/06/SpringMVC%20%E7%AC%94%E8%AE%B03%20%E6%8B%A6%E6%88%AA%E5%99%A8%E7%9A%84%E7%AE%80%E5%8D%95%E9%85%8D%E7%BD%AE%E4%B8%8E%E6%B5%8B%E8%AF%95/" title="SpringMVC 笔记3 拦截器的简单配置与测试">
                        <i class="iconfont icon-arrowleft"></i>
                        <span class="hidden-mobile">SpringMVC 笔记3 拦截器的简单配置与测试</span>
                        <span class="visible-mobile">上一篇</span>
                      </a>
                    
                  </article>
                  <article class="post-next col-6">
                    
                    
                      <a href="/2022/01/02/Android%20_%20Handler%E5%A4%9A%E7%BA%BF%E7%A8%8B%E6%B6%88%E6%81%AF%E4%BC%A0%E9%80%92%E6%9C%BA%E5%88%B6%20_%20Bundle%E5%B0%81%E8%A3%85%E6%95%B0%E6%8D%AE%20_%20Message%E6%B6%88%E6%81%AF%E5%8F%91%E9%80%81%20+Demo/" title="Android Handler多线程消息传递机制 _ Bundle封装数据 _ Message消息发送 +Demo">
                        <span class="hidden-mobile">Android Handler多线程消息传递机制 _ Bundle封装数据 _ Message消息发送 +Demo</span>
                        <span class="visible-mobile">下一篇</span>
                        <i class="iconfont icon-arrowright"></i>
                      </a>
                    
                  </article>
                </div>
              
            </div>

            
          </article>
        </div>
      </div>
    </div>

    <div class="side-col d-none d-lg-block col-lg-2">
      
  <aside class="sidebar" style="margin-left: -1rem">
    <div id="toc">
  <p class="toc-header"><i class="iconfont icon-list"></i>&nbsp;目录</p>
  <div class="toc-body" id="toc-body"></div>
</div>



  </aside>


    </div>
  </div>
</div>





  



  



  



  



  







    

    
      <a id="scroll-top-button" aria-label="TOP" href="#" role="button">
        <i class="iconfont icon-arrowup" aria-hidden="true"></i>
      </a>
    

    
      <div class="modal fade" id="modalSearch" tabindex="-1" role="dialog" aria-labelledby="ModalLabel"
     aria-hidden="true">
  <div class="modal-dialog modal-dialog-scrollable modal-lg" role="document">
    <div class="modal-content">
      <div class="modal-header text-center">
        <h4 class="modal-title w-100 font-weight-bold" id="ModalLabel">搜索</h4>
        <button type="button" id="local-search-close" class="close" data-dismiss="modal" aria-label="Close">
          <span aria-hidden="true">&times;</span>
        </button>
      </div>
      <div class="modal-body mx-3">
        <div class="md-form mb-5">
          <input type="text" id="local-search-input" class="form-control validate">
          <label data-error="x" data-success="v" for="local-search-input">关键词</label>
        </div>
        <div class="list-group" id="local-search-result"></div>
      </div>
    </div>
  </div>
</div>

    

    
  </main>

  <footer>
    <div class="footer-inner">
  
    <div class="footer-content">
       <a href="https://hexo.io" target="_blank" rel="nofollow noopener"><span>Hexo</span></a> <i class="iconfont icon-love"></i> <a href="https://github.com/fluid-dev/hexo-theme-fluid" target="_blank" rel="nofollow noopener"><span>Fluid</span></a> 
    </div>
  
  
  
  
</div>

  </footer>

  <!-- Scripts -->
  
  <script  src="https://lib.baomitu.com/nprogress/0.2.0/nprogress.min.js" ></script>
  <link  rel="stylesheet" href="https://lib.baomitu.com/nprogress/0.2.0/nprogress.min.css" />

  <script>
    NProgress.configure({"showSpinner":false,"trickleSpeed":100})
    NProgress.start()
    window.addEventListener('load', function() {
      NProgress.done();
    })
  </script>


<script  src="https://lib.baomitu.com/jquery/3.6.0/jquery.min.js" ></script>
<script  src="https://lib.baomitu.com/twitter-bootstrap/4.6.1/js/bootstrap.min.js" ></script>
<script  src="/js/events.js" ></script>
<script  src="/js/plugins.js" ></script>


  <script  src="https://lib.baomitu.com/typed.js/2.0.12/typed.min.js" ></script>
  <script>
    (function (window, document) {
      var typing = Fluid.plugins.typing;
      var subtitle = document.getElementById('subtitle');
      if (!subtitle || !typing) {
        return;
      }
      var text = subtitle.getAttribute('data-typed-text');
      
        typing(text);
      
    })(window, document);
  </script>




  
    <script  src="/js/img-lazyload.js" ></script>
  




  
<script>
  Fluid.utils.createScript('https://lib.baomitu.com/tocbot/4.18.2/tocbot.min.js', function() {
    var toc = jQuery('#toc');
    if (toc.length === 0 || !window.tocbot) { return; }
    var boardCtn = jQuery('#board-ctn');
    var boardTop = boardCtn.offset().top;

    window.tocbot.init({
      tocSelector     : '#toc-body',
      contentSelector : '.markdown-body',
      headingSelector : CONFIG.toc.headingSelector || 'h1,h2,h3,h4,h5,h6',
      linkClass       : 'tocbot-link',
      activeLinkClass : 'tocbot-active-link',
      listClass       : 'tocbot-list',
      isCollapsedClass: 'tocbot-is-collapsed',
      collapsibleClass: 'tocbot-is-collapsible',
      collapseDepth   : CONFIG.toc.collapseDepth || 0,
      scrollSmooth    : true,
      headingsOffset  : -boardTop
    });
    if (toc.find('.toc-list-item').length > 0) {
      toc.css('visibility', 'visible');
    }
  });
</script>


  <script src="https://lib.baomitu.com/clipboard.js/2.0.10/clipboard.min.js"></script>

  <script>Fluid.plugins.codeWidget();</script>


  
<script>
  Fluid.utils.createScript('https://lib.baomitu.com/anchor-js/4.3.1/anchor.min.js', function() {
    window.anchors.options = {
      placement: CONFIG.anchorjs.placement,
      visible  : CONFIG.anchorjs.visible
    };
    if (CONFIG.anchorjs.icon) {
      window.anchors.options.icon = CONFIG.anchorjs.icon;
    }
    var el = (CONFIG.anchorjs.element || 'h1,h2,h3,h4,h5,h6').split(',');
    var res = [];
    for (var item of el) {
      res.push('.markdown-body > ' + item.trim());
    }
    if (CONFIG.anchorjs.placement === 'left') {
      window.anchors.options.class = 'anchorjs-link-left';
    }
    window.anchors.add(res.join(', '));
  });
</script>


  
<script>
  Fluid.utils.createScript('https://lib.baomitu.com/fancybox/3.5.7/jquery.fancybox.min.js', function() {
    Fluid.plugins.fancyBox();
  });
</script>


  <script>Fluid.plugins.imageCaption();</script>

  <script  src="/js/local-search.js" ></script>





<!-- 主题的启动项，将它保持在最底部 -->
<!-- the boot of the theme, keep it at the bottom -->
<script  src="/js/boot.js" ></script>


  

  <noscript>
    <div class="noscript-warning">博客在允许 JavaScript 运行的环境下浏览效果更佳</div>
  </noscript>
</body>
</html>
