

<!DOCTYPE html>
<html class="writer-html5" lang="en">
<head>
  <!-- Sphinx / sphinx_rtd_theme generated head: page metadata, theme styles, doc scripts -->
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>mindspore.dataset.engine.datasets_text &mdash; MindSpore master documentation</title>

  <!-- Theme and syntax-highlighting styles; 'type' omitted (text/css is the default) -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/pygments.css">

  <!-- html5shiv: lets IE < 9 recognize and style HTML5 sectioning elements -->
  <!--[if lt IE 9]>
    <script src="../../../../_static/js/html5shiv.min.js"></script>
  <![endif]-->

  <!-- Sphinx runtime options must load before the helper scripts that read them -->
  <script id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
  <script src="../../../../_static/jquery.js"></script>
  <script src="../../../../_static/underscore.js"></script>
  <script src="../../../../_static/doctools.js"></script>
  <script src="../../../../_static/language_data.js"></script>
  <!-- MathJax is independent of DOM/script order, so it loads async -->
  <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  <!-- Theme behavior depends on jQuery above; keep this after it -->
  <script src="../../../../_static/js/theme.js"></script>

  <link rel="index" title="Index" href="../../../../genindex.html">
  <link rel="search" title="Search" href="../../../../search.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../../../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.boost.html">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../../../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../../../index.html" class="icon icon-home"></a> &raquo;</li>
        
          <li><a href="../../../index.html">Module code</a> &raquo;</li>
        
      <li>mindspore.dataset.engine.datasets_text</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <h1>Source code for mindspore.dataset.engine.datasets_text</h1><div class="highlight"><pre>
<span></span><span class="c1"># Copyright 2019-2022 Huawei Technologies Co., Ltd</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1"># http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an &quot;AS IS&quot; BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="c1"># ==============================================================================</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd">This file contains specific text dataset loading classes. You can easily use</span>
<span class="sd">these classes to load the prepared dataset. For example:</span>
<span class="sd">    IMDBDataset: which is the IMDB dataset.</span>
<span class="sd">    WikiTextDataset: which is the Wiki text dataset.</span>
<span class="sd">    CLUEDataset: which is the CLUE dataset.</span>
<span class="sd">    YelpReviewDataset: which is the Yelp review dataset.</span>
<span class="sd">    ...</span>
<span class="sd">After declaring the dataset object, you can further apply dataset operations</span>
<span class="sd">(e.g. filter, skip, concat, map, batch) on it.</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="kn">import</span> <span class="nn">mindspore._c_dataengine</span> <span class="k">as</span> <span class="nn">cde</span>

<span class="kn">from</span> <span class="nn">.datasets</span> <span class="kn">import</span> <span class="n">TextBaseDataset</span><span class="p">,</span> <span class="n">SourceDataset</span><span class="p">,</span> <span class="n">MappableDataset</span><span class="p">,</span> <span class="n">Shuffle</span>
<span class="kn">from</span> <span class="nn">.validators</span> <span class="kn">import</span> <span class="n">check_imdb_dataset</span><span class="p">,</span> <span class="n">check_iwslt2016_dataset</span><span class="p">,</span> <span class="n">check_iwslt2017_dataset</span><span class="p">,</span> \
    <span class="n">check_penn_treebank_dataset</span><span class="p">,</span> <span class="n">check_ag_news_dataset</span><span class="p">,</span> <span class="n">check_amazon_review_dataset</span><span class="p">,</span> <span class="n">check_udpos_dataset</span><span class="p">,</span> \
    <span class="n">check_wiki_text_dataset</span><span class="p">,</span> <span class="n">check_conll2000_dataset</span><span class="p">,</span> <span class="n">check_cluedataset</span><span class="p">,</span> \
    <span class="n">check_sogou_news_dataset</span><span class="p">,</span> <span class="n">check_textfiledataset</span><span class="p">,</span> <span class="n">check_dbpedia_dataset</span><span class="p">,</span> <span class="n">check_yelp_review_dataset</span><span class="p">,</span> \
    <span class="n">check_en_wik9_dataset</span><span class="p">,</span> <span class="n">check_yahoo_answers_dataset</span>

<span class="kn">from</span> <span class="nn">..core.validator_helpers</span> <span class="kn">import</span> <span class="n">replace_none</span>


<div class="viewcode-block" id="AGNewsDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.AGNewsDataset.html#mindspore.dataset.AGNewsDataset">[docs]</a><span class="k">class</span> <span class="nc">AGNewsDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses AG News datasets.</span>

<span class="sd">    The generated dataset has three columns: :py:obj:`[index, title, description]`.</span>
<span class="sd">    The tensor of column :py:obj:`index` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`title` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`description` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Acceptable usages include `train`, `test` and `all` (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; ag_news_dataset_dir = &quot;/path/to/ag_news_dataset_file&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About AGNews dataset:</span>

<span class="sd">    AG is a collection of over 1 million news articles. The news articles were collected</span>
<span class="sd">    by ComeToMyHead from over 2,000 news sources in over 1 year of activity. ComeToMyHead</span>
<span class="sd">    is an academic news search engine that has been in operation since July 2004.</span>
<span class="sd">    The dataset is provided by academics for research purposes such as data mining</span>
<span class="sd">    (clustering, classification, etc.), information retrieval (ranking, searching, etc.),</span>
<span class="sd">    xml, data compression, data streaming, and any other non-commercial activities.</span>
<span class="sd">    AG&#39;s news topic classification dataset was constructed by selecting the four largest</span>
<span class="sd">    classes from the original corpus. Each class contains 30,000 training samples and</span>
<span class="sd">    1,900 test samples. The total number of training samples in train.csv is 120,000</span>
<span class="sd">    and the number of test samples in test.csv is 7,600.</span>

<span class="sd">    You can unzip the dataset files into the following structure and read by MindSpore&#39;s API:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── ag_news_dataset_dir</span>
<span class="sd">            ├── classes.txt</span>
<span class="sd">            ├── train.csv</span>
<span class="sd">            ├── test.csv</span>
<span class="sd">            └── readme.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @misc{zhang2015characterlevel,</span>
<span class="sd">        title={Character-level Convolutional Networks for Text Classification},</span>
<span class="sd">        author={Xiang Zhang and Junbo Zhao and Yann LeCun},</span>
<span class="sd">        year={2015},</span>
<span class="sd">        eprint={1509.01626},</span>
<span class="sd">        archivePrefix={arXiv},</span>
<span class="sd">        primaryClass={cs.LG}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_ag_news_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">AGNewsNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                              <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="AmazonReviewDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.AmazonReviewDataset.html#mindspore.dataset.AmazonReviewDataset">[docs]</a><span class="k">class</span> <span class="nc">AmazonReviewDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses Amazon Review Polarity and Amazon Review Full datasets.</span>

<span class="sd">    The generated dataset has three columns: :py:obj:`[label, title, content]`.</span>
<span class="sd">    The tensor of column :py:obj:`label` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`title` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`content` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the Amazon Review Polarity dataset</span>
<span class="sd">            or the Amazon Review Full dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` (default= `all`).</span>
<span class="sd">            For Polarity dataset, `train` will read from 3,600,000 train samples,</span>
<span class="sd">            `test` will read from 400,000 test samples,</span>
<span class="sd">            `all` will read from all 4,000,000 samples.</span>
<span class="sd">            For Full dataset, `train` will read from 3,000,000 train samples,</span>
<span class="sd">            `test` will read from 650,000 test samples,</span>
<span class="sd">            `all` will read from all 3,650,000 samples.</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to be read (default=None, reads the full dataset).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the mindspore.dataset.config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; amazon_review_dataset_dir = &quot;/path/to/amazon_review_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.AmazonReviewDataset(dataset_dir=amazon_review_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About AmazonReview Dataset:</span>

<span class="sd">    The Amazon reviews full dataset consists of reviews from Amazon. The data span a period of 18 years, including ~35</span>
<span class="sd">    million reviews up to March 2013. Reviews include product and user information, ratings, and a plaintext review.</span>
<span class="sd">    The dataset is mainly used for text classification, given the content and title, predict the correct star rating.</span>

<span class="sd">    The Amazon reviews polarity dataset is constructed by taking review score 1 and 2 as negative, 4 and 5 as positive.</span>
<span class="sd">    Samples of score 3 are ignored. In the dataset, class 1 is the negative and class 2 is the positive.</span>

<span class="sd">    The Amazon Reviews Polarity and Amazon Reviews Full datasets have the same directory structures.</span>
<span class="sd">    You can unzip the dataset files into the following structure and read by MindSpore&#39;s API:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── amazon_review_dir</span>
<span class="sd">             ├── train.csv</span>
<span class="sd">             ├── test.csv</span>
<span class="sd">             └── readme.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{zhang2015character,</span>
<span class="sd">          title={Character-level convolutional networks for text classification},</span>
<span class="sd">          author={Zhang, Xiang and Zhao, Junbo and LeCun, Yann},</span>
<span class="sd">          journal={Advances in neural information processing systems},</span>
<span class="sd">          volume={28},</span>
<span class="sd">          pages={649--657},</span>
<span class="sd">          year={2015}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_amazon_review_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">AmazonReviewNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                                    <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<span class="k">class</span> <span class="nc">CLUEDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses CLUE datasets.</span>
<span class="sd">    Supported CLUE classification tasks: `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for</span>
<span class="sd">            a pattern of files. The list will be sorted in a lexicographical order.</span>
<span class="sd">        task (str, optional): The kind of task, one of `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.</span>
<span class="sd">            (default=AFQMC).</span>
<span class="sd">        usage (str, optional): Specify the `train`, `test` or `eval` part of dataset (default=&quot;train&quot;).</span>
<span class="sd">        num_samples (int, optional): The number of samples to be included in the dataset</span>
<span class="sd">            (default=None, will include all images).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Note:</span>
<span class="sd">        The generated dataset with different task setting has different output columns:</span>

<span class="sd">        - task = :py:obj:`AFQMC`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>

<span class="sd">        - task = :py:obj:`TNEWS`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[label_desc, dtype=string]`, :py:obj:`[sentence, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[keywords, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[keywords, dtype=string]`, :py:obj:`[sentence, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[label_desc, dtype=string]`, :py:obj:`[sentence, dtype=string]`,\</span>
<span class="sd">                :py:obj:`[keywords, dtype=string]`.</span>

<span class="sd">        - task = :py:obj:`IFLYTEK`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[sentence, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.</span>

<span class="sd">        - task = :py:obj:`CMNLI`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>

<span class="sd">        - task = :py:obj:`WSC`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[span1_index, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[span2_index, dtype=uint32]`, :py:obj:`[span1_text, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[span1_index, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[span2_index, dtype=uint32]`, :py:obj:`[span1_text, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint32]`, :py:obj:`[text, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[span1_index, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[span2_index, dtype=uint32]`, :py:obj:`[span1_text, dtype=string]`, \</span>
<span class="sd">                :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>

<span class="sd">        - task = :py:obj:`CSL`</span>
<span class="sd">            - usage = :py:obj:`train`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`.</span>
<span class="sd">            - usage = :py:obj:`eval`, output columns: :py:obj:`[id, dtype=uint32]`, \</span>
<span class="sd">                :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If dataset_files are not valid or do not exist.</span>
<span class="sd">        ValueError: If task is not in &#39;AFQMC&#39;, &#39;TNEWS&#39;, &#39;IFLYTEK&#39;, &#39;CMNLI&#39;, &#39;WSC&#39; or &#39;CSL&#39;.</span>
<span class="sd">        ValueError: If usage is not in &#39;train&#39;, &#39;test&#39; or &#39;eval&#39;.</span>
<span class="sd">        ValueError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; clue_dataset_dir = [&quot;/path/to/clue_dataset_file&quot;] # contains 1 or multiple clue files</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task=&#39;AFQMC&#39;, usage=&#39;train&#39;)</span>

<span class="sd">    About CLUE dataset:</span>

<span class="sd">    CLUE, a Chinese Language Understanding Evaluation benchmark. It contains multiple</span>
<span class="sd">    tasks, including single-sentence classification, sentence pair classification, and machine</span>
<span class="sd">    reading comprehension.</span>

<span class="sd">    You can unzip the dataset files into the following structure and read by MindSpore&#39;s API,</span>
<span class="sd">    such as afqmc dataset:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── afqmc_public</span>
<span class="sd">             ├── train.json</span>
<span class="sd">             ├── test.json</span>
<span class="sd">             └── dev.json</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{CLUEbenchmark,</span>
<span class="sd">        title   = {CLUE: A Chinese Language Understanding Evaluation Benchmark},</span>
<span class="sd">        author  = {Liang Xu, Xuanwei Zhang, Lu Li, Hai Hu, Chenjie Cao, Weitang Liu, Junyi Li, Yudong Li,</span>
<span class="sd">                Kai Sun, Yechen Xu, Yiming Cui, Cong Yu, Qianqian Dong, Yin Tian, Dian Yu, Bo Shi, Jun Zeng,</span>
<span class="sd">                Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou,</span>
<span class="sd">                Shaoweihua Liu, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Zhenzhong Lan},</span>
<span class="sd">        journal = {arXiv preprint arXiv:2004.05986},</span>
<span class="sd">        year    = {2020},</span>
<span class="sd">        howpublished = {https://github.com/CLUEbenchmark/CLUE}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_cluedataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_files</span><span class="p">,</span> <span class="n">task</span><span class="o">=</span><span class="s1">&#39;AFQMC&#39;</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="s1">&#39;train&#39;</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_files</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_find_files</span><span class="p">(</span><span class="n">dataset_files</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;train&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">task</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">task</span><span class="p">,</span> <span class="s1">&#39;AFQMC&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">CLUENode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_files</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">task</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span>
                            <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span>


<div class="viewcode-block" id="CoNLL2000Dataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.CoNLL2000Dataset.html#mindspore.dataset.CoNLL2000Dataset">[docs]</a><span class="k">class</span> <span class="nc">CoNLL2000Dataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses CoNLL2000 dataset.</span>

<span class="sd">    The generated dataset has three columns: :py:obj:`[word, pos_tag, chunk_tag]`.</span>
<span class="sd">    The tensor of column :py:obj:`word` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`pos_tag` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`chunk_tag` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from</span>
<span class="sd">            8,936 train samples, `test` will read from 2,012 test samples,</span>
<span class="sd">            `all` will read from all 10,948 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; conll2000_dataset_dir = &quot;/path/to/conll2000_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.CoNLL2000Dataset(dataset_files=conll2000_dataset_dir, usage=&#39;all&#39;)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_conll2000_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">CoNLL2000Node</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="DBpediaDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.DBpediaDataset.html#mindspore.dataset.DBpediaDataset">[docs]</a><span class="k">class</span> <span class="nc">DBpediaDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses the DBpedia dataset.</span>

<span class="sd">    The generated dataset has three columns :py:obj:`[class, title, content]`.</span>
<span class="sd">    The tensor of column :py:obj:`class` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`title` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`content` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.</span>
<span class="sd">            `train` will read from 560,000 train samples,</span>
<span class="sd">            `test` will read from 70,000 test samples,</span>
<span class="sd">            `all` will read from all 630,000 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): The number of samples to be included in the dataset</span>
<span class="sd">            (default=None, will include all text).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; dbpedia_dataset_dir = &quot;/path/to/dbpedia_dataset_directory&quot;</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 1) Read 3 samples from DBpedia dataset</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, num_samples=3)</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 2) Read train samples from DBpedia dataset</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, usage=&quot;train&quot;)</span>

<span class="sd">    About DBpedia dataset:</span>

<span class="sd">    The DBpedia dataset consists of 630,000 text samples in 14 classes, there are 560,000 samples in the train.csv</span>
<span class="sd">    and 70,000 samples in the test.csv.</span>
<span class="sd">    The 14 different classes represent Company, EducationalInstitution, Artist, Athlete, OfficeHolder,</span>
<span class="sd">    MeanOfTransportation, Building, NaturalPlace, Village, Animal, Plant, Album, Film, WrittenWork.</span>

<span class="sd">    Here is the original DBpedia dataset structure.</span>
<span class="sd">    You can unzip the dataset files into this directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── dbpedia_dataset_dir</span>
<span class="sd">            ├── train.csv</span>
<span class="sd">            ├── test.csv</span>
<span class="sd">            ├── classes.txt</span>
<span class="sd">            └── readme.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{DBpedia,</span>
<span class="sd">        title   = {DBPedia Ontology Classification Dataset},</span>
<span class="sd">        author  = {Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas,</span>
<span class="sd">                Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef,</span>
<span class="sd">                    Sören Auer, Christian Bizer},</span>
<span class="sd">        year    = {2015},</span>
<span class="sd">        howpublished = {http://dbpedia.org}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_dbpedia_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">DBpediaNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                               <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="EnWik9Dataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.EnWik9Dataset.html#mindspore.dataset.EnWik9Dataset">[docs]</a><span class="k">class</span> <span class="nc">EnWik9Dataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses EnWik9 dataset.</span>

<span class="sd">    The generated dataset has one column :py:obj:`[text]` with type string.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        num_samples (int, optional): The number of samples to be included in the dataset</span>
<span class="sd">            (default=None, will include all samples).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=True).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; en_wik9_dataset_dir = &quot;/path/to/en_wik9_dataset&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset2 = ds.EnWik9Dataset(dataset_dir=en_wik9_dataset_dir, num_samples=2,</span>
<span class="sd">        ...                             shuffle=True)</span>

<span class="sd">    About EnWik9 dataset:</span>

<span class="sd">    The data of EnWik9 is UTF-8 encoded XML consisting primarily of English text. It contains 243,426 article titles,</span>
<span class="sd">    of which 85,560 are #REDIRECT to fix broken links, and the rest are regular articles.</span>

<span class="sd">    The data is UTF-8 clean. All characters are in the range U&#39;0000 to U&#39;10FFFF with valid encodings of 1 to</span>
<span class="sd">    4 bytes. The byte values 0xC0, 0xC1, and 0xF5-0xFF never occur. Also, in the Wikipedia dumps,</span>
<span class="sd">    there are no control characters in the range 0x00-0x1F except for 0x09 (tab) and 0x0A (linefeed).</span>
<span class="sd">    Linebreaks occur only on paragraph boundaries, so they always have a semantic purpose.</span>

<span class="sd">    You can unzip the dataset files into the following directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── EnWik9</span>
<span class="sd">             ├── enwik9</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @NetworkResource{Hutter_prize,</span>
<span class="sd">        author    = {English Wikipedia},</span>
<span class="sd">        url       = &quot;https://cs.fit.edu/~mmahoney/compression/textdata.html&quot;,</span>
<span class="sd">        month     = {March},</span>
<span class="sd">        year      = {2006}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_en_wik9_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">EnWik9Node</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                              <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="IMDBDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.IMDBDataset.html#mindspore.dataset.IMDBDataset">[docs]</a><span class="k">class</span> <span class="nc">IMDBDataset</span><span class="p">(</span><span class="n">MappableDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses Internet Movie Database (IMDb).</span>

<span class="sd">    The generated dataset has two columns: :py:obj:`[text, label]`.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`label` is of a scalar of uint32 type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`</span>
<span class="sd">            (default=None, will read all samples).</span>
<span class="sd">        num_samples (int, optional): The number of images to be included in the dataset</span>
<span class="sd">            (default=None, will read all samples).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, set in the config).</span>
<span class="sd">        shuffle (bool, optional): Whether or not to perform shuffle on the dataset</span>
<span class="sd">            (default=None, expected order behavior shown in the table).</span>
<span class="sd">        sampler (Sampler, optional): Object used to choose samples from the</span>
<span class="sd">            dataset (default=None, expected order behavior shown in the table).</span>
<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided</span>
<span class="sd">            into (default=None). When this argument is specified, `num_samples` reflects</span>
<span class="sd">            the maximum sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If sampler and shuffle are specified at the same time.</span>
<span class="sd">        RuntimeError: If sampler and sharding are specified at the same time.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Note:</span>
<span class="sd">        - The shape of the text column is variable.</span>
<span class="sd">        - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.</span>
<span class="sd">          The table below shows what input arguments are allowed and their expected behavior.</span>

<span class="sd">    .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`</span>
<span class="sd">       :widths: 25 25 50</span>
<span class="sd">       :header-rows: 1</span>

<span class="sd">       * - Parameter `sampler`</span>
<span class="sd">         - Parameter `shuffle`</span>
<span class="sd">         - Expected Order Behavior</span>
<span class="sd">       * - None</span>
<span class="sd">         - None</span>
<span class="sd">         - random order</span>
<span class="sd">       * - None</span>
<span class="sd">         - True</span>
<span class="sd">         - random order</span>
<span class="sd">       * - None</span>
<span class="sd">         - False</span>
<span class="sd">         - sequential order</span>
<span class="sd">       * - Sampler object</span>
<span class="sd">         - None</span>
<span class="sd">         - order defined by sampler</span>
<span class="sd">       * - Sampler object</span>
<span class="sd">         - True</span>
<span class="sd">         - not allowed</span>
<span class="sd">       * - Sampler object</span>
<span class="sd">         - False</span>
<span class="sd">         - not allowed</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; imdb_dataset_dir = &quot;/path/to/imdb_dataset_directory&quot;</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 1) Read all samples (text files) in imdb_dataset_dir with 8 threads</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.IMDBDataset(dataset_dir=imdb_dataset_dir, num_parallel_workers=8)</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 2) Read train samples (text files).</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.IMDBDataset(dataset_dir=imdb_dataset_dir, usage=&quot;train&quot;)</span>

<span class="sd">    About IMDBDataset:</span>

<span class="sd">    The IMDB dataset contains 50,000 highly polarized reviews from the Internet Movie Database (IMDB). The dataset</span>
<span class="sd">    was divided into 25,000 comments for training and 25,000 comments for testing, with both the training set and test</span>
<span class="sd">    set containing 50% positive and 50% negative comments. Train labels and test labels are all lists of 0 and 1, where</span>
<span class="sd">    0 stands for negative and 1 for positive.</span>

<span class="sd">    You can unzip the dataset files into this directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── imdb_dataset_directory</span>
<span class="sd">             ├── train</span>
<span class="sd">             │    ├── pos</span>
<span class="sd">             │    │    ├── 0_9.txt</span>
<span class="sd">             │    │    ├── 1_7.txt</span>
<span class="sd">             │    │    ├── ...</span>
<span class="sd">             │    ├── neg</span>
<span class="sd">             │    │    ├── 0_3.txt</span>
<span class="sd">             │    │    ├── 1_1.txt</span>
<span class="sd">             │    │    ├── ...</span>
<span class="sd">             ├── test</span>
<span class="sd">             │    ├── pos</span>
<span class="sd">             │    │    ├── 0_10.txt</span>
<span class="sd">             │    │    ├── 1_10.txt</span>
<span class="sd">             │    │    ├── ...</span>
<span class="sd">             │    ├── neg</span>
<span class="sd">             │    │    ├── 0_2.txt</span>
<span class="sd">             │    │    ├── 1_3.txt</span>
<span class="sd">             │    │    ├── ...</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @InProceedings{maas-EtAl:2011:ACL-HLT2011,</span>
<span class="sd">          author    = {Maas, Andrew L.  and  Daly, Raymond E.  and  Pham, Peter T.  and  Huang, Dan</span>
<span class="sd">                        and  Ng, Andrew Y.  and  Potts, Christopher},</span>
<span class="sd">          title     = {Learning Word Vectors for Sentiment Analysis},</span>
<span class="sd">          booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics:</span>
<span class="sd">                        Human Language Technologies},</span>
<span class="sd">          month     = {June},</span>
<span class="sd">          year      = {2011},</span>
<span class="sd">          address   = {Portland, Oregon, USA},</span>
<span class="sd">          publisher = {Association for Computational Linguistics},</span>
<span class="sd">          pages     = {142--150},</span>
<span class="sd">          url       = {http://www.aclweb.org/anthology/P11-1015}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_imdb_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">sampler</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">sampler</span><span class="o">=</span><span class="n">sampler</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span>
                         <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">IMDBNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">sampler</span><span class="p">)</span></div>


<div class="viewcode-block" id="IWSLT2016Dataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.IWSLT2016Dataset.html#mindspore.dataset.IWSLT2016Dataset">[docs]</a><span class="k">class</span> <span class="nc">IWSLT2016Dataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses IWSLT2016 datasets.</span>

<span class="sd">    The generated dataset has two columns: :py:obj:`[text, translation]`.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`translation` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Acceptable usages include &quot;train&quot;, &quot;valid&quot;, &quot;test&quot; and &quot;all&quot; (default=None, all samples).</span>
<span class="sd">        language_pair (sequence, optional): Sequence containing source and target language, supported values are</span>
<span class="sd">            (`en`, `fr`), (&quot;en&quot;, &quot;de&quot;), (&quot;en&quot;, &quot;cs&quot;), (&quot;en&quot;, &quot;ar&quot;), (&quot;fr&quot;, &quot;en&quot;), (&quot;de&quot;, &quot;en&quot;), (&quot;cs&quot;, &quot;en&quot;),</span>
<span class="sd">            (&quot;ar&quot;, &quot;en&quot;) (default=(&quot;de&quot;, &quot;en&quot;)).</span>
<span class="sd">        valid_set (str, optional): A string to identify validation set, when usage is valid or all, the validation set</span>
<span class="sd">            of valid_set type will be read, supported values are &quot;dev2010&quot;, &quot;tst2010&quot;, &quot;tst2011&quot;, &quot;tst2012&quot;, &quot;tst2013&quot;</span>
<span class="sd">            and &quot;tst2014&quot; (default=&quot;tst2013&quot;).</span>
<span class="sd">        test_set (str, optional): A string to identify test set, when usage is test or all, the test set of test_set</span>
<span class="sd">            type will be read, supported values are &quot;dev2010&quot;, &quot;tst2010&quot;, &quot;tst2011&quot;, &quot;tst2012&quot;, &quot;tst2013&quot; and &quot;tst2014&quot;</span>
<span class="sd">            (default=&quot;tst2014&quot;).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are two levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>
<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; iwslt2016_dataset_dir = &quot;/path/to/iwslt2016_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.IWSLT2016Dataset(dataset_dir=iwslt2016_dataset_dir, usage=&#39;all&#39;,</span>
<span class="sd">        ...                               language_pair=(&#39;de&#39;, &#39;en&#39;), valid_set=&#39;tst2013&#39;, test_set=&#39;tst2014&#39;)</span>

<span class="sd">    About IWSLT2016 dataset:</span>

<span class="sd">    IWSLT is an international oral translation conference, a major annual scientific conference dedicated to all aspects</span>
<span class="sd">    of oral translation. The MT task of the IWSLT evaluation activity constitutes a dataset, which can be publicly</span>
<span class="sd">    obtained through the WIT3 website wit3.fbk.eu. The IWSLT2016 dataset includes translations from English to Arabic,</span>
<span class="sd">    Czech, French, and German, and translations from Arabic, Czech, French, and German to English.</span>

<span class="sd">    You can unzip the original IWSLT2016 dataset files into this directory structure and read by MindSpore&#39;s API. After</span>
<span class="sd">    decompression, you also need to decompress the dataset to be read in the specified folder. For example, if you want</span>
<span class="sd">    to read the dataset of de-en, you need to unzip the tgz file in the de/en directory, the dataset is in the</span>
<span class="sd">    unzipped folder.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── iwslt2016_dataset_directory</span>
<span class="sd">             ├── subeval_files</span>
<span class="sd">             └── texts</span>
<span class="sd">                  ├── ar</span>
<span class="sd">                  │    └── en</span>
<span class="sd">                  │        └── ar-en</span>
<span class="sd">                  ├── cs</span>
<span class="sd">                  │    └── en</span>
<span class="sd">                  │        └── cs-en</span>
<span class="sd">                  ├── de</span>
<span class="sd">                  │    └── en</span>
<span class="sd">                  │        └── de-en</span>
<span class="sd">                  │            ├── IWSLT16.TED.dev2010.de-en.de.xml</span>
<span class="sd">                  │            ├── train.tags.de-en.de</span>
<span class="sd">                  │            ├── ...</span>
<span class="sd">                  ├── en</span>
<span class="sd">                  │    ├── ar</span>
<span class="sd">                  │    │   └── en-ar</span>
<span class="sd">                  │    ├── cs</span>
<span class="sd">                  │    │   └── en-cs</span>
<span class="sd">                  │    ├── de</span>
<span class="sd">                  │    │   └── en-de</span>
<span class="sd">                  │    └── fr</span>
<span class="sd">                  │        └── en-fr</span>
<span class="sd">                  └── fr</span>
<span class="sd">                       └── en</span>
<span class="sd">                           └── fr-en</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @inproceedings{cettoloEtAl:EAMT2012,</span>
<span class="sd">        Address = {Trento, Italy},</span>
<span class="sd">        Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},</span>
<span class="sd">        Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation</span>
<span class="sd">                     (EAMT)},</span>
<span class="sd">        Date = {28-30},</span>
<span class="sd">        Month = {May},</span>
<span class="sd">        Pages = {261--268},</span>
<span class="sd">        Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},</span>
<span class="sd">        Year = {2012}}</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_iwslt2016_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">language_pair</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">valid_set</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">test_set</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">language_pair</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">language_pair</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;de&quot;</span><span class="p">,</span> <span class="s2">&quot;en&quot;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">valid_set</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">valid_set</span><span class="p">,</span> <span class="s1">&#39;tst2013&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">test_set</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">test_set</span><span class="p">,</span> <span class="s1">&#39;tst2014&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">IWSLT2016Node</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">language_pair</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">valid_set</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">test_set</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="IWSLT2017Dataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.IWSLT2017Dataset.html#mindspore.dataset.IWSLT2017Dataset">[docs]</a><span class="k">class</span> <span class="nc">IWSLT2017Dataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses IWSLT2017 datasets.</span>

<span class="sd">    The generated dataset has two columns: :py:obj:`[text, translation]`.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`translation` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Acceptable usages include &quot;train&quot;, &quot;valid&quot;, &quot;test&quot; and &quot;all&quot; (default=None, all samples).</span>
<span class="sd">        language_pair (list, optional): List containing src and tgt language, supported values are (&quot;en&quot;, &quot;nl&quot;),</span>
<span class="sd">            (&quot;en&quot;, &quot;de&quot;), (&quot;en&quot;, &quot;it&quot;), (&quot;en&quot;, &quot;ro&quot;), (&quot;nl&quot;, &quot;en&quot;), (&quot;nl&quot;, &quot;de&quot;), (&quot;nl&quot;, &quot;it&quot;), (&quot;nl&quot;, &quot;ro&quot;),</span>
<span class="sd">            (&quot;de&quot;, &quot;en&quot;), (&quot;de&quot;, &quot;nl&quot;), (&quot;de&quot;, &quot;it&quot;), (&quot;de&quot;, &quot;ro&quot;), (&quot;it&quot;, &quot;en&quot;), (&quot;it&quot;, &quot;nl&quot;), (&quot;it&quot;, &quot;de&quot;),</span>
<span class="sd">            (&quot;it&quot;, &quot;ro&quot;), (&quot;ro&quot;, &quot;en&quot;), (&quot;ro&quot;, &quot;nl&quot;), (&quot;ro&quot;, &quot;de&quot;), (&quot;ro&quot;, &quot;it&quot;) (default=(&quot;de&quot;, &quot;en&quot;)).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>
<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; iwslt2017_dataset_dir = &quot;/path/to/iwslt2017_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.IWSLT2017Dataset(dataset_dir=iwslt2017_dataset_dir, usage=&#39;all&#39;, language_pair=(&#39;de&#39;, &#39;en&#39;))</span>

<span class="sd">    About IWSLT2017 dataset:</span>

<span class="sd">    IWSLT is an international oral translation conference, a major annual scientific conference dedicated to all aspects</span>
<span class="sd">    of oral translation. The MT task of the IWSLT evaluation activity constitutes a dataset, which can be publicly</span>
<span class="sd">    obtained through the WIT3 website wit3.fbk.eu. The IWSLT2017 dataset involves German, English, Italian, Dutch, and</span>
<span class="sd">    Romanian. The dataset includes translations in any two different languages.</span>

<span class="sd">    You can unzip the original IWSLT2017 dataset files into this directory structure and read by MindSpore&#39;s API. You</span>
<span class="sd">    need to decompress the dataset package in texts/DeEnItNlRo/DeEnItNlRo directory to get the DeEnItNlRo-DeEnItNlRo</span>
<span class="sd">    subdirectory.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── iwslt2017_dataset_directory</span>
<span class="sd">            └── DeEnItNlRo</span>
<span class="sd">                └── DeEnItNlRo</span>
<span class="sd">                    └── DeEnItNlRo-DeEnItNlRo</span>
<span class="sd">                        ├── IWSLT17.TED.dev2010.de-en.de.xml</span>
<span class="sd">                        ├── train.tags.de-en.de</span>
<span class="sd">                        ├── ...</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @inproceedings{cettoloEtAl:EAMT2012,</span>
<span class="sd">        Address = {Trento, Italy},</span>
<span class="sd">        Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},</span>
<span class="sd">        Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation</span>
<span class="sd">                     (EAMT)},</span>
<span class="sd">        Date = {28-30},</span>
<span class="sd">        Month = {May},</span>
<span class="sd">        Pages = {261--268},</span>
<span class="sd">        Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},</span>
<span class="sd">        Year = {2012}}</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_iwslt2017_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">language_pair</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">language_pair</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">language_pair</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;de&quot;</span><span class="p">,</span> <span class="s2">&quot;en&quot;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">IWSLT2017Node</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">language_pair</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="PennTreebankDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.PennTreebankDataset.html#mindspore.dataset.PennTreebankDataset">[docs]</a><span class="k">class</span> <span class="nc">PennTreebankDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses PennTreebank datasets.</span>

<span class="sd">    The generated dataset has one column :py:obj:`[text]`.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Acceptable usages include &#39;train&#39;, &#39;test&#39;, &#39;valid&#39; and &#39;all&#39;.</span>
<span class="sd">            &#39;train&#39; will read from 42,068 train samples of string type,</span>
<span class="sd">            &#39;test&#39; will read from 3,370 test samples of string type,</span>
<span class="sd">            &#39;valid&#39; will read from 3,761 valid samples of string type,</span>
<span class="sd">            &#39;all&#39; will read from all 49,199 samples of string type (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, &#39;num_samples&#39; reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; penn_treebank_dataset_dir = &quot;/path/to/penn_treebank_dataset_directory&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.PennTreebankDataset(dataset_dir=penn_treebank_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About PennTreebank dataset:</span>

<span class="sd">    Penn Treebank (PTB) dataset, is widely used in machine learning for NLP (Natural Language Processing)</span>
<span class="sd">    research. Word-level PTB does not contain capital letters, numbers, and punctuations, and the vocabulary</span>
<span class="sd">    is capped at 10k unique words, which is relatively small in comparison to most modern datasets which</span>
<span class="sd">    can result in a larger number of out of vocabulary tokens.</span>

<span class="sd">    Here is the original PennTreebank dataset structure.</span>
<span class="sd">    You can unzip the dataset files into this directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── PennTreebank_dataset_dir</span>
<span class="sd">             ├── ptb.test.txt</span>
<span class="sd">             ├── ptb.train.txt</span>
<span class="sd">             └── ptb.valid.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @techreport{Santorini1990,</span>
<span class="sd">          added-at = {2014-03-26T23:25:56.000+0100},</span>
<span class="sd">          author = {Santorini, Beatrice},</span>
<span class="sd">          biburl = {https://www.bibsonomy.org/bibtex/234cdf6ddadd89376090e7dada2fc18ec/butonic},</span>
<span class="sd">          file = {:Santorini - Penn Treebank tag definitions.pdf:PDF},</span>
<span class="sd">          institution = {Department of Computer and Information Science, University of Pennsylvania},</span>
<span class="sd">          interhash = {818e72efd9e4b5fae3e51e88848100a0},</span>
<span class="sd">          intrahash = {34cdf6ddadd89376090e7dada2fc18ec},</span>
<span class="sd">          keywords = {dis pos tagging treebank},</span>
<span class="sd">          number = {MS-CIS-90-47},</span>
<span class="sd">          timestamp = {2014-03-26T23:25:56.000+0100},</span>
<span class="sd">          title = {Part-of-speech tagging guidelines for the {P}enn {T}reebank {P}roject},</span>
<span class="sd">          url = {ftp://ftp.cis.upenn.edu/pub/treebank/doc/tagguide.ps.gz},</span>
<span class="sd">          year = 1990</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_penn_treebank_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">PennTreebankNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                                    <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="SogouNewsDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.SogouNewsDataset.html#mindspore.dataset.SogouNewsDataset">[docs]</a><span class="k">class</span> <span class="nc">SogouNewsDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses Sogou News dataset.</span>

<span class="sd">    The generated dataset has three columns: :py:obj:`[index, title, content]`.</span>
<span class="sd">    The tensor of column :py:obj:`index` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`title` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`content` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.</span>
<span class="sd">            `train` will read from 450,000 train samples, `test` will read from 60,000 test samples,</span>
<span class="sd">            `all` will read from all 510,000 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, read all samples).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>
<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; sogou_news_dataset_dir = &quot;/path/to/sogou_news_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.SogouNewsDataset(dataset_dir=sogou_news_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About SogouNews Dataset:</span>

<span class="sd">    SogouNews dataset includes 3 columns, corresponding to class index (1 to 5), title and content. The title and</span>
<span class="sd">    content are escaped using double quotes (&quot;), and any internal double quote is escaped by 2 double quotes (&quot;&quot;).</span>
<span class="sd">    New lines are escaped by a backslash followed with an &quot;n&quot; character, that is &quot;\n&quot;.</span>

<span class="sd">    You can unzip the dataset files into the following structure and read by MindSpore&#39;s API:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── sogou_news_dir</span>
<span class="sd">             ├── classes.txt</span>
<span class="sd">             ├── readme.txt</span>
<span class="sd">             ├── test.csv</span>
<span class="sd">             └── train.csv</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @misc{zhang2015characterlevel,</span>
<span class="sd">            title={Character-level Convolutional Networks for Text Classification},</span>
<span class="sd">            author={Xiang Zhang and Junbo Zhao and Yann LeCun},</span>
<span class="sd">            year={2015},</span>
<span class="sd">            eprint={1509.01626},</span>
<span class="sd">            archivePrefix={arXiv},</span>
<span class="sd">            primaryClass={cs.LG}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_sogou_news_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">SogouNewsNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<span class="k">class</span> <span class="nc">TextFileDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses datasets stored on disk in text format.</span>
<span class="sd">    The generated dataset has one column :py:obj:`[text]` with type string.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a</span>
<span class="sd">            pattern of files. The list will be sorted in a lexicographical order.</span>
<span class="sd">        num_samples (int, optional): The number of samples to be included in the dataset</span>
<span class="sd">            (default=None, will include all images).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed.</span>
<span class="sd">            If shuffle is True, performs global shuffle.</span>
<span class="sd">            There are three levels of shuffling, desired shuffle enum defined by mindspore.dataset.Shuffle.</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples, same as setting shuffle to True.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If dataset_files are not valid or do not exist.</span>
<span class="sd">        ValueError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset_dir = [&quot;/path/to/text_file_dataset_file&quot;] # contains 1 or multiple text files</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_textfiledataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_files</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_files</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_find_files</span><span class="p">(</span><span class="n">dataset_files</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_files</span><span class="o">.</span><span class="n">sort</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">TextFileNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_files</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                                <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span>


<div class="viewcode-block" id="UDPOSDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.UDPOSDataset.html#mindspore.dataset.UDPOSDataset">[docs]</a><span class="k">class</span> <span class="nc">UDPOSDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses UDPOS dataset.</span>

<span class="sd">    The generated dataset has three columns: :py:obj:`[word, universal, stanford]`.</span>
<span class="sd">    The tensor of column :py:obj:`word` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`universal` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`stanford` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test`, `valid` or `all`. `train` will read from</span>
<span class="sd">            12,543 train samples, `test` will read from 2,077 test samples, `valid` will read from 2,002 valid samples,</span>
<span class="sd">            `all` will read from all 16,622 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed;</span>
<span class="sd">            If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL</span>
<span class="sd">            Otherwise, there are two levels of shuffling:</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; udpos_dataset_dir = &quot;/path/to/udpos_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.UDPOSDataset(dataset_files=udpos_dataset_dir, usage=&#39;all&#39;)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_udpos_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">UDPOSNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                             <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="WikiTextDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.WikiTextDataset.html#mindspore.dataset.WikiTextDataset">[docs]</a><span class="k">class</span> <span class="nc">WikiTextDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses WikiText2 and WikiText103 datasets.</span>

<span class="sd">    The generated dataset has one column :py:obj:`[text]`.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Acceptable usages include `train`, `test`, `valid` and `all` (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed;</span>
<span class="sd">            If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL</span>
<span class="sd">            Otherwise, there are two levels of shuffling:</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, &#39;num_samples&#39; reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; wiki_text_dataset_dir = &quot;/path/to/wiki_text_dataset_directory&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.WikiTextDataset(dataset_dir=wiki_text_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About WikiTextDataset dataset:</span>

<span class="sd">    The WikiText Long Term Dependency Language Modeling Dataset is an English lexicon containing 100 million words.</span>
<span class="sd">    These terms are drawn from Wikipedia&#39;s premium and benchmark articles, including versions of Wikitext2 and</span>
<span class="sd">    Wikitext103. For WikiText2, it has 36718 lines in wiki.train.tokens, 4358 lines in wiki.test.tokens and</span>
<span class="sd">    3760 lines in wiki.valid.tokens. For WikiText103, it has 1801350 lines in wiki.train.tokens, 4358 lines in</span>
<span class="sd">    wiki.test.tokens and 3760 lines in wiki.valid.tokens.</span>

<span class="sd">    Here is the original WikiText dataset structure.</span>
<span class="sd">    You can unzip the dataset files into this directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── WikiText2/WikiText103</span>
<span class="sd">             ├── wiki.train.tokens</span>
<span class="sd">             ├── wiki.test.tokens</span>
<span class="sd">             ├── wiki.valid.tokens</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{merity2016pointer,</span>
<span class="sd">          title={Pointer sentinel mixture models},</span>
<span class="sd">          author={Merity, Stephen and Xiong, Caiming and Bradbury, James and Socher, Richard},</span>
<span class="sd">          journal={arXiv preprint arXiv:1609.07843},</span>
<span class="sd">          year={2016}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_wiki_text_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">WikiTextNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span>
                                <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="YahooAnswersDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.YahooAnswersDataset.html#mindspore.dataset.YahooAnswersDataset">[docs]</a><span class="k">class</span> <span class="nc">YahooAnswersDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses the YahooAnswers dataset.</span>

<span class="sd">    The generated dataset has four columns :py:obj:`[class, title, content, answer]`.</span>
<span class="sd">    The tensor of column :py:obj:`class` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`title` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`content` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`answer` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read</span>
<span class="sd">            from 1,400,000 train samples, `test` will read from 60,000 test samples, `all` will read from</span>
<span class="sd">            all 1,460,000 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): The number of samples to be included in the dataset</span>
<span class="sd">            (default=None, will include all text).</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed;</span>
<span class="sd">            If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL</span>
<span class="sd">            Otherwise, there are two levels of shuffling:</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>

<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the maximum sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; yahoo_answers_dataset_dir = &quot;/path/to/yahoo_answers_dataset_directory&quot;</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 1) Read 3 samples from YahooAnswers dataset</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, num_samples=3)</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # 2) Read train samples from YahooAnswers dataset</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, usage=&quot;train&quot;)</span>

<span class="sd">    About YahooAnswers dataset:</span>

<span class="sd">    The YahooAnswers dataset consists of 1,460,000 text samples in 10 classes.</span>
<span class="sd">    There are 1,400,000 samples in the train.csv and 60,000 samples in the test.csv.</span>
<span class="sd">    The 10 different classes represent Society &amp; Culture, Science &amp; Mathematics, Health, Education &amp; Reference,</span>
<span class="sd">    Computers &amp; Internet, Sports, Business &amp; Finance, Entertainment &amp; Music, Family &amp; Relationships,</span>
<span class="sd">    Politics &amp; Government.</span>

<span class="sd">    Here is the original YahooAnswers dataset structure.</span>
<span class="sd">    You can unzip the dataset files into this directory structure and read by MindSpore&#39;s API.</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── yahoo_answers_dataset_dir</span>
<span class="sd">            ├── train.csv</span>
<span class="sd">            ├── test.csv</span>
<span class="sd">            ├── classes.txt</span>
<span class="sd">            └── readme.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{YahooAnswers,</span>
<span class="sd">        title   = {Yahoo! Answers Topic Classification Dataset},</span>
<span class="sd">        author  = {Xiang Zhang},</span>
<span class="sd">        year    = {2015},</span>
<span class="sd">        howpublished = {}</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_yahoo_answers_dataset</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span>
                 <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span>
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s2">&quot;all&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">YahooAnswersNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span>
                                    <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>


<div class="viewcode-block" id="YelpReviewDataset"><a class="viewcode-back" href="../../../../api_python/dataset/mindspore.dataset.YelpReviewDataset.html#mindspore.dataset.YelpReviewDataset">[docs]</a><span class="k">class</span> <span class="nc">YelpReviewDataset</span><span class="p">(</span><span class="n">SourceDataset</span><span class="p">,</span> <span class="n">TextBaseDataset</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A source dataset that reads and parses Yelp Review Polarity and Yelp Review Full dataset.</span>

<span class="sd">    The generated dataset has two columns: :py:obj:`[label, text]`.</span>
<span class="sd">    The tensor of column :py:obj:`label` is of the string type.</span>
<span class="sd">    The tensor of column :py:obj:`text` is of the string type.</span>

<span class="sd">    Args:</span>
<span class="sd">        dataset_dir (str): Path to the root directory that contains the dataset.</span>
<span class="sd">        usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.</span>
<span class="sd">            For Polarity, `train` will read from 560,000 train samples, `test` will read from 38,000 test samples,</span>
<span class="sd">            `all` will read from all 598,000 samples.</span>
<span class="sd">            For Full, `train` will read from 650,000 train samples, `test` will read from 50,000 test samples,</span>
<span class="sd">            `all` will read from all 700,000 samples (default=None, all samples).</span>
<span class="sd">        num_samples (int, optional): Number of samples (rows) to read (default=None, reads all samples).</span>
<span class="sd">        shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch</span>
<span class="sd">            (default=Shuffle.GLOBAL).</span>
<span class="sd">            If shuffle is False, no shuffling will be performed;</span>
<span class="sd">            If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL</span>
<span class="sd">            Otherwise, there are two levels of shuffling:</span>

<span class="sd">            - Shuffle.GLOBAL: Shuffle both the files and samples.</span>

<span class="sd">            - Shuffle.FILES: Shuffle files only.</span>
<span class="sd">        num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).</span>
<span class="sd">            When this argument is specified, `num_samples` reflects the max sample number of per shard.</span>
<span class="sd">        shard_id (int, optional): The shard ID within num_shards (default=None). This</span>
<span class="sd">            argument can only be specified when num_shards is also specified.</span>
<span class="sd">        num_parallel_workers (int, optional): Number of workers to read the data</span>
<span class="sd">            (default=None, number set in the config).</span>
<span class="sd">        cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.</span>
<span class="sd">            (default=None, which means no cache is used).</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If dataset_dir does not contain data files.</span>
<span class="sd">        RuntimeError: If num_parallel_workers exceeds the max thread numbers.</span>
<span class="sd">        RuntimeError: If num_shards is specified but shard_id is None.</span>
<span class="sd">        RuntimeError: If shard_id is specified but num_shards is None.</span>
<span class="sd">        ValueError: If shard_id is invalid (&lt; 0 or &gt;= num_shards).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; yelp_review_dataset_dir = &quot;/path/to/yelp_review_dataset_dir&quot;</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.YelpReviewDataset(dataset_dir=yelp_review_dataset_dir, usage=&#39;all&#39;)</span>

<span class="sd">    About YelpReview Dataset:</span>

<span class="sd">    The Yelp Review Full dataset consists of reviews from Yelp. It is extracted from the Yelp Dataset Challenge 2015</span>
<span class="sd">    data, and it is mainly used for text classification.</span>

<span class="sd">    The Yelp Review Polarity dataset is constructed from the above dataset, by considering stars 1 and 2 negative, and 3</span>
<span class="sd">    and 4 positive.</span>

<span class="sd">    The directory structures of these two datasets are the same.</span>
<span class="sd">    You can unzip the dataset files into the following structure and read by MindSpore&#39;s API:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        .</span>
<span class="sd">        └── yelp_review_dir</span>
<span class="sd">             ├── train.csv</span>
<span class="sd">             ├── test.csv</span>
<span class="sd">             └── readme.txt</span>

<span class="sd">    Citation:</span>

<span class="sd">    For Yelp Review Polarity:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{zhangCharacterlevelConvolutionalNetworks2015,</span>
<span class="sd">          archivePrefix = {arXiv},</span>
<span class="sd">          eprinttype = {arxiv},</span>
<span class="sd">          eprint = {1509.01626},</span>
<span class="sd">          primaryClass = {cs},</span>
<span class="sd">          title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},</span>
<span class="sd">          abstract = {This article offers an empirical exploration on the use of character-level convolutional networks</span>
<span class="sd">                      (ConvNets) for text classification. We constructed several large-scale datasets to show that</span>
<span class="sd">                      character-level convolutional networks could achieve state-of-the-art or competitive results.</span>
<span class="sd">                      Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF</span>
<span class="sd">                      variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},</span>
<span class="sd">          journal = {arXiv:1509.01626 [cs]},</span>
<span class="sd">          author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},</span>
<span class="sd">          month = sep,</span>
<span class="sd">          year = {2015},</span>
<span class="sd">        }</span>

<span class="sd">    Citation:</span>

<span class="sd">    For Yelp Review Full:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        @article{zhangCharacterlevelConvolutionalNetworks2015,</span>
<span class="sd">          archivePrefix = {arXiv},</span>
<span class="sd">          eprinttype = {arxiv},</span>
<span class="sd">          eprint = {1509.01626},</span>
<span class="sd">          primaryClass = {cs},</span>
<span class="sd">          title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},</span>
<span class="sd">          abstract = {This article offers an empirical exploration on the use of character-level convolutional networks</span>
<span class="sd">                      (ConvNets) for text classification. We constructed several large-scale datasets to show that</span>
<span class="sd">                      character-level convolutional networks could achieve state-of-the-art or competitive results.</span>
<span class="sd">                      Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF</span>
<span class="sd">                      variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},</span>
<span class="sd">          journal = {arXiv:1509.01626 [cs]},</span>
<span class="sd">          author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},</span>
<span class="sd">          month = sep,</span>
<span class="sd">          year = {2015},</span>
<span class="sd">        }</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_yelp_review_dataset</span><!-- decorator presumably validates ctor args (dirs, shard/worker counts) before the body runs — TODO confirm against validators module -->
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataset_dir</span><span class="p">,</span> <span class="n">usage</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">Shuffle</span><span class="o">.</span><span class="n">GLOBAL</span><span class="p">,</span> <span class="n">num_shards</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span><!-- shuffle defaults to global (whole-dataset) shuffling -->
                 <span class="n">shard_id</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">num_parallel_workers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">num_parallel_workers</span><span class="o">=</span><span class="n">num_parallel_workers</span><span class="p">,</span> <span class="n">num_samples</span><span class="o">=</span><span class="n">num_samples</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="n">shuffle</span><span class="p">,</span><!-- common sampling/shard/cache handling is delegated to the base source-dataset class -->
                         <span class="n">num_shards</span><span class="o">=</span><span class="n">num_shards</span><span class="p">,</span> <span class="n">shard_id</span><span class="o">=</span><span class="n">shard_id</span><span class="p">,</span> <span class="n">cache</span><span class="o">=</span><span class="n">cache</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span> <span class="o">=</span> <span class="n">dataset_dir</span><!-- kept for parse(); path validity is not checked here in the visible body -->
        <span class="bp">self</span><span class="o">.</span><span class="n">usage</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">usage</span><span class="p">,</span> <span class="s1">&#39;all&#39;</span><span class="p">)</span><!-- replace_none presumably substitutes 'all' when usage is None, matching the documented default — confirm helper semantics -->

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">children</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span><!-- children is accepted but unused here: a leaf/source node has no upstream datasets -->
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">YelpReviewNode</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dataset_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">usage</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_samples</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shuffle_flag</span><span class="p">,</span><!-- builds the C++ (cde) IR node from stored ctor state; shuffle_flag is presumably derived from `shuffle` by the base class — confirm -->
                                  <span class="bp">self</span><span class="o">.</span><span class="n">num_shards</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">shard_id</span><span class="p">)</span></div>
</pre></div>

           </div>
           
          </div>
          <footer>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>