

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>mindspore.dataset.text.transforms — MindSpore master documentation</title>

  <!-- type attribute omitted: text/css is the default for rel="stylesheet" -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/pygments.css">

  <!-- html5shiv: legacy shim so IE<9 styles HTML5 sectioning elements -->
  <!--[if lt IE 9]>
    <script src="../../../../_static/js/html5shiv.min.js"></script>
  <![endif]-->

  <!-- documentation_options.js must load first: it defines data-url_root used by the other Sphinx scripts -->
  <script id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
  <script src="../../../../_static/jquery.js"></script>
  <script src="../../../../_static/underscore.js"></script>
  <script src="../../../../_static/doctools.js"></script>
  <script src="../../../../_static/language_data.js"></script>
  <!-- MathJax is independent of the theme scripts, so it may load async; "async" is a boolean attribute -->
  <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>

  <script src="../../../../_static/js/theme.js"></script>

  <link rel="index" title="Index" href="../../../../genindex.html">
  <link rel="search" title="Search" href="../../../../search.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <!-- Sidebar: project home link, search form, and the API table of contents.
         The data-toggle / data-spy attributes are hooks consumed by theme.js — keep them intact. -->
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          <a href="../../../../index.html" class="icon icon-home"> MindSpore</a>

          <div role="search">
            <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
              <!-- placeholder is not a label: aria-label gives the box an accessible name.
                   type stays "text" because the theme CSS styles input[type=text] here. -->
              <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
              <input type="hidden" name="check_keywords" value="yes">
              <input type="hidden" name="area" value="default">
            </form>
          </div>
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.boost.html">mindspore.boost</a></li>
</ul>
          <p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <!-- Mobile top bar: hamburger toggle plus home link.
           theme.js binds a click handler via data-toggle="wy-nav-top"; the aria-label gives the
           icon-only control an accessible name. NOTE(review): the handler is click-only — making it
           keyboard-operable would require changes in theme.js, not here. -->
      <nav class="wy-nav-top" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars" aria-label="Toggle navigation menu"></i>
        <a href="../../../../index.html">MindSpore</a>
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <!-- icon-only home link: aria-label supplies the accessible name the empty anchor lacks -->
    <li><a href="../../../../index.html" class="icon icon-home" aria-label="Home"></a> &raquo;</li>
    <li><a href="../../../index.html">Module code</a> &raquo;</li>
    <li>mindspore.dataset.text.transforms</li>
    <li class="wy-breadcrumbs-aside"></li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <h1>Source code for mindspore.dataset.text.transforms</h1><div class="highlight"><pre>
<span></span><span class="c1"># Copyright 2020-2021 Huawei Technologies Co., Ltd</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1"># http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an &quot;AS IS&quot; BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd">The module text.transforms is inherited from _c_dataengine</span>
<span class="sd">and is implemented based on ICU4C and cppjieba in C++.</span>
<span class="sd">It&#39;s a high performance module to process NLP text.</span>
<span class="sd">Users can use Vocab to build their own dictionary,</span>
<span class="sd">use appropriate tokenizers to split sentences into different tokens,</span>
<span class="sd">and use Lookup to find the index of tokens in Vocab.</span>

<span class="sd">.. Note::</span>
<span class="sd">    A constructor&#39;s arguments for every class in this module must be saved into the</span>
<span class="sd">    class attributes (self.xxx) to support save() and load().</span>

<span class="sd">Examples:</span>
<span class="sd">    &gt;&gt;&gt; text_file_dataset_dir = [&quot;/path/to/text_file_dataset_file&quot;] # contains 1 or multiple text files</span>
<span class="sd">    &gt;&gt;&gt; # Create a dataset for text sentences saved as line data in a file</span>
<span class="sd">    &gt;&gt;&gt; text_file_dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir, shuffle=False)</span>
<span class="sd">    &gt;&gt;&gt; # Tokenize sentences to unicode characters</span>
<span class="sd">    &gt;&gt;&gt; tokenizer = text.UnicodeCharTokenizer()</span>
<span class="sd">    &gt;&gt;&gt; # Load vocabulary from list</span>
<span class="sd">    &gt;&gt;&gt; vocab = text.Vocab.from_list(word_list=[&#39;深&#39;, &#39;圳&#39;, &#39;欢&#39;, &#39;迎&#39;, &#39;您&#39;])</span>
<span class="sd">    &gt;&gt;&gt; # Use Lookup operator to map tokens to ids</span>
<span class="sd">    &gt;&gt;&gt; lookup = text.Lookup(vocab=vocab)</span>
<span class="sd">    &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=[tokenizer, lookup])</span>
<span class="sd">    &gt;&gt;&gt; # if text line in dataset_file is:</span>
<span class="sd">    &gt;&gt;&gt; # 深圳欢迎您</span>
<span class="sd">    &gt;&gt;&gt; # then the output will be:</span>
<span class="sd">    &gt;&gt;&gt; # {&#39;text&#39;: array([0, 1, 2, 3, 4], dtype=int32)}</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">re</span>
<span class="kn">import</span> <span class="nn">platform</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>

<span class="kn">import</span> <span class="nn">mindspore._c_dataengine</span> <span class="k">as</span> <span class="nn">cde</span>
<span class="kn">from</span> <span class="nn">mindspore.common</span> <span class="kn">import</span> <span class="n">dtype</span> <span class="k">as</span> <span class="n">mstype</span>

<span class="kn">from</span> <span class="nn">.utils</span> <span class="kn">import</span> <span class="n">JiebaMode</span><span class="p">,</span> <span class="n">NormalizeForm</span><span class="p">,</span> <span class="n">to_str</span><span class="p">,</span> <span class="n">SPieceTokenizerOutType</span><span class="p">,</span> <span class="n">SPieceTokenizerLoadType</span>
<span class="kn">from</span> <span class="nn">.validators</span> <span class="kn">import</span> <span class="n">check_lookup</span><span class="p">,</span> <span class="n">check_jieba_add_dict</span><span class="p">,</span> <span class="n">check_to_vectors</span><span class="p">,</span> \
    <span class="n">check_jieba_add_word</span><span class="p">,</span> <span class="n">check_jieba_init</span><span class="p">,</span> <span class="n">check_with_offsets</span><span class="p">,</span> <span class="n">check_unicode_script_tokenizer</span><span class="p">,</span> \
    <span class="n">check_wordpiece_tokenizer</span><span class="p">,</span> <span class="n">check_regex_replace</span><span class="p">,</span> <span class="n">check_regex_tokenizer</span><span class="p">,</span> <span class="n">check_basic_tokenizer</span><span class="p">,</span> <span class="n">check_ngram</span><span class="p">,</span> \
    <span class="n">check_pair_truncate</span><span class="p">,</span> <span class="n">check_to_number</span><span class="p">,</span> <span class="n">check_bert_tokenizer</span><span class="p">,</span> <span class="n">check_python_tokenizer</span><span class="p">,</span> <span class="n">check_slidingwindow</span><span class="p">,</span> \
    <span class="n">check_sentence_piece_tokenizer</span>
<span class="kn">from</span> <span class="nn">..core.datatypes</span> <span class="kn">import</span> <span class="n">mstype_to_detype</span>
<span class="kn">from</span> <span class="nn">..core.validator_helpers</span> <span class="kn">import</span> <span class="n">replace_none</span>
<span class="kn">from</span> <span class="nn">..transforms.c_transforms</span> <span class="kn">import</span> <span class="n">TensorOperation</span>


<span class="k">class</span> <span class="nc">TextTensorOperation</span><span class="p">(</span><span class="n">TensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Base class of Text Tensor Ops</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">NotImplementedError</span><span class="p">(</span><span class="s2">&quot;TextTensorOperation has to implement parse() method.&quot;</span><span class="p">)</span>


<span class="n">DE_C_INTER_JIEBA_MODE</span> <span class="o">=</span> <span class="p">{</span>
    <span class="n">JiebaMode</span><span class="o">.</span><span class="n">MIX</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">JiebaMode</span><span class="o">.</span><span class="n">DE_JIEBA_MIX</span><span class="p">,</span>
    <span class="n">JiebaMode</span><span class="o">.</span><span class="n">MP</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">JiebaMode</span><span class="o">.</span><span class="n">DE_JIEBA_MP</span><span class="p">,</span>
    <span class="n">JiebaMode</span><span class="o">.</span><span class="n">HMM</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">JiebaMode</span><span class="o">.</span><span class="n">DE_JIEBA_HMM</span>
<span class="p">}</span>

<span class="n">DE_C_INTER_SENTENCEPIECE_LOADTYPE</span> <span class="o">=</span> <span class="p">{</span>
    <span class="n">SPieceTokenizerLoadType</span><span class="o">.</span><span class="n">FILE</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">SPieceTokenizerLoadType</span><span class="o">.</span><span class="n">DE_SPIECE_TOKENIZER_LOAD_KFILE</span><span class="p">,</span>
    <span class="n">SPieceTokenizerLoadType</span><span class="o">.</span><span class="n">MODEL</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">SPieceTokenizerLoadType</span><span class="o">.</span><span class="n">DE_SPIECE_TOKENIZER_LOAD_KMODEL</span>
<span class="p">}</span>

<span class="n">DE_C_INTER_SENTENCEPIECE_OUTTYPE</span> <span class="o">=</span> <span class="p">{</span>
    <span class="n">SPieceTokenizerOutType</span><span class="o">.</span><span class="n">STRING</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">SPieceTokenizerOutType</span><span class="o">.</span><span class="n">DE_SPIECE_TOKENIZER_OUTTYPE_KString</span><span class="p">,</span>
    <span class="n">SPieceTokenizerOutType</span><span class="o">.</span><span class="n">INT</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">SPieceTokenizerOutType</span><span class="o">.</span><span class="n">DE_SPIECE_TOKENIZER_OUTTYPE_KINT</span>
<span class="p">}</span>


<span class="k">class</span> <span class="nc">JiebaTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Tokenize Chinese string into words based on dictionary.</span>

<span class="sd">    Note:</span>
<span class="sd">        The integrity of the HMMSegment algorithm and MPSegment algorithm files must be confirmed.</span>

<span class="sd">    Args:</span>
<span class="sd">        hmm_path (str): Dictionary file is used by HMMSegment algorithm.</span>
<span class="sd">            The dictionary can be obtained on the official website of cppjieba.</span>
<span class="sd">        mp_path (str): Dictionary file is used by MPSegment algorithm.</span>
<span class="sd">            The dictionary can be obtained on the official website of cppjieba.</span>
<span class="sd">        mode (JiebaMode, optional): Valid values can be any of [JiebaMode.MP, JiebaMode.HMM,</span>
<span class="sd">            JiebaMode.MIX](default=JiebaMode.MIX).</span>

<span class="sd">            - JiebaMode.MP, tokenize with MPSegment algorithm.</span>
<span class="sd">            - JiebaMode.HMM, tokenize with Hidden Markov Model Segment algorithm.</span>
<span class="sd">            - JiebaMode.MIX, tokenize with a mix of MPSegment and HMMSegment algorithm.</span>
<span class="sd">        with_offsets (bool, optional): Whether or not output offsets of tokens (default=False).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.dataset.text import JiebaMode</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=False, default output one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">        &gt;&gt;&gt; jieba_hmm_file = &quot;/path/to/jieba/hmm/file&quot;</span>
<span class="sd">        &gt;&gt;&gt; jieba_mp_file = &quot;/path/to/jieba/mp/file&quot;</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.JiebaTokenizer(jieba_hmm_file, jieba_mp_file, mode=JiebaMode.MP, with_offsets=False)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str], [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">        &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.JiebaTokenizer(jieba_hmm_file, jieba_mp_file, mode=JiebaMode.MP, with_offsets=True)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset_1 = text_file_dataset_1.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">        ...                                               output_columns=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;],</span>
<span class="sd">        ...                                               column_order=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;])</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_jieba_init</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">hmm_path</span><span class="p">,</span> <span class="n">mp_path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="n">JiebaMode</span><span class="o">.</span><span class="n">MIX</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">mode</span><span class="p">,</span> <span class="n">JiebaMode</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;Wrong input type for mode, should be JiebaMode.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">=</span> <span class="n">mode</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">__check_path__</span><span class="p">(</span><span class="n">hmm_path</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">hmm_path</span> <span class="o">=</span> <span class="n">hmm_path</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">__check_path__</span><span class="p">(</span><span class="n">mp_path</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mp_path</span> <span class="o">=</span> <span class="n">mp_path</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">words</span> <span class="o">=</span> <span class="p">[]</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">jieba_tokenizer</span> <span class="o">=</span> <span class="n">cde</span><span class="o">.</span><span class="n">JiebaTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">hmm_path</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">mp_path</span><span class="p">,</span>
                                                      <span class="n">DE_C_INTER_JIEBA_MODE</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">],</span>
                                                      <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">word</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">words</span><span class="p">:</span>
            <span class="n">jieba_tokenizer</span><span class="o">.</span><span class="n">add_word</span><span class="p">(</span><span class="n">word</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">word</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span>
        <span class="k">return</span> <span class="n">jieba_tokenizer</span>

    <span class="nd">@check_jieba_add_word</span>
    <span class="k">def</span> <span class="nf">add_word</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">word</span><span class="p">,</span> <span class="n">freq</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Add a user defined word to JiebaTokenizer&#39;s dictionary.</span>

<span class="sd">        Args:</span>
<span class="sd">            word (str): The word to be added to the JiebaTokenizer instance.</span>
<span class="sd">                The added word will not be written into the built-in dictionary on disk.</span>
<span class="sd">            freq (int, optional): The frequency of the word to be added. The higher the frequency,</span>
<span class="sd">                the better chance the word will be tokenized (default=None, use default frequency).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; from mindspore.dataset.text import JiebaMode</span>
<span class="sd">            &gt;&gt;&gt; jieba_hmm_file = &quot;/path/to/jieba/hmm/file&quot;</span>
<span class="sd">            &gt;&gt;&gt; jieba_mp_file = &quot;/path/to/jieba/mp/file&quot;</span>
<span class="sd">            &gt;&gt;&gt; jieba_op = text.JiebaTokenizer(jieba_hmm_file, jieba_mp_file, mode=JiebaMode.MP)</span>
<span class="sd">            &gt;&gt;&gt; sentence_piece_vocab_file = &quot;/path/to/sentence/piece/vocab/file&quot;</span>
<span class="sd">            &gt;&gt;&gt; with open(sentence_piece_vocab_file, &#39;r&#39;) as f:</span>
<span class="sd">            ...     for line in f:</span>
<span class="sd">            ...         word = line.split(&#39;,&#39;)[0]</span>
<span class="sd">            ...         jieba_op.add_word(word)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=jieba_op, input_columns=[&quot;text&quot;])</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">if</span> <span class="n">freq</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">words</span><span class="o">.</span><span class="n">append</span><span class="p">((</span><span class="n">word</span><span class="p">,</span> <span class="mi">0</span><span class="p">))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">words</span><span class="o">.</span><span class="n">append</span><span class="p">((</span><span class="n">word</span><span class="p">,</span> <span class="n">freq</span><span class="p">))</span>

    <span class="nd">@check_jieba_add_dict</span>
    <span class="k">def</span> <span class="nf">add_dict</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">user_dict</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Add a user defined dictionary of word-freq pairs to JiebaTokenizer&#39;s dictionary.</span>

<span class="sd">        Args:</span>
<span class="sd">            user_dict (Union[str, dict]): One of the two loading methods is file path(str) loading</span>
<span class="sd">                (according to the Jieba dictionary format) and the other is Python dictionary(dict) loading,</span>
<span class="sd">                Python Dict format: {word1:freq1, word2:freq2,...}.</span>
<span class="sd">                Jieba dictionary format : word(required), freq(optional), such as:</span>

<span class="sd">                .. code-block::</span>

<span class="sd">                    word1 freq1</span>
<span class="sd">                    word2 None</span>
<span class="sd">                    word3 freq3</span>

<span class="sd">                Only valid word-freq pairs in user provided file will be added into the dictionary.</span>
<span class="sd">                Rows containing invalid input will be ignored. No error nor warning Status is returned.</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; from mindspore.dataset.text import JiebaMode</span>
<span class="sd">            &gt;&gt;&gt; jieba_hmm_file = &quot;/path/to/jieba/hmm/file&quot;</span>
<span class="sd">            &gt;&gt;&gt; jieba_mp_file = &quot;/path/to/jieba/mp/file&quot;</span>
<span class="sd">            &gt;&gt;&gt; user_dict = {&quot;男默女泪&quot;: 10}</span>
<span class="sd">            &gt;&gt;&gt; jieba_op = text.JiebaTokenizer(jieba_hmm_file, jieba_mp_file, mode=JiebaMode.MP)</span>
<span class="sd">            &gt;&gt;&gt; jieba_op.add_dict(user_dict)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=jieba_op, input_columns=[&quot;text&quot;])</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">user_dict</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">__add_dict_py_file</span><span class="p">(</span><span class="n">user_dict</span><span class="p">)</span>
        <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">user_dict</span><span class="p">,</span> <span class="nb">dict</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">user_dict</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">add_word</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;The type of user_dict must str or dict.&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__add_dict_py_file</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">file_path</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Add user defined word by file&quot;&quot;&quot;</span>
        <span class="n">words_list</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">__parser_file</span><span class="p">(</span><span class="n">file_path</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">data</span> <span class="ow">in</span> <span class="n">words_list</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">data</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="n">freq</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">freq</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">add_word</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">freq</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__parser_file</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">file_path</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;parser user defined word by file&quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">file_path</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
                <span class="s2">&quot;user dict file </span><span class="si">{}</span><span class="s2"> is not exist.&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">file_path</span><span class="p">))</span>
        <span class="n">real_file_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">realpath</span><span class="p">(</span><span class="n">file_path</span><span class="p">)</span>
        <span class="n">file_dict</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">real_file_path</span><span class="p">)</span>
        <span class="n">data_re</span> <span class="o">=</span> <span class="n">re</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="s1">&#39;^</span><span class="se">\\</span><span class="s1">s*([^</span><span class="se">\\</span><span class="s1">s*]+?)</span><span class="se">\\</span><span class="s1">s*([0-9]+)?</span><span class="se">\\</span><span class="s1">s*$&#39;</span><span class="p">,</span> <span class="n">re</span><span class="o">.</span><span class="n">U</span><span class="p">)</span>
        <span class="n">words_list</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="n">file_dict</span><span class="p">:</span>
            <span class="n">data</span> <span class="o">=</span> <span class="n">item</span><span class="o">.</span><span class="n">strip</span><span class="p">()</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
                <span class="n">data</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">__decode</span><span class="p">(</span><span class="n">data</span><span class="p">)</span>
            <span class="n">tmp</span> <span class="o">=</span> <span class="n">data_re</span><span class="o">.</span><span class="n">match</span><span class="p">(</span><span class="n">data</span><span class="p">)</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="n">tmp</span><span class="p">:</span>
                <span class="k">continue</span>
            <span class="n">words</span> <span class="o">=</span> <span class="n">tmp</span><span class="o">.</span><span class="n">groups</span><span class="p">()</span>
            <span class="n">words_list</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">words</span><span class="p">)</span>
        <span class="n">file_dict</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
        <span class="k">return</span> <span class="n">words_list</span>

    <span class="k">def</span> <span class="nf">__decode</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">data</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;decode the dict file to utf8&quot;&quot;&quot;</span>
        <span class="k">try</span><span class="p">:</span>
            <span class="n">data</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="n">decode</span><span class="p">(</span><span class="s1">&#39;utf-8&#39;</span><span class="p">)</span>
        <span class="k">except</span> <span class="ne">UnicodeDecodeError</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;user dict file must be utf8 format.&quot;</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">data</span><span class="o">.</span><span class="n">lstrip</span><span class="p">(</span><span class="s1">&#39;</span><span class="se">\ufeff</span><span class="s1">&#39;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__check_path__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">model_path</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;check model path&quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">realpath</span><span class="p">(</span><span class="n">model_path</span><span class="p">)):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
                <span class="s2">&quot; jieba mode file </span><span class="si">{}</span><span class="s2"> is not exist.&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">model_path</span><span class="p">))</span>


<span class="k">class</span> <span class="nc">Lookup</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Look up a word into an id according to the input vocabulary table.</span>

<span class="sd">    Args:</span>
<span class="sd">        vocab (Vocab): A vocabulary object.</span>
<span class="sd">        unknown_token (str, optional): Word used for lookup. In case the word being looked up is out of</span>
<span class="sd">            vocabulary (OOV), the result of lookup will be replaced with unknown_token. If the unknown_token is</span>
<span class="sd">            not specified or it is OOV, a runtime error will be thrown (default=None, means no unknown_token</span>
<span class="sd">            is specified).</span>
<span class="sd">        data_type (mindspore.dtype, optional): The data type that the lookup operation maps</span>
<span class="sd">            strings to (default=mindspore.int32).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Load vocabulary from list</span>
<span class="sd">        &gt;&gt;&gt; vocab = text.Vocab.from_list([&#39;深&#39;, &#39;圳&#39;, &#39;欢&#39;, &#39;迎&#39;, &#39;您&#39;])</span>
<span class="sd">        &gt;&gt;&gt; # Use Lookup operator to map tokens to ids</span>
<span class="sd">        &gt;&gt;&gt; lookup = text.Lookup(vocab)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=[lookup])</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_lookup</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">vocab</span><span class="p">,</span> <span class="n">unknown_token</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">data_type</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">vocab</span> <span class="o">=</span> <span class="n">vocab</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span> <span class="o">=</span> <span class="n">unknown_token</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">data_type</span> <span class="o">=</span> <span class="n">data_type</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">LookupOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">vocab</span><span class="o">.</span><span class="n">c_vocab</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span><span class="p">,</span> <span class="nb">str</span><span class="p">(</span><span class="n">mstype_to_detype</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">data_type</span><span class="p">)))</span>


<span class="k">class</span> <span class="nc">Ngram</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    TensorOp to generate n-gram from a 1-D string Tensor.</span>

<span class="sd">    Refer to https://en.wikipedia.org/wiki/N-gram#Examples for an overview of what n-gram is and how it works.</span>

<span class="sd">    Args:</span>
<span class="sd">        n (list[int]): n in n-gram, which is a list of positive integers. For example, if n=[4, 3], then the result</span>
<span class="sd">            would be a 4-gram followed by a 3-gram in the same tensor. If the number of words is not enough to make up</span>
<span class="sd">            for a n-gram, an empty string will be returned. For example, 3 grams on [&quot;mindspore&quot;, &quot;best&quot;] will result in</span>
<span class="sd">            an empty string produced.</span>
<span class="sd">        left_pad (tuple, optional): Padding performed on left side of the sequence shaped like (&quot;pad_token&quot;, pad_width).</span>
<span class="sd">            `pad_width` will be capped at n-1. For example, specifying left_pad=(&quot;_&quot;, 2) would pad left side of the</span>
<span class="sd">            sequence with &quot;__&quot; (default=(&quot;&quot;, 0)).</span>
<span class="sd">        right_pad (tuple, optional): Padding performed on right side of the sequence shaped like</span>
<span class="sd">            (&quot;pad_token&quot;, pad_width). `pad_width` will be capped at n-1. For example, specifying right_pad=(&quot;_&quot;, 2)</span>
<span class="sd">            would pad right side of the sequence with &quot;__&quot; (default=(&quot;&quot;, 0)).</span>
<span class="sd">        separator (str, optional): Symbol used to join strings together. For example, if 2-gram is</span>
<span class="sd">            [&quot;mindspore&quot;, &quot;amazing&quot;] with separator=&quot;-&quot;, the result would be [&quot;mindspore-amazing&quot;]</span>
<span class="sd">            (default=&quot; &quot;, which will use whitespace as separator).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; ngram_op = text.Ngram(3, separator=&quot;-&quot;)</span>
<span class="sd">        &gt;&gt;&gt; output = ngram_op([&quot;WildRose Country&quot;, &quot;Canada&#39;s Ocean Playground&quot;, &quot;Land of Living Skies&quot;])</span>
<span class="sd">        &gt;&gt;&gt; # output</span>
<span class="sd">        &gt;&gt;&gt; # [&quot;WildRose Country-Canada&#39;s Ocean Playground-Land of Living Skies&quot;]</span>
<span class="sd">        &gt;&gt;&gt; # same ngram_op called through map</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=ngram_op)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_ngram</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">left_pad</span><span class="o">=</span><span class="p">(</span><span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">),</span> <span class="n">right_pad</span><span class="o">=</span><span class="p">(</span><span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">),</span> <span class="n">separator</span><span class="o">=</span><span class="s2">&quot; &quot;</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ngrams</span> <span class="o">=</span> <span class="n">n</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">left_pad</span> <span class="o">=</span> <span class="n">left_pad</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">right_pad</span> <span class="o">=</span> <span class="n">right_pad</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">separator</span> <span class="o">=</span> <span class="n">separator</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">NgramOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">ngrams</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">left_pad</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">right_pad</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">separator</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">SentencePieceTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Tokenize scalar token or 1-D tokens to tokens by sentencepiece.</span>

<span class="sd">    Args:</span>
<span class="sd">        mode (Union[str, SentencePieceVocab]): If the input parameter is a file, then its type should be string.</span>
<span class="sd">            If the input parameter is a SentencePieceVocab object, then its type should be SentencePieceVocab.</span>
<span class="sd">        out_type (SPieceTokenizerOutType): The type of output, it can be any of [SPieceTokenizerOutType.STRING,</span>
<span class="sd">            SPieceTokenizerOutType.INT].</span>

<span class="sd">            - SPieceTokenizerOutType.STRING, means output type of SentencePiece Tokenizer is string.</span>
<span class="sd">            - SPieceTokenizerOutType.INT, means output type of SentencePiece Tokenizer is int.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.dataset.text import SentencePieceModel, SPieceTokenizerOutType</span>
<span class="sd">        &gt;&gt;&gt; sentence_piece_vocab_file = &quot;/path/to/sentence/piece/vocab/file&quot;</span>
<span class="sd">        &gt;&gt;&gt; vocab = text.SentencePieceVocab.from_file([sentence_piece_vocab_file], 5000, 0.9995,</span>
<span class="sd">        ...                                           SentencePieceModel.UNIGRAM, {})</span>
<span class="sd">        &gt;&gt;&gt; tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_sentence_piece_tokenizer</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">mode</span><span class="p">,</span> <span class="n">out_type</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">=</span> <span class="n">mode</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_type</span> <span class="o">=</span> <span class="n">out_type</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">SentencePieceTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">,</span> <span class="n">DE_C_INTER_SENTENCEPIECE_OUTTYPE</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">out_type</span><span class="p">])</span>


<span class="k">class</span> <span class="nc">SlidingWindow</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Construct a tensor from given data (only support 1-D for now), where each element in the dimension axis</span>
<span class="sd">    is a slice of data starting at the corresponding position, with a specified width.</span>

<span class="sd">    Args:</span>
<span class="sd">        width (int): The width of the window. It must be an integer and greater than zero.</span>
<span class="sd">        axis (int, optional): The axis along which the sliding window is computed (default=0).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.NumpySlicesDataset(data=[[1, 2, 3, 4, 5]], column_names=&quot;col1&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # Data before</span>
<span class="sd">        &gt;&gt;&gt; # |     col1     |</span>
<span class="sd">        &gt;&gt;&gt; # +--------------+</span>
<span class="sd">        &gt;&gt;&gt; # | [[1, 2, 3, 4, 5]] |</span>
<span class="sd">        &gt;&gt;&gt; # +--------------+</span>
<span class="sd">        &gt;&gt;&gt; dataset = dataset.map(operations=text.SlidingWindow(3, 0))</span>
<span class="sd">        &gt;&gt;&gt; # Data after</span>
<span class="sd">        &gt;&gt;&gt; # |     col1     |</span>
<span class="sd">        &gt;&gt;&gt; # +--------------+</span>
<span class="sd">        &gt;&gt;&gt; # |  [[1, 2, 3], |</span>
<span class="sd">        &gt;&gt;&gt; # |   [2, 3, 4], |</span>
<span class="sd">        &gt;&gt;&gt; # |   [3, 4, 5]] |</span>
<span class="sd">        &gt;&gt;&gt; # +--------------+</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_slidingwindow</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">width</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">width</span> <span class="o">=</span> <span class="n">width</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">SlidingWindowOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">width</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">ToNumber</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Tensor operation to convert every element of a string tensor to a number.</span>

<span class="sd">    Strings are cast according to the rules specified in the following links, except that any strings which represent</span>
<span class="sd">    negative numbers cannot be cast to an unsigned integer type. The casting rules are as follows:</span>
<span class="sd">    https://en.cppreference.com/w/cpp/string/basic_string/stof,</span>
<span class="sd">    https://en.cppreference.com/w/cpp/string/basic_string/stoul.</span>

<span class="sd">    Args:</span>
<span class="sd">        data_type (mindspore.dtype): Type to be cast to. Must be a numeric type in mindspore.dtype.</span>

<span class="sd">    Raises:</span>
<span class="sd">        RuntimeError: If strings are invalid to cast, or are out of range after being cast.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import dtype as mstype</span>
<span class="sd">        &gt;&gt;&gt; data = [[&quot;1&quot;, &quot;2&quot;, &quot;3&quot;]]</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.NumpySlicesDataset(data)</span>
<span class="sd">        &gt;&gt;&gt; to_number_op = text.ToNumber(mstype.int8)</span>
<span class="sd">        &gt;&gt;&gt; dataset = dataset.map(operations=to_number_op)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_to_number</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">data_type</span><span class="p">):</span>
        <span class="n">data_type</span> <span class="o">=</span> <span class="n">mstype_to_detype</span><span class="p">(</span><span class="n">data_type</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">data_type</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="n">data_type</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">ToNumberOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">data_type</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">ToVectors</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Look up a token into vectors according to the input vector table.</span>

<span class="sd">    Args:</span>
<span class="sd">        vectors (Vectors): A vectors object.</span>
<span class="sd">        unk_init (sequence, optional): Sequence used to initialize out-of-vocabulary (OOV) tokens</span>
<span class="sd">            (default=None, initialize with zero vectors).</span>
<span class="sd">        lower_case_backup (bool, optional): Whether to look up the token in the lower case. If False, each token in the</span>
<span class="sd">            original case will be looked up; if True, each token in the original case will be looked up first, if not</span>
<span class="sd">            found in the keys of the property stoi, the token in the lower case will be looked up (default=False).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Load vectors from file</span>
<span class="sd">        &gt;&gt;&gt; vectors = text.Vectors.from_file(&quot;/path/to/vectors/file&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # Use ToVectors operator to map tokens to vectors</span>
<span class="sd">        &gt;&gt;&gt; to_vectors = text.ToVectors(vectors)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=[to_vectors])</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_to_vectors</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">vectors</span><span class="p">,</span> <span class="n">unk_init</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lower_case_backup</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">vectors</span> <span class="o">=</span> <span class="n">vectors</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">unk_init</span> <span class="o">=</span> <span class="n">unk_init</span> <span class="k">if</span> <span class="n">unk_init</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="p">[]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">lower_case_backup</span> <span class="o">=</span> <span class="n">lower_case_backup</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">ToVectorsOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">vectors</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">unk_init</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_case_backup</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">TruncateSequencePair</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Truncate a pair of rank-1 tensors such that the total length is less than max_length.</span>

<span class="sd">    This operation takes two input tensors and returns two output Tensors.</span>

<span class="sd">    Args:</span>
<span class="sd">        max_length (int): Maximum length required.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; dataset = ds.NumpySlicesDataset(data={&quot;col1&quot;: [[1, 2, 3]], &quot;col2&quot;: [[4, 5]]})</span>
<span class="sd">        &gt;&gt;&gt; # Data before</span>
<span class="sd">        &gt;&gt;&gt; # |   col1    |   col2    |</span>
<span class="sd">        &gt;&gt;&gt; # +-----------+-----------|</span>
<span class="sd">        &gt;&gt;&gt; # | [1, 2, 3] |  [4, 5]   |</span>
<span class="sd">        &gt;&gt;&gt; # +-----------+-----------+</span>
<span class="sd">        &gt;&gt;&gt; truncate_sequence_pair_op = text.TruncateSequencePair(max_length=4)</span>
<span class="sd">        &gt;&gt;&gt; dataset = dataset.map(operations=truncate_sequence_pair_op)</span>
<span class="sd">        &gt;&gt;&gt; # Data after</span>
<span class="sd">        &gt;&gt;&gt; # |   col1    |   col2    |</span>
<span class="sd">        &gt;&gt;&gt; # +-----------+-----------+</span>
<span class="sd">        &gt;&gt;&gt; # |  [1, 2]   |  [4, 5]   |</span>
<span class="sd">        &gt;&gt;&gt; # +-----------+-----------+</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_pair_truncate</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">max_length</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">max_length</span> <span class="o">=</span> <span class="n">max_length</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">TruncateSequencePairOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">max_length</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">UnicodeCharTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Tokenize a scalar tensor of UTF-8 string to Unicode characters.</span>

<span class="sd">    Args:</span>
<span class="sd">        with_offsets (bool, optional): Whether or not output offsets of tokens (default=False).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=False, default output one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.UnicodeCharTokenizer(with_offsets=False)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str], [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">        &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.UnicodeCharTokenizer(with_offsets=True)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">        ...                                           output_columns=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;],</span>
<span class="sd">        ...                                           column_order=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;])</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_with_offsets</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">UnicodeCharTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">WordpieceTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Tokenize scalar token or 1-D tokens to 1-D subword tokens.</span>

<span class="sd">    Args:</span>
<span class="sd">        vocab (Vocab): A vocabulary object.</span>
<span class="sd">        suffix_indicator (str, optional): Used to show that the subword is the last part of a word (default=&#39;##&#39;).</span>
<span class="sd">        max_bytes_per_token (int, optional): Tokens exceeding this length will not be further split (default=100).</span>
<span class="sd">        unknown_token (str, optional): When a token cannot be found: if &#39;unknown_token&#39; is empty string,</span>
<span class="sd">            return the token directly, else return &#39;unknown_token&#39; (default=&#39;[UNK]&#39;).</span>
<span class="sd">        with_offsets (bool, optional): Whether or not output offsets of tokens (default=False).</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; vocab_list = [&quot;book&quot;, &quot;cholera&quot;, &quot;era&quot;, &quot;favor&quot;, &quot;##ite&quot;, &quot;my&quot;, &quot;is&quot;, &quot;love&quot;, &quot;dur&quot;, &quot;##ing&quot;, &quot;the&quot;]</span>
<span class="sd">        &gt;&gt;&gt; vocab = text.Vocab.from_list(vocab_list)</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=False, default output one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.WordpieceTokenizer(vocab=vocab, unknown_token=&#39;[UNK]&#39;,</span>
<span class="sd">        ...                                        max_bytes_per_token=100, with_offsets=False)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">        &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str], [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">        &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">        &gt;&gt;&gt; tokenizer_op = text.WordpieceTokenizer(vocab=vocab, unknown_token=&#39;[UNK]&#39;,</span>
<span class="sd">        ...                                        max_bytes_per_token=100, with_offsets=True)</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">        ...                                           output_columns=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;],</span>
<span class="sd">        ...                                           column_order=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;])</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_wordpiece_tokenizer</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">vocab</span><span class="p">,</span> <span class="n">suffix_indicator</span><span class="o">=</span><span class="s1">&#39;##&#39;</span><span class="p">,</span> <span class="n">max_bytes_per_token</span><span class="o">=</span><span class="mi">100</span><span class="p">,</span>
                 <span class="n">unknown_token</span><span class="o">=</span><span class="s1">&#39;[UNK]&#39;</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">vocab</span> <span class="o">=</span> <span class="n">vocab</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">suffix_indicator</span> <span class="o">=</span> <span class="n">suffix_indicator</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">max_bytes_per_token</span> <span class="o">=</span> <span class="n">max_bytes_per_token</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span> <span class="o">=</span> <span class="n">unknown_token</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">WordpieceTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">vocab</span><span class="o">.</span><span class="n">c_vocab</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">suffix_indicator</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_bytes_per_token</span><span class="p">,</span>
                                               <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">PythonTokenizer</span><span class="p">:</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Class that applies a user-defined string tokenizer to the input string.</span>

<span class="sd">    Args:</span>
<span class="sd">        tokenizer (Callable): Python function that takes a `str` and returns a list of `str` as tokens.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; def my_tokenizer(line):</span>
<span class="sd">        ...     return line.split()</span>
<span class="sd">        &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=text.PythonTokenizer(my_tokenizer))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@check_python_tokenizer</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">tokenizer</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pyfunc</span> <span class="o">=</span> <span class="n">tokenizer</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">vectorize</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">tokenizer</span><span class="p">(</span><span class="n">x</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">&#39;U&#39;</span><span class="p">),</span> <span class="n">signature</span><span class="o">=</span><span class="s1">&#39;()-&gt;(n)&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">random</span> <span class="o">=</span> <span class="kc">False</span>

    <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">in_array</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">in_array</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">ndarray</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;input should be a NumPy array. Got </span><span class="si">{}</span><span class="s2">.&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">in_array</span><span class="p">)))</span>
        <span class="k">if</span> <span class="n">in_array</span><span class="o">.</span><span class="n">dtype</span><span class="o">.</span><span class="n">type</span> <span class="ow">is</span> <span class="n">np</span><span class="o">.</span><span class="n">bytes_</span><span class="p">:</span>
            <span class="n">in_array</span> <span class="o">=</span> <span class="n">to_str</span><span class="p">(</span><span class="n">in_array</span><span class="p">)</span>
        <span class="k">try</span><span class="p">:</span>
            <span class="n">tokens</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">(</span><span class="n">in_array</span><span class="p">)</span>
        <span class="k">except</span> <span class="ne">Exception</span> <span class="k">as</span> <span class="n">e</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;Error occurred in Pyfunc [&quot;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">pyfunc</span><span class="o">.</span><span class="vm">__name__</span><span class="p">)</span> <span class="o">+</span> <span class="s2">&quot;], error message: &quot;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">e</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">tokens</span>


<span class="k">if</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="o">!=</span> <span class="s1">&#39;windows&#39;</span><span class="p">:</span>
    <span class="n">DE_C_INTER_NORMALIZE_FORM</span> <span class="o">=</span> <span class="p">{</span>
        <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NONE</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">DE_NORMALIZE_NONE</span><span class="p">,</span>
        <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFC</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">DE_NORMALIZE_NFC</span><span class="p">,</span>
        <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFKC</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">DE_NORMALIZE_NFKC</span><span class="p">,</span>
        <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFD</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">DE_NORMALIZE_NFD</span><span class="p">,</span>
        <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFKD</span><span class="p">:</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">DE_NORMALIZE_NFKD</span>
    <span class="p">}</span>


    <span class="k">class</span> <span class="nc">BasicTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Tokenize a scalar tensor of UTF-8 string by specific rules.</span>

<span class="sd">        Note:</span>
<span class="sd">            BasicTokenizer is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            lower_case (bool, optional): If True, apply CaseFold, NormalizeUTF8 with `NFD` mode, RegexReplace operation</span>
<span class="sd">                on input text to fold the text to lower case and strip accents characters. If False, only apply</span>
<span class="sd">                NormalizeUTF8 operation with the specified mode on input text (default=False).</span>
<span class="sd">            keep_whitespace (bool, optional): If True, the whitespace will be kept in output tokens (default=False).</span>
<span class="sd">            normalization_form (NormalizeForm, optional): Used to specify a specific normalize mode</span>
<span class="sd">                (default=NormalizeForm.NONE). This is only effective when `lower_case` is False. It can be any of</span>
<span class="sd">                [NormalizeForm.NONE, NormalizeForm.NFC, NormalizeForm.NFKC, NormalizeForm.NFD, NormalizeForm.NFKD].</span>

<span class="sd">                - NormalizeForm.NONE, do nothing for input string tensor.</span>
<span class="sd">                - NormalizeForm.NFC, normalize with Normalization Form C.</span>
<span class="sd">                - NormalizeForm.NFKC, normalize with Normalization Form KC.</span>
<span class="sd">                - NormalizeForm.NFD, normalize with Normalization Form D.</span>
<span class="sd">                - NormalizeForm.NFKD, normalize with Normalization Form KD.</span>

<span class="sd">            preserve_unused_token (bool, optional): If True, do not split special tokens like</span>
<span class="sd">                &#39;[CLS]&#39;, &#39;[SEP]&#39;, &#39;[UNK]&#39;, &#39;[PAD]&#39;, &#39;[MASK]&#39; (default=True).</span>
<span class="sd">            with_offsets (bool, optional): Whether or not to output offsets of tokens (default=False).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; from mindspore.dataset.text import NormalizeForm</span>
<span class="sd">            &gt;&gt;&gt;</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=False, default output one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.BasicTokenizer(lower_case=False,</span>
<span class="sd">            ...                                    keep_whitespace=False,</span>
<span class="sd">            ...                                    normalization_form=NormalizeForm.NONE,</span>
<span class="sd">            ...                                    preserve_unused_token=True,</span>
<span class="sd">            ...                                    with_offsets=False)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.BasicTokenizer(lower_case=False,</span>
<span class="sd">            ...                                    keep_whitespace=False,</span>
<span class="sd">            ...                                    normalization_form=NormalizeForm.NONE,</span>
<span class="sd">            ...                                    preserve_unused_token=True,</span>
<span class="sd">            ...                                    with_offsets=True)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset_1 = text_file_dataset_1.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">            ...                                               output_columns=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                               &quot;offsets_limit&quot;],</span>
<span class="sd">            ...                                               column_order=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                             &quot;offsets_limit&quot;])</span>

<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_basic_tokenizer</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lower_case</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">keep_whitespace</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">normalization_form</span><span class="o">=</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NONE</span><span class="p">,</span>
                     <span class="n">preserve_unused_token</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">normalization_form</span><span class="p">,</span> <span class="n">NormalizeForm</span><span class="p">):</span>
                <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;Wrong input type for normalization_form, should be enum of &#39;NormalizeForm&#39;.&quot;</span><span class="p">)</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">lower_case</span> <span class="o">=</span> <span class="n">lower_case</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span> <span class="o">=</span> <span class="n">keep_whitespace</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">normalization_form</span> <span class="o">=</span> <span class="n">DE_C_INTER_NORMALIZE_FORM</span><span class="p">[</span><span class="n">normalization_form</span><span class="p">]</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">preserve_unused_token</span> <span class="o">=</span> <span class="n">preserve_unused_token</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">BasicTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">lower_case</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">normalization_form</span><span class="p">,</span>
                                               <span class="bp">self</span><span class="o">.</span><span class="n">preserve_unused_token</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>


<div class="viewcode-block" id="BertTokenizer"><a class="viewcode-back" href="../../../../api_python/dataset_text/mindspore.dataset.text.transforms.BertTokenizer.html#mindspore.dataset.text.transforms.BertTokenizer">[docs]</a>    <span class="k">class</span> <span class="nc">BertTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Tokenizer used for Bert text process.</span>

<span class="sd">        Note:</span>
<span class="sd">            BertTokenizer is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            vocab (Vocab): A vocabulary object.</span>
<span class="sd">            suffix_indicator (str, optional): Used to show that the subword is the last part of a word (default=&#39;##&#39;).</span>
<span class="sd">            max_bytes_per_token (int, optional): Tokens exceeding this length will not be further</span>
<span class="sd">                split (default=100).</span>
<span class="sd">            unknown_token (str, optional): When an unknown token is found, return the token directly if `unknown_token`</span>
<span class="sd">                is an empty string, else return `unknown_token` instead (default=&#39;[UNK]&#39;).</span>
<span class="sd">            lower_case (bool, optional): If True, apply CaseFold, NormalizeUTF8 with `NFD` mode, RegexReplace operation</span>
<span class="sd">                on input text to fold the text to lower case and strip accented characters. If False, only apply</span>
<span class="sd">                NormalizeUTF8 operation with the specified mode on input text (default=False).</span>
<span class="sd">            keep_whitespace (bool, optional): If True, the whitespace will be kept in output tokens (default=False).</span>
<span class="sd">            normalization_form (NormalizeForm, optional): This parameter is used to specify a specific normalize mode,</span>
<span class="sd">                only effective when `lower_case` is False. See NormalizeUTF8 for details (default=NormalizeForm.NONE).</span>
<span class="sd">            preserve_unused_token (bool, optional): If True, do not split special tokens like</span>
<span class="sd">                &#39;[CLS]&#39;, &#39;[SEP]&#39;, &#39;[UNK]&#39;, &#39;[PAD]&#39;, &#39;[MASK]&#39; (default=True).</span>
<span class="sd">            with_offsets (bool, optional): Whether or not to output offsets of tokens (default=False).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; from mindspore.dataset.text import NormalizeForm</span>
<span class="sd">            &gt;&gt;&gt;</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=False, default output one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">            &gt;&gt;&gt; vocab_list = [&quot;床&quot;, &quot;前&quot;, &quot;明&quot;, &quot;月&quot;, &quot;光&quot;, &quot;疑&quot;, &quot;是&quot;, &quot;地&quot;, &quot;上&quot;, &quot;霜&quot;, &quot;举&quot;, &quot;头&quot;, &quot;望&quot;, &quot;低&quot;,</span>
<span class="sd">            ...               &quot;思&quot;, &quot;故&quot;, &quot;乡&quot;,&quot;繁&quot;, &quot;體&quot;, &quot;字&quot;, &quot;嘿&quot;, &quot;哈&quot;, &quot;大&quot;, &quot;笑&quot;, &quot;嘻&quot;, &quot;i&quot;, &quot;am&quot;, &quot;mak&quot;,</span>
<span class="sd">            ...               &quot;make&quot;, &quot;small&quot;, &quot;mistake&quot;, &quot;##s&quot;, &quot;during&quot;, &quot;work&quot;, &quot;##ing&quot;, &quot;hour&quot;, &quot;😀&quot;, &quot;😃&quot;,</span>
<span class="sd">            ...               &quot;😄&quot;, &quot;😁&quot;, &quot;+&quot;, &quot;/&quot;, &quot;-&quot;, &quot;=&quot;, &quot;12&quot;, &quot;28&quot;, &quot;40&quot;, &quot;16&quot;, &quot; &quot;, &quot;I&quot;, &quot;[CLS]&quot;, &quot;[SEP]&quot;,</span>
<span class="sd">            ...               &quot;[UNK]&quot;, &quot;[PAD]&quot;, &quot;[MASK]&quot;, &quot;[unused1]&quot;, &quot;[unused10]&quot;]</span>
<span class="sd">            &gt;&gt;&gt; vocab = text.Vocab.from_list(vocab_list)</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.BertTokenizer(vocab=vocab, suffix_indicator=&#39;##&#39;, max_bytes_per_token=100,</span>
<span class="sd">            ...                                   unknown_token=&#39;[UNK]&#39;, lower_case=False, keep_whitespace=False,</span>
<span class="sd">            ...                                   normalization_form=NormalizeForm.NONE, preserve_unused_token=True,</span>
<span class="sd">            ...                                   with_offsets=False)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.BertTokenizer(vocab=vocab, suffix_indicator=&#39;##&#39;, max_bytes_per_token=100,</span>
<span class="sd">            ...                                   unknown_token=&#39;[UNK]&#39;, lower_case=False, keep_whitespace=False,</span>
<span class="sd">            ...                                   normalization_form=NormalizeForm.NONE, preserve_unused_token=True,</span>
<span class="sd">            ...                                   with_offsets=True)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset_1 = text_file_dataset_1.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">            ...                                               output_columns=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                               &quot;offsets_limit&quot;],</span>
<span class="sd">            ...                                               column_order=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                             &quot;offsets_limit&quot;])</span>

<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_bert_tokenizer</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">vocab</span><span class="p">,</span> <span class="n">suffix_indicator</span><span class="o">=</span><span class="s1">&#39;##&#39;</span><span class="p">,</span> <span class="n">max_bytes_per_token</span><span class="o">=</span><span class="mi">100</span><span class="p">,</span> <span class="n">unknown_token</span><span class="o">=</span><span class="s1">&#39;[UNK]&#39;</span><span class="p">,</span>
                     <span class="n">lower_case</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">keep_whitespace</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">normalization_form</span><span class="o">=</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NONE</span><span class="p">,</span>
                     <span class="n">preserve_unused_token</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">normalization_form</span><span class="p">,</span> <span class="n">NormalizeForm</span><span class="p">):</span>
                <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;Wrong input type for normalization_form, should be enum of &#39;NormalizeForm&#39;.&quot;</span><span class="p">)</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">vocab</span> <span class="o">=</span> <span class="n">vocab</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">suffix_indicator</span> <span class="o">=</span> <span class="n">suffix_indicator</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">max_bytes_per_token</span> <span class="o">=</span> <span class="n">max_bytes_per_token</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span> <span class="o">=</span> <span class="n">unknown_token</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">lower_case</span> <span class="o">=</span> <span class="n">lower_case</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span> <span class="o">=</span> <span class="n">keep_whitespace</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">normalization_form</span> <span class="o">=</span> <span class="n">DE_C_INTER_NORMALIZE_FORM</span><span class="p">[</span><span class="n">normalization_form</span><span class="p">]</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">preserve_unused_token</span> <span class="o">=</span> <span class="n">preserve_unused_token</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">BertTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">vocab</span><span class="o">.</span><span class="n">c_vocab</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">suffix_indicator</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_bytes_per_token</span><span class="p">,</span>
                                              <span class="bp">self</span><span class="o">.</span><span class="n">unknown_token</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_case</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span><span class="p">,</span>
                                              <span class="bp">self</span><span class="o">.</span><span class="n">normalization_form</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">preserve_unused_token</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span></div>


    <span class="k">class</span> <span class="nc">CaseFold</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Apply case fold operation on UTF-8 string tensor, which is aggressive and can convert more characters into</span>
<span class="sd">        lower case.</span>

<span class="sd">        Note:</span>
<span class="sd">            CaseFold is not supported on Windows platform yet.</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; case_op = text.CaseFold()</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=case_op)</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">CaseFoldOperation</span><span class="p">()</span>


    <span class="k">class</span> <span class="nc">NormalizeUTF8</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Apply normalize operation on UTF-8 string tensor.</span>

<span class="sd">        Note:</span>
<span class="sd">            NormalizeUTF8 is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            normalize_form (NormalizeForm, optional): Valid values can be [NormalizeForm.NONE, NormalizeForm.NFC,</span>
<span class="sd">                NormalizeForm.NFKC, NormalizeForm.NFD, NormalizeForm.NFKD], any of the four Unicode</span>
<span class="sd">                normalized forms (default=NormalizeForm.NFKC).</span>
<span class="sd">                See http://unicode.org/reports/tr15/ for details.</span>

<span class="sd">                - NormalizeForm.NONE, do nothing for input string tensor.</span>
<span class="sd">                - NormalizeForm.NFC, normalize with Normalization Form C.</span>
<span class="sd">                - NormalizeForm.NFKC, normalize with Normalization Form KC.</span>
<span class="sd">                - NormalizeForm.NFD, normalize with Normalization Form D.</span>
<span class="sd">                - NormalizeForm.NFKD, normalize with Normalization Form KD.</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; from mindspore.dataset.text import NormalizeForm</span>
<span class="sd">            &gt;&gt;&gt; normalize_op = text.NormalizeUTF8(normalize_form=NormalizeForm.NFC)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=normalize_op)</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">normalize_form</span><span class="o">=</span><span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFKC</span><span class="p">):</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">normalize_form</span><span class="p">,</span> <span class="n">NormalizeForm</span><span class="p">):</span>
                <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;Wrong input type for normalization_form, should be enum of &#39;NormalizeForm&#39;.&quot;</span><span class="p">)</span>

            <span class="n">normalize_form</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">normalize_form</span><span class="p">,</span> <span class="n">NormalizeForm</span><span class="o">.</span><span class="n">NFKC</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">normalize_form</span> <span class="o">=</span> <span class="n">DE_C_INTER_NORMALIZE_FORM</span><span class="p">[</span><span class="n">normalize_form</span><span class="p">]</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">NormalizeUTF8Operation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">normalize_form</span><span class="p">)</span>


    <span class="k">class</span> <span class="nc">RegexReplace</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Replace a part of UTF-8 string tensor with given text according to regular expressions.</span>

<span class="sd">        See https://unicode-org.github.io/icu/userguide/strings/regexp.html for supported regex pattern.</span>

<span class="sd">        Note:</span>
<span class="sd">            RegexReplace is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            pattern (str): The regex expression pattern.</span>
<span class="sd">            replace (str): The string to replace the matched element.</span>
<span class="sd">            replace_all (bool, optional): If False, only replace the first matched element;</span>
<span class="sd">                if True, replace all matched elements (default=True).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; pattern = &#39;Canada&#39;</span>
<span class="sd">            &gt;&gt;&gt; replace = &#39;China&#39;</span>
<span class="sd">            &gt;&gt;&gt; replace_op = text.RegexReplace(pattern, replace)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=replace_op)</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_regex_replace</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">pattern</span><span class="p">,</span> <span class="n">replace</span><span class="p">,</span> <span class="n">replace_all</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">pattern</span> <span class="o">=</span> <span class="n">pattern</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">replace</span> <span class="o">=</span> <span class="n">replace</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">replace_all</span> <span class="o">=</span> <span class="n">replace_all</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">RegexReplaceOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">pattern</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">replace</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">replace_all</span><span class="p">)</span>


    <span class="k">class</span> <span class="nc">RegexTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Tokenize a scalar tensor of UTF-8 string by regex expression pattern.</span>

<span class="sd">        See https://unicode-org.github.io/icu/userguide/strings/regexp.html for supported regex pattern.</span>

<span class="sd">        Note:</span>
<span class="sd">            RegexTokenizer is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            delim_pattern (str): The pattern of regex delimiters.</span>
<span class="sd">                The original string will be split by matched elements.</span>
<span class="sd">            keep_delim_pattern (str, optional): The string matched by &#39;delim_pattern&#39; can be kept as a token</span>
<span class="sd">                if it can be matched by &#39;keep_delim_pattern&#39;. The default value is an empty str</span>
<span class="sd">                which means that delimiters will not be kept as an output token (default=&#39;&#39;).</span>
<span class="sd">            with_offsets (bool, optional): Whether or not to output offsets of tokens (default=False).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=False, default output is one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">            &gt;&gt;&gt; delim_pattern = r&quot;[ |,]&quot;</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.RegexTokenizer(delim_pattern, with_offsets=False)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.RegexTokenizer(delim_pattern, with_offsets=True)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset_1 = text_file_dataset_1.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">            ...                                               output_columns=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                               &quot;offsets_limit&quot;],</span>
<span class="sd">            ...                                               column_order=[&quot;token&quot;, &quot;offsets_start&quot;,</span>
<span class="sd">            ...                                                             &quot;offsets_limit&quot;])</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_regex_tokenizer</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">delim_pattern</span><span class="p">,</span> <span class="n">keep_delim_pattern</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">delim_pattern</span> <span class="o">=</span> <span class="n">delim_pattern</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">keep_delim_pattern</span> <span class="o">=</span> <span class="n">keep_delim_pattern</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">RegexTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">delim_pattern</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">keep_delim_pattern</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>


    <span class="k">class</span> <span class="nc">UnicodeScriptTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Tokenize a scalar tensor of UTF-8 string based on Unicode script boundaries.</span>

<span class="sd">        Note:</span>
<span class="sd">            UnicodeScriptTokenizer is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            keep_whitespace (bool, optional): Whether or not to emit whitespace tokens (default=False).</span>
<span class="sd">            with_offsets (bool, optional): Whether or not to output offsets of tokens (default=False).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=False, default output is one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.UnicodeScriptTokenizer(keep_whitespace=True, with_offsets=False)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str],</span>
<span class="sd">            &gt;&gt;&gt; #                                                  [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">            &gt;&gt;&gt; #                                                  [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.UnicodeScriptTokenizer(keep_whitespace=True, with_offsets=True)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">            ...                                           output_columns=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;],</span>
<span class="sd">            ...                                           column_order=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;])</span>

<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_unicode_script_tokenizer</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">keep_whitespace</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
            <span class="n">keep_whitespace</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">keep_whitespace</span><span class="p">,</span> <span class="kc">False</span><span class="p">)</span>
            <span class="n">with_offsets</span> <span class="o">=</span> <span class="n">replace_none</span><span class="p">(</span><span class="n">with_offsets</span><span class="p">,</span> <span class="kc">False</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span> <span class="o">=</span> <span class="n">keep_whitespace</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">UnicodeScriptTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">keep_whitespace</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>


    <span class="k">class</span> <span class="nc">WhitespaceTokenizer</span><span class="p">(</span><span class="n">TextTensorOperation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Tokenize a scalar tensor of UTF-8 string on ICU4C defined whitespaces, such as: &#39; &#39;, &#39;\\\\t&#39;, &#39;\\\\r&#39;, &#39;\\\\n&#39;.</span>

<span class="sd">        Note:</span>
<span class="sd">            WhitespaceTokenizer is not supported on Windows platform yet.</span>

<span class="sd">        Args:</span>
<span class="sd">            with_offsets (bool, optional): Whether or not to output offsets of tokens (default=False).</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=False, default output is one column {[&quot;text&quot;, dtype=str]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.WhitespaceTokenizer(with_offsets=False)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op)</span>
<span class="sd">            &gt;&gt;&gt; # If with_offsets=True, then output three columns {[&quot;token&quot;, dtype=str],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_start&quot;, dtype=uint32],</span>
<span class="sd">            &gt;&gt;&gt; #                                                   [&quot;offsets_limit&quot;, dtype=uint32]}</span>
<span class="sd">            &gt;&gt;&gt; tokenizer_op = text.WhitespaceTokenizer(with_offsets=True)</span>
<span class="sd">            &gt;&gt;&gt; text_file_dataset = text_file_dataset.map(operations=tokenizer_op, input_columns=[&quot;text&quot;],</span>
<span class="sd">            ...                                           output_columns=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;],</span>
<span class="sd">            ...                                           column_order=[&quot;token&quot;, &quot;offsets_start&quot;, &quot;offsets_limit&quot;])</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="nd">@check_with_offsets</span>
        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">with_offsets</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span> <span class="o">=</span> <span class="n">with_offsets</span>

        <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">cde</span><span class="o">.</span><span class="n">WhitespaceTokenizerOperation</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">with_offsets</span><span class="p">)</span>
</pre></div>

           </div>
           
          </div>
          <footer>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>