<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en" data-whc_version="25.0">
    <head><link rel="shortcut icon" href="../../../oxygen-webhelp/template/images/favicon.png"/><link rel="icon" href="../../../oxygen-webhelp/template/images/favicon.png"/><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><meta name="copyright" content="(C) Copyright 2024"/><meta name="generator" content="DITA-OT"/><meta name="description" content="The Drift Synchronization Solution for Hive detects drift in incoming data and updates corresponding Hive tables. Previously known as the Hive Drift Solution, the Drift Synchronization Solution for ..."/><meta name="prodname" content="Data Collector"/><meta name="version" content="3"/><meta name="release" content="16"/><meta name="modification" content="0"/>        
      <title>Drift Synchronization Solution for Hive</title><!--  Generated with Oxygen version 25.1, build number 2023042410.  --><meta name="wh-path2root" content="../../../"/><meta name="wh-toc-id" content="concept_phk_bdf_2w-d16893e60616"/><meta name="wh-source-relpath" content="datacollector/UserGuide/Solutions/HiveDrift-Overview.dita"/><meta name="wh-out-relpath" content="datacollector/UserGuide/Solutions/HiveDrift-Overview.html"/>

    <link rel="stylesheet" type="text/css" href="../../../oxygen-webhelp/app/commons.css?buildId=2023042410"/>
    <link rel="stylesheet" type="text/css" href="../../../oxygen-webhelp/app/topic.css?buildId=2023042410"/>

    <script src="../../../oxygen-webhelp/app/options/properties.js?buildId=20240802104629"></script>
    <script src="../../../oxygen-webhelp/app/localization/strings.js?buildId=2023042410"></script>
    <script src="../../../oxygen-webhelp/app/search/index/keywords.js?buildId=20240802104629"></script>
    <script defer="defer" src="../../../oxygen-webhelp/app/commons.js?buildId=2023042410"></script>
    <script defer="defer" src="../../../oxygen-webhelp/app/topic.js?buildId=2023042410"></script>
<link rel="stylesheet" type="text/css" href="../../../oxygen-webhelp/template/light.css?buildId=2023042410"/><link rel="stylesheet" type="text/css" href="../../../skin.css"/></head>

    <body class="wh_topic_page frmBody">
        
        
        

        
<!-- Page header (generated by Oxygen WebHelp / DITA-OT): publication title plus the
     index-terms link. Class names are CSS/JS hooks used by commons.js - do not rename. -->
<nav class="navbar navbar-default wh_header" data-whc_version="25.0">
    <div class="container-fluid">
        <div class="wh_header_flex_container navbar-nav navbar-expand-md navbar-dark">
            <div class="wh_logo_and_publication_title_container">
                <div class="wh_logo_and_publication_title">
                    
                    <!--
                            This component will be generated when the next parameters are specified in the transformation scenario:
                            'webhelp.logo.image' and 'webhelp.logo.image.target.url'.
                            See: http://oxygenxml.com/doc/versions/17.1/ug-editor/#topics/dita_webhelp_output.html.
                    -->
                    
                    <!-- Publication title links back to the help-system landing page. -->
                    <div class=" wh_publication_title "><a href="../../../index.html"><span class="booktitle">  <span class="ph mainbooktitle"><span class="ph">Data Collector</span> User Guide</span>  </span></a></div>
                    
                </div>
                
                <!-- The menu button for mobile devices is copied in the output only when the 'webhelp.show.top.menu' parameter is set to 'yes' -->
                
            </div>

            <div class="wh_top_menu_and_indexterms_link collapse navbar-collapse">
                
                
                <!-- Link to the generated index-terms page; aria-label expands the short visible text. -->
                <div class=" wh_indexterms_link "><a href="../../../indexTerms.html" title="Index" aria-label="Go to index terms page"><span>Index</span></a></div>
                
            </div>
        </div>
    </div>
</nav>

        <!-- Quick-search box for the topic page. The wrapper <div> is presentational only:
             the nested <form role="search"> already exposes the search landmark, so the
             former role="form" on this <div> was removed - an unlabeled duplicate landmark
             is announced as an anonymous region by screen readers and adds only noise. -->
        <div class=" wh_search_input navbar-form wh_topic_page_search search ">


<!-- Submits a GET request to search.html with the query in the 'searchQuery' parameter.
     The input is labelled via aria-label (the visual design has no room for a <label>). -->
<form id="searchForm" method="get" role="search" action="../../../search.html"><div><input type="search" placeholder="Search" class="wh_search_textfield" id="textToSearch" name="searchQuery" aria-label="Search query" required="required"/><button type="submit" class="wh_search_button" aria-label="Search"><span class="search_input_text">Search</span></button></div></form>

</div>
        
        <div class="container-fluid">
            <div class="row">

                <nav class="wh_tools d-print-none">
                    
<div data-tooltip-position="bottom" class=" wh_breadcrumb "><ol class="d-print-none"><li><span class="home"><a href="../../../index.html"><span>Home</span></a></span></li><li><div class="topicref" data-id="concept_zq5_pb4_flb"><div class="title"><a href="../../../datacollector/UserGuide/Solutions/Solutions-title.html">Solutions</a></div></div></li><li class="active"><div class="topicref" data-id="concept_phk_bdf_2w"><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_phk_bdf_2w">Drift Synchronization Solution for Hive</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li></ol></div>



                    <!-- Topic toolbar: highlight toggle, expand/collapse-all, previous/next topic
                         links, and print. The icon-only buttons get their glyphs from CSS and their
                         accessible names from aria-label; commons.js binds handlers by class name.
                         Fixes: 'onClick' lowercased to 'onclick' (this document declares the XHTML
                         namespace, where attribute names are case-sensitive and must be lowercase),
                         and explicit type="button" added so no button carries submit semantics. -->
                    <div class="wh_right_tools "><button type="button" class="wh_hide_highlight" aria-label="Toggle search highlights" title="Toggle search highlights"></button><button type="button" class="webhelp_expand_collapse_sections" data-next-state="collapsed" aria-label="Collapse sections" title="Collapse sections"></button><div class=" wh_navigation_links "><span id="topic_navigation_links" class="navheader">
  
<span class="navprev"><a class="- topic/link link" href="../../../datacollector/UserGuide/Solutions/DeltaLake.html#concept_a5b_wvk_ckb" title="Loading Data into Databricks Delta Lake" aria-label="Previous topic: Loading Data into Databricks Delta Lake" rel="prev"></a></span>  
<span class="navnext"><a class="- topic/link link" href="../../../datacollector/UserGuide/Solutions/JDBC_DriftSyncSolution.html#concept_ljq_knr_4cb" title="Drift Synchronization Solution for PostgreSQL" aria-label="Next topic: Drift Synchronization Solution for PostgreSQL" rel="next"></a></span>  </span></div>
<!--External resource link-->
<div class=" wh_print_link print d-none d-md-inline-block "><button type="button" onclick="window.print()" title="Print this page" aria-label="Print this page"></button></div>
                        
                        
                        
                        
                    </div>
                </nav>
            </div>

            

<div class="wh_content_area">
                <div class="row">
                    


                        <nav role="navigation" id="wh_publication_toc" class="col-lg-3 col-md-3 col-sm-12 d-md-block d-none d-print-none">
<div id="wh_publication_toc_content">


                            <div class=" wh_publication_toc " data-tooltip-position="right"><span class="expand-button-action-labels"><span id="button-expand-action" role="button" aria-label="Expand"></span><span id="button-collapse-action" role="button" aria-label="Collapse"></span><span id="button-pending-action" role="button" aria-label="Pending"></span></span><ul role="tree" aria-label="Table of Contents"><li role="treeitem" aria-expanded="false"><div data-tocid="concept_htw_ghg_jq-d16893e53" class="topicref" data-id="concept_htw_ghg_jq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_htw_ghg_jq-d16893e53-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Getting_Started/GettingStarted_Title.html#concept_htw_ghg_jq" id="concept_htw_ghg_jq-d16893e53-link">Getting Started</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_l2v_nlp_mpb-d16893e331" class="topicref" data-id="concept_l2v_nlp_mpb" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_l2v_nlp_mpb-d16893e331-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/ReleaseNotes/ReleaseNotes.html#concept_l2v_nlp_mpb" id="concept_l2v_nlp_mpb-d16893e331-link">Release Notes</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_l4q_flb_kr-d16893e2582" class="topicref" data-id="concept_l4q_flb_kr" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_l4q_flb_kr-d16893e2582-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Installation/Install_title.html" id="concept_l4q_flb_kr-d16893e2582-link">Installation</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_ylh_yyz_ky-d16893e3984" class="topicref" data-id="concept_ylh_yyz_ky" 
data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_ylh_yyz_ky-d16893e3984-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Configuration/Config_title.html" id="concept_ylh_yyz_ky-d16893e3984-link">Configuration</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_ejk_f1f_5v-d16893e7058" class="topicref" data-id="concept_ejk_f1f_5v" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_ejk_f1f_5v-d16893e7058-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Upgrade/Upgrade_title.html" id="concept_ejk_f1f_5v-d16893e7058-link">Upgrade</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_qsw_cjy_bt-d16893e10103" class="topicref" data-id="concept_qsw_cjy_bt" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_qsw_cjy_bt-d16893e10103-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Pipeline_Design/PipelineDesign_title.html" id="concept_qsw_cjy_bt-d16893e10103-link">Pipeline Concepts and Design</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_qn1_wn4_kq-d16893e11199" class="topicref" data-id="concept_qn1_wn4_kq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_qn1_wn4_kq-d16893e11199-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Pipeline_Configuration/PipelineConfiguration_title.html" id="concept_qn1_wn4_kq-d16893e11199-link">Pipeline Configuration</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_hdr_gyw_41b-d16893e13057" class="topicref" data-id="concept_hdr_gyw_41b" data-state="not-ready"><span role="button" tabindex="0" 
aria-labelledby="button-expand-action concept_hdr_gyw_41b-d16893e13057-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Data_Formats/DataFormats-Title.html" id="concept_hdr_gyw_41b-d16893e13057-link">Data Formats</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_yjl_nc5_jq-d16893e14164" class="topicref" data-id="concept_yjl_nc5_jq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_yjl_nc5_jq-d16893e14164-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Origins/Origins_title.html" id="concept_yjl_nc5_jq-d16893e14164-link">Origins</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_yjl_nc5_jq-d16893e35197" class="topicref" data-id="concept_yjl_nc5_jq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_yjl_nc5_jq-d16893e35197-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Processors/Processors_title.html" id="concept_yjl_nc5_jq-d16893e35197-link">Processors</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_agj_cfj_br-d16893e44037" class="topicref" data-id="concept_agj_cfj_br" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_agj_cfj_br-d16893e44037-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Destinations/Destinations-title.html" id="concept_agj_cfj_br-d16893e44037-link">Destinations</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_umc_1lk_fx-d16893e56072" class="topicref" data-id="concept_umc_1lk_fx" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action 
concept_umc_1lk_fx-d16893e56072-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Executors/Executors-title.html" id="concept_umc_1lk_fx-d16893e56072-link">Executors</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_xxd_f5r_kx-d16893e59696" class="topicref" data-id="concept_xxd_f5r_kx" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_xxd_f5r_kx-d16893e59696-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Event_Handling/EventFramework-Title.html#concept_xxd_f5r_kx" id="concept_xxd_f5r_kx-d16893e59696-link">Dataflow Triggers</a></div></div></li><li role="treeitem" aria-expanded="true"><div data-tocid="concept_zq5_pb4_flb-d16893e60134" class="topicref" data-id="concept_zq5_pb4_flb" data-state="expanded"><span role="button" tabindex="0" aria-labelledby="button-collapse-action concept_zq5_pb4_flb-d16893e60134-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/Solutions-title.html" id="concept_zq5_pb4_flb-d16893e60134-link">Solutions</a></div></div><ul role="group" class="navbar-nav nav-list"><li role="treeitem"><div data-tocid="concept_aw1_p1q_plb-d16893e60156" class="topicref" data-id="concept_aw1_p1q_plb" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/Overview.html#concept_aw1_p1q_plb" id="concept_aw1_p1q_plb-d16893e60156-link">Solutions Overview </a></div></div></li><li role="treeitem"><div data-tocid="concept_jkm_rnz_kx-d16893e60178" class="topicref" data-id="concept_jkm_rnz_kx" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/Parquet.html#concept_jkm_rnz_kx" id="concept_jkm_rnz_kx-d16893e60178-link">Converting Data to the Parquet Data Format</a><div 
class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem"><div data-tocid="concept_szz_xwm_lx-d16893e60202" class="topicref" data-id="concept_szz_xwm_lx" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/Impala.html#concept_szz_xwm_lx" id="concept_szz_xwm_lx-d16893e60202-link">Automating Impala Metadata Updates for Drift Synchronization for Hive</a></div></div></li><li role="treeitem"><div data-tocid="concept_d1q_xl4_lx-d16893e60224" class="topicref" data-id="concept_d1q_xl4_lx" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/FileManagement.html#concept_d1q_xl4_lx" id="concept_d1q_xl4_lx-d16893e60224-link">Managing Output Files</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem"><div data-tocid="concept_kff_ykv_lz-d16893e60248" class="topicref" data-id="concept_kff_ykv_lz" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/StopPipeline.html#concept_kff_ykv_lz" id="concept_kff_ykv_lz-d16893e60248-link">Stopping a Pipeline After Processing All Available Data</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem"><div data-tocid="concept_vrh_jrs_bbb-d16893e60272" class="topicref" data-id="concept_vrh_jrs_bbb" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/SqoopReplacement.html#concept_vrh_jrs_bbb" id="concept_vrh_jrs_bbb-d16893e60272-link">Offloading Data from Relational Sources to Hadoop</a></div></div></li><li role="treeitem"><div data-tocid="concept_t2t_lp5_xz-d16893e60294" class="topicref" data-id="concept_t2t_lp5_xz" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a 
href="../../../datacollector/UserGuide/Solutions/SendEmail.html#concept_t2t_lp5_xz" id="concept_t2t_lp5_xz-d16893e60294-link">Sending Email During Pipeline Processing</a></div></div></li><li role="treeitem"><div data-tocid="concept_ocb_nnl_px-d16893e60316" class="topicref" data-id="concept_ocb_nnl_px" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/EventStorage.html#concept_ocb_nnl_px" id="concept_ocb_nnl_px-d16893e60316-link">Preserving an Audit Trail of Events</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_a5b_wvk_ckb-d16893e60338" class="topicref" data-id="concept_a5b_wvk_ckb" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_a5b_wvk_ckb-d16893e60338-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/DeltaLake.html#concept_a5b_wvk_ckb" id="concept_a5b_wvk_ckb-d16893e60338-link">Loading Data into Databricks Delta Lake</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem" aria-expanded="true" class="active"><div data-tocid="concept_phk_bdf_2w-d16893e60616" class="topicref" data-id="concept_phk_bdf_2w" data-state="expanded"><span role="button" tabindex="0" aria-labelledby="button-collapse-action concept_phk_bdf_2w-d16893e60616-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_phk_bdf_2w" id="concept_phk_bdf_2w-d16893e60616-link">Drift Synchronization Solution for Hive</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div><ul role="group" class="navbar-nav nav-list"><li role="treeitem" aria-expanded="false"><div data-tocid="concept_qtt_bzw_vz-d16893e60640" class="topicref" data-id="concept_qtt_bzw_vz" data-state="not-ready"><span role="button" tabindex="0" 
aria-labelledby="button-expand-action concept_qtt_bzw_vz-d16893e60640-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_qtt_bzw_vz" id="concept_qtt_bzw_vz-d16893e60640-link">General Processing</a></div></div></li><li role="treeitem"><div data-tocid="concept_zzs_fkg_2w-d16893e60728" class="topicref" data-id="concept_zzs_fkg_2w" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_zzs_fkg_2w" id="concept_zzs_fkg_2w-d16893e60728-link">Basic Avro Implementation</a></div></div></li><li role="treeitem"><div data-tocid="concept_fkm_mzw_vz-d16893e60750" class="topicref" data-id="concept_fkm_mzw_vz" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_fkm_mzw_vz" id="concept_fkm_mzw_vz-d16893e60750-link">Basic Parquet Implementation</a></div></div></li><li role="treeitem"><div data-tocid="concept_y5w_dj3_fw-d16893e60772" class="topicref" data-id="concept_y5w_dj3_fw" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_y5w_dj3_fw" id="concept_y5w_dj3_fw-d16893e60772-link">Implementation Steps</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_a1w_kkn_fw-d16893e60794" class="topicref" data-id="concept_a1w_kkn_fw" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_a1w_kkn_fw-d16893e60794-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_a1w_kkn_fw" id="concept_a1w_kkn_fw-d16893e60794-link">Avro Case Study</a></div></div></li><li role="treeitem" aria-expanded="false"><div 
data-tocid="concept_vl3_v2f_zz-d16893e60910" class="topicref" data-id="concept_vl3_v2f_zz" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_vl3_v2f_zz-d16893e60910-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_vl3_v2f_zz" id="concept_vl3_v2f_zz-d16893e60910-link">Parquet Case Study</a></div></div></li><li role="treeitem"><div data-tocid="concept_ry2_qkm_hw-d16893e61067" class="topicref" data-id="concept_ry2_qkm_hw" data-state="leaf"><span role="button" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/HiveDrift-Overview.html#concept_ry2_qkm_hw" id="concept_ry2_qkm_hw-d16893e61067-link">Hive Data Types</a></div></div></li></ul></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_ljq_knr_4cb-d16893e61090" class="topicref" data-id="concept_ljq_knr_4cb" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_ljq_knr_4cb-d16893e61090-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Solutions/JDBC_DriftSyncSolution.html#concept_ljq_knr_4cb" id="concept_ljq_knr_4cb-d16893e61090-link"><span class="ph">Drift Synchronization Solution for PostgreSQL</span></a></div></div></li></ul></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_ugp_kwf_xw-d16893e61337" class="topicref" data-id="concept_ugp_kwf_xw" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_ugp_kwf_xw-d16893e61337-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/DPM/DPM_title.html" id="concept_ugp_kwf_xw-d16893e61337-link">StreamSets Control Hub</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_fyf_gkq_4bb-d16893e62693" class="topicref" 
data-id="concept_fyf_gkq_4bb" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_fyf_gkq_4bb-d16893e62693-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Edge_Mode/EdgePipelines_title.html" id="concept_fyf_gkq_4bb-d16893e62693-link"><span class="ph">StreamSets Data Collector Edge</span></a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_wwq_gxc_py-d16893e63980" class="topicref" data-id="concept_wwq_gxc_py" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_wwq_gxc_py-d16893e63980-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Multithreaded_Pipelines/MultithreadedPipelines.html#concept_wwq_gxc_py" id="concept_wwq_gxc_py-d16893e63980-link">Multithreaded Pipelines</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_gzw_tdm_p2b-d16893e64187" class="topicref" data-id="concept_gzw_tdm_p2b" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_gzw_tdm_p2b-d16893e64187-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Microservice/Microservice_Title.html#concept_gzw_tdm_p2b" id="concept_gzw_tdm_p2b-d16893e64187-link">Microservice Pipelines</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="Orchestrators_Title-d16893e64348" class="topicref" data-id="Orchestrators_Title" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action Orchestrators_Title-d16893e64348-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Orchestration_Pipelines/OrchestrationPipelines_Title.html#Orchestrators_Title" id="Orchestrators_Title-d16893e64348-link">Orchestration Pipelines</a></div></div></li><li role="treeitem" 
aria-expanded="false"><div data-tocid="concept_wr1_ktz_bt-d16893e64489" class="topicref" data-id="concept_wr1_ktz_bt" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_wr1_ktz_bt-d16893e64489-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/RPC_Pipelines/SDC_RPCpipelines_title.html#concept_wr1_ktz_bt" id="concept_wr1_ktz_bt-d16893e64489-link">SDC RPC Pipelines</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_fpz_5r4_vs-d16893e64679" class="topicref" data-id="concept_fpz_5r4_vs" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_fpz_5r4_vs-d16893e64679-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Cluster_Mode/ClusterPipelines_title.html" id="concept_fpz_5r4_vs-d16893e64679-link">Cluster Pipelines</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_jjk_23z_sq-d16893e65172" class="topicref" data-id="concept_jjk_23z_sq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_jjk_23z_sq-d16893e65172-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Data_Preview/DataPreview_Title.html#concept_jjk_23z_sq" id="concept_jjk_23z_sq-d16893e65172-link">Data Preview</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_pgk_brx_rr-d16893e65458" class="topicref" data-id="concept_pgk_brx_rr" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_pgk_brx_rr-d16893e65458-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Alerts/RulesAlerts_title.html#concept_pgk_brx_rr" id="concept_pgk_brx_rr-d16893e65458-link">Rules and Alerts</a></div></div></li><li role="treeitem" aria-expanded="false"><div 
data-tocid="concept_asx_fdz_sq-d16893e65960" class="topicref" data-id="concept_asx_fdz_sq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_asx_fdz_sq-d16893e65960-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Pipeline_Monitoring/PipelineMonitoring_title.html#concept_asx_fdz_sq" id="concept_asx_fdz_sq-d16893e65960-link">Pipeline Monitoring</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_o3l_dtr_5q-d16893e66304" class="topicref" data-id="concept_o3l_dtr_5q" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_o3l_dtr_5q-d16893e66304-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Pipeline_Maintenance/PipelineMaintenance_title.html#concept_o3l_dtr_5q" id="concept_o3l_dtr_5q-d16893e66304-link">Pipeline Maintenance</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_yms_ftm_sq-d16893e66768" class="topicref" data-id="concept_yms_ftm_sq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_yms_ftm_sq-d16893e66768-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Administration/Administration_title.html#concept_yms_ftm_sq" id="concept_yms_ftm_sq-d16893e66768-link">Administration</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_nls_w1r_ks-d16893e67508" class="topicref" data-id="concept_nls_w1r_ks" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_nls_w1r_ks-d16893e67508-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Tutorial/Tutorial-title.html" id="concept_nls_w1r_ks-d16893e67508-link">Tutorial</a></div></div></li><li role="treeitem" aria-expanded="false"><div 
data-tocid="concept_sh3_frm_tq-d16893e68001" class="topicref" data-id="concept_sh3_frm_tq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_sh3_frm_tq-d16893e68001-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Troubleshooting/Troubleshooting_title.html#concept_sh3_frm_tq" id="concept_sh3_frm_tq-d16893e68001-link">Troubleshooting</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_xbx_rs1_tq-d16893e68798" class="topicref" data-id="concept_xbx_rs1_tq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_xbx_rs1_tq-d16893e68798-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Glossary/Glossary_title.html#concept_xbx_rs1_tq" id="concept_xbx_rs1_tq-d16893e68798-link">Glossary</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_jn1_nzb_kv-d16893e68843" class="topicref" data-id="concept_jn1_nzb_kv" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_jn1_nzb_kv-d16893e68843-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Apx-DataFormats/DataFormat_Title.html#concept_jn1_nzb_kv" id="concept_jn1_nzb_kv-d16893e68843-link">Data Formats by Stage</a><div class="wh-tooltip"><p class="shortdesc"></p></div></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_pvm_yt3_wq-d16893e68958" class="topicref" data-id="concept_pvm_yt3_wq" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_pvm_yt3_wq-d16893e68958-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Expression_Language/ExpressionLanguage_title.html" id="concept_pvm_yt3_wq-d16893e68958-link">Expression Language</a></div></div></li><li 
role="treeitem" aria-expanded="false"><div data-tocid="concept_vcj_1ws_js-d16893e69669" class="topicref" data-id="concept_vcj_1ws_js" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_vcj_1ws_js-d16893e69669-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Apx-RegEx/RegEx-Title.html#concept_vcj_1ws_js" id="concept_vcj_1ws_js-d16893e69669-link">Regular Expressions</a></div></div></li><li role="treeitem" aria-expanded="false"><div data-tocid="concept_chv_vmj_wr-d16893e69787" class="topicref" data-id="concept_chv_vmj_wr" data-state="not-ready"><span role="button" tabindex="0" aria-labelledby="button-expand-action concept_chv_vmj_wr-d16893e69787-link" class="wh-expand-btn"></span><div class="title"><a href="../../../datacollector/UserGuide/Apx-GrokPatterns/GrokPatterns_title.html#concept_chv_vmj_wr" id="concept_chv_vmj_wr-d16893e69787-link">Grok Patterns</a></div></div></li></ul></div>
                        

</div>
</nav>
                    


                    
                    <div id="wh_topic_body" class="col-lg-7 col-md-9 col-sm-12">
<button id="wh_close_publication_toc_button" class="close-toc-button d-none" aria-label="Toggle publishing table of content" aria-controls="wh_publication_toc" aria-expanded="true"><span class="close-toc-icon-container"><span class="close-toc-icon"></span></span></button><button id="wh_close_topic_toc_button" class="close-toc-button d-none" aria-label="Toggle topic table of content" aria-controls="wh_topic_toc" aria-expanded="true"><span class="close-toc-icon-container"><span class="close-toc-icon"></span></span></button>

                        
<div class=" wh_topic_content body "><main role="main"><article class="" role="article" aria-labelledby="ariaid-title1"><article class="nested0" aria-labelledby="ariaid-title1" id="concept_phk_bdf_2w">
    <h1 class="- topic/title title topictitle1" id="ariaid-title1">Drift Synchronization Solution for Hive</h1>
    
    <div class="- topic/body concept/conbody body conbody"><p class="- topic/shortdesc shortdesc"></p>
        <p class="- topic/p p">The <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span> detects drift in incoming data and updates corresponding Hive tables. </p>
        <p class="- topic/p p">Previously known as the Hive Drift Solution, the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span> enables creating and updating Hive tables based on record requirements and writing
            data to HDFS or MapR FS based on record header attributes. You can use the full
            functionality of the solution or individual pieces, as needed. </p>
        <div class="- topic/p p">The <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span> supports processing Avro and Parquet data. When processing Parquet data, the solution
            generates temporary Avro files and uses the MapReduce executor to convert the Avro files
            to Parquet. <div class="- topic/note note note note_note"><span class="note__title">Note:</span> Starting with Cloudera CDP 7.1.4, Hive-managed internal tables can
                include ORC data only. As a result, when using Cloudera CDP 7.1.4 or later, the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span> cannot create or update managed internal tables. The solution can create or
                update external tables only.</div></div>
        <div class="- topic/p p">The solution is compatible with Impala, but requires additional steps to refresh the
            Impala metadata cache. <div class="- topic/note note tip note_tip"><span class="note__title">Tip:</span> You can also download the sample Drift
                Synchronization for Hive pipeline from the <a class="- topic/xref xref" href="https://github.com/streamsets/pipeline-library/tree/master/datacollector" target="_blank" rel="external noopener"><span class="- topic/ph ph">StreamSets</span>
                    <span class="- topic/ph ph">Data Collector</span> pipeline library</a>, import the pipeline into <span class="- topic/ph ph">Data Collector</span>, and then follow these instructions for more details on the solution.</div></div>
        <p class="- topic/p p"> </p>
    </div>
<article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title2" id="concept_qtt_bzw_vz">
 <h2 class="- topic/title title topictitle2" id="ariaid-title2">General Processing</h2>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">The <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>
            incorporates the Hive Metadata processor, Hive Metastore destination, and the Hadoop FS
            or MapR FS destination as follows:</p>
        <div class="- topic/p p">
            <dl class="- topic/dl dl">
                
                    <dt class="- topic/dt dt dlterm">Drift detection</dt>
                    <dd class="- topic/dd dd">When processing records, the Hive Metadata processor detects columnar drift
                        and the need for new tables and partitions. It generates metadata records
                        that describe the necessary changes and passes them to the Hive Metastore
                        destination.</dd>
                    <dd class="- topic/dd dd ddexpand">When the Hive Metastore destination receives a metadata record, it compares
                        the proposed changes with the latest Hive metadata, and creates and updates
                        Hive tables as needed.</dd>
                    <dd class="- topic/dd dd ddexpand"><span class="- topic/ph ph">The destination can
                        create tables and partitions. It can add columns to tables and ignore
                        existing columns. It does not drop existing columns from tables.</span></dd>
                
                
                    <dt class="- topic/dt dt dlterm">Record-based writes</dt>
                    <dd class="- topic/dd dd">The Hive Metadata processor also adds information to the header of each
                        record and passes the records to the Hadoop FS destination or the MapR FS
                        destination. The destinations can perform record-based writes to their
                        destination systems based on the following details: <ul class="- topic/ul ul" id="concept_qtt_bzw_vz__ul_sn1_bjg_2w" data-ofbid="concept_qtt_bzw_vz__ul_sn1_bjg_2w">
                            <li class="- topic/li li">Target directory - Based on user-defined expressions, the Hive
                                Metadata processor assembles the path where each record should be
                                stored. It writes the generated path to a
                                    <dfn class="- topic/term term">targetDirectory</dfn> attribute in each record
                                    header.<p class="- topic/p p">To write the record to the generated path, configure
                                    the destination to use the targetDirectory header attribute.
                                </p></li>
                            <li class="- topic/li li">Avro schema - The processor writes the Avro schema to the
                                    <dfn class="- topic/term term">avroSchema</dfn> attribute in each record header. It
                                generates new Avro schemas when necessary based on the record
                                structure. Used for both Avro and Parquet data. <p class="- topic/p p">To use the
                                    generated Avro schema, configure the destination to use the
                                    avroSchema header attribute.</p></li>
                            <li class="- topic/li li">Roll files - When a schema change occurs, the processor generates a
                                roll indicator - the <dfn class="- topic/term term">roll</dfn> header attribute. This allows
                                the data with the changed schema to be written to an updated Hive
                                    table.<p class="- topic/p p">To roll files based on schema changes, configure the
                                    destination to use the roll header attribute.</p></li>
                        </ul></dd>
                
            </dl>
        </div>
        <p class="- topic/p p">For example, say you use this solution to write sales data to MapR FS. A partial upgrade
            of the sales system adds several new fields to a subset of the incoming data. </p>
        <p class="- topic/p p">With the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>,
            the Hive Metadata processor notes the new fields in a metadata record and passes it to
            the Hive Metastore destination. The Hive Metastore destination adds the new columns to
            the Hive target table. The MapR FS destination then writes the data to the updated
            table. When writing data without the new fields to the updated table, the destination
            inserts null values for the missing fields.</p>
 </div>
<article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title3" id="concept_ndg_3zw_vz">
 <h3 class="- topic/title title topictitle3" id="ariaid-title3">Parquet Processing</h3>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">Here are some differences in how the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>
            works when processing Parquet data:</p>
        <div class="- topic/p p">
            <dl class="- topic/dl dl">
                
                    <dt class="- topic/dt dt dlterm">Uses events to trigger Avro to Parquet MapReduce jobs</dt>
                    <dd class="- topic/dd dd">When you build the pipeline, you must configure the data-processing
                        destination to generate events. The destination then generates events each
                        time it closes an output file. </dd>
                    <dd class="- topic/dd dd ddexpand">Then, you use a MapReduce executor to kick off the Convert Avro to Parquet
                        MapReduce job each time it receives an event. </dd>
                
            </dl>
        </div>
        <div class="- topic/p p">
            <dl class="- topic/dl dl">
                
                    <dt class="- topic/dt dt dlterm">Creates and updates Parquet tables</dt>
                    <dd class="- topic/dd dd">The Hive Metastore destination creates and updates Parquet tables as needed.
                            <span class="- topic/ph ph">The destination uses the Stored as Parquet clause when
                        generating the table so it does not need to generate a new schema for each
                        change.</span></dd>
                
            </dl>
        </div>
        <div class="- topic/p p">
            <dl class="- topic/dl dl">
                
                    <dt class="- topic/dt dt dlterm">Uses temporary directories for Avro output files</dt>
                    <dd class="- topic/dd dd"><p class="- topic/p p">When processing Parquet data, the Hive Metadata processor <span class="- topic/ph ph" id="concept_ndg_3zw_vz__HD-CStudy-ProcessorAvro">adds .avro to the target directory that it
                        generates for each record. This allows the data-processing destination to
                        write the Avro files to a directory that Hive ignores as a temporary
                        directory.</span></p><p class="- topic/p p">As a result, the destination writes files to the following
                  directories: <code class="+ topic/ph pr-d/codeph ph codeph" id="concept_ndg_3zw_vz__HD-ParquetDir-ph">&lt;generated
                  directory&gt;/.avro</code>.</p><span class="- topic/ph ph">You can configure
                              the MapReduce executor to write the Parquet files to the parent
                              generated directory and to delete the Avro files after processing
                              them. You can also delete the temporary directories after the files
                              are processed, as needed.</span></dd>
                
            </dl>
        </div>
    </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title4" id="concept_u2t_fgy_1x">
 <h3 class="- topic/title title topictitle3" id="ariaid-title4">Impala Support</h3>
 <div class="- topic/body concept/conbody body conbody">
  <p class="- topic/p p">Data written by the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>
            is compatible with Impala.</p>
        <p class="- topic/p p">Impala requires using the Invalidate Metadata command to refresh the Impala metadata
            cache each time changes occur in the Hive metastore. </p>
        <p class="- topic/p p">When processing Avro data, you can use the Hive Query executor to automatically refresh
            the Impala metadata cache. For details, see <a class="- topic/xref xref" href="Impala.html#concept_szz_xwm_lx">Automating Impala Metadata Updates for Drift Synchronization for Hive</a>. </p>
        <p class="- topic/p p">When processing Parquet data, you need to run the Impala Invalidate Metadata command
            manually after the Hive Metastore destination makes changes to the Hive Metastore and
            after the MapReduce executor converts a file to Parquet. </p>
        <p class="- topic/p p">You can set up an alert to notify you when the Hive Metastore destination makes a change.
            Simply add a <a class="- topic/xref xref" href="../Alerts/RulesAlerts_title.html#concept_tpm_rsk_zq" title="Data rules define the information that you want to see about the data that passes between stages. You can create data rules based on any link in the pipeline. You can also enable metrics and create alerts for data rules.">data rule
                alert</a> on the link to the Hive Metastore destination and have the alert send
            an email or webhook when metadata records are passed to the Hive Metastore. </p>
        <p class="- topic/p p">Use external tools to determine when the Convert Avro to Parquet MapReduce jobs complete. </p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title5" id="concept_s3v_21p_hx">
    <h3 class="- topic/title title topictitle3" id="ariaid-title5">Flatten Records</h3>
    <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">At this time, the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span> does not process records with nested fields. If necessary, you can use the <a class="- topic/xref xref" href="../Processors/FieldFlattener.html#concept_njn_3kk_fx">Field Flattener
                processor</a> to flatten records with nested fields before passing them to the
            Hive Metadata processor. </p>
    </div>
</article></article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title6" id="concept_zzs_fkg_2w">
    <h2 class="- topic/title title topictitle2" id="ariaid-title6">Basic Avro Implementation</h2>
    <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">You can use the Hive Metadata processor,
            Hive Metastore destination for metadata processing, and Hadoop FS or MapR FS destination
            for data processing in any pipeline where the logic is appropriate.</p>
        <p class="- topic/p p">A basic implementation of the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>
            to process Avro data includes <span class="- topic/ph ph">the origin of your choice, the Hive Metadata processor
                        connected to the Hive Metastore destination to perform metadata updates, and
                        to either the Hadoop FS or MapR FS destination to process data</span>, as follows:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_zzs_fkg_2w__image_ys3_ztg_2w" src="../Graphics/HiveMeta-Pipeline.png" height="173" width="407"/></p>
        <p class="- topic/p p">The <span class="- topic/ph ph">Hive Metadata processor passes
                        records through the first output stream - the data stream. Connect the data
                        stream to the Hadoop FS or MapR FS destination to write data to the
                        destination system using record header attributes.</span>
        </p>
        <p class="- topic/p p">The Hive Metadata processor passes the metadata record
                  through the second output stream - the metadata output stream. Connect the Hive
                  Metastore destination to the metadata output stream to enable the destination to
                  create and update tables in Hive. The metadata output stream contains no record
                  data. </p>
        <p class="- topic/p p">If your data contains nested fields, you would add a Field
                  Flattener to flatten records as follows: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_zzs_fkg_2w__image_m1s_bzn_zz" src="../Graphics/HiveDrift-Flatten.png" height="175" width="511"/></p>
    </div>
</article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title7" id="concept_fkm_mzw_vz">
 <h2 class="- topic/title title topictitle2" id="ariaid-title7">Basic Parquet Implementation</h2>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">A basic implementation of the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>
            to process Parquet data adds a MapReduce executor to the Avro implementation.</p>
        <p class="- topic/p p">You use <span class="- topic/ph ph">the origin of your choice, the Hive Metadata processor
                        connected to the Hive Metastore destination to perform metadata updates, and
                        to either the Hadoop FS or MapR FS destination to process data</span>. You configure the data-processing destination to generate events, and use a
            MapReduce executor to convert the closed Avro files to Parquet. </p>
        <p class="- topic/p p">The basic Parquet implementation looks like this: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_fkm_mzw_vz__image_opc_rq2_wz" src="../Graphics/HiveDrift-Parquet.png" height="217" width="501"/></p>
        <p class="- topic/p p">As with Avro data, the <span class="- topic/ph ph">Hive Metadata processor passes
                        records through the first output stream - the data stream. Connect the data
                        stream to the Hadoop FS or MapR FS destination to write data to the
                        destination system using record header attributes.</span> Each time the destination closes an output file, it creates a file-closure event that
            triggers the MapReduce executor to start an Avro to Parquet MapReduce job.</p>
        <p class="- topic/p p">The Hive Metadata processor passes the metadata record
                  through the second output stream - the metadata output stream. Connect the Hive
                  Metastore destination to the metadata output stream to enable the destination to
                  create and update tables in Hive. The metadata output stream contains no record
                  data. </p>
        <p class="- topic/p p">If your data contains nested fields, you would add a Field
                  Flattener to flatten records as follows: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_fkm_mzw_vz__image_f4h_h14_zz" src="../Graphics/Parquet-Flatten.png" height="177" width="605"/></p>
    </div>
</article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title8" id="concept_y5w_dj3_fw">
    <h2 class="- topic/title title topictitle2" id="ariaid-title8">Implementation Steps</h2>
    <div class="- topic/body concept/conbody body conbody">
        <div class="- topic/p p">To implement the <span class="- topic/ph ph">Drift Synchronization Solution for Hive</span>, perform the following steps:<ol class="- topic/ol ol" id="concept_y5w_dj3_fw__ol_zr1_3j3_fw" data-ofbid="concept_y5w_dj3_fw__ol_zr1_3j3_fw">
                <li class="- topic/li li"> Configure the origin and any additional processors that you want to use. <ul class="- topic/ul ul" id="concept_y5w_dj3_fw__ul_qjn_kp4_hx" data-ofbid="concept_y5w_dj3_fw__ul_qjn_kp4_hx">
                        <li class="- topic/li li">If using the JDBC Query Consumer as the origin, enable the creation of
                            JDBC header attributes. For more information, see <a class="- topic/xref xref" href="../Origins/JDBCConsumer.html#concept_tvf_tgp_fx">Header Attributes with the Drift Synchronization Solution</a>. </li>
                        <li class="- topic/li li">If data includes records with nested fields, add a Field Flattener to
                            flatten records before passing them to the Hive Metadata processor.</li>
                    </ul></li>
                <li class="- topic/li li">To capture columnar drift and to enable record-based writes, configure the Hive
                    Metadata processor:<ul class="- topic/ul ul" id="concept_y5w_dj3_fw__ul_qsk_5x3_fw" data-ofbid="concept_y5w_dj3_fw__ul_qsk_5x3_fw">
                        <li class="- topic/li li">Configure the connection information.</li>
                        <li class="- topic/li li">Configure the database, table, and partition expressions.<p class="- topic/p p">You can
                                enter a single name or use an expression that evaluates to the names
                                to use. If necessary, you can use an Expression Evaluator earlier in
                                the pipeline to write the information to a record field or record
                                header attribute.</p></li>
                        <li class="- topic/li li">Configure the decimal field precision
                              and scale attribute names.<p class="- topic/p p" id="concept_y5w_dj3_fw__PrecisionScale-JDBCorigins" data-ofbid="concept_y5w_dj3_fw__PrecisionScale-JDBCorigins">When
                                    processing data from the JDBC Query Consumer or JDBC Multitable
                                    Consumer origins, use the default attribute names, "precision"
                                    and "scale". Both <span class="- topic/ph ph" id="concept_y5w_dj3_fw__JDBCdrift-origins-Decimal">origins
                                          store the precision and scale of Decimal columns in
                                          "precision" and "scale" field attributes for each Decimal
                                          field.</span></p><p class="- topic/p p"><span class="- topic/ph ph" id="concept_y5w_dj3_fw__JDBCdrift-OtherOrigins-Decimal">When processing data from other origins, you can use the
                                          Expression Evaluator processor earlier in the pipeline to
                                          create precision and scale field attributes for Decimal
                                          fields. </span></p></li>
                        <li class="- topic/li li">Specify the data format to use, Avro or Parquet.</li>
                        <li class="- topic/li li">Optionally configure advanced options, such as the maximum cache size,
                            time basis, and data time zone.</li>
                    </ul><p class="- topic/p p">For more information about the Hive Metadata processor, see <a class="- topic/xref xref" href="../Processors/HiveMetadata.html#concept_rz5_nft_zv">Hive Metadata</a>.</p></li>
                <li class="- topic/li li">To process metadata records generated by the processor and alter tables as
                    needed, connect the metadata output of the Hive Metadata processor to the Hive
                    Metastore destination.<div class="- topic/p p">
                        <div class="- topic/note note note note_note"><span class="note__title">Note:</span> While you might filter or route some records away from the Hive
                            Metastore destination, the destination must receive metadata records to
                            update Hive tables.</div>
                    </div></li>
                <li class="- topic/li li">Configure the Hive Metastore destination:<ul class="- topic/ul ul" id="concept_y5w_dj3_fw__ul_k12_ty3_fw" data-ofbid="concept_y5w_dj3_fw__ul_k12_ty3_fw">
                        <li class="- topic/li li">Configure the Hive connection information.</li>
                        <li class="- topic/li li">Optionally configure cache information and how tables are updated.</li>
                    </ul><p class="- topic/p p">For more information about the Hive Metastore destination, see <a class="- topic/xref xref" href="../Destinations/HiveMetastore.html#concept_gcr_z2t_zv">Hive Metastore</a>.</p></li>
                <li class="- topic/li li">Connect the data output of the Hive Metadata processor to the Hadoop FS or MapR
                    FS destination to write records to the destination system using record header
                    attributes.</li>
                <li class="- topic/li li">Configure the Hadoop FS or MapR FS destination:<ol class="- topic/ol ol" type="a" id="concept_y5w_dj3_fw__ul_n4p_dz3_fw" data-ofbid="concept_y5w_dj3_fw__ul_n4p_dz3_fw">
                        <li class="- topic/li li">To write records using the targetDirectory header attribute, on the
                            Output Files tab, select Directory in Header.</li>
                        <li class="- topic/li li">To roll records based on a roll header attribute, on the Output Files
                            tab, select Use Roll Attribute, and for Roll Attribute Name, enter
                            ârollâ.</li>
                        <li class="- topic/li li">To write records using the avroSchema header attribute, on the Data
                            Format tab, select the Avro data format, and then for the Avro Schema
                            Location property, select In Record Header.</li>
                    </ol><p class="- topic/p p">For more information about using record header attributes, see <a class="- topic/xref xref" href="../Pipeline_Design/RecordHeaderAttributes.html#concept_lmn_gdc_1w">Record Header Attributes for Record-Based Writes</a>.</p><div class="- topic/p p">
                        <div class="- topic/note note note note_note"><span class="note__title">Note:</span> To compress Avro data, use the Avro compression option on the Data
                            Formats tab, rather than the compression codec property on the Output
                            Files tab.</div>
                    </div></li>
                <li class="- topic/li li">When processing Parquet data, perform the following additional steps: <ol class="- topic/ol ol" type="a" id="concept_y5w_dj3_fw__ul_bsd_v52_wz" data-ofbid="concept_y5w_dj3_fw__ul_bsd_v52_wz">
                        <li class="- topic/li li">On the General tab of the data-processing destination, select Produce
                            Events.</li>
                        <li class="- topic/li li">Connect a MapReduce executor to the resulting event stream and configure
                            the necessary connection information for the stage.</li>
                        <li class="- topic/li li">On the Jobs tab of the MapReduce executor, select the Convert Avro to
                            Parquet job type and add any additional job parameters that are
                            required.</li>
                        <li class="- topic/li li">On the Avro Conversion tab, configure the Avro conversion properties:
                                <ul class="- topic/ul ul" id="concept_y5w_dj3_fw__ul_fbl_35f_3db" data-ofbid="concept_y5w_dj3_fw__ul_fbl_35f_3db">
                                <li class="- topic/li li">Use the default Input Avro File expression - This allows the
                                    executor to process the file that the data processing
                                    destination just closed.</li>
                                <li class="- topic/li li">Specify the Output Directory to use - To write the Parquet files
                                    to the parent directory of the .avro temporary directory, use
                                    the following
                                    expression:<pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${file:parentPath(file:parentPath(record:value('/filepath')))}</code></pre></li>
                                <li class="- topic/li li">Optionally configure additional Avro conversion properties.</li>
                            </ul></li>
                        <li class="- topic/li li">On the Avro to Parquet tab, optionally configure compression or other
                            advanced Parquet properties. </li>
                    </ol></li>
            </ol></div>
    </div>
</article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title9" id="concept_a1w_kkn_fw">
 <h2 class="- topic/title title topictitle2" id="ariaid-title9">Avro Case Study</h2>
 <div class="- topic/body concept/conbody body conbody">
  <p class="- topic/p p">Let's say you have a <span class="- topic/ph ph">Data Collector</span>
            pipeline that writes Avro log data to Kafka. The File Tail origin in the pipeline
            processes data from several different web services, tagging each record with a "tag"
            header attribute that identifies the service that generated the data. </p>
        <p class="- topic/p p">Now you want a new pipeline to pass the data to HDFS where it can be stored and reviewed,
            and you'd like the data written to tables based on the web service that generated the
            data. Note that you could also write the data to MapR FS -- the steps are almost
            identical to this case study, you'd just use a different destination.</p>
        <p class="- topic/p p">To do this, add and configure a Kafka Consumer to read the data into the pipeline, then
            connect it to a Hive Metadata processor. The processor assesses the record structure and
            generates a metadata record that describes any required Hive metadata changes. Using the
            tag header attribute and other user-defined expressions, a Hive Metadata processor can
            determine the database, table, and partition to use for the target directory and write
            that information along with the Avro schema to the record header, including a file roll
            indicator when necessary.</p>
        <p class="- topic/p p">You connect the Hive Metadata processor metadata output stream to a Hive Metastore
            destination. The destination, upon receiving the metadata record from the Hive Metadata
            processor, creates or updates Hive tables as needed. </p>
        <p class="- topic/p p">You connect the Hive Metadata processor data output stream to a Hadoop FS destination and
            configure it to use the information in record headers. The destination then writes each
            record where it wants to go using the target directory and Avro schema in the record
            header, and rolling files when needed. </p>
        <p class="- topic/p p">Now let's take a closer look... </p>
 </div>
<article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title10" id="concept_fzk_mmn_fw">
 <h3 class="- topic/title title topictitle3" id="ariaid-title10">The Hive Metadata Processor</h3>
 <div class="- topic/body concept/conbody body conbody">
  <div class="- topic/p p">You set up the Kafka Consumer and connect it to the Hive Metadata processor. When you configure
            the processor, you have a few things to consider in addition to the basic connection
            details: <ol class="- topic/ol ol" id="concept_fzk_mmn_fw__ol_fzm_bmv_fw" data-ofbid="concept_fzk_mmn_fw__ol_fzm_bmv_fw">
                <li class="- topic/li li">Which database should the records be written to? <p class="- topic/p p">Hadoop FS will do the
                        writing, but the processor needs to know where the records should
                        go.</p><p class="- topic/p p">Let's write to the Hive default database. To do that, you can
                        leave the database property empty.</p></li>
                <li class="- topic/li li">What tables should the records be written to?<div class="- topic/p p">The pipeline supplying the data
                        to Kafka uses the "tag" header attribute to indicate the originating web
                        service. To use the tag attribute to write to tables, you use the following
                        expression for the table name:
                        <pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${record:attribute('tag')}</code></pre></div></li>
                <li class="- topic/li li">What partitions, if any, do you want to use? <div class="- topic/p p">Let's create daily partitions
                        using datetime variables for the partition value expression as
                        follows:<pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${YYYY()}-${MM()}-${DD()}</code></pre></div></li>
                <li class="- topic/li li">How do you want to configure the precision and scale for decimal fields?
                        <p class="- topic/p p">Though the data from the web services contains no decimal data that you
                        are aware of, to prevent new decimal data from generating error records,
                        configure the decimal field expressions. </p><p class="- topic/p p">The default expressions are
                        for data generated by the JDBC Query Consumer or the JDBC Multitable
                        Consumer. You can replace them with other expressions or with constants.
                    </p></li>
                <li class="- topic/li li">What type of data is being processed?<p class="- topic/p p">On the Data Format tab, select the Avro
                        data format.</p></li>
            </ol></div>
        <p class="- topic/p p"> At this point, your pipeline would look like this: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_fzk_mmn_fw__image_g5b_34n_fw" src="../Graphics/HiveMeta-Ex-Processor.png" height="292" width="579"/></p>
        <p class="- topic/p p">With this configuration, the Hadoop FS destination will write every record to the Hive
            table listed in the tag attribute and to the daily partition based on the time of
            processing.</p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title11" id="concept_vh3_s4n_fw">
 <h3 class="- topic/title title topictitle3" id="ariaid-title11">The Hive Metastore Destination</h3>
 
 <div class="- topic/body concept/conbody body conbody"><p class="- topic/shortdesc shortdesc">Now to process the metadata records - and to automatically create and update tables in
        Hive - you need the Hive Metastore destination.</p>
  <p class="- topic/p p">Connect the destination to the second output stream of the processor and configure the
            destination. Configuration of this destination is a breeze - just configure the Hive
            connection information and optionally configure some advanced options. </p>
        <p class="- topic/p p">The destination connects to Hive the same way the processor does so you can reuse that
            connection information:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_vh3_s4n_fw__image_imx_5pn_fw" src="../Graphics/HiveMeta-Ex-Dest.png" height="301" width="490"/></p>
        <p class="- topic/p p">When the Drift Synchronization Solution for Hive processes Avro data, the destination
            includes the <a class="- topic/xref xref" href="../Destinations/HiveMetastore.html#concept_wyr_5jv_hw">Stored As Avro clause in table creation queries</a>, by default. You can change
            that and configure other advanced properties on the Advanced tab. You can generally use
            the defaults for the advanced properties, so let's do that. </p>
        <p class="- topic/p p">Now, the beauty of the Hive Metastore destination is this: when the destination gets a
            metadata record that says you need a new table for a new web service, it creates the
            table with all the necessary columns so you can write the record (that triggered that
            metadata record) to the table.</p>
        <p class="- topic/p p">
            <span class="- topic/ph ph">And if the structure of the record going
                        to a table changes, like adding a couple new fields, the destination updates
                        the table so the record can be written to it.</span></p>
        <p class="- topic/p p">That covers the metadata, but what about the data?  </p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title12" id="concept_jzr_ypn_fw">
 <h3 class="- topic/title title topictitle3" id="ariaid-title12">The Data-Processing Destination</h3>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">To write data to Hive using record header attributes, you can use
                  the Hadoop FS or MapR FS destinations. We'll use the Hadoop FS destination. </p>
        <p class="- topic/p p">To write data to HDFS, you connect the Hadoop FS destination to the data output stream of
            the Hive Metadata processor. </p>
        <p class="- topic/p p">When you configure the destination, instead of configuring
                  a directory template, you configure the destination to use the directory in the
                  record header. Configure the destination to roll files when it sees a "roll"
                  attribute in the record header, and when configuring the Avro properties, indicate
                  that the schema is in the record header. </p>
        <p class="- topic/p p">The Output Files tab of the destination might look something like this:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_jzr_ypn_fw__image_sbv_xrn_fw" src="../Graphics/HiveMeta-Ex-HDFS.png" height="458" width="491"/></p>
        <p class="- topic/p p">And the Data Format tab looks like this:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_jzr_ypn_fw__image_wzp_1sn_fw" src="../Graphics/HiveMeta-Ex-HDFS-Avro.png" height="327" width="478"/></p>
        <p class="- topic/p p"><span class="- topic/ph ph" id="concept_jzr_ypn_fw__HD-CStudy-HDFSprocessing-ph">With this
                        configuration, the destination uses the information in record header
                        attributes to write data to HDFS. It writes each record to the directory in
                        the targetDirectory header attribute, using the Avro schema in the
                        avroSchema header attribute.</span> And it rolls a file when it spots the roll
                  attribute in a record header. </p>
        <p class="- topic/p p">Note that the destination can also use Max Records in File, Max File Size, and Idle
            Timeout to determine when to roll files.</p>
        <p class="- topic/p p">Also, if you want to compress the Avro files, use the Avro Compression Codec property on
            the Data Formats tab, instead of the general compression option on the Output Files
            tab.</p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title13" id="concept_jlr_zlk_gw">
 <h3 class="- topic/title title topictitle3" id="ariaid-title13">Processing Avro Data</h3>
    
    <div class="- topic/body concept/conbody body conbody"><p class="- topic/shortdesc shortdesc">Now what happens when you start the pipeline?</p>
        <p class="- topic/p p">This pipeline is set up to write data to different tables based on the table name in the
            "tag" attribute that was added to the record headers in the earlier pipeline. </p>
        <div class="- topic/p p">Say the table names are "weblog" and "service." For each record with "weblog" as the tag
            attribute, the Hive Metadata processor evaluates the fields in the record as follows:
                <ul class="- topic/ul ul" id="concept_jlr_zlk_gw__ul_iml_1mk_gw" data-ofbid="concept_jlr_zlk_gw__ul_iml_1mk_gw">
                <li class="- topic/li li">If the fields match the existing Hive table, it just writes the necessary
                    information into the <code class="+ topic/ph pr-d/codeph ph codeph">targetDirectory</code> and
                        <code class="+ topic/ph pr-d/codeph ph codeph">avroSchema</code> stage attributes, and Hadoop FS writes the
                    record to the weblog table.</li>
                <li class="- topic/li li">If a record includes a new field, the processor generates a metadata record that
                    the Hive Metastore destination uses to update the weblog table to include the
                    new column. It also writes information to stage attributes so Hadoop FS can
                    write the record to the updated weblog table.</li>
                <li class="- topic/li li">If a record has missing fields, the processor just writes information to stage
                    attributes, and Hadoop FS writes the record to HDFS with null values for the
                    missing fields.</li>
                <li class="- topic/li li">If a field has been renamed, the processor treats the field as a new field,
                    generating a metadata record that the Hive Metastore destination uses to update
                    the weblog table. When Hadoop FS writes the record, data is written to the new
                    field and a null value to the old field.</li>
                <li class="- topic/li li">If a data type changes for an existing field, the processor treats the record as
                    an error record.</li>
            </ul></div>
        <p class="- topic/p p">For each record with a "service" tag, the processor performs the same actions.</p>
        <div class="- topic/note note note note_note"><span class="note__title">Note:</span> If a record includes a new tag value, the Hive Metadata processor generates a metadata
            record that the Hive Metastore destination uses to create a new table. And Hadoop FS
            writes the record to the new table. So if you spin up a new web service, you don't need
            to touch this pipeline to have it handle the new data set. </div>
    </div>
</article></article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title14" id="concept_vl3_v2f_zz">
 <h2 class="- topic/title title topictitle2" id="ariaid-title14">Parquet Case Study</h2>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">Let's say you have database data that you want to
            write to Parquet tables in Hive. You want to write the data to different Parquet tables
            based on the country of origin. You don't expect a lot of schema changes, but would like
            it handled automatically when it occurs. </p>
        <p class="- topic/p p">To do this, you'd start off with the JDBC Query Consumer to read data into the pipeline.
            You connect the origin to the Hive Metadata processor and configure expressions that
            define the corresponding database, table, and partition where each record should be
            written in the Parquet table. The Hive Metadata processor uses this information to
            assess records and generate the record header attributes that the data-processing
            destination uses to write the data. It also uses the information to generate metadata
            records that the Hive Metastore destination uses to create and update tables as
            needed.</p>
        <p class="- topic/p p">You connect the Hive Metadata processor data output stream to a Hadoop FS destination and
            configure it to use the information in record headers. The destination then writes each
            record using the target directory and schema information in the record header, and rolls
            files upon schema changes. And you configure the destination to generate events so it
            generates events each time it closes a file. </p>
        <p class="- topic/p p">You connect the Hive Metadata processor metadata output stream to a Hive Metastore
            destination. The destination, upon receiving the metadata record from the Hive Metadata
            processor, creates or updates Parquet tables as needed. </p>
        <p class="- topic/p p">And finally, you connect a MapReduce executor to the event stream of the Hadoop FS
            destination and configure the executor to use the Convert Avro to Parquet job available
            in the stage. So each time the executor receives an event from the Hadoop FS
            destination, it processes the closed Avro file and converts it to Parquet, writing it to
            the updated Parquet tables.</p>
        <p class="- topic/p p">Now let's take a closer look... </p>
    </div>
<article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title15" id="concept_kp3_nmf_zz">
 <h3 class="- topic/title title topictitle3" id="ariaid-title15">JDBC Query Consumer</h3>
 <div class="- topic/body concept/conbody body conbody">
  <p class="- topic/p p">When you configure the origin, you configure it as you would for any normal pipeline. Specify
            the connection string to use, the query and offset column to use, and the query
            interval. If you want all the existing data, omit the initial offset. Use the default
            incremental mode to avoid requerying the entire table when the origin runs the next
            query.</p>
        <p class="- topic/p p">When using the origin to process decimal data, ensure that the origin <a class="- topic/xref xref" href="../Origins/JDBCConsumer.html#concept_egw_d4c_kw">creates JDBC
                record header attributes</a>. When creating record header attributes, the origin
            includes the precision and scale of each decimal field in record header attributes. This
            allows the Hive Metadata processor to easily determine the original precision and scale
            of decimal data. </p>
        <p class="- topic/p p">You can alternatively enter constants in the Hive Metadata processor for the precision
            and scale to be used for all decimal fields in the record, but use JDBC record header
            attributes to use field-specific values. The origin creates header attributes by
            default. </p>
        <p class="- topic/p p">Here are the JDBC record header attribute properties in the origin: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_kp3_nmf_zz__image_xbr_5rf_zz" src="../Graphics/Parquet-Origin.png" height="337" width="412"/></p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title16" id="concept_kt2_zrf_zz">
 <h3 class="- topic/title title topictitle3" id="ariaid-title16">The Hive Metadata Processor</h3>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">Connect the JDBC Query Consumer origin to the Hive Metadata processor. When you configure
            the processor, you have a few things to consider in addition to the basic connection
            details: </p>
        <ol class="- topic/ol ol" id="concept_kt2_zrf_zz__ol_kx4_fsf_zz" data-ofbid="concept_kt2_zrf_zz__ol_kx4_fsf_zz">
            <li class="- topic/li li">Which database should the records be written to?<p class="- topic/p p">Hadoop FS will do the writing,
                    but the processor needs to know where the records should go. Let's write to the
                    Hive default database. To do that, you can leave the database property empty.
                </p></li>
            <li class="- topic/li li">What tables should the records be written to? <div class="- topic/p p">You can write all data to a single
                    table by hardcoding the Table Name property. But since you want to write the
                    data to different tables based on the country of origin, let's use an expression
                    to pull the table name from the Country field, as
                    follows:<pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${record:value('/Country')}</code></pre></div></li>
            <li class="- topic/li li">What partition do you want to use?<div class="- topic/p p">Let's create a dt partition column for daily
                    partitions using datetime variables in the expression as follows:
                    <pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${YYYY()}-${MM()}-${DD()}</code></pre></div></li>
            <li class="- topic/li li">How do you want to configure the precision and scale expressions for decimal fields?
                    <div class="- topic/p p">Since you have the JDBC Query Consumer generating record header attributes,
                    you can use the default expressions in the processor:
                    <pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${record:attribute(str:concat(str:concat('jdbc.', field:field()), '.scale'))}
${record:attribute(str:concat(str:concat('jdbc.', field:field()), '.precision'))}</code></pre></div><p class="- topic/p p">With
                    these expressions, the processor uses the precision and scale that is written to
                    record header attributes by the JDBC Query Consumer for each decimal field in
                    the record.</p></li>
            <li class="- topic/li li">What type of data is being processed?<p class="- topic/p p">On the Data Format tab, select the Parquet
                    data format.</p></li>
        </ol>
        <p class="- topic/p p">At this point, the pipeline looks like this:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_kt2_zrf_zz__image_dr3_mvg_zz" src="../Graphics/Parquet-HiveMetadata.png" height="303" width="548"/></p>
        <p class="- topic/p p">When processing records, the Hive Metadata processor uses the configuration details to
            assess records. It generates a targetDirectory header attribute for each record using
            the country listed in the record for the table and the time the record was processed for
            the partition. </p>
        <p class="- topic/p p"><span class="- topic/ph ph">When a record includes a schema change, the
                        processor writes the new schema to the avroSchema header attribute and adds
                        the roll header attribute to the record. It also generates a metadata record
                        for the Hive Metastore destination. The combination of these actions enables
                        the Hive Metastore destination to update Parquet tables as needed and for
                        the Hadoop FS destination to write the file with schema drift to the updated
                        table.</span></p>
        <p class="- topic/p p">Remember that for Parquet data, the processor <span class="- topic/ph ph">adds .avro to the target directory that it
                        generates for each record. This allows the data-processing destination to
                        write the Avro files to a directory that Hive ignores as a temporary
                        directory.</span></p>
        <p class="- topic/p p">As a result, the destination writes files to the following
                  directories: <code class="+ topic/ph pr-d/codeph ph codeph" id="concept_kt2_zrf_zz__HD-ParquetDir-ph">&lt;generated
                  directory&gt;/.avro</code>.</p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title17" id="concept_mr2_y1g_zz">
 <h3 class="- topic/title title topictitle3" id="ariaid-title17">The Hive Metastore Destination</h3>
 
 <div class="- topic/body concept/conbody body conbody"><p class="- topic/shortdesc shortdesc">Now to process the metadata records - and to automatically create and update Parquet
        tables in Hive - you need the Hive Metastore destination.</p>
        <p class="- topic/p p">Connect the destination to the second output stream
                  of the processor and configure the destination. Configuration of this destination
                  is a breeze - just configure the Hive connection information and optionally
                  configure some advanced options. </p>
        <p class="- topic/p p">The destination connects to Hive the same way the processor does, so you can reuse that
            connection information. The Advanced tab includes some properties that only apply to
            Avro data and a Max Cache Size property to limit the size of the cache that the Hive
            Metastore uses. By default, the cache size is unlimited, so let's leave it that way. </p>
        <p class="- topic/p p">Now, the beauty of the Hive Metastore destination is this: when the destination gets a
            metadata record that says you need a new table for a new country, it creates a new
            Parquet table with all the necessary columns so you can write the record (that triggered
            that metadata record) to the table. </p>
        
     <p class="- topic/p p"><span class="- topic/ph ph">And if the structure of the record going
                        to a table changes, like adding a couple new fields, the destination updates
                        the table so the record can be written to it.</span>
            <span class="- topic/ph ph">The destination uses the Stored as Parquet clause when
                        generating the table so it does not need to generate a new schema for each
                        change.</span></p>
        <p class="- topic/p p">This is how the pipeline looks at this point: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_mr2_y1g_zz__image_ckn_swn_zz" src="../Graphics/Parquet-HMetastore.png" height="114" width="363"/></p>
        <p class="- topic/p p">That covers the metadata, but what about the data? </p>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title18" id="concept_oyq_qbh_zz">
 <h3 class="- topic/title title topictitle3" id="ariaid-title18">The Data-Processing Destination</h3>
 <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">To write data to Hive using record header attributes, you can use
                  the Hadoop FS or MapR FS destinations. We'll use the Hadoop FS destination. </p>
        <p class="- topic/p p" id="concept_oyq_qbh_zz__HD-CStudy-ConnectHDFS" data-ofbid="concept_oyq_qbh_zz__HD-CStudy-ConnectHDFS">To write Avro files to HDFS, you connect the Hadoop FS
            destination to the data output stream of the Hive Metadata processor. </p>
        <p class="- topic/p p">First, on the General tab, enable the destination to generate events, as follows:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_oyq_qbh_zz__image_zc3_hjh_zz" src="../Graphics/Parquet-HDFS-Events.png" height="365" width="368"/></p>
        <p class="- topic/p p">Now, the destination generates an event each time the destination closes an output file.
            As described in the <a class="- topic/xref xref" href="../Destinations/HadoopFS-destination.html#concept_dmx_1ln_qx">Event Record
                section</a> of the Hadoop FS documentation, the event record includes the
            filepath and file name of the closed file. The MapReduce executor will use this
            information to convert the Avro files to Parquet.</p>
        <p class="- topic/p p">When you configure the destination, instead of configuring
                  a directory template, you configure the destination to use the directory in the
                  record header. Configure the destination to roll files when it sees a "roll"
                  attribute in the record header, and when configuring the Avro properties, indicate
                  that the schema is in the record header. </p>
        <p class="- topic/p p">The Output Files tab of the destination might look something like this:</p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_oyq_qbh_zz__image_ckx_kkh_zz" src="../Graphics/Parquet-HDFS-Output.png" height="421" width="338"/></p>
        <p class="- topic/p p"><span class="- topic/ph ph">With this
                        configuration, the destination uses the information in record header
                        attributes to write data to HDFS. It writes each record to the directory in
                        the targetDirectory header attribute, using the Avro schema in the
                        avroSchema header attribute.</span> It closes files when it spots the roll attribute in a record header or upon reaching
            other file closure limits configured in the destination. And it generates an event each
            time it closes a file. </p>
        <div class="- topic/p p">
            <div class="- topic/note note tip note_tip"><span class="note__title">Tip:</span> Data does not become available to Hive until the Avro files are
                converted to Parquet. If you want to convert data quickly, configure one or more of
                the file closure properties to ensure files roll regularly: Max Records in File, Max
                File Size, or Idle Timeout.</div>
        </div>
 </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title19" id="concept_gmh_b4h_zz">
    <h3 class="- topic/title title topictitle3" id="ariaid-title19">The MapReduce Executor</h3>
    <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">To convert the Avro files generated by the Hadoop FS destination, use the Convert Avro to
            Parquet job in the MapReduce executor. Like all executors, the MapReduce executor
            performs tasks when triggered by an event. In this case, it will be the file-closure
            events generated by the Hadoop FS destination. </p>
        <div class="- topic/p p">Connect the Hadoop FS event output stream to the MapReduce executor. In addition to the
            required configuration details, select the Convert Avro to Parquet job type, then
            configure the following Avro conversion details:<ul class="- topic/ul ul" id="concept_gmh_b4h_zz__ul_fdd_wth_zz" data-ofbid="concept_gmh_b4h_zz__ul_fdd_wth_zz">
                <li class="- topic/li li">Input Avro File - Use the default expression for this property. With the
                    default, the executor uses the directory and file name specified in the filepath
                    field of the event record. Files will be in the .avro directory, but this
                    information will be correctly noted in the event record.</li>
                <li class="- topic/li li">Keep Avro Input File - Select this if you want to keep the original Avro file.
                    By default, the executor deletes the original file after successfully converting
                    it to Parquet.</li>
                <li class="- topic/li li">Output Directory - To write the Parquet files to the original directory where
                    the data was expected - rather than the .avro directory - use the following
                        expression:<pre class="+ topic/pre pr-d/codeblock pre codeblock"><code>${file:parentPath(file:parentPath(record:value('/filepath')))}</code></pre><p class="- topic/p p">The
                            <code class="+ topic/ph pr-d/codeph ph codeph">file:parentPath</code> function returns a file path without
                        the final separator. So this expression removes /.avro/&lt;filename&gt; from
                        the filepath.</p><p class="- topic/p p">For example, if the original filepath is:
                        /sales/countries/nz/.avro/sdc-file, then <code class="+ topic/ph pr-d/codeph ph codeph">file:parentPath</code>
                        returns the following output path: /sales/countries/nz.</p></li>
            </ul></div>
        <p class="- topic/p p">When needed, you can configure additional Parquet properties, such as the compression
            codec or page size to use, on the Avro to Parquet tab.</p>
        <p class="- topic/p p">Here's the pipeline and MapReduce executor configuration: </p>
        <p class="- topic/p p"><img class="- topic/image image" id="concept_gmh_b4h_zz__image_qhp_j4n_zz" src="../Graphics/Parquet-MapReduce.png" height="346" width="464"/></p>
        <p class="- topic/p p"><span class="- topic/ph ph">For more information about dataflow
                        triggers and the event framework, see <a class="- topic/xref xref" href="../Event_Handling/EventFramework-Title.html#concept_cph_5h4_lx">Dataflow Triggers Overview</a>.</span></p>
    </div>
</article><article class="- topic/topic concept/concept topic concept nested2" aria-labelledby="ariaid-title20" id="concept_enc_2wh_zz">
    <h3 class="- topic/title title topictitle3" id="ariaid-title20">Processing Parquet Data</h3>
    <div class="- topic/body concept/conbody body conbody">
        <div class="- topic/p p">When the pipeline runs, the following actions occur:<ul class="- topic/ul ul" id="concept_enc_2wh_zz__ul_jqb_jfn_zz" data-ofbid="concept_enc_2wh_zz__ul_jqb_jfn_zz">
                <li class="- topic/li li">
                    <p class="- topic/p p">Hive Metadata processor assesses each record, using the country in the record
                        to create the output directory for the targetDirectory header attribute.</p>
                </li>
                <li class="- topic/li li">
                    <p class="- topic/p p"><span class="- topic/ph ph">When a record includes a schema change, the
                        processor writes the new schema to the avroSchema header attribute and adds
                        the roll header attribute to the record. It also generates a metadata record
                        for the Hive Metastore destination. The combination of these actions enables
                        the Hive Metastore destination to update Parquet tables as needed and for
                        the Hadoop FS destination to write the file with schema drift to the updated
                        table.</span></p>
                </li>
                <li class="- topic/li li">When the Hive Metastore destination receives a metadata record, it updates the
                    Hive metastore accordingly, creating or updating a Parquet table. </li>
                <li class="- topic/li li">The Hadoop FS destination writes records to files based on the directory in the
                    targetDirectory header, closing files based on the roll header attribute and any
                    other file closure properties configured in the stage. </li>
                <li class="- topic/li li">When the Hadoop FS destination closes a file, it sends an event record to the
                    MapReduce executor, triggering the executor to kick off the Convert Avro to
                    Parquet job. The MapReduce executor does not monitor the job.</li>
                <li class="- topic/li li">After the job completes, the Parquet data becomes available to Hive.</li>
            </ul></div>
    </div>
</article></article><article class="- topic/topic concept/concept topic concept nested1" aria-labelledby="ariaid-title21" id="concept_ry2_qkm_hw">
    <h2 class="- topic/title title topictitle2" id="ariaid-title21">Hive Data Types</h2>
    <div class="- topic/body concept/conbody body conbody">
        <p class="- topic/p p">The following table lists <span class="- topic/ph ph">Data Collector</span> data
            types and the corresponding Hive data types. The Hive Metadata processor uses these
            conversions when generating metadata records. The Hive Metastore destination uses these
            conversions when generating Hive CREATE TABLE and ALTER TABLE statements.</p>
        <div class="- topic/p p">
            <div class="table-container"><table class="- topic/table table frame-all" id="concept_ry2_qkm_hw__table_bcm_jlm_hw" data-ofbid="concept_ry2_qkm_hw__table_bcm_jlm_hw" data-cols="2"><caption></caption><colgroup><col style="width:50%"/><col style="width:50%"/></colgroup><thead class="- topic/thead thead">
                        <tr class="- topic/row">
                            <th class="- topic/entry entry colsep-1 rowsep-1" id="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1"><span class="- topic/ph ph">Data Collector</span> Data Type</th>
                            <th class="- topic/entry entry colsep-0 rowsep-1" id="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Hive Data Type</th>
                        </tr>
                    </thead><tbody class="- topic/tbody tbody">
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Boolean</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Boolean</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Byte</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Not supported</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Char</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">String</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Date</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Date </td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Datetime</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">String</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Decimal</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Decimal</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Double</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Double</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Float</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Float</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Integer</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Int</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Long</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Bigint</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">List</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Not supported</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">List-Map</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Not supported</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Map</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Not supported</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Short</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">Int</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">String</td>
                            <td class="- topic/entry entry colsep-0 rowsep-1" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">String</td>
                        </tr>
                        <tr class="- topic/row">
                            <td class="- topic/entry entry colsep-1 rowsep-0" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__1">Time</td>
                            <td class="- topic/entry entry colsep-0 rowsep-0" headers="concept_ry2_qkm_hw__table_bcm_jlm_hw__entry__2">String</td>
                        </tr>
                    </tbody></table></div>
        </div>
    </div>
</article></article></article></main></div>

                        
                        
                        


                    </div>
                    
                </div>
            </div>


        </div> <nav class="navbar navbar-default wh_footer" data-whc_version="25.0">
  <div class=" footer-container  mx-auto">
    <!-- script for Data Collector, all flavors, but only used when accessed directly, not from portal --><script>
  // Standard Google Universal Analytics (analytics.js) bootstrap snippet:
  // defines a stub `ga` command queue on window, then injects the
  // analytics.js library asynchronously before the first <script> tag.
  // NOTE(review): Universal Analytics (UA-* properties) stopped processing
  // data after Google's UA sunset; consider migrating to GA4 (gtag.js) —
  // requires a GA4 measurement ID not available here.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Create the tracker for property UA-60917135-3 and record a pageview.
  ga('create', 'UA-60917135-3', 'auto');
  ga('send', 'pageview');
</script>
  </div>
</nav>

        
        <div id="go2top">
            <span class="oxy-icon oxy-icon-up"></span>
        </div>
        
        <!-- The modal container for images -->
        <div id="modal_img_large" class="modal">
            <span class="close oxy-icon oxy-icon-remove"></span>
            <!-- Modal Content (The Image) -->
            <div id="modal_img_container"></div>
            <!-- Modal Caption (Image Text) -->
            <div id="caption"></div>
        </div>
        
        
        © 2023 StreamSets, Inc.

    </body>
</html>