<!DOCTYPE html>
<html lang="zh-cn" dir="ltr">
  <head>
    <!-- NOTE(review): VitePress build artifact — regenerate from the site source; do not hand-edit. -->
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width,initial-scale=1">
    <title>redis的redis.config文件配置与内容+10.30日之前的总结 | 雾喔</title>
    <meta name="description" content="雾喔的博客主题，基于 vitepress 实现">
    <meta name="generator" content="VitePress v1.0.0-rc.31">
    <!-- "preload stylesheet" combines both link types: fetch early AND apply as the page stylesheet. -->
    <link rel="preload stylesheet" href="/ysy-blog/assets/style.vXJl4vBr.css" as="style">
    
    <script type="module" src="/ysy-blog/assets/app.4qItX3Fv.js"></script>
    <!-- Font preload carries crossorigin (required for as="font" so the preload matches the CSS CORS fetch). -->
    <link rel="preload" href="/ysy-blog/assets/inter-roman-latin.bvIUbFQP.woff2" as="font" type="font/woff2" crossorigin="">
    <link rel="modulepreload" href="/ysy-blog/assets/chunks/framework.pDhfCtLl.js">
    <link rel="modulepreload" href="/ysy-blog/assets/chunks/theme.RmJncuXj.js">
    <link rel="modulepreload" href="/ysy-blog/assets/chunks/giscus-2a044aea.hJSdW-mA.js">
    <link rel="modulepreload" href="/ysy-blog/assets/blog_csdn_redis的redis.config文件配置与内容_10.30日之前的总结.md.xXGvwm_i.lean.js">
    <meta name="referrer" content="no-referrer">
    <link rel="icon" href="/ysy-blog/favicon.ico">
    <!-- Blocking inline bootstrap: reads the stored/system theme and adds .dark before the app renders, avoiding a light/dark flash. -->
    <script id="check-dark-mode">(()=>{const e=localStorage.getItem("vitepress-theme-appearance")||"auto",a=window.matchMedia("(prefers-color-scheme: dark)").matches;(!e||e==="auto"?a:e==="dark")&&document.documentElement.classList.add("dark")})();</script>
    <script id="check-mac-os">document.documentElement.classList.toggle("mac",/Mac|iPhone|iPod|iPad/i.test(navigator.platform));</script>
    <!-- Search index loads best-effort; failures are swallowed so the page works without pagefind. -->
    <script>import("/ysy-blog/pagefind/pagefind.js").then(i=>{window.__pagefind__=i,i.init()}).catch(()=>{});</script>
  </head>
  <body><!--v-if--><!--teleport anchor-->
    <div id="app"><div class="Layout" data-v-af5a5a9a data-v-ae5c84bb><!--[--><!--[--><!--]--><!----><!--[--><div style="display:none;" class="theme-blog-popover" data-pagefind-ignore="all" data-v-5c56a737><div class="header" data-v-5c56a737><div class="title-wrapper" data-v-5c56a737><i class="el-icon" style="font-size:20px;" data-v-5c56a737><!--[--><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1024 1024" data-v-5c56a737><path fill="currentColor" d="M288 128h608L736 384l160 256H288v320h-96V64h96z"></path></svg><!--]--></i><span class="title" data-v-5c56a737></span></div><i class="el-icon close-icon" style="font-size:20px;" data-v-5c56a737><!--[--><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1024 1024" data-v-5c56a737><path fill="currentColor" d="M512 64a448 448 0 1 1 0 896 448 448 0 0 1 0-896m0 393.664L407.936 353.6a38.4 38.4 0 1 0-54.336 54.336L457.664 512 353.6 616.064a38.4 38.4 0 1 0 54.336 54.336L512 566.336 616.064 670.4a38.4 38.4 0 1 0 54.336-54.336L566.336 512 670.4 407.936a38.4 38.4 0 1 0-54.336-54.336z"></path></svg><!--]--></i></div><!----><div class="footer content" data-v-5c56a737><!--[--><!--]--></div></div><div style="display:none;" class="theme-blog-popover-close" data-v-5c56a737><i class="el-icon" style="font-size:20px;" data-v-5c56a737><!--[--><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1024 1024" data-v-5c56a737><path fill="currentColor" d="M288 128h608L736 384l160 256H288v320h-96V64h96z"></path></svg><!--]--></i></div><!--]--><!--]--><!--[--><span tabindex="-1" data-v-0f60ec36></span><a href="#VPContent" class="VPSkipLink visually-hidden" data-v-0f60ec36> Skip to content </a><!--]--><!----><header class="VPNav" data-v-ae5c84bb data-v-ae24b3ad><div class="VPNavBar has-sidebar" data-v-ae24b3ad data-v-d83f3580><div class="container" data-v-d83f3580><div class="title" data-v-d83f3580><div class="VPNavBarTitle has-sidebar" data-v-d83f3580 data-v-86d1bed8><a class="title" href="/ysy-blog/" 
data-v-86d1bed8><!--[--><!--]--><!--[--><img class="VPImage logo" src="/ysy-blog/logo.png" alt data-v-8426fc1a><!--]--><!--[-->雾喔<!--]--><!--[--><!--]--></a></div></div><div class="content" data-v-d83f3580><div class="curtain" data-v-d83f3580></div><div class="content-body" data-v-d83f3580><!--[--><!--]--><div class="blog-search search" data-pagefind-ignore="all" data-v-d83f3580 style="--54fbd49d:1;" data-v-bebd53ce><div class="nav-search-btn-wait" data-v-bebd53ce><svg width="14" height="14" viewBox="0 0 20 20" data-v-bebd53ce><path d="M14.386 14.386l4.0877 4.0877-4.0877-4.0877c-2.9418 2.9419-7.7115 2.9419-10.6533 0-2.9419-2.9418-2.9419-7.7115 0-10.6533 2.9418-2.9419 7.7115-2.9419 10.6533 0 2.9419 2.9418 2.9419 7.7115 0 10.6533z" stroke="currentColor" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round" data-v-bebd53ce></path></svg><span class="search-tip" data-v-bebd53ce>Search</span><span class="metaKey" data-v-bebd53ce> K </span></div><!--teleport start--><!--teleport end--></div><nav aria-labelledby="main-nav-aria-label" class="VPNavBarMenu menu" data-v-d83f3580 data-v-7f418b0f><span id="main-nav-aria-label" class="visually-hidden" data-v-7f418b0f>Main Navigation</span><!--[--><!--[--><a class="VPLink link VPNavBarMenuLink" href="/ysy-blog/" tabindex="0" data-v-7f418b0f data-v-42ef59de><!--[--><span data-v-42ef59de>首页</span><!--]--></a><!--]--><!--[--><a class="VPLink link vp-external-link-icon VPNavBarMenuLink" href="https://sugarat.top/aboutme.html" target="_blank" rel="noreferrer" tabindex="0" data-v-7f418b0f data-v-42ef59de><!--[--><span data-v-42ef59de>关于作者</span><!--]--></a><!--]--><!--]--></nav><!----><div class="VPNavBarAppearance appearance" data-v-d83f3580 data-v-e6aabb21><button class="VPSwitch VPSwitchAppearance" type="button" role="switch" title="Switch to dark theme" aria-checked="false" data-v-e6aabb21 data-v-cbbe1149 data-v-b1685198><span class="check" data-v-b1685198><span class="icon" data-v-b1685198><!--[--><svg 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" viewbox="0 0 24 24" class="sun" data-v-cbbe1149><path d="M12,18c-3.3,0-6-2.7-6-6s2.7-6,6-6s6,2.7,6,6S15.3,18,12,18zM12,8c-2.2,0-4,1.8-4,4c0,2.2,1.8,4,4,4c2.2,0,4-1.8,4-4C16,9.8,14.2,8,12,8z"></path><path d="M12,4c-0.6,0-1-0.4-1-1V1c0-0.6,0.4-1,1-1s1,0.4,1,1v2C13,3.6,12.6,4,12,4z"></path><path d="M12,24c-0.6,0-1-0.4-1-1v-2c0-0.6,0.4-1,1-1s1,0.4,1,1v2C13,23.6,12.6,24,12,24z"></path><path d="M5.6,6.6c-0.3,0-0.5-0.1-0.7-0.3L3.5,4.9c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l1.4,1.4c0.4,0.4,0.4,1,0,1.4C6.2,6.5,5.9,6.6,5.6,6.6z"></path><path d="M19.8,20.8c-0.3,0-0.5-0.1-0.7-0.3l-1.4-1.4c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l1.4,1.4c0.4,0.4,0.4,1,0,1.4C20.3,20.7,20,20.8,19.8,20.8z"></path><path d="M3,13H1c-0.6,0-1-0.4-1-1s0.4-1,1-1h2c0.6,0,1,0.4,1,1S3.6,13,3,13z"></path><path d="M23,13h-2c-0.6,0-1-0.4-1-1s0.4-1,1-1h2c0.6,0,1,0.4,1,1S23.6,13,23,13z"></path><path d="M4.2,20.8c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l1.4-1.4c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-1.4,1.4C4.7,20.7,4.5,20.8,4.2,20.8z"></path><path d="M18.4,6.6c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l1.4-1.4c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-1.4,1.4C18.9,6.5,18.6,6.6,18.4,6.6z"></path></svg><svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" viewbox="0 0 24 24" class="moon" data-v-cbbe1149><path d="M12.1,22c-0.3,0-0.6,0-0.9,0c-5.5-0.5-9.5-5.4-9-10.9c0.4-4.8,4.2-8.6,9-9c0.4,0,0.8,0.2,1,0.5c0.2,0.3,0.2,0.8-0.1,1.1c-2,2.7-1.4,6.4,1.3,8.4c2.1,1.6,5,1.6,7.1,0c0.3-0.2,0.7-0.3,1.1-0.1c0.3,0.2,0.5,0.6,0.5,1c-0.2,2.7-1.5,5.1-3.6,6.8C16.6,21.2,14.4,22,12.1,22zM9.3,4.4c-2.9,1-5,3.6-5.2,6.8c-0.4,4.4,2.8,8.3,7.2,8.7c2.1,0.2,4.2-0.4,5.8-1.8c1.1-0.9,1.9-2.1,2.4-3.4c-2.5,0.9-5.3,0.5-7.5-1.1C9.2,11.4,8.1,7.7,9.3,4.4z"></path></svg><!--]--></span></span></button></div><div class="VPSocialLinks VPNavBarSocialLinks social-links" data-v-d83f3580 data-v-0394ad82 data-v-7bc22406><!--[--><a class="VPSocialLink no-icon" href="https://gitee.com/yang-saiya" 
aria-label="gitee" target="_blank" rel="noopener" data-v-7bc22406 data-v-f80f8133><svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>Gitee</title><path d="M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12"/></svg></a><!--]--></div><div class="VPFlyout VPNavBarExtra extra" data-v-d83f3580 data-v-d0bd9dde data-v-9c007e85><button type="button" class="button" aria-haspopup="true" aria-expanded="false" aria-label="extra navigation" data-v-9c007e85><svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" viewBox="0 0 24 24" class="icon" data-v-9c007e85><circle cx="12" cy="12" r="2"></circle><circle cx="19" cy="12" r="2"></circle><circle cx="5" cy="12" r="2"></circle></svg></button><div class="menu" data-v-9c007e85><div class="VPMenu" data-v-9c007e85 data-v-e7ea1737><!----><!--[--><!--[--><!----><div class="group" data-v-d0bd9dde><div class="item appearance" data-v-d0bd9dde><p class="label" data-v-d0bd9dde>Appearance</p><div class="appearance-action" data-v-d0bd9dde><button class="VPSwitch VPSwitchAppearance" type="button" role="switch" title="Switch to dark theme" aria-checked="false" data-v-d0bd9dde data-v-cbbe1149 data-v-b1685198><span class="check" data-v-b1685198><span class="icon" data-v-b1685198><!--[--><svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true"
focusable="false" viewbox="0 0 24 24" class="sun" data-v-cbbe1149><path d="M12,18c-3.3,0-6-2.7-6-6s2.7-6,6-6s6,2.7,6,6S15.3,18,12,18zM12,8c-2.2,0-4,1.8-4,4c0,2.2,1.8,4,4,4c2.2,0,4-1.8,4-4C16,9.8,14.2,8,12,8z"></path><path d="M12,4c-0.6,0-1-0.4-1-1V1c0-0.6,0.4-1,1-1s1,0.4,1,1v2C13,3.6,12.6,4,12,4z"></path><path d="M12,24c-0.6,0-1-0.4-1-1v-2c0-0.6,0.4-1,1-1s1,0.4,1,1v2C13,23.6,12.6,24,12,24z"></path><path d="M5.6,6.6c-0.3,0-0.5-0.1-0.7-0.3L3.5,4.9c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l1.4,1.4c0.4,0.4,0.4,1,0,1.4C6.2,6.5,5.9,6.6,5.6,6.6z"></path><path d="M19.8,20.8c-0.3,0-0.5-0.1-0.7-0.3l-1.4-1.4c-0.4-0.4-0.4-1,0-1.4s1-0.4,1.4,0l1.4,1.4c0.4,0.4,0.4,1,0,1.4C20.3,20.7,20,20.8,19.8,20.8z"></path><path d="M3,13H1c-0.6,0-1-0.4-1-1s0.4-1,1-1h2c0.6,0,1,0.4,1,1S3.6,13,3,13z"></path><path d="M23,13h-2c-0.6,0-1-0.4-1-1s0.4-1,1-1h2c0.6,0,1,0.4,1,1S23.6,13,23,13z"></path><path d="M4.2,20.8c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l1.4-1.4c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-1.4,1.4C4.7,20.7,4.5,20.8,4.2,20.8z"></path><path d="M18.4,6.6c-0.3,0-0.5-0.1-0.7-0.3c-0.4-0.4-0.4-1,0-1.4l1.4-1.4c0.4-0.4,1-0.4,1.4,0s0.4,1,0,1.4l-1.4,1.4C18.9,6.5,18.6,6.6,18.4,6.6z"></path></svg><svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" viewbox="0 0 24 24" class="moon" data-v-cbbe1149><path d="M12.1,22c-0.3,0-0.6,0-0.9,0c-5.5-0.5-9.5-5.4-9-10.9c0.4-4.8,4.2-8.6,9-9c0.4,0,0.8,0.2,1,0.5c0.2,0.3,0.2,0.8-0.1,1.1c-2,2.7-1.4,6.4,1.3,8.4c2.1,1.6,5,1.6,7.1,0c0.3-0.2,0.7-0.3,1.1-0.1c0.3,0.2,0.5,0.6,0.5,1c-0.2,2.7-1.5,5.1-3.6,6.8C16.6,21.2,14.4,22,12.1,22zM9.3,4.4c-2.9,1-5,3.6-5.2,6.8c-0.4,4.4,2.8,8.3,7.2,8.7c2.1,0.2,4.2-0.4,5.8-1.8c1.1-0.9,1.9-2.1,2.4-3.4c-2.5,0.9-5.3,0.5-7.5-1.1C9.2,11.4,8.1,7.7,9.3,4.4z"></path></svg><!--]--></span></span></button></div></div></div><div class="group" data-v-d0bd9dde><div class="item social-links" data-v-d0bd9dde><div class="VPSocialLinks social-links-list" data-v-d0bd9dde data-v-7bc22406><!--[--><a class="VPSocialLink no-icon" 
href="https://gitee.com/yang-saiya" aria-label="gitee" target="_blank" rel="noopener" data-v-7bc22406 data-v-f80f8133><svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>Gitee</title><path d="M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12"/></svg></a><!--]--></div></div></div><!--]--><!--]--></div></div></div><!--[--><!--]--><button type="button" class="VPNavBarHamburger hamburger" aria-label="mobile navigation" aria-expanded="false" aria-controls="VPNavScreen" data-v-d83f3580 data-v-e5dd9c1c><span class="container" data-v-e5dd9c1c><span class="top" data-v-e5dd9c1c></span><span class="middle" data-v-e5dd9c1c></span><span class="bottom" data-v-e5dd9c1c></span></span></button></div></div></div></div><!----></header><div class="VPLocalNav reached-top" data-v-ae5c84bb data-v-f84a0989><button class="menu" aria-expanded="false" aria-controls="VPSidebarNav" data-v-f84a0989><svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" viewBox="0 0 24 24" class="menu-icon" data-v-f84a0989><path d="M17,11H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,11,17,11z"></path><path d="M21,7H3C2.4,7,2,6.6,2,6s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,7,21,7z"></path><path d="M21,15H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h18c0.6,0,1,0.4,1,1S21.6,15,21,15z"></path><path
d="M17,19H3c-0.6,0-1-0.4-1-1s0.4-1,1-1h14c0.6,0,1,0.4,1,1S17.6,19,17,19z"></path></svg><span class="menu-text" data-v-f84a0989>Menu</span></button><div class="VPLocalNavOutlineDropdown" style="--vp-vh:0px;" data-v-f84a0989 data-v-1c15a60a><button data-v-1c15a60a>Return to top</button><!----></div></div><aside class="VPSidebar" data-v-ae5c84bb data-v-7f44e717><div class="curtain" data-v-7f44e717></div><nav class="nav" id="VPSidebarNav" aria-labelledby="sidebar-aria-label" tabindex="-1" data-v-7f44e717><span class="visually-hidden" id="sidebar-aria-label" data-v-7f44e717> Sidebar Navigation </span><!--[--><!--]--><!--[--><div class="group" data-v-7f44e717><div class="VPSidebarItem level-0" data-v-7f44e717 data-v-e31bd47b><!----><!----></div></div><!--]--><!--[--><!--[--><!--[--><!--]--><div class="sidebar" data-pagefind-ignore="all" data-v-af5a5a9a style="--35c3ff37:40px;--ae4fb7a4:60px;" data-v-15b2e0fa><div class="recommend" data-pagefind-ignore="all" data-v-15b2e0fa style="--9285819c:0px;" data-v-065e53b5><div class="card-header" data-v-065e53b5><span class="title" data-v-065e53b5><span class="svg-icon"><svg width="512" height="512" viewBox="0 0 128 128" xmlns="http://www.w3.org/2000/svg">
<radialGradient id="notoFaceWithMonocle0" cx="63.6" cy="-2088.9" r="56.96" gradientTransform="matrix(1 0 0 -1 0 -2026)" gradientUnits="userSpaceOnUse">
    <stop offset=".5" stop-color="#FDE030"/>
    <stop offset=".919" stop-color="#F7C02B"/>
    <stop offset="1" stop-color="#F4A223"/>
</radialGradient>
<path fill="url(#notoFaceWithMonocle0)" d="M63.6 118.8c-27.9 0-58-17.5-58-55.9S35.7 7 63.6 7c15.5 0 29.8 5.1 40.4 14.4c11.5 10.2 17.6 24.6 17.6 41.5s-6.1 31.2-17.6 41.4c-10.6 9.3-25 14.5-40.4 14.5z"/>
<path fill="#EB8F00" d="M111.49 29.67c5.33 8.6 8.11 18.84 8.11 30.23c0 16.9-6.1 31.2-17.6 41.4c-10.6 9.3-25 14.5-40.4 14.5c-18.06 0-37.04-7.35-48.18-22.94c10.76 17.66 30.99 25.94 50.18 25.94c15.4 0 29.8-5.2 40.4-14.5c11.5-10.2 17.6-24.5 17.6-41.4c0-12.74-3.47-24.06-10.11-33.23z"/>
<path fill="#422B0D" d="M74.3 24.5c.9-.8 1.8-1.5 2.8-2.2s2.1-1.2 3.1-1.8c2.2-1 4.6-2 7.5-2.1c.7-.1 1.4.1 2.2.1c.7 0 1.5.3 2.2.5c.2.1.4.1.5.2l.5.3l1 .5c.6.4 1.2.9 1.8 1.4c1.1 1 1.8 2.3 2.4 3.5c.3.6.5 1.2.7 1.8c.2.6.3 1.2.4 1.6l.1.3c.2 1.2-.6 2.4-1.8 2.6c-1 .2-1.9-.3-2.4-1.1c-.3-.6-.6-1.1-.8-1.5c-.2-.4-.5-.8-.7-1.2c-.5-.7-1.1-1.3-1.7-1.8c-.1-.1-.3-.2-.5-.3c-.2-.1-.3-.2-.4-.3c-.2-.1-.3-.1-.5-.2l-.2-.1h-.3c-.7-.2-1.4-.3-2.2-.2c-1.6 0-3.5.6-5.4 1.3c-.9.4-1.9.8-2.8 1.2c-.9.5-1.9.9-2.9 1.3l-.2.1c-1.2.5-2.5-.1-2.9-1.2c-.5-1-.2-2.1.5-2.7zm-28.7 14c-.8.2-1.3.3-2 .3c-.6 0-1.3.1-1.9 0c-1.3 0-2.6-.2-3.8-.6c-2-.5-4-.8-5.7-.8c-.8 0-1.6.1-2.2.3s-1.1.5-1.6 1c-1 1-2 2.8-3 4.6c-.6 1.1-2 1.5-3.1.9c-.9-.5-1.3-1.5-1.2-2.4c.2-1.1.5-2.3.9-3.5c.5-1.2 1-2.4 2-3.6c1-1.2 2.3-2.2 3.8-2.7s2.9-.7 4.3-.7c2.7.1 5 .8 7.2 1.6c.9.3 1.8.6 2.7.8c.5.1.9.2 1.4.2c.4 0 1 .1 1.3.1h.4c1.2 0 2.2 1.1 2.1 2.3c.1 1.2-.6 2-1.6 2.2zM44 91.3c1.8-2 6.6-4.3 13.3-4.2c8.9.2 13.7 4.2 13.7 4.2c2.9 2.4 1.6 6.1-1.5 5.8c-5-.6-4.7-1.5-13.7-1.7c-6.5-.1-9.6.9-9.9.9c-1.8.1-2.5-.6-2.8-1.8c-.3-1 0-2.3.9-3.2zm38.3-47.5c4.4 0 8.4 3.7 8.4 9.9s-4 9.9-8.4 9.9s-8.4-3.7-8.4-9.9s4-9.9 8.4-9.9zm-45.1 1.7c-4.2 0-8 3.5-8 9.4s3.8 9.4 8 9.4s8-3.5 8-9.4s-3.8-9.4-8-9.4z"/>
<path fill="#896024" d="M81.37 48.35c-1.03-.72-2.58-.49-3.58.95c-1 1.45-.67 2.97.36 3.69c1.03.72 2.58.49 3.58-.95c1.01-1.45.67-2.98-.36-3.69z"/>
<path fill="#404040" d="M106.5 49.9h-5.6v9h5.6c2.7 0 5.4-1.7 5.4-4.3v-.4c0-2.7-2.7-4.3-5.4-4.3zm-.5 7.7c-1.9 0-3.4-1.5-3.4-3.3s1.5-3.3 3.4-3.3s3.4 1.5 3.4 3.3s-1.5 3.3-3.4 3.3zm.1 13.2c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .2-.2.5-.5.5zm0 7.6c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5zm0 7.7c-.3 0-.6-.2-.6-.5V81c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5zm0 7.7c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5zm0 7.7c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5zm0 7.7c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5zm0 7.7c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .3-.2.5-.5.5z"/>
<path fill="#404040" d="M106.1 62.7c-.3 0-.6-.2-.6-.5v-4.6c0-.3.2-.5.5-.5h.1c.3 0 .5.2.5.5v4.6c0 .2-.2.5-.5.5z"/>
<circle cx="106.1" cy="64.1" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="71.8" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="79.4" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="87.1" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="94.7" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="102.4" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="110" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="106.1" cy="117.7" r="2.6" fill="none" stroke="#404040" stroke-miterlimit="10"/>
<circle cx="79.6" cy="53.7" r="20.6" fill="#CCC" fill-opacity=".5" stroke="#404040" stroke-miterlimit="10" stroke-width="3.5"/>
<path fill="#FFF" d="M94.8 52.5c-.4-3.8-2.1-7.3-4.9-9.7s-6.4-3.7-10-3.7c-3.7 0-7.3 1.2-10.1 3.5s-4.6 5.8-4.9 9.6c0 .3-.3.5-.5.4c-.2 0-.4-.2-.4-.5c-.1-2 .3-4.1 1.1-6c.8-1.9 2-3.6 3.5-5.1c3-2.9 7.3-4.3 11.4-4.3c4.2 0 8.3 1.6 11.3 4.5c1.5 1.5 2.7 3.2 3.5 5.1c.8 1.9 1.1 4 1 6c0 .3-.2.5-.5.5c-.3.1-.5-.1-.5-.3z"/>
<path fill="#896024" d="M37.01 49.35c-1.03-.72-2.58-.49-3.58.95s-.67 2.97.36 3.69c1.03.72 2.58.49 3.58-.95c1.01-1.45.68-2.98-.36-3.69z"/>
</svg></span>相关文章</span><button aria-disabled="false" type="button" class="el-button el-button--primary el-button--small is-text" style="" data-v-065e53b5><!--v-if--><span class=""><!--[-->换一组<!--]--></span></button></div><ol class="recommend-container" data-v-065e53b5><!--[--><li data-v-065e53b5><i class="num" data-v-065e53b5>1</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/114. 二叉树展开为链表" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->114. 二叉树展开为链表<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>5天前</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>2</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/199. 二叉树的右视图" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->199. 二叉树的右视图<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>5天前</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>3</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/java小记（1）" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->java小记（1）<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>6天前</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>4</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/蓝桥杯-数字三角形" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->蓝桥杯-数字三角形<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-02-25</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>5</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/蓝桥杯-乘积最大"
data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->蓝桥杯-乘积最大<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-02-25</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>6</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/蓝桥杯-答疑" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->蓝桥杯-答疑<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-02-25</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>7</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/蓝桥杯-X图形" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->蓝桥杯-X图形<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-02-12</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>8</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/除夕---总结" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->除夕---总结<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-02-09</span></div></div></li><li data-v-065e53b5><i class="num" data-v-065e53b5>9</i><div class="des" data-v-065e53b5><a class="el-link el-link--info is-underline title" href="/ysy-blog/blog/csdn/计网小记-1" data-v-065e53b5><!--v-if--><span class="el-link__inner"><!--[-->计网小记-1<!--]--></span><!--v-if--></a><div class="suffix" data-v-065e53b5><span class="tag" data-v-065e53b5>2024-01-18</span></div></div></li><!--]--></ol></div></div><!--]--><!--]--></nav></aside><div class="VPContent has-sidebar" id="VPContent" data-pagefind-body data-v-ae5c84bb data-v-669faec9><div class="VPDoc has-sidebar has-aside" data-v-669faec9 data-v-6b87e69f><!--[--><!--]--><div class="container" 
data-v-6b87e69f><div class="aside" data-v-6b87e69f><div class="aside-curtain" data-v-6b87e69f></div><div class="aside-container" data-v-6b87e69f><div class="aside-content" data-v-6b87e69f><div class="VPDocAside" data-v-6b87e69f data-v-3f215769><!--[--><!--]--><!--[--><!--]--><div class="VPDocAsideOutline" role="navigation" data-v-3f215769 data-v-d330b1bb><div class="content" data-v-d330b1bb><div class="outline-marker" data-v-d330b1bb></div><div class="outline-title" role="heading" aria-level="2" data-v-d330b1bb>On this page</div><nav aria-labelledby="doc-outline-aria-label" data-v-d330b1bb><span class="visually-hidden" id="doc-outline-aria-label" data-v-d330b1bb> Table of Contents for current page </span><ul class="root" data-v-d330b1bb data-v-d0ee3533><!--[--><!--]--></ul></nav></div></div><!--[--><!--]--><div class="spacer" data-v-3f215769></div><!--[--><!--]--><!----><!--[--><!--]--><!--[--><!--]--></div></div></div></div><div class="content" data-v-6b87e69f><div class="content-container" data-v-6b87e69f><!--[--><!--[--><!--[--><!--[--><!--]--><!----><!----><!--]--><!--]--><!--]--><!----><main class="main" data-v-6b87e69f><div style="position:relative;" class="vp-doc _ysy-blog_blog_csdn_redis%E7%9A%84redis_config%E6%96%87%E4%BB%B6%E9%85%8D%E7%BD%AE%E4%B8%8E%E5%86%85%E5%AE%B9+10_30%E6%97%A5%E4%B9%8B%E5%89%8D%E7%9A%84%E6%80%BB%E7%BB%93" data-v-6b87e69f><div><p>参考博客：<a href="https://blog.csdn.net/Hubery_sky/article/details/125345068?spm=1001.2014.3001.5501" title="redis.conf的一些配置+密码的设置（mac）+个人总结_雾喔的博客-CSDN博客_redis密码配置文件" target="_blank" rel="noreferrer">redis.conf的一些配置+密码的设置（mac）+个人总结_雾喔的博客-CSDN博客_redis密码配置文件</a></p><p>这个是初始的redis.config的内容</p><div class="language- vp-adaptive-theme"><button title="Copy Code" class="copy"></button><span class="lang"></span><pre class="shiki shiki-themes github-light github-dark vp-code"><code><span class="line"><span># Redis configuration file example.</span></span>
<span class="line"><span>requirepass 956766</span></span>
<span class="line"><span>maxclients 10000</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that in order to read the configuration file, Redis must be</span></span>
<span class="line"><span># started with the file path as first argument:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ./redis-server /path/to/redis.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Note on units: when memory size is needed, it is possible to specify</span></span>
<span class="line"><span># it in the usual form of 1k 5GB 4M and so forth:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1k =&gt; 1000 bytes</span></span>
<span class="line"><span># 1kb =&gt; 1024 bytes</span></span>
<span class="line"><span># 1m =&gt; 1000000 bytes</span></span>
<span class="line"><span># 1mb =&gt; 1024*1024 bytes</span></span>
<span class="line"><span># 1g =&gt; 1000000000 bytes</span></span>
<span class="line"><span># 1gb =&gt; 1024*1024*1024 bytes</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># units are case insensitive so 1GB 1Gb 1gB are all the same.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## INCLUDES ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Include one or more other config files here.  This is useful if you</span></span>
<span class="line"><span># have a standard template that goes to all Redis servers but also need</span></span>
<span class="line"><span># to customize a few per-server settings.  Include files can include</span></span>
<span class="line"><span># other files, so use this wisely.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Notice option &quot;include&quot; won&#39;t be rewritten by command &quot;CONFIG REWRITE&quot;</span></span>
<span class="line"><span># from admin or Redis Sentinel. Since Redis always uses the last processed</span></span>
<span class="line"><span># line as value of a configuration directive, you&#39;d better put includes</span></span>
<span class="line"><span># at the beginning of this file to avoid overwriting config change at runtime.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If instead you are interested in using includes to override configuration</span></span>
<span class="line"><span># options, it is better to use include as the last line.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># include /path/to/local.conf</span></span>
<span class="line"><span># include /path/to/other.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## MODULES #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Load modules at startup. If the server is not able to load modules</span></span>
<span class="line"><span># it will abort. It is possible to use multiple loadmodule directives.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># loadmodule /path/to/my_module.so</span></span>
<span class="line"><span># loadmodule /path/to/other_module.so</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## NETWORK #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, if no &quot;bind&quot; configuration directive is specified, Redis listens</span></span>
<span class="line"><span># for connections from all the network interfaces available on the server.</span></span>
<span class="line"><span># It is possible to listen to just one or multiple selected interfaces using</span></span>
<span class="line"><span># the &quot;bind&quot; configuration directive, followed by one or more IP addresses.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Examples:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># bind 192.168.1.100 10.0.0.1</span></span>
<span class="line"><span># bind 127.0.0.1 ::1</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the</span></span>
<span class="line"><span># internet, binding to all the interfaces is dangerous and will expose the</span></span>
<span class="line"><span># instance to everybody on the internet. So by default we uncomment the</span></span>
<span class="line"><span># following bind directive, that will force Redis to listen only into</span></span>
<span class="line"><span># the IPv4 loopback interface address (this means Redis will be able to</span></span>
<span class="line"><span># accept connections only from clients running into the same computer it</span></span>
<span class="line"><span># is running).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES</span></span>
<span class="line"><span># JUST COMMENT THE FOLLOWING LINE.</span></span>
<span class="line"><span># ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~</span></span>
<span class="line"><span>bind 127.0.0.1</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Protected mode is a layer of security protection, in order to avoid that</span></span>
<span class="line"><span># Redis instances left open on the internet are accessed and exploited.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When protected mode is on and if:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) The server is not binding explicitly to a set of addresses using the</span></span>
<span class="line"><span>#    &quot;bind&quot; directive.</span></span>
<span class="line"><span># 2) No password is configured.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The server only accepts connections from clients connecting from the</span></span>
<span class="line"><span># IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain</span></span>
<span class="line"><span># sockets.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default protected mode is enabled. You should disable it only if</span></span>
<span class="line"><span># you are sure you want clients from other hosts to connect to Redis</span></span>
<span class="line"><span># even if no authentication is configured, nor a specific set of interfaces</span></span>
<span class="line"><span># are explicitly listed using the &quot;bind&quot; directive.</span></span>
<span class="line"><span>protected-mode yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Accept connections on the specified port, default is 6379 (IANA #815344).</span></span>
<span class="line"><span># If port 0 is specified Redis will not listen on a TCP socket.</span></span>
<span class="line"><span>port 6379</span></span>
<span class="line"><span></span></span>
<span class="line"><span># TCP listen() backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In high requests-per-second environments you need a high backlog in order</span></span>
<span class="line"><span># to avoid slow client connection issues. Note that the Linux kernel</span></span>
<span class="line"><span># will silently truncate it to the value of /proc/sys/net/core/somaxconn so</span></span>
<span class="line"><span># make sure to raise both the value of somaxconn and tcp_max_syn_backlog</span></span>
<span class="line"><span># in order to get the desired effect.</span></span>
<span class="line"><span>tcp-backlog 511</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Unix socket.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Specify the path for the Unix socket that will be used to listen for</span></span>
<span class="line"><span># incoming connections. There is no default, so Redis will not listen</span></span>
<span class="line"><span># on a unix socket when not specified.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># unixsocket /tmp/redis.sock</span></span>
<span class="line"><span># unixsocketperm 700</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Close the connection after a client is idle for N seconds (0 to disable)</span></span>
<span class="line"><span>timeout 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># TCP keepalive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence</span></span>
<span class="line"><span># of communication. This is useful for two reasons:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Detect dead peers.</span></span>
<span class="line"><span># 2) Keep the connection alive from the point of view of network</span></span>
<span class="line"><span>#    equipment in the middle.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># On Linux, the specified value (in seconds) is the period used to send ACKs.</span></span>
<span class="line"><span># Note that to close the connection, twice the time is needed.</span></span>
<span class="line"><span># On other kernels the period depends on the kernel configuration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A reasonable value for this option is 300 seconds, which is the new</span></span>
<span class="line"><span># Redis default starting with Redis 3.2.1.</span></span>
<span class="line"><span>tcp-keepalive 300</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################# GENERAL #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis does not run as a daemon. Use &#39;yes&#39; if you need it.</span></span>
<span class="line"><span># Note that Redis will write a pid file in /var/run/redis.pid when daemonized.</span></span>
<span class="line"><span>daemonize yes </span></span>
<span class="line"><span></span></span>
<span class="line"><span># If you run Redis from upstart or systemd, Redis can interact with your</span></span>
<span class="line"><span># supervision tree. Options:</span></span>
<span class="line"><span>#   supervised no      - no supervision interaction</span></span>
<span class="line"><span>#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode</span></span>
<span class="line"><span>#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET</span></span>
<span class="line"><span>#   supervised auto    - detect upstart or systemd method based on</span></span>
<span class="line"><span>#                        UPSTART_JOB or NOTIFY_SOCKET environment variables</span></span>
<span class="line"><span># Note: these supervision methods only signal &quot;process is ready.&quot;</span></span>
<span class="line"><span>#       They do not enable continuous liveness pings back to your supervisor.</span></span>
<span class="line"><span>supervised no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># If a pid file is specified, Redis writes it where specified at startup</span></span>
<span class="line"><span># and removes it at exit.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When the server runs non daemonized, no pid file is created if none is</span></span>
<span class="line"><span># specified in the configuration. When the server is daemonized, the pid file</span></span>
<span class="line"><span># is used even if not specified, defaulting to &quot;/var/run/redis.pid&quot;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Creating a pid file is best effort: if Redis is not able to create it</span></span>
<span class="line"><span># nothing bad happens, the server will start and run normally.</span></span>
<span class="line"><span>pidfile /www/server/redis/redis.pid </span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the server verbosity level.</span></span>
<span class="line"><span># This can be one of:</span></span>
<span class="line"><span># debug (a lot of information, useful for development/testing)</span></span>
<span class="line"><span># verbose (many rarely useful info, but not a mess like the debug level)</span></span>
<span class="line"><span># notice (moderately verbose, what you want in production probably)</span></span>
<span class="line"><span># warning (only very important / critical messages are logged)</span></span>
<span class="line"><span>loglevel notice</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the log file name. Also the empty string can be used to force</span></span>
<span class="line"><span># Redis to log on the standard output. Note that if you use standard</span></span>
<span class="line"><span># output for logging but daemonize, logs will be sent to /dev/null</span></span>
<span class="line"><span>logfile &quot;/www/server/redis/redis.log&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># To enable logging to the system logger, just set &#39;syslog-enabled&#39; to yes,</span></span>
<span class="line"><span># and optionally update the other syslog parameters to suit your needs.</span></span>
<span class="line"><span># syslog-enabled no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the syslog identity.</span></span>
<span class="line"><span># syslog-ident redis</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.</span></span>
<span class="line"><span># syslog-facility local0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the number of databases. The default database is DB 0, you can select</span></span>
<span class="line"><span># a different one on a per-connection basis using SELECT &lt;dbid&gt; where</span></span>
<span class="line"><span># dbid is a number between 0 and &#39;databases&#39;-1</span></span>
<span class="line"><span>databases 16</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis shows an ASCII art logo only when started to log to the</span></span>
<span class="line"><span># standard output and if the standard output is a TTY. Basically this means</span></span>
<span class="line"><span># that normally a logo is displayed only in interactive sessions.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However it is possible to force the pre-4.0 behavior and always show an</span></span>
<span class="line"><span># ASCII art logo in startup logs by setting the following option to yes.</span></span>
<span class="line"><span>always-show-logo yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ SNAPSHOTTING  ################################</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Save the DB on disk:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   save &lt;seconds&gt; &lt;changes&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   Will save the DB if both the given number of seconds and the given</span></span>
<span class="line"><span>#   number of write operations against the DB occurred.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   In the example below the behaviour will be to save:</span></span>
<span class="line"><span>#   after 900 sec (15 min) if at least 1 key changed</span></span>
<span class="line"><span>#   after 300 sec (5 min) if at least 10 keys changed</span></span>
<span class="line"><span>#   after 60 sec if at least 10000 keys changed</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   Note: you can disable saving completely by commenting out all &quot;save&quot; lines.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   It is also possible to remove all the previously configured save</span></span>
<span class="line"><span>#   points by adding a save directive with a single empty string argument</span></span>
<span class="line"><span>#   like in the following example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   save &quot;&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span>save 900 1</span></span>
<span class="line"><span>save 300 10</span></span>
<span class="line"><span>save 60 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis will stop accepting writes if RDB snapshots are enabled</span></span>
<span class="line"><span># (at least one save point) and the latest background save failed.</span></span>
<span class="line"><span># This will make the user aware (in a hard way) that data is not persisting</span></span>
<span class="line"><span># on disk properly, otherwise chances are that no one will notice and some</span></span>
<span class="line"><span># disaster will happen.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the background saving process will start working again Redis will</span></span>
<span class="line"><span># automatically allow writes again.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However if you have setup your proper monitoring of the Redis server</span></span>
<span class="line"><span># and persistence, you may want to disable this feature so that Redis will</span></span>
<span class="line"><span># continue to work as usual even if there are problems with disk,</span></span>
<span class="line"><span># permissions, and so forth.</span></span>
<span class="line"><span>stop-writes-on-bgsave-error yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Compress string objects using LZF when dump .rdb databases?</span></span>
<span class="line"><span># For default that&#39;s set to &#39;yes&#39; as it&#39;s almost always a win.</span></span>
<span class="line"><span># If you want to save some CPU in the saving child set it to &#39;no&#39; but</span></span>
<span class="line"><span># the dataset will likely be bigger if you have compressible values or keys.</span></span>
<span class="line"><span>rdbcompression yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Since version 5 of RDB a CRC64 checksum is placed at the end of the file.</span></span>
<span class="line"><span># This makes the format more resistant to corruption but there is a performance</span></span>
<span class="line"><span># hit to pay (around 10%) when saving and loading RDB files, so you can disable it</span></span>
<span class="line"><span># for maximum performances.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># RDB files created with checksum disabled have a checksum of zero that will</span></span>
<span class="line"><span># tell the loading code to skip the check.</span></span>
<span class="line"><span>rdbchecksum yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The filename where to dump the DB</span></span>
<span class="line"><span>dbfilename dump.rdb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The working directory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The DB will be written inside this directory, with the filename specified</span></span>
<span class="line"><span># above using the &#39;dbfilename&#39; configuration directive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Append Only File will also be created inside this directory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that you must specify a directory here, not a file name.</span></span>
<span class="line"><span>dir /www/server/redis/</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################# REPLICATION #################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Master-Replica replication. Use replicaof to make a Redis instance a copy of</span></span>
<span class="line"><span># another Redis server. A few things to understand ASAP about Redis replication.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   +------------------+      +---------------+</span></span>
<span class="line"><span>#   |      Master      | ---&gt; |    Replica    |</span></span>
<span class="line"><span>#   | (receive writes) |      |  (exact copy) |</span></span>
<span class="line"><span>#   +------------------+      +---------------+</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Redis replication is asynchronous, but you can configure a master to</span></span>
<span class="line"><span>#    stop accepting writes if it appears to be not connected with at least</span></span>
<span class="line"><span>#    a given number of replicas.</span></span>
<span class="line"><span># 2) Redis replicas are able to perform a partial resynchronization with the</span></span>
<span class="line"><span>#    master if the replication link is lost for a relatively small amount of</span></span>
<span class="line"><span>#    time. You may want to configure the replication backlog size (see the next</span></span>
<span class="line"><span>#    sections of this file) with a sensible value depending on your needs.</span></span>
<span class="line"><span># 3) Replication is automatic and does not need user intervention. After a</span></span>
<span class="line"><span>#    network partition replicas automatically try to reconnect to masters</span></span>
<span class="line"><span>#    and resynchronize with them.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replicaof &lt;masterip&gt; &lt;masterport&gt;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># If the master is password protected (using the &quot;requirepass&quot; configuration</span></span>
<span class="line"><span># directive below) it is possible to tell the replica to authenticate before</span></span>
<span class="line"><span># starting the replication synchronization process, otherwise the master will</span></span>
<span class="line"><span># refuse the replica request.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># masterauth &lt;master-password&gt;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When a replica loses its connection with the master, or when the replication</span></span>
<span class="line"><span># is still in progress, the replica can act in two different ways:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) if replica-serve-stale-data is set to &#39;yes&#39; (the default) the replica will</span></span>
<span class="line"><span>#    still reply to client requests, possibly with out of date data, or the</span></span>
<span class="line"><span>#    data set may just be empty if this is the first synchronization.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2) if replica-serve-stale-data is set to &#39;no&#39; the replica will reply with</span></span>
<span class="line"><span>#    an error &quot;SYNC with master in progress&quot; to all kinds of commands</span></span>
<span class="line"><span>#    except INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,</span></span>
<span class="line"><span>#    SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,</span></span>
<span class="line"><span>#    COMMAND, POST, HOST: and LATENCY.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>replica-serve-stale-data yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># You can configure a replica instance to accept writes or not. Writing against</span></span>
<span class="line"><span># a replica instance may be useful to store some ephemeral data (because data</span></span>
<span class="line"><span># written on a replica will be easily deleted after resync with the master) but</span></span>
<span class="line"><span># may also cause problems if clients are writing to it because of a</span></span>
<span class="line"><span># misconfiguration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Since Redis 2.6 by default replicas are read-only.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: read only replicas are not designed to be exposed to untrusted clients</span></span>
<span class="line"><span># on the internet. It&#39;s just a protection layer against misuse of the instance.</span></span>
<span class="line"><span># Still a read only replica exports by default all the administrative commands</span></span>
<span class="line"><span># such as CONFIG, DEBUG, and so forth. To a limited extent you can improve</span></span>
<span class="line"><span># security of read only replicas using &#39;rename-command&#39; to shadow all the</span></span>
<span class="line"><span># administrative / dangerous commands.</span></span>
<span class="line"><span>replica-read-only yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Replication SYNC strategy: disk or socket.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># -------------------------------------------------------</span></span>
<span class="line"><span># WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY</span></span>
<span class="line"><span># -------------------------------------------------------</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># New replicas and reconnecting replicas that are not able to continue the replication</span></span>
<span class="line"><span># process just receiving differences, need to do what is called a &quot;full</span></span>
<span class="line"><span># synchronization&quot;. An RDB file is transmitted from the master to the replicas.</span></span>
<span class="line"><span># The transmission can happen in two different ways:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Disk-backed: The Redis master creates a new process that writes the RDB</span></span>
<span class="line"><span>#                 file on disk. Later the file is transferred by the parent</span></span>
<span class="line"><span>#                 process to the replicas incrementally.</span></span>
<span class="line"><span># 2) Diskless: The Redis master creates a new process that directly writes the</span></span>
<span class="line"><span>#              RDB file to replica sockets, without touching the disk at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># With disk-backed replication, while the RDB file is generated, more replicas</span></span>
<span class="line"><span># can be queued and served with the RDB file as soon as the current child producing</span></span>
<span class="line"><span># the RDB file finishes its work. With diskless replication instead once</span></span>
<span class="line"><span># the transfer starts, new replicas arriving will be queued and a new transfer</span></span>
<span class="line"><span># will start when the current one terminates.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When diskless replication is used, the master waits a configurable amount of</span></span>
<span class="line"><span># time (in seconds) before starting the transfer in the hope that multiple replicas</span></span>
<span class="line"><span># will arrive and the transfer can be parallelized.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># With slow disks and fast (large bandwidth) networks, diskless replication</span></span>
<span class="line"><span># works better.</span></span>
<span class="line"><span>repl-diskless-sync no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When diskless replication is enabled, it is possible to configure the delay</span></span>
<span class="line"><span># the server waits in order to spawn the child that transfers the RDB via socket</span></span>
<span class="line"><span># to the replicas.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is important since once the transfer starts, it is not possible to serve</span></span>
<span class="line"><span># new replicas arriving, that will be queued for the next RDB transfer, so the server</span></span>
<span class="line"><span># waits a delay in order to let more replicas arrive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The delay is specified in seconds, and by default is 5 seconds. To disable</span></span>
<span class="line"><span># it entirely just set it to 0 seconds and the transfer will start ASAP.</span></span>
<span class="line"><span>repl-diskless-sync-delay 5</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Replicas send PINGs to server in a predefined interval. It&#39;s possible to change</span></span>
<span class="line"><span># this interval with the repl_ping_replica_period option. The default value is 10</span></span>
<span class="line"><span># seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-ping-replica-period 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The following option sets the replication timeout for:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Bulk transfer I/O during SYNC, from the point of view of replica.</span></span>
<span class="line"><span># 2) Master timeout from the point of view of replicas (data, pings).</span></span>
<span class="line"><span># 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is important to make sure that this value is greater than the value</span></span>
<span class="line"><span># specified for repl-ping-replica-period otherwise a timeout will be detected</span></span>
<span class="line"><span># every time there is low traffic between the master and the replica.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-timeout 60</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Disable TCP_NODELAY on the replica socket after SYNC?</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you select &quot;yes&quot; Redis will use a smaller number of TCP packets and</span></span>
<span class="line"><span># less bandwidth to send data to replicas. But this can add a delay for</span></span>
<span class="line"><span># the data to appear on the replica side, up to 40 milliseconds with</span></span>
<span class="line"><span># Linux kernels using a default configuration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you select &quot;no&quot; the delay for data to appear on the replica side will</span></span>
<span class="line"><span># be reduced but more bandwidth will be used for replication.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default we optimize for low latency, but in very high traffic conditions</span></span>
<span class="line"><span># or when the master and replicas are many hops away, turning this to &quot;yes&quot; may</span></span>
<span class="line"><span># be a good idea.</span></span>
<span class="line"><span>repl-disable-tcp-nodelay no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the replication backlog size. The backlog is a buffer that accumulates</span></span>
<span class="line"><span># replica data when replicas are disconnected for some time, so that when a replica</span></span>
<span class="line"><span># wants to reconnect again, often a full resync is not needed, but a partial</span></span>
<span class="line"><span># resync is enough, just passing the portion of data the replica missed while</span></span>
<span class="line"><span># disconnected.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The bigger the replication backlog, the longer the time the replica can be</span></span>
<span class="line"><span># disconnected and later be able to perform a partial resynchronization.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The backlog is only allocated once there is at least a replica connected.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-backlog-size 1mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># After a master has no longer connected replicas for some time, the backlog</span></span>
<span class="line"><span># will be freed. The following option configures the amount of seconds that</span></span>
<span class="line"><span># need to elapse, starting from the time the last replica disconnected, for</span></span>
<span class="line"><span># the backlog buffer to be freed.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that replicas never free the backlog for timeout, since they may be</span></span>
<span class="line"><span># promoted to masters later, and should be able to correctly &quot;partially</span></span>
<span class="line"><span># resynchronize&quot; with the replicas: hence they should always accumulate backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A value of 0 means to never release the backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-backlog-ttl 3600</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The replica priority is an integer number published by Redis in the INFO output.</span></span>
<span class="line"><span># It is used by Redis Sentinel in order to select a replica to promote into a</span></span>
<span class="line"><span># master if the master is no longer working correctly.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A replica with a low priority number is considered better for promotion, so</span></span>
<span class="line"><span># for instance if there are three replicas with priority 10, 100, 25 Sentinel will</span></span>
<span class="line"><span># pick the one with priority 10, that is the lowest.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However a special priority of 0 marks the replica as not able to perform the</span></span>
<span class="line"><span># role of master, so a replica with priority of 0 will never be selected by</span></span>
<span class="line"><span># Redis Sentinel for promotion.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default the priority is 100.</span></span>
<span class="line"><span>replica-priority 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># It is possible for a master to stop accepting writes if there are less than</span></span>
<span class="line"><span># N replicas connected, having a lag less or equal than M seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The N replicas need to be in &quot;online&quot; state.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The lag in seconds, that must be &lt;= the specified value, is calculated from</span></span>
<span class="line"><span># the last ping received from the replica, that is usually sent every second.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This option does not GUARANTEE that N replicas will accept the write, but</span></span>
<span class="line"><span># will limit the window of exposure for lost writes in case not enough replicas</span></span>
<span class="line"><span># are available, to the specified number of seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For example to require at least 3 replicas with a lag &lt;= 10 seconds use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># min-replicas-to-write 3</span></span>
<span class="line"><span># min-replicas-max-lag 10</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Setting one or the other to 0 disables the feature.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default min-replicas-to-write is set to 0 (feature disabled) and</span></span>
<span class="line"><span># min-replicas-max-lag is set to 10.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># A Redis master is able to list the address and port of the attached</span></span>
<span class="line"><span># replicas in different ways. For example the &quot;INFO replication&quot; section</span></span>
<span class="line"><span># offers this information, which is used, among other tools, by</span></span>
<span class="line"><span># Redis Sentinel in order to discover replica instances.</span></span>
<span class="line"><span># Another place where this info is available is in the output of the</span></span>
<span class="line"><span># &quot;ROLE&quot; command of a master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The listed IP and address normally reported by a replica is obtained</span></span>
<span class="line"><span># in the following way:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   IP: The address is auto detected by checking the peer address</span></span>
<span class="line"><span>#   of the socket used by the replica to connect with the master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   Port: The port is communicated by the replica during the replication</span></span>
<span class="line"><span>#   handshake, and is normally the port that the replica is using to</span></span>
<span class="line"><span>#   listen for connections.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However when port forwarding or Network Address Translation (NAT) is</span></span>
<span class="line"><span># used, the replica may be actually reachable via different IP and port</span></span>
<span class="line"><span># pairs. The following two options can be used by a replica in order to</span></span>
<span class="line"><span># report to its master a specific set of IP and port, so that both INFO</span></span>
<span class="line"><span># and ROLE will report those values.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There is no need to use both the options if you need to override just</span></span>
<span class="line"><span># the port or the IP address.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replica-announce-ip 5.5.5.5</span></span>
<span class="line"><span># replica-announce-port 1234</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## SECURITY ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Require clients to issue AUTH &lt;PASSWORD&gt; before processing any other</span></span>
<span class="line"><span># commands.  This might be useful in environments in which you do not trust</span></span>
<span class="line"><span># others with access to the host running redis-server.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This should stay commented out for backward compatibility and because most</span></span>
<span class="line"><span># people do not need auth (e.g. they run their own servers).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Warning: since Redis is pretty fast an outside user can try up to</span></span>
<span class="line"><span># 150k passwords per second against a good box. This means that you should</span></span>
<span class="line"><span># use a very strong password otherwise it will be very easy to break.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># requirepass foobared</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Command renaming.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is possible to change the name of dangerous commands in a shared</span></span>
<span class="line"><span># environment. For instance the CONFIG command may be renamed into something</span></span>
<span class="line"><span># hard to guess so that it will still be available for internal-use tools</span></span>
<span class="line"><span># but not available for general clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is also possible to completely kill a command by renaming it into</span></span>
<span class="line"><span># an empty string:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># rename-command CONFIG &quot;&quot;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Please note that changing the name of commands that are logged into the</span></span>
<span class="line"><span># AOF file or transmitted to replicas may cause problems.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################### CLIENTS ####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the max number of connected clients at the same time. By default</span></span>
<span class="line"><span># this limit is set to 10000 clients, however if the Redis server is not</span></span>
<span class="line"><span># able to configure the process file limit to allow for the specified limit</span></span>
<span class="line"><span># the max number of allowed clients is set to the current file limit</span></span>
<span class="line"><span># minus 32 (as Redis reserves a few file descriptors for internal uses).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Once the limit is reached Redis will close all the new connections sending</span></span>
<span class="line"><span># an error &#39;max number of clients reached&#39;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxclients 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################## MEMORY MANAGEMENT ################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set a memory usage limit to the specified amount of bytes.</span></span>
<span class="line"><span># When the memory limit is reached Redis will try to remove keys</span></span>
<span class="line"><span># according to the eviction policy selected (see maxmemory-policy).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If Redis can&#39;t remove keys according to the policy, or if the policy is</span></span>
<span class="line"><span># set to &#39;noeviction&#39;, Redis will start to reply with errors to commands</span></span>
<span class="line"><span># that would use more memory, like SET, LPUSH, and so on, and will continue</span></span>
<span class="line"><span># to reply to read-only commands like GET.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This option is usually useful when using Redis as an LRU or LFU cache, or to</span></span>
<span class="line"><span># set a hard memory limit for an instance (using the &#39;noeviction&#39; policy).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># WARNING: If you have replicas attached to an instance with maxmemory on,</span></span>
<span class="line"><span># the size of the output buffers needed to feed the replicas are subtracted</span></span>
<span class="line"><span># from the used memory count, so that network problems / resyncs will</span></span>
<span class="line"><span># not trigger a loop where keys are evicted, and in turn the output</span></span>
<span class="line"><span># buffer of replicas is full with DELs of keys evicted triggering the deletion</span></span>
<span class="line"><span># of more keys, and so forth until the database is completely emptied.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In short... if you have replicas attached it is suggested that you set a lower</span></span>
<span class="line"><span># limit for maxmemory so that there is some free RAM on the system for replica</span></span>
<span class="line"><span># output buffers (but this is not needed if the policy is &#39;noeviction&#39;).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory &lt;bytes&gt;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># MAXMEMORY POLICY: how Redis will select what to remove when maxmemory</span></span>
<span class="line"><span># is reached. You can select among eight behaviors:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># volatile-lru -&gt; Evict using approximated LRU among the keys with an expire set.</span></span>
<span class="line"><span># allkeys-lru -&gt; Evict any key using approximated LRU.</span></span>
<span class="line"><span># volatile-lfu -&gt; Evict using approximated LFU among the keys with an expire set.</span></span>
<span class="line"><span># allkeys-lfu -&gt; Evict any key using approximated LFU.</span></span>
<span class="line"><span># volatile-random -&gt; Remove a random key among the ones with an expire set.</span></span>
<span class="line"><span># allkeys-random -&gt; Remove a random key, any key.</span></span>
<span class="line"><span># volatile-ttl -&gt; Remove the key with the nearest expire time (minor TTL)</span></span>
<span class="line"><span># noeviction -&gt; Don&#39;t evict anything, just return an error on write operations.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># LRU means Least Recently Used</span></span>
<span class="line"><span># LFU means Least Frequently Used</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># LRU, LFU and volatile-ttl are all implemented using approximated</span></span>
<span class="line"><span># randomized algorithms.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: with any of the above policies, Redis will return an error on write</span></span>
<span class="line"><span>#       operations, when there are no suitable keys for eviction.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#       At the date of writing these commands are: set setnx setex append</span></span>
<span class="line"><span>#       incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd</span></span>
<span class="line"><span>#       sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby</span></span>
<span class="line"><span>#       zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby</span></span>
<span class="line"><span>#       getset mset msetnx exec sort</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory-policy noeviction</span></span>
<span class="line"><span></span></span>
<span class="line"><span># LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated</span></span>
<span class="line"><span># algorithms (in order to save memory), so you can tune it for speed or</span></span>
<span class="line"><span># accuracy. By default Redis will check five keys and pick the one that was</span></span>
<span class="line"><span># used less recently, you can change the sample size using the following</span></span>
<span class="line"><span># configuration directive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default of 5 produces good enough results. 10 approximates very closely</span></span>
<span class="line"><span># true LRU but costs more CPU. 3 is faster but not very accurate.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory-samples 5</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Starting from Redis 5, by default a replica will ignore its maxmemory setting</span></span>
<span class="line"><span># (unless it is promoted to master after a failover or manually). It means</span></span>
<span class="line"><span># that the eviction of keys will be just handled by the master, sending the</span></span>
<span class="line"><span># DEL commands to the replica as keys evict in the master side.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This behavior ensures that masters and replicas stay consistent, and is usually</span></span>
<span class="line"><span># what you want, however if your replica is writable, or you want the replica to have</span></span>
<span class="line"><span># a different memory setting, and you are sure all the writes performed to the</span></span>
<span class="line"><span># replica are idempotent, then you may change this default (but be sure to understand</span></span>
<span class="line"><span># what you are doing).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that since the replica by default does not evict, it may end using more</span></span>
<span class="line"><span># memory than the one set via maxmemory (there are certain buffers that may</span></span>
<span class="line"><span># be larger on the replica, or data structures may sometimes take more memory and so</span></span>
<span class="line"><span># forth). So make sure you monitor your replicas and make sure they have enough</span></span>
<span class="line"><span># memory to never hit a real out-of-memory condition before the master hits</span></span>
<span class="line"><span># the configured maxmemory setting.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replica-ignore-maxmemory yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################# LAZY FREEING ####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis has two primitives to delete keys. One is called DEL and is a blocking</span></span>
<span class="line"><span># deletion of the object. It means that the server stops processing new commands</span></span>
<span class="line"><span># in order to reclaim all the memory associated with an object in a synchronous</span></span>
<span class="line"><span># way. If the key deleted is associated with a small object, the time needed</span></span>
<span class="line"><span># in order to execute the DEL command is very small and comparable to most other</span></span>
<span class="line"><span># O(1) or O(log_N) commands in Redis. However if the key is associated with an</span></span>
<span class="line"><span># aggregated value containing millions of elements, the server can block for</span></span>
<span class="line"><span># a long time (even seconds) in order to complete the operation.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For the above reasons Redis also offers non blocking deletion primitives</span></span>
<span class="line"><span># such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and</span></span>
<span class="line"><span># FLUSHDB commands, in order to reclaim memory in background. Those commands</span></span>
<span class="line"><span># are executed in constant time. Another thread will incrementally free the</span></span>
<span class="line"><span># object in the background as fast as possible.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.</span></span>
<span class="line"><span># It&#39;s up to the design of the application to understand when it is a good</span></span>
<span class="line"><span># idea to use one or the other. However the Redis server sometimes has to</span></span>
<span class="line"><span># delete keys or flush the whole database as a side effect of other operations.</span></span>
<span class="line"><span># Specifically Redis deletes objects independently of a user call in the</span></span>
<span class="line"><span># following scenarios:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) On eviction, because of the maxmemory and maxmemory policy configurations,</span></span>
<span class="line"><span>#    in order to make room for new data, without going over the specified</span></span>
<span class="line"><span>#    memory limit.</span></span>
<span class="line"><span># 2) Because of expire: when a key with an associated time to live (see the</span></span>
<span class="line"><span>#    EXPIRE command) must be deleted from memory.</span></span>
<span class="line"><span># 3) Because of a side effect of a command that stores data on a key that may</span></span>
<span class="line"><span>#    already exist. For example the RENAME command may delete the old key</span></span>
<span class="line"><span>#    content when it is replaced with another one. Similarly SUNIONSTORE</span></span>
<span class="line"><span>#    or SORT with STORE option may delete existing keys. The SET command</span></span>
<span class="line"><span>#    itself removes any old content of the specified key in order to replace</span></span>
<span class="line"><span>#    it with the specified string.</span></span>
<span class="line"><span># 4) During replication, when a replica performs a full resynchronization with</span></span>
<span class="line"><span>#    its master, the content of the whole database is removed in order to</span></span>
<span class="line"><span>#    load the RDB file just transferred.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In all the above cases the default is to delete objects in a blocking way,</span></span>
<span class="line"><span># like if DEL was called. However you can configure each case specifically</span></span>
<span class="line"><span># in order to instead release memory in a non-blocking way like if UNLINK</span></span>
<span class="line"><span># was called, using the following configuration directives:</span></span>
<span class="line"><span></span></span>
<span class="line"><span>lazyfree-lazy-eviction no</span></span>
<span class="line"><span>lazyfree-lazy-expire no</span></span>
<span class="line"><span>lazyfree-lazy-server-del no</span></span>
<span class="line"><span>replica-lazy-flush no</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################## APPEND ONLY MODE ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis asynchronously dumps the dataset on disk. This mode is</span></span>
<span class="line"><span># good enough in many applications, but an issue with the Redis process or</span></span>
<span class="line"><span># a power outage may result into a few minutes of writes lost (depending on</span></span>
<span class="line"><span># the configured save points).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Append Only File is an alternative persistence mode that provides</span></span>
<span class="line"><span># much better durability. For instance using the default data fsync policy</span></span>
<span class="line"><span># (see later in the config file) Redis can lose just one second of writes in a</span></span>
<span class="line"><span># dramatic event like a server power outage, or a single write if something</span></span>
<span class="line"><span># wrong with the Redis process itself happens, but the operating system is</span></span>
<span class="line"><span># still running correctly.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># AOF and RDB persistence can be enabled at the same time without problems.</span></span>
<span class="line"><span># If the AOF is enabled on startup Redis will load the AOF, that is the file</span></span>
<span class="line"><span># with the better durability guarantees.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Please check http://redis.io/topics/persistence for more information.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>appendonly no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The name of the append only file (default: &quot;appendonly.aof&quot;)</span></span>
<span class="line"><span></span></span>
<span class="line"><span>appendfilename &quot;appendonly.aof&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The fsync() call tells the Operating System to actually write data on disk</span></span>
<span class="line"><span># instead of waiting for more data in the output buffer. Some OS will really flush</span></span>
<span class="line"><span># data on disk, some other OS will just try to do it ASAP.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis supports three different modes:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># no: don&#39;t fsync, just let the OS flush the data when it wants. Faster.</span></span>
<span class="line"><span># always: fsync after every write to the append only log. Slow, Safest.</span></span>
<span class="line"><span># everysec: fsync only one time every second. Compromise.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is &quot;everysec&quot;, as that&#39;s usually the right compromise between</span></span>
<span class="line"><span># speed and data safety. It&#39;s up to you to understand if you can relax this to</span></span>
<span class="line"><span># &quot;no&quot; that will let the operating system flush the output buffer when</span></span>
<span class="line"><span># it wants, for better performances (but if you can live with the idea of</span></span>
<span class="line"><span># some data loss consider the default persistence mode that&#39;s snapshotting),</span></span>
<span class="line"><span># or on the contrary, use &quot;always&quot; that&#39;s very slow but a bit safer than</span></span>
<span class="line"><span># everysec.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># More details please check the following article:</span></span>
<span class="line"><span># http://antirez.com/post/redis-persistence-demystified.html</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If unsure, use &quot;everysec&quot;.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># appendfsync always</span></span>
<span class="line"><span>appendfsync everysec</span></span>
<span class="line"><span># appendfsync no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When the AOF fsync policy is set to always or everysec, and a background</span></span>
<span class="line"><span># saving process (a background save or AOF log background rewriting) is</span></span>
<span class="line"><span># performing a lot of I/O against the disk, in some Linux configurations</span></span>
<span class="line"><span># Redis may block too long on the fsync() call. Note that there is no fix for</span></span>
<span class="line"><span># this currently, as even performing fsync in a different thread will block</span></span>
<span class="line"><span># our synchronous write(2) call.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In order to mitigate this problem it&#39;s possible to use the following option</span></span>
<span class="line"><span># that will prevent fsync() from being called in the main process while a</span></span>
<span class="line"><span># BGSAVE or BGREWRITEAOF is in progress.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This means that while another child is saving, the durability of Redis is</span></span>
<span class="line"><span># the same as &quot;appendfsync no&quot;. In practical terms, this means that it is</span></span>
<span class="line"><span># possible to lose up to 30 seconds of log in the worst scenario (with the</span></span>
<span class="line"><span># default Linux settings).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you have latency problems turn this to &quot;yes&quot;. Otherwise leave it as</span></span>
<span class="line"><span># &quot;no&quot; that is the safest pick from the point of view of durability.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>no-appendfsync-on-rewrite no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Automatic rewrite of the append only file.</span></span>
<span class="line"><span># Redis is able to automatically rewrite the log file implicitly calling</span></span>
<span class="line"><span># BGREWRITEAOF when the AOF log size grows by the specified percentage.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is how it works: Redis remembers the size of the AOF file after the</span></span>
<span class="line"><span># latest rewrite (if no rewrite has happened since the restart, the size of</span></span>
<span class="line"><span># the AOF at startup is used).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This base size is compared to the current size. If the current size is</span></span>
<span class="line"><span># larger than the base size by the specified percentage, the rewrite is triggered. Also</span></span>
<span class="line"><span># you need to specify a minimal size for the AOF file to be rewritten, this</span></span>
<span class="line"><span># is useful to avoid rewriting the AOF file even if the percentage increase</span></span>
<span class="line"><span># is reached but it is still pretty small.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Specify a percentage of zero in order to disable the automatic AOF</span></span>
<span class="line"><span># rewrite feature.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>auto-aof-rewrite-percentage 100</span></span>
<span class="line"><span>auto-aof-rewrite-min-size 64mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># An AOF file may be found to be truncated at the end during the Redis</span></span>
<span class="line"><span># startup process, when the AOF data gets loaded back into memory.</span></span>
<span class="line"><span># This may happen when the system where Redis is running</span></span>
<span class="line"><span># crashes, especially when an ext4 filesystem is mounted without the</span></span>
<span class="line"><span># data=ordered option (however this can&#39;t happen when Redis itself</span></span>
<span class="line"><span># crashes or aborts but the operating system still works correctly).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis can either exit with an error when this happens, or load as much</span></span>
<span class="line"><span># data as possible (the default now) and start if the AOF file is found</span></span>
<span class="line"><span># to be truncated at the end. The following option controls this behavior.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If aof-load-truncated is set to yes, a truncated AOF file is loaded and</span></span>
<span class="line"><span># the Redis server starts emitting a log to inform the user of the event.</span></span>
<span class="line"><span># Otherwise if the option is set to no, the server aborts with an error</span></span>
<span class="line"><span># and refuses to start. When the option is set to no, the user is required</span></span>
<span class="line"><span># to fix the AOF file using the &quot;redis-check-aof&quot; utility before restarting</span></span>
<span class="line"><span># the server.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that if the AOF file is found to be corrupted in the middle</span></span>
<span class="line"><span># the server will still exit with an error. This option only applies when</span></span>
<span class="line"><span># Redis will try to read more data from the AOF file but not enough bytes</span></span>
<span class="line"><span># will be found.</span></span>
<span class="line"><span>aof-load-truncated yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When rewriting the AOF file, Redis is able to use an RDB preamble in the</span></span>
<span class="line"><span># AOF file for faster rewrites and recoveries. When this option is turned</span></span>
<span class="line"><span># on the rewritten AOF file is composed of two different stanzas:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   [RDB file][AOF tail]</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When loading Redis recognizes that the AOF file starts with the &quot;REDIS&quot;</span></span>
<span class="line"><span># string and loads the prefixed RDB file, and continues loading the AOF</span></span>
<span class="line"><span># tail.</span></span>
<span class="line"><span>aof-use-rdb-preamble yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ LUA SCRIPTING  ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Max execution time of a Lua script in milliseconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the maximum execution time is reached Redis will log that a script is</span></span>
<span class="line"><span># still in execution after the maximum allowed time and will start to</span></span>
<span class="line"><span># reply to queries with an error.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When a long running script exceeds the maximum execution time only the</span></span>
<span class="line"><span># SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be</span></span>
<span class="line"><span># used to stop a script that has not yet called write commands. The second</span></span>
<span class="line"><span># is the only way to shut down the server in the case a write command was</span></span>
<span class="line"><span># already issued by the script but the user doesn&#39;t want to wait for the natural</span></span>
<span class="line"><span># termination of the script.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set it to 0 or a negative value for unlimited execution without warnings.</span></span>
<span class="line"><span>lua-time-limit 5000</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ REDIS CLUSTER  ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Normal Redis instances can&#39;t be part of a Redis Cluster; only nodes that are</span></span>
<span class="line"><span># started as cluster nodes can. In order to start a Redis instance as a</span></span>
<span class="line"><span># cluster node enable the cluster support uncommenting the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-enabled yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Every cluster node has a cluster configuration file. This file is not</span></span>
<span class="line"><span># intended to be edited by hand. It is created and updated by Redis nodes.</span></span>
<span class="line"><span># Every Redis Cluster node requires a different cluster configuration file.</span></span>
<span class="line"><span># Make sure that instances running in the same system do not have</span></span>
<span class="line"><span># overlapping cluster configuration file names.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-config-file nodes-6379.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Cluster node timeout is the amount of milliseconds a node must be unreachable</span></span>
<span class="line"><span># for it to be considered in failure state.</span></span>
<span class="line"><span># Most other internal time limits are multiple of the node timeout.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-node-timeout 15000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># A replica of a failing master will avoid to start a failover if its data</span></span>
<span class="line"><span># looks too old.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There is no simple way for a replica to actually have an exact measure of</span></span>
<span class="line"><span># its &quot;data age&quot;, so the following two checks are performed:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) If there are multiple replicas able to failover, they exchange messages</span></span>
<span class="line"><span>#    in order to try to give an advantage to the replica with the best</span></span>
<span class="line"><span>#    replication offset (more data from the master processed).</span></span>
<span class="line"><span>#    Replicas will try to get their rank by offset, and apply to the start</span></span>
<span class="line"><span>#    of the failover a delay proportional to their rank.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2) Every single replica computes the time of the last interaction with</span></span>
<span class="line"><span>#    its master. This can be the last ping or command received (if the master</span></span>
<span class="line"><span>#    is still in the &quot;connected&quot; state), or the time that elapsed since the</span></span>
<span class="line"><span>#    disconnection with the master (if the replication link is currently down).</span></span>
<span class="line"><span>#    If the last interaction is too old, the replica will not try to failover</span></span>
<span class="line"><span>#    at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The point &quot;2&quot; can be tuned by user. Specifically a replica will not perform</span></span>
<span class="line"><span># the failover if, since the last interaction with the master, the time</span></span>
<span class="line"><span># elapsed is greater than:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   (node-timeout * replica-validity-factor) + repl-ping-replica-period</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># So for example if node-timeout is 30 seconds, and the replica-validity-factor</span></span>
<span class="line"><span># is 10, and assuming a default repl-ping-replica-period of 10 seconds, the</span></span>
<span class="line"><span># replica will not try to failover if it was not able to talk with the master</span></span>
<span class="line"><span># for longer than 310 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A large replica-validity-factor may allow replicas with too old data to failover</span></span>
<span class="line"><span># a master, while a too small value may prevent the cluster from being able to</span></span>
<span class="line"><span># elect a replica at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For maximum availability, it is possible to set the replica-validity-factor</span></span>
<span class="line"><span># to a value of 0, which means, that replicas will always try to failover the</span></span>
<span class="line"><span># master regardless of the last time they interacted with the master.</span></span>
<span class="line"><span># (However they&#39;ll always try to apply a delay proportional to their</span></span>
<span class="line"><span># offset rank).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Zero is the only value able to guarantee that when all the partitions heal</span></span>
<span class="line"><span># the cluster will always be able to continue.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-replica-validity-factor 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Cluster replicas are able to migrate to orphaned masters, that are masters</span></span>
<span class="line"><span># that are left without working replicas. This improves the cluster ability</span></span>
<span class="line"><span># to resist to failures as otherwise an orphaned master can&#39;t be failed over</span></span>
<span class="line"><span># in case of failure if it has no working replicas.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Replicas migrate to orphaned masters only if there are still at least a</span></span>
<span class="line"><span># given number of other working replicas for their old master. This number</span></span>
<span class="line"><span># is the &quot;migration barrier&quot;. A migration barrier of 1 means that a replica</span></span>
<span class="line"><span># will migrate only if there is at least 1 other working replica for its master</span></span>
<span class="line"><span># and so forth. It usually reflects the number of replicas you want for every</span></span>
<span class="line"><span># master in your cluster.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Default is 1 (replicas migrate only if their masters remain with at least</span></span>
<span class="line"><span># one replica). To disable migration just set it to a very large value.</span></span>
<span class="line"><span># A value of 0 can be set but is useful only for debugging and dangerous</span></span>
<span class="line"><span># in production.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-migration-barrier 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis Cluster nodes stop accepting queries if they detect there</span></span>
<span class="line"><span># is at least a hash slot uncovered (no available node is serving it).</span></span>
<span class="line"><span># This way if the cluster is partially down (for example a range of hash slots</span></span>
<span class="line"><span># are no longer covered) all the cluster becomes, eventually, unavailable.</span></span>
<span class="line"><span># It automatically returns available as soon as all the slots are covered again.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However sometimes you want the subset of the cluster which is working,</span></span>
<span class="line"><span># to continue to accept queries for the part of the key space that is still</span></span>
<span class="line"><span># covered. In order to do so, just set the cluster-require-full-coverage</span></span>
<span class="line"><span># option to no.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-require-full-coverage yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># This option, when set to yes, prevents replicas from trying to failover its</span></span>
<span class="line"><span># master during master failures. However the master can still perform a</span></span>
<span class="line"><span># manual failover, if forced to do so.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is useful in different scenarios, especially in the case of multiple</span></span>
<span class="line"><span># data center operations, where we want one side to never be promoted if not</span></span>
<span class="line"><span># in the case of a total DC failure.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-replica-no-failover no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In order to setup your cluster make sure to read the documentation</span></span>
<span class="line"><span># available at http://redis.io web site.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>########################## CLUSTER DOCKER/NAT support  ########################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In certain deployments, Redis Cluster nodes address discovery fails, because</span></span>
<span class="line"><span># addresses are NAT-ted or because ports are forwarded (the typical case is</span></span>
<span class="line"><span># Docker and other containers).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In order to make Redis Cluster work in such environments, a static</span></span>
<span class="line"><span># configuration where each node knows its public address is needed. The</span></span>
<span class="line"><span># following two options are used for this scope, and are:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># * cluster-announce-ip</span></span>
<span class="line"><span># * cluster-announce-port</span></span>
<span class="line"><span># * cluster-announce-bus-port</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Each instructs the node about its address, client port, and cluster message</span></span>
<span class="line"><span># bus port. The information is then published in the header of the bus packets</span></span>
<span class="line"><span># so that other nodes will be able to correctly map the address of the node</span></span>
<span class="line"><span># publishing the information.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the above options are not used, the normal Redis Cluster auto-detection</span></span>
<span class="line"><span># will be used instead.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that when remapped, the bus port may not be at the fixed offset of</span></span>
<span class="line"><span># clients port + 10000, so you can specify any port and bus-port depending</span></span>
<span class="line"><span># on how they get remapped. If the bus-port is not set, a fixed offset of</span></span>
<span class="line"><span># 10000 will be used as usually.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-announce-ip 10.1.1.5</span></span>
<span class="line"><span># cluster-announce-port 6379</span></span>
<span class="line"><span># cluster-announce-bus-port 6380</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## SLOW LOG ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The Redis Slow Log is a system to log queries that exceeded a specified</span></span>
<span class="line"><span># execution time. The execution time does not include the I/O operations</span></span>
<span class="line"><span># like talking with the client, sending the reply and so forth,</span></span>
<span class="line"><span># but just the time needed to actually execute the command (this is the only</span></span>
<span class="line"><span># stage of command execution where the thread is blocked and can not serve</span></span>
<span class="line"><span># other requests in the meantime).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># You can configure the slow log with two parameters: one tells Redis</span></span>
<span class="line"><span># what is the execution time, in microseconds, to exceed in order for the</span></span>
<span class="line"><span># command to get logged, and the other parameter is the length of the</span></span>
<span class="line"><span># slow log. When a new command is logged the oldest one is removed from the</span></span>
<span class="line"><span># queue of logged commands.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The following time is expressed in microseconds, so 1000000 is equivalent</span></span>
<span class="line"><span># to one second. Note that a negative number disables the slow log, while</span></span>
<span class="line"><span># a value of zero forces the logging of every command.</span></span>
<span class="line"><span>slowlog-log-slower-than 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># There is no limit to this length. Just be aware that it will consume memory.</span></span>
<span class="line"><span># You can reclaim memory used by the slow log with SLOWLOG RESET.</span></span>
<span class="line"><span>slowlog-max-len 128</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ LATENCY MONITOR ##############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The Redis latency monitoring subsystem samples different operations</span></span>
<span class="line"><span># at runtime in order to collect data related to possible sources of</span></span>
<span class="line"><span># latency of a Redis instance.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Via the LATENCY command this information is available to the user that can</span></span>
<span class="line"><span># print graphs and obtain reports.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The system only logs operations that were performed in a time equal or</span></span>
<span class="line"><span># greater than the amount of milliseconds specified via the</span></span>
<span class="line"><span># latency-monitor-threshold configuration directive. When its value is set</span></span>
<span class="line"><span># to zero, the latency monitor is turned off.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default latency monitoring is disabled since it is mostly not needed</span></span>
<span class="line"><span># if you don&#39;t have latency issues, and collecting data has a performance</span></span>
<span class="line"><span># impact, that while very small, can be measured under big load. Latency</span></span>
<span class="line"><span># monitoring can easily be enabled at runtime using the command</span></span>
<span class="line"><span># &quot;CONFIG SET latency-monitor-threshold &lt;milliseconds&gt;&quot; if needed.</span></span>
<span class="line"><span>latency-monitor-threshold 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################# EVENT NOTIFICATION ##############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis can notify Pub/Sub clients about events happening in the key space.</span></span>
<span class="line"><span># This feature is documented at http://redis.io/topics/notifications</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For instance if keyspace events notification is enabled, and a client</span></span>
<span class="line"><span># performs a DEL operation on key &quot;foo&quot; stored in the Database 0, two</span></span>
<span class="line"><span># messages will be published via Pub/Sub:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># PUBLISH __keyspace@0__:foo del</span></span>
<span class="line"><span># PUBLISH __keyevent@0__:del foo</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is possible to select the events that Redis will notify among a set</span></span>
<span class="line"><span># of classes. Every class is identified by a single character:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  K     Keyspace events, published with __keyspace@&lt;db&gt;__ prefix.</span></span>
<span class="line"><span>#  E     Keyevent events, published with __keyevent@&lt;db&gt;__ prefix.</span></span>
<span class="line"><span>#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...</span></span>
<span class="line"><span>#  $     String commands</span></span>
<span class="line"><span>#  l     List commands</span></span>
<span class="line"><span>#  s     Set commands</span></span>
<span class="line"><span>#  h     Hash commands</span></span>
<span class="line"><span>#  z     Sorted set commands</span></span>
<span class="line"><span>#  x     Expired events (events generated every time a key expires)</span></span>
<span class="line"><span>#  e     Evicted events (events generated when a key is evicted for maxmemory)</span></span>
<span class="line"><span>#  A     Alias for g$lshzxe, so that the &quot;AKE&quot; string means all the events.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  The &quot;notify-keyspace-events&quot; takes as argument a string that is composed</span></span>
<span class="line"><span>#  of zero or multiple characters. The empty string means that notifications</span></span>
<span class="line"><span>#  are disabled.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  Example: to enable list and generic events, from the point of view of the</span></span>
<span class="line"><span>#           event name, use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  notify-keyspace-events Elg</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  Example 2: to get the stream of the expired keys subscribing to channel</span></span>
<span class="line"><span>#             name __keyevent@0__:expired use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  notify-keyspace-events Ex</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  By default all notifications are disabled because most users don&#39;t need</span></span>
<span class="line"><span>#  this feature and the feature has some overhead. Note that if you don&#39;t</span></span>
<span class="line"><span>#  specify at least one of K or E, no events will be delivered.</span></span>
<span class="line"><span>notify-keyspace-events &quot;&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################### ADVANCED CONFIG ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Hashes are encoded using a memory efficient data structure when they have a</span></span>
<span class="line"><span># small number of entries, and the biggest entry does not exceed a given</span></span>
<span class="line"><span># threshold. These thresholds can be configured using the following directives.</span></span>
<span class="line"><span>hash-max-ziplist-entries 512</span></span>
<span class="line"><span>hash-max-ziplist-value 64</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Lists are also encoded in a special way to save a lot of space.</span></span>
<span class="line"><span># The number of entries allowed per internal list node can be specified</span></span>
<span class="line"><span># as a fixed maximum size or a maximum number of elements.</span></span>
<span class="line"><span># For a fixed maximum size, use -5 through -1, meaning:</span></span>
<span class="line"><span># -5: max size: 64 Kb  &lt;-- not recommended for normal workloads</span></span>
<span class="line"><span># -4: max size: 32 Kb  &lt;-- not recommended</span></span>
<span class="line"><span># -3: max size: 16 Kb  &lt;-- probably not recommended</span></span>
<span class="line"><span># -2: max size: 8 Kb   &lt;-- good</span></span>
<span class="line"><span># -1: max size: 4 Kb   &lt;-- good</span></span>
<span class="line"><span># Positive numbers mean store up to _exactly_ that number of elements</span></span>
<span class="line"><span># per list node.</span></span>
<span class="line"><span># The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),</span></span>
<span class="line"><span># but if your use case is unique, adjust the settings as necessary.</span></span>
<span class="line"><span>list-max-ziplist-size -2</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Lists may also be compressed.</span></span>
<span class="line"><span># Compress depth is the number of quicklist ziplist nodes from *each* side of</span></span>
<span class="line"><span># the list to *exclude* from compression.  The head and tail of the list</span></span>
<span class="line"><span># are always uncompressed for fast push/pop operations.  Settings are:</span></span>
<span class="line"><span># 0: disable all list compression</span></span>
<span class="line"><span># 1: depth 1 means &quot;don&#39;t start compressing until after 1 node into the list,</span></span>
<span class="line"><span>#    going from either the head or tail&quot;</span></span>
<span class="line"><span>#    So: [head]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[tail]</span></span>
<span class="line"><span>#    [head], [tail] will always be uncompressed; inner nodes will compress.</span></span>
<span class="line"><span># 2: [head]-&gt;[next]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[prev]-&gt;[tail]</span></span>
<span class="line"><span>#    2 here means: don&#39;t compress head or head-&gt;next or tail-&gt;prev or tail,</span></span>
<span class="line"><span>#    but compress all nodes between them.</span></span>
<span class="line"><span># 3: [head]-&gt;[next]-&gt;[next]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[prev]-&gt;[prev]-&gt;[tail]</span></span>
<span class="line"><span># etc.</span></span>
<span class="line"><span>list-compress-depth 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Sets have a special encoding in just one case: when a set is composed</span></span>
<span class="line"><span># of just strings that happen to be integers in radix 10 in the range</span></span>
<span class="line"><span># of 64 bit signed integers.</span></span>
<span class="line"><span># The following configuration setting sets the limit in the size of the</span></span>
<span class="line"><span># set in order to use this special memory saving encoding.</span></span>
<span class="line"><span>set-max-intset-entries 512</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Similarly to hashes and lists, sorted sets are also specially encoded in</span></span>
<span class="line"><span># order to save a lot of space. This encoding is only used when the length and</span></span>
<span class="line"><span># elements of a sorted set are below the following limits:</span></span>
<span class="line"><span>zset-max-ziplist-entries 128</span></span>
<span class="line"><span>zset-max-ziplist-value 64</span></span>
<span class="line"><span></span></span>
<span class="line"><span># HyperLogLog sparse representation bytes limit. The limit includes the</span></span>
<span class="line"><span># 16 bytes header. When an HyperLogLog using the sparse representation crosses</span></span>
<span class="line"><span># this limit, it is converted into the dense representation.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A value greater than 16000 is totally useless, since at that point the</span></span>
<span class="line"><span># dense representation is more memory efficient.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The suggested value is ~ 3000 in order to have the benefits of</span></span>
<span class="line"><span># the space efficient encoding without slowing down too much PFADD,</span></span>
<span class="line"><span># which is O(N) with the sparse encoding. The value can be raised to</span></span>
<span class="line"><span># ~ 10000 when CPU is not a concern, but space is, and the data set is</span></span>
<span class="line"><span># composed of many HyperLogLogs with cardinality in the 0 - 15000 range.</span></span>
<span class="line"><span>hll-sparse-max-bytes 3000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Streams macro node max size / items. The stream data structure is a radix</span></span>
<span class="line"><span># tree of big nodes that encode multiple items inside. Using this configuration</span></span>
<span class="line"><span># it is possible to configure how big a single node can be in bytes, and the</span></span>
<span class="line"><span># maximum number of items it may contain before switching to a new node when</span></span>
<span class="line"><span># appending new stream entries. If any of the following settings are set to</span></span>
<span class="line"><span># zero, the limit is ignored, so for instance it is possible to set just a</span></span>
<span class="line"><span># max entries limit by setting max-bytes to 0 and max-entries to the desired</span></span>
<span class="line"><span># value.</span></span>
<span class="line"><span>stream-node-max-bytes 4096</span></span>
<span class="line"><span>stream-node-max-entries 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in</span></span>
<span class="line"><span># order to help rehashing the main Redis hash table (the one mapping top-level</span></span>
<span class="line"><span># keys to values). The hash table implementation Redis uses (see dict.c)</span></span>
<span class="line"><span># performs a lazy rehashing: the more operation you run into a hash table</span></span>
<span class="line"><span># that is rehashing, the more rehashing &quot;steps&quot; are performed, so if the</span></span>
<span class="line"><span># server is idle the rehashing is never complete and some more memory is used</span></span>
<span class="line"><span># by the hash table.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is to use this millisecond 10 times every second in order to</span></span>
<span class="line"><span># actively rehash the main dictionaries, freeing memory when possible.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If unsure:</span></span>
<span class="line"><span># use &quot;activerehashing no&quot; if you have hard latency requirements and it is</span></span>
<span class="line"><span># not a good thing in your environment that Redis can reply from time to time</span></span>
<span class="line"><span># to queries with 2 milliseconds delay.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># use &quot;activerehashing yes&quot; if you don&#39;t have such hard requirements but</span></span>
<span class="line"><span># want to free memory asap when possible.</span></span>
<span class="line"><span>activerehashing yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The client output buffer limits can be used to force disconnection of clients</span></span>
<span class="line"><span># that are not reading data from the server fast enough for some reason (a</span></span>
<span class="line"><span># common reason is that a Pub/Sub client can&#39;t consume messages as fast as the</span></span>
<span class="line"><span># publisher can produce them).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The limit can be set differently for the three different classes of clients:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># normal -&gt; normal clients including MONITOR clients</span></span>
<span class="line"><span># replica  -&gt; replica clients</span></span>
<span class="line"><span># pubsub -&gt; clients subscribed to at least one pubsub channel or pattern</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The syntax of every client-output-buffer-limit directive is the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># client-output-buffer-limit &lt;class&gt; &lt;hard limit&gt; &lt;soft limit&gt; &lt;soft seconds&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A client is immediately disconnected once the hard limit is reached, or if</span></span>
<span class="line"><span># the soft limit is reached and remains reached for the specified number of</span></span>
<span class="line"><span># seconds (continuously).</span></span>
<span class="line"><span># So for instance if the hard limit is 32 megabytes and the soft limit is</span></span>
<span class="line"><span># 16 megabytes / 10 seconds, the client will get disconnected immediately</span></span>
<span class="line"><span># if the size of the output buffers reach 32 megabytes, but will also get</span></span>
<span class="line"><span># disconnected if the client reaches 16 megabytes and continuously overcomes</span></span>
<span class="line"><span># the limit for 10 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default normal clients are not limited because they don&#39;t receive data</span></span>
<span class="line"><span># without asking (in a push way), but just after a request, so only</span></span>
<span class="line"><span># asynchronous clients may create a scenario where data is requested faster</span></span>
<span class="line"><span># than it can read.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Instead there is a default limit for pubsub and replica clients, since</span></span>
<span class="line"><span># subscribers and replicas receive data in a push fashion.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Both the hard or the soft limit can be disabled by setting them to zero.</span></span>
<span class="line"><span>client-output-buffer-limit normal 0 0 0</span></span>
<span class="line"><span>client-output-buffer-limit replica 256mb 64mb 60</span></span>
<span class="line"><span>client-output-buffer-limit pubsub 32mb 8mb 60</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Client query buffers accumulate new commands. They are limited to a fixed</span></span>
<span class="line"><span># amount by default in order to avoid that a protocol desynchronization (for</span></span>
<span class="line"><span># instance due to a bug in the client) will lead to unbound memory usage in</span></span>
<span class="line"><span># the query buffer. However you can configure it here if you have very special</span></span>
<span class="line"><span># needs, such as huge multi/exec requests or alike.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># client-query-buffer-limit 1gb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In the Redis protocol, bulk requests, that are, elements representing single</span></span>
<span class="line"><span># strings, are normally limited to 512 mb. However you can change this limit</span></span>
<span class="line"><span># here.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># proto-max-bulk-len 512mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis calls an internal function to perform many background tasks, like</span></span>
<span class="line"><span># closing connections of clients in timeout, purging expired keys that are</span></span>
<span class="line"><span># never requested, and so forth.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Not all tasks are performed with the same frequency, but Redis checks for</span></span>
<span class="line"><span># tasks to perform according to the specified &quot;hz&quot; value.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default &quot;hz&quot; is set to 10. Raising the value will use more CPU when</span></span>
<span class="line"><span># Redis is idle, but at the same time will make Redis more responsive when</span></span>
<span class="line"><span># there are many keys expiring at the same time, and timeouts may be</span></span>
<span class="line"><span># handled with more precision.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The range is between 1 and 500, however a value over 100 is usually not</span></span>
<span class="line"><span># a good idea. Most users should use the default of 10 and raise this up to</span></span>
<span class="line"><span># 100 only in environments where very low latency is required.</span></span>
<span class="line"><span>hz 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Normally it is useful to have an HZ value which is proportional to the</span></span>
<span class="line"><span># number of clients connected. This is useful in order, for instance, to</span></span>
<span class="line"><span># avoid too many clients are processed for each background task invocation</span></span>
<span class="line"><span># in order to avoid latency spikes.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Since the default HZ value by default is conservatively set to 10, Redis</span></span>
<span class="line"><span># offers, and enables by default, the ability to use an adaptive HZ value</span></span>
<span class="line"><span># which will temporarily rise when there are many connected clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When dynamic HZ is enabled, the actual configured HZ will be used as</span></span>
<span class="line"><span># a baseline, but multiples of the configured HZ value will be actually</span></span>
<span class="line"><span># used as needed once more clients are connected. In this way an idle</span></span>
<span class="line"><span># instance will use very little CPU time while a busy instance will be</span></span>
<span class="line"><span># more responsive.</span></span>
<span class="line"><span>dynamic-hz yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When a child rewrites the AOF file, if the following option is enabled</span></span>
<span class="line"><span># the file will be fsync-ed every 32 MB of data generated. This is useful</span></span>
<span class="line"><span># in order to commit the file to the disk more incrementally and avoid</span></span>
<span class="line"><span># big latency spikes.</span></span>
<span class="line"><span>aof-rewrite-incremental-fsync yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When redis saves RDB file, if the following option is enabled</span></span>
<span class="line"><span># the file will be fsync-ed every 32 MB of data generated. This is useful</span></span>
<span class="line"><span># in order to commit the file to the disk more incrementally and avoid</span></span>
<span class="line"><span># big latency spikes.</span></span>
<span class="line"><span>rdb-save-incremental-fsync yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good</span></span>
<span class="line"><span># idea to start with the default settings and only change them after investigating</span></span>
<span class="line"><span># how to improve the performances and how the keys LFU change over time, which</span></span>
<span class="line"><span># is possible to inspect via the OBJECT FREQ command.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There are two tunable parameters in the Redis LFU implementation: the</span></span>
<span class="line"><span># counter logarithm factor and the counter decay time. It is important to</span></span>
<span class="line"><span># understand what the two parameters mean before changing them.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The LFU counter is just 8 bits per key, its maximum value is 255, so Redis</span></span>
<span class="line"><span># uses a probabilistic increment with logarithmic behavior. Given the value</span></span>
<span class="line"><span># of the old counter, when a key is accessed, the counter is incremented in</span></span>
<span class="line"><span># this way:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1. A random number R between 0 and 1 is extracted.</span></span>
<span class="line"><span># 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).</span></span>
<span class="line"><span># 3. The counter is incremented only if R &lt; P.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default lfu-log-factor is 10. This is a table of how the frequency</span></span>
<span class="line"><span># counter changes with a different number of accesses with different</span></span>
<span class="line"><span># logarithmic factors:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 0      | 104        | 255        | 255        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 1      | 18         | 49         | 255        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 10     | 10         | 18         | 142        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 100    | 8          | 11         | 49         | 143        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE: The above table was obtained by running the following commands:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   redis-benchmark -n 1000000 incr foo</span></span>
<span class="line"><span>#   redis-cli object freq foo</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE 2: The counter initial value is 5 in order to give new objects a chance</span></span>
<span class="line"><span># to accumulate hits.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The counter decay time is the time, in minutes, that must elapse in order</span></span>
<span class="line"><span># for the key counter to be divided by two (or decremented if it has a value</span></span>
<span class="line"><span># less than or equal to 10).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default value for the lfu-decay-time is 1. A special value of 0 means to</span></span>
<span class="line"><span># decay the counter every time it happens to be scanned.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># lfu-log-factor 10</span></span>
<span class="line"><span># lfu-decay-time 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span>########################### ACTIVE DEFRAGMENTATION #######################</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested</span></span>
<span class="line"><span># even in production and manually tested by multiple engineers for some</span></span>
<span class="line"><span># time.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># What is active defragmentation?</span></span>
<span class="line"><span># -------------------------------</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Active (online) defragmentation allows a Redis server to compact the</span></span>
<span class="line"><span># spaces left between small allocations and deallocations of data in memory,</span></span>
<span class="line"><span># thus allowing to reclaim back memory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Fragmentation is a natural process that happens with every allocator (but</span></span>
<span class="line"><span># less so with Jemalloc, fortunately) and certain workloads. Normally a server</span></span>
<span class="line"><span># restart is needed in order to lower the fragmentation, or at least to flush</span></span>
<span class="line"><span># away all the data and create it again. However thanks to this feature</span></span>
<span class="line"><span># implemented by Oran Agra for Redis 4.0 this process can happen at runtime</span></span>
<span class="line"><span># in an &quot;hot&quot; way, while the server is running.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Basically when the fragmentation is over a certain level (see the</span></span>
<span class="line"><span># configuration options below) Redis will start to create new copies of the</span></span>
<span class="line"><span># values in contiguous memory regions by exploiting certain specific Jemalloc</span></span>
<span class="line"><span># features (in order to understand if an allocation is causing fragmentation</span></span>
<span class="line"><span># and to allocate it in a better place), and at the same time, will release the</span></span>
<span class="line"><span># old copies of the data. This process, repeated incrementally for all the keys</span></span>
<span class="line"><span># will cause the fragmentation to drop back to normal values.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Important things to understand:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1. This feature is disabled by default, and only works if you compiled Redis</span></span>
<span class="line"><span>#    to use the copy of Jemalloc we ship with the source code of Redis.</span></span>
<span class="line"><span>#    This is the default with Linux builds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2. You never need to enable this feature if you don&#39;t have fragmentation</span></span>
<span class="line"><span>#    issues.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 3. Once you experience fragmentation, you can enable this feature when</span></span>
<span class="line"><span>#    needed with the command &quot;CONFIG SET activedefrag yes&quot;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The configuration parameters are able to fine tune the behavior of the</span></span>
<span class="line"><span># defragmentation process. If you are not sure about what they mean it is</span></span>
<span class="line"><span># a good idea to leave the defaults untouched.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Enable active defragmentation</span></span>
<span class="line"><span># activedefrag yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimum amount of fragmentation waste to start active defrag</span></span>
<span class="line"><span># active-defrag-ignore-bytes 100mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimum percentage of fragmentation to start active defrag</span></span>
<span class="line"><span># active-defrag-threshold-lower 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximum percentage of fragmentation at which we use maximum effort</span></span>
<span class="line"><span># active-defrag-threshold-upper 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimal effort for defrag in CPU percentage</span></span>
<span class="line"><span># active-defrag-cycle-min 5</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximal effort for defrag in CPU percentage</span></span>
<span class="line"><span># active-defrag-cycle-max 75</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximum number of set/hash/zset/list fields that will be processed from</span></span>
<span class="line"><span># the main dictionary scan</span></span>
<span class="line"><span># active-defrag-max-scan-fields 1000</span></span></code></pre></div><p>这个是redis.config的文件配置与内容：</p><div class="language- vp-adaptive-theme"><button title="Copy Code" class="copy"></button><span class="lang"></span><pre class="shiki shiki-themes github-light github-dark vp-code"><code><span class="line"><span># Redis configuration file example.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that in order to read the configuration file, Redis must be</span></span>
<span class="line"><span># started with the file path as first argument:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ./redis-server /path/to/redis.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Note on units: when memory size is needed, it is possible to specify</span></span>
<span class="line"><span># it in the usual form of 1k 5GB 4M and so forth:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1k =&gt; 1000 bytes</span></span>
<span class="line"><span># 1kb =&gt; 1024 bytes</span></span>
<span class="line"><span># 1m =&gt; 1000000 bytes</span></span>
<span class="line"><span># 1mb =&gt; 1024*1024 bytes</span></span>
<span class="line"><span># 1g =&gt; 1000000000 bytes</span></span>
<span class="line"><span># 1gb =&gt; 1024*1024*1024 bytes</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># units are case insensitive so 1GB 1Gb 1gB are all the same.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## INCLUDES ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Include one or more other config files here.  This is useful if you</span></span>
<span class="line"><span># have a standard template that goes to all Redis servers but also need</span></span>
<span class="line"><span># to customize a few per-server settings.  Include files can include</span></span>
<span class="line"><span># other files, so use this wisely.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that option &quot;include&quot; won&#39;t be rewritten by command &quot;CONFIG REWRITE&quot;</span></span>
<span class="line"><span># from admin or Redis Sentinel. Since Redis always uses the last processed</span></span>
<span class="line"><span># line as value of a configuration directive, you&#39;d better put includes</span></span>
<span class="line"><span># at the beginning of this file to avoid overwriting config change at runtime.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If instead you are interested in using includes to override configuration</span></span>
<span class="line"><span># options, it is better to use include as the last line.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># include /path/to/local.conf</span></span>
<span class="line"><span># include /path/to/other.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## MODULES #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Load modules at startup. If the server is not able to load modules</span></span>
<span class="line"><span># it will abort. It is possible to use multiple loadmodule directives.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># loadmodule /path/to/my_module.so</span></span>
<span class="line"><span># loadmodule /path/to/other_module.so</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## NETWORK #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, if no &quot;bind&quot; configuration directive is specified, Redis listens</span></span>
<span class="line"><span># for connections from all available network interfaces on the host machine.</span></span>
<span class="line"><span># It is possible to listen to just one or multiple selected interfaces using</span></span>
<span class="line"><span># the &quot;bind&quot; configuration directive, followed by one or more IP addresses.</span></span>
<span class="line"><span># Each address can be prefixed by &quot;-&quot;, which means that redis will not fail to</span></span>
<span class="line"><span># start if the address is not available. Being not available only refers to</span></span>
<span class="line"><span># addresses that do not correspond to any network interface. Addresses that</span></span>
<span class="line"><span># are already in use will always fail, and unsupported protocols will always BE</span></span>
<span class="line"><span># silently skipped.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Examples:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># bind 192.168.1.100 10.0.0.1     # listens on two specific IPv4 addresses</span></span>
<span class="line"><span># bind 127.0.0.1 ::1              # listens on loopback IPv4 and IPv6</span></span>
<span class="line"><span># bind * -::*                     # like the default, all available interfaces</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the</span></span>
<span class="line"><span># internet, binding to all the interfaces is dangerous and will expose the</span></span>
<span class="line"><span># instance to everybody on the internet. So by default we uncomment the</span></span>
<span class="line"><span># following bind directive, that will force Redis to listen only on the</span></span>
<span class="line"><span># IPv4 and IPv6 (if available) loopback interface addresses (this means Redis</span></span>
<span class="line"><span># will only be able to accept client connections from the same host that it is</span></span>
<span class="line"><span># running on).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES</span></span>
<span class="line"><span># JUST COMMENT OUT THE FOLLOWING LINE.</span></span>
<span class="line"><span># ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~</span></span>
<span class="line"><span>#bind 127.0.0.1 -::1</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Protected mode is a layer of security protection, in order to avoid that</span></span>
<span class="line"><span># Redis instances left open on the internet are accessed and exploited.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When protected mode is on and if:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) The server is not binding explicitly to a set of addresses using the</span></span>
<span class="line"><span>#    &quot;bind&quot; directive.</span></span>
<span class="line"><span># 2) No password is configured.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The server only accepts connections from clients connecting from the</span></span>
<span class="line"><span># IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain</span></span>
<span class="line"><span># sockets.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default protected mode is enabled. You should disable it only if</span></span>
<span class="line"><span># you are sure you want clients from other hosts to connect to Redis</span></span>
<span class="line"><span># even if no authentication is configured, nor a specific set of interfaces</span></span>
<span class="line"><span># are explicitly listed using the &quot;bind&quot; directive.</span></span>
<span class="line"><span>protected-mode no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Accept connections on the specified port, default is 6379 (IANA #815344).</span></span>
<span class="line"><span># If port 0 is specified Redis will not listen on a TCP socket.</span></span>
<span class="line"><span>port 6379</span></span>
<span class="line"><span></span></span>
<span class="line"><span># TCP listen() backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In high requests-per-second environments you need a high backlog in order</span></span>
<span class="line"><span># to avoid slow clients connection issues. Note that the Linux kernel</span></span>
<span class="line"><span># will silently truncate it to the value of /proc/sys/net/core/somaxconn so</span></span>
<span class="line"><span># make sure to raise both the value of somaxconn and tcp_max_syn_backlog</span></span>
<span class="line"><span># in order to get the desired effect.</span></span>
<span class="line"><span>tcp-backlog 511</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Unix socket.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Specify the path for the Unix socket that will be used to listen for</span></span>
<span class="line"><span># incoming connections. There is no default, so Redis will not listen</span></span>
<span class="line"><span># on a unix socket when not specified.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># unixsocket /run/redis.sock</span></span>
<span class="line"><span># unixsocketperm 700</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Close the connection after a client is idle for N seconds (0 to disable)</span></span>
<span class="line"><span>timeout 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># TCP keepalive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence</span></span>
<span class="line"><span># of communication. This is useful for two reasons:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Detect dead peers.</span></span>
<span class="line"><span># 2) Force network equipment in the middle to consider the connection to be</span></span>
<span class="line"><span>#    alive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># On Linux, the specified value (in seconds) is the period used to send ACKs.</span></span>
<span class="line"><span># Note that to close the connection the double of the time is needed.</span></span>
<span class="line"><span># On other kernels the period depends on the kernel configuration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A reasonable value for this option is 300 seconds, which is the new</span></span>
<span class="line"><span># Redis default starting with Redis 3.2.1.</span></span>
<span class="line"><span>tcp-keepalive 300</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################# TLS/SSL #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, TLS/SSL is disabled. To enable it, the &quot;tls-port&quot; configuration</span></span>
<span class="line"><span># directive can be used to define TLS-listening ports. To enable TLS on the</span></span>
<span class="line"><span># default port, use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># port 0</span></span>
<span class="line"><span># tls-port 6379</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Configure a X.509 certificate and private key to use for authenticating the</span></span>
<span class="line"><span># server to connected clients, masters or cluster peers.  These files should be</span></span>
<span class="line"><span># PEM formatted.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-cert-file redis.crt </span></span>
<span class="line"><span># tls-key-file redis.key</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the key file is encrypted using a passphrase, it can be included here</span></span>
<span class="line"><span># as well.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-key-file-pass secret</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Normally Redis uses the same certificate for both server functions (accepting</span></span>
<span class="line"><span># connections) and client functions (replicating from a master, establishing</span></span>
<span class="line"><span># cluster bus connections, etc.).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Sometimes certificates are issued with attributes that designate them as</span></span>
<span class="line"><span># client-only or server-only certificates. In that case it may be desired to use</span></span>
<span class="line"><span># different certificates for incoming (server) and outgoing (client)</span></span>
<span class="line"><span># connections. To do that, use the following directives:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-client-cert-file client.crt</span></span>
<span class="line"><span># tls-client-key-file client.key</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the key file is encrypted using a passphrase, it can be included here</span></span>
<span class="line"><span># as well.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-client-key-file-pass secret</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-dh-params-file redis.dh</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL</span></span>
<span class="line"><span># clients and peers.  Redis requires an explicit configuration of at least one</span></span>
<span class="line"><span># of these, and will not implicitly use the system wide configuration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-ca-cert-file ca.crt</span></span>
<span class="line"><span># tls-ca-cert-dir /etc/ssl/certs</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, clients (including replica servers) on a TLS port are required</span></span>
<span class="line"><span># to authenticate using valid client side certificates.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If &quot;no&quot; is specified, client certificates are not required and not accepted.</span></span>
<span class="line"><span># If &quot;optional&quot; is specified, client certificates are accepted and must be</span></span>
<span class="line"><span># valid if provided, but are not required.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-auth-clients no</span></span>
<span class="line"><span># tls-auth-clients optional</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, a Redis replica does not attempt to establish a TLS connection</span></span>
<span class="line"><span># with its master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Use the following directive to enable TLS on replication links.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-replication yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, the Redis Cluster bus uses a plain TCP connection. To enable</span></span>
<span class="line"><span># TLS for the bus protocol, use the following directive:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-cluster yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended</span></span>
<span class="line"><span># that older formally deprecated versions are kept disabled to reduce the attack surface.</span></span>
<span class="line"><span># You can explicitly specify TLS versions to support.</span></span>
<span class="line"><span># Allowed values are case insensitive and include &quot;TLSv1&quot;, &quot;TLSv1.1&quot;, &quot;TLSv1.2&quot;,</span></span>
<span class="line"><span># &quot;TLSv1.3&quot; (OpenSSL &gt;= 1.1.1) or any combination.</span></span>
<span class="line"><span># To enable only TLSv1.2 and TLSv1.3, use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-protocols &quot;TLSv1.2 TLSv1.3&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Configure allowed ciphers.  See the ciphers(1ssl) manpage for more information</span></span>
<span class="line"><span># about the syntax of this string.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: this configuration applies only to &lt;= TLSv1.2.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-ciphers DEFAULT:!MEDIUM</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Configure allowed TLSv1.3 ciphersuites.  See the ciphers(1ssl) manpage for more</span></span>
<span class="line"><span># information about the syntax of this string, and specifically for TLSv1.3</span></span>
<span class="line"><span># ciphersuites.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When choosing a cipher, use the server&#39;s preference instead of the client</span></span>
<span class="line"><span># preference. By default, the server follows the client&#39;s preference.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-prefer-server-ciphers yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, TLS session caching is enabled to allow faster and less expensive</span></span>
<span class="line"><span># reconnections by clients that support it. Use the following directive to disable</span></span>
<span class="line"><span># caching.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-session-caching no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Change the default number of TLS sessions cached. A zero value sets the cache</span></span>
<span class="line"><span># to unlimited size. The default size is 20480.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-session-cache-size 5000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Change the default timeout of cached TLS sessions. The default timeout is 300</span></span>
<span class="line"><span># seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tls-session-cache-timeout 60</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################# GENERAL #####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis does not run as a daemon. Use &#39;yes&#39; if you need it.</span></span>
<span class="line"><span># Note that Redis will write a pid file in /var/run/redis.pid when daemonized.</span></span>
<span class="line"><span># When Redis is supervised by upstart or systemd, this parameter has no impact.</span></span>
<span class="line"><span>daemonize yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># If you run Redis from upstart or systemd, Redis can interact with your</span></span>
<span class="line"><span># supervision tree. Options:</span></span>
<span class="line"><span>#   supervised no      - no supervision interaction</span></span>
<span class="line"><span>#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode</span></span>
<span class="line"><span>#                        requires &quot;expect stop&quot; in your upstart job config</span></span>
<span class="line"><span>#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET</span></span>
<span class="line"><span>#                        on startup, and updating Redis status on a regular</span></span>
<span class="line"><span>#                        basis.</span></span>
<span class="line"><span>#   supervised auto    - detect upstart or systemd method based on</span></span>
<span class="line"><span>#                        UPSTART_JOB or NOTIFY_SOCKET environment variables</span></span>
<span class="line"><span># Note: these supervision methods only signal &quot;process is ready.&quot;</span></span>
<span class="line"><span>#       They do not enable continuous pings back to your supervisor.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is &quot;no&quot;. To run under upstart/systemd, you can simply uncomment</span></span>
<span class="line"><span># the line below:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># supervised auto</span></span>
<span class="line"><span></span></span>
<span class="line"><span># If a pid file is specified, Redis writes it where specified at startup</span></span>
<span class="line"><span># and removes it at exit.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When the server runs non daemonized, no pid file is created if none is</span></span>
<span class="line"><span># specified in the configuration. When the server is daemonized, the pid file</span></span>
<span class="line"><span># is used even if not specified, defaulting to &quot;/var/run/redis.pid&quot;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Creating a pid file is best effort: if Redis is not able to create it</span></span>
<span class="line"><span># nothing bad happens, the server will start and run normally.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that on modern Linux systems &quot;/run/redis.pid&quot; is more conforming</span></span>
<span class="line"><span># and should be used instead.</span></span>
<span class="line"><span>pidfile /var/run/redis_6379.pid</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the server verbosity level.</span></span>
<span class="line"><span># This can be one of:</span></span>
<span class="line"><span># debug (a lot of information, useful for development/testing)</span></span>
<span class="line"><span># verbose (many rarely useful info, but not a mess like the debug level)</span></span>
<span class="line"><span># notice (moderately verbose, what you want in production probably)</span></span>
<span class="line"><span># warning (only very important / critical messages are logged)</span></span>
<span class="line"><span>loglevel notice</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the log file name. Also the empty string can be used to force</span></span>
<span class="line"><span># Redis to log on the standard output. Note that if you use standard</span></span>
<span class="line"><span># output for logging but daemonize, logs will be sent to /dev/null</span></span>
<span class="line"><span>logfile &quot;&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># To enable logging to the system logger, just set &#39;syslog-enabled&#39; to yes,</span></span>
<span class="line"><span># and optionally update the other syslog parameters to suit your needs.</span></span>
<span class="line"><span># syslog-enabled no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the syslog identity.</span></span>
<span class="line"><span># syslog-ident redis</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.</span></span>
<span class="line"><span># syslog-facility local0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># To disable the built in crash log, which will possibly produce cleaner core</span></span>
<span class="line"><span># dumps when they are needed, uncomment the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># crash-log-enabled no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># To disable the fast memory check that&#39;s run as part of the crash log, which</span></span>
<span class="line"><span># will possibly let redis terminate sooner, uncomment the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># crash-memcheck-enabled no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the number of databases. The default database is DB 0, you can select</span></span>
<span class="line"><span># a different one on a per-connection basis using SELECT &lt;dbid&gt; where</span></span>
<span class="line"><span># dbid is a number between 0 and &#39;databases&#39;-1</span></span>
<span class="line"><span>databases 16</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis shows an ASCII art logo only when started to log to the</span></span>
<span class="line"><span># standard output and if the standard output is a TTY and syslog logging is</span></span>
<span class="line"><span># disabled. Basically this means that normally a logo is displayed only in</span></span>
<span class="line"><span># interactive sessions.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However it is possible to force the pre-4.0 behavior and always show a</span></span>
<span class="line"><span># ASCII art logo in startup logs by setting the following option to yes.</span></span>
<span class="line"><span>always-show-logo no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default, Redis modifies the process title (as seen in &#39;top&#39; and &#39;ps&#39;) to</span></span>
<span class="line"><span># provide some runtime information. It is possible to disable this and leave</span></span>
<span class="line"><span># the process name as executed by setting the following to no.</span></span>
<span class="line"><span>set-proc-title yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When changing the process title, Redis uses the following template to construct</span></span>
<span class="line"><span># the modified title.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Template variables are specified in curly brackets. The following variables are</span></span>
<span class="line"><span># supported:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># {title}           Name of process as executed if parent, or type of child process.</span></span>
<span class="line"><span># {listen-addr}     Bind address or &#39;*&#39; followed by TCP or TLS port listening on, or</span></span>
<span class="line"><span>#                   Unix socket if only that&#39;s available.</span></span>
<span class="line"><span># {server-mode}     Special mode, i.e. &quot;[sentinel]&quot; or &quot;[cluster]&quot;.</span></span>
<span class="line"><span># {port}            TCP port listening on, or 0.</span></span>
<span class="line"><span># {tls-port}        TLS port listening on, or 0.</span></span>
<span class="line"><span># {unixsocket}      Unix domain socket listening on, or &quot;&quot;.</span></span>
<span class="line"><span># {config-file}     Name of configuration file used.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>proc-title-template &quot;{title} {listen-addr} {server-mode}&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ SNAPSHOTTING  ################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Save the DB to disk.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># save &lt;seconds&gt; &lt;changes&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis will save the DB if both the given number of seconds and the given</span></span>
<span class="line"><span># number of write operations against the DB occurred.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Snapshotting can be completely disabled with a single empty string argument</span></span>
<span class="line"><span># as in following example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># save &quot;&quot;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Unless specified otherwise, by default Redis will save the DB:</span></span>
<span class="line"><span>#   * After 3600 seconds (an hour) if at least 1 key changed</span></span>
<span class="line"><span>#   * After 300 seconds (5 minutes) if at least 100 keys changed</span></span>
<span class="line"><span>#   * After 60 seconds if at least 10000 keys changed</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># You can set these explicitly by uncommenting the three following lines.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># save 3600 1</span></span>
<span class="line"><span># save 300 100</span></span>
<span class="line"><span># save 60 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis will stop accepting writes if RDB snapshots are enabled</span></span>
<span class="line"><span># (at least one save point) and the latest background save failed.</span></span>
<span class="line"><span># This will make the user aware (in a hard way) that data is not persisting</span></span>
<span class="line"><span># on disk properly, otherwise chances are that no one will notice and some</span></span>
<span class="line"><span># disaster will happen.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the background saving process will start working again Redis will</span></span>
<span class="line"><span># automatically allow writes again.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However if you have setup your proper monitoring of the Redis server</span></span>
<span class="line"><span># and persistence, you may want to disable this feature so that Redis will</span></span>
<span class="line"><span># continue to work as usual even if there are problems with disk,</span></span>
<span class="line"><span># permissions, and so forth.</span></span>
<span class="line"><span>stop-writes-on-bgsave-error yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Compress string objects using LZF when dumping .rdb databases?</span></span>
<span class="line"><span># By default compression is enabled as it&#39;s almost always a win.</span></span>
<span class="line"><span># If you want to save some CPU in the saving child set it to &#39;no&#39; but</span></span>
<span class="line"><span># the dataset will likely be bigger if you have compressible values or keys.</span></span>
<span class="line"><span>rdbcompression yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Since version 5 of RDB a CRC64 checksum is placed at the end of the file.</span></span>
<span class="line"><span># This makes the format more resistant to corruption but there is a performance</span></span>
<span class="line"><span># hit to pay (around 10%) when saving and loading RDB files, so you can disable it</span></span>
<span class="line"><span># for maximum performance.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># RDB files created with checksum disabled have a checksum of zero that will</span></span>
<span class="line"><span># tell the loading code to skip the check.</span></span>
<span class="line"><span>rdbchecksum yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Enables or disables full sanitation checks for ziplist and listpack etc when</span></span>
<span class="line"><span># loading an RDB or RESTORE payload. This reduces the chances of an assertion or</span></span>
<span class="line"><span># crash later on while processing commands.</span></span>
<span class="line"><span># Options:</span></span>
<span class="line"><span>#   no         - Never perform full sanitation</span></span>
<span class="line"><span>#   yes        - Always perform full sanitation</span></span>
<span class="line"><span>#   clients    - Perform full sanitation only for user connections.</span></span>
<span class="line"><span>#                Excludes: RDB files, RESTORE commands received from the master</span></span>
<span class="line"><span>#                connection, and client connections which have the</span></span>
<span class="line"><span>#                skip-sanitize-payload ACL flag.</span></span>
<span class="line"><span># The default should be &#39;clients&#39; but since it currently affects cluster</span></span>
<span class="line"><span># resharding via MIGRATE, it is temporarily set to &#39;no&#39; by default.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># sanitize-dump-payload no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The filename where to dump the DB</span></span>
<span class="line"><span>dbfilename dump.rdb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Remove RDB files used by replication in instances without persistence</span></span>
<span class="line"><span># enabled. By default this option is disabled, however there are environments</span></span>
<span class="line"><span># where for regulations or other security concerns, RDB files persisted on</span></span>
<span class="line"><span># disk by masters in order to feed replicas, or stored on disk by replicas</span></span>
<span class="line"><span># in order to load them for the initial synchronization, should be deleted</span></span>
<span class="line"><span># ASAP. Note that this option ONLY WORKS in instances that have both AOF</span></span>
<span class="line"><span># and RDB persistence disabled, otherwise is completely ignored.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># An alternative (and sometimes better) way to obtain the same effect is</span></span>
<span class="line"><span># to use diskless replication on both master and replicas instances. However</span></span>
<span class="line"><span># in the case of replicas, diskless is not always an option.</span></span>
<span class="line"><span>rdb-del-sync-files no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The working directory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The DB will be written inside this directory, with the filename specified</span></span>
<span class="line"><span># above using the &#39;dbfilename&#39; configuration directive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Append Only File will also be created inside this directory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that you must specify a directory here, not a file name.</span></span>
<span class="line"><span>dir ./</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################# REPLICATION #################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Master-Replica replication. Use replicaof to make a Redis instance a copy of</span></span>
<span class="line"><span># another Redis server. A few things to understand ASAP about Redis replication.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   +------------------+      +---------------+</span></span>
<span class="line"><span>#   |      Master      | ---&gt; |    Replica    |</span></span>
<span class="line"><span>#   | (receive writes) |      |  (exact copy) |</span></span>
<span class="line"><span>#   +------------------+      +---------------+</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Redis replication is asynchronous, but you can configure a master to</span></span>
<span class="line"><span>#    stop accepting writes if it appears to be not connected with at least</span></span>
<span class="line"><span>#    a given number of replicas.</span></span>
<span class="line"><span># 2) Redis replicas are able to perform a partial resynchronization with the</span></span>
<span class="line"><span>#    master if the replication link is lost for a relatively small amount of</span></span>
<span class="line"><span>#    time. You may want to configure the replication backlog size (see the next</span></span>
<span class="line"><span>#    sections of this file) with a sensible value depending on your needs.</span></span>
<span class="line"><span># 3) Replication is automatic and does not need user intervention. After a</span></span>
<span class="line"><span>#    network partition replicas automatically try to reconnect to masters</span></span>
<span class="line"><span>#    and resynchronize with them.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replicaof &lt;masterip&gt; &lt;masterport&gt;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># If the master is password protected (using the &quot;requirepass&quot; configuration</span></span>
<span class="line"><span># directive below) it is possible to tell the replica to authenticate before</span></span>
<span class="line"><span># starting the replication synchronization process, otherwise the master will</span></span>
<span class="line"><span># refuse the replica request.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># masterauth &lt;master-password&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However this is not enough if you are using Redis ACLs (for Redis version</span></span>
<span class="line"><span># 6 or greater), and the default user is not capable of running the PSYNC</span></span>
<span class="line"><span># command and/or other commands needed for replication. In this case it&#39;s</span></span>
<span class="line"><span># better to configure a special user to use with replication, and specify the</span></span>
<span class="line"><span># masteruser configuration as such:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># masteruser &lt;username&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When masteruser is specified, the replica will authenticate against its</span></span>
<span class="line"><span># master using the new AUTH form: AUTH &lt;username&gt; &lt;password&gt;.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When a replica loses its connection with the master, or when the replication</span></span>
<span class="line"><span># is still in progress, the replica can act in two different ways:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) if replica-serve-stale-data is set to &#39;yes&#39; (the default) the replica will</span></span>
<span class="line"><span>#    still reply to client requests, possibly with out of date data, or the</span></span>
<span class="line"><span>#    data set may just be empty if this is the first synchronization.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2) If replica-serve-stale-data is set to &#39;no&#39; the replica will reply with</span></span>
<span class="line"><span>#    an error &quot;SYNC with master in progress&quot; to all commands except:</span></span>
<span class="line"><span>#    INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,</span></span>
<span class="line"><span>#    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,</span></span>
<span class="line"><span>#    HOST and LATENCY.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>replica-serve-stale-data yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># You can configure a replica instance to accept writes or not. Writing against</span></span>
<span class="line"><span># a replica instance may be useful to store some ephemeral data (because data</span></span>
<span class="line"><span># written on a replica will be easily deleted after resync with the master) but</span></span>
<span class="line"><span># may also cause problems if clients are writing to it because of a</span></span>
<span class="line"><span># misconfiguration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Since Redis 2.6 by default replicas are read-only.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: read only replicas are not designed to be exposed to untrusted clients</span></span>
<span class="line"><span># on the internet. It&#39;s just a protection layer against misuse of the instance.</span></span>
<span class="line"><span># Still a read only replica exports by default all the administrative commands</span></span>
<span class="line"><span># such as CONFIG, DEBUG, and so forth. To a limited extent you can improve</span></span>
<span class="line"><span># security of read only replicas using &#39;rename-command&#39; to shadow all the</span></span>
<span class="line"><span># administrative / dangerous commands.</span></span>
<span class="line"><span>replica-read-only yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Replication SYNC strategy: disk or socket.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># New replicas and reconnecting replicas that are not able to continue the</span></span>
<span class="line"><span># replication process just receiving differences, need to do what is called a</span></span>
<span class="line"><span># &quot;full synchronization&quot;. An RDB file is transmitted from the master to the</span></span>
<span class="line"><span># replicas.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The transmission can happen in two different ways:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Disk-backed: The Redis master creates a new process that writes the RDB</span></span>
<span class="line"><span>#                 file on disk. Later the file is transferred by the parent</span></span>
<span class="line"><span>#                 process to the replicas incrementally.</span></span>
<span class="line"><span># 2) Diskless: The Redis master creates a new process that directly writes the</span></span>
<span class="line"><span>#              RDB file to replica sockets, without touching the disk at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># With disk-backed replication, while the RDB file is generated, more replicas</span></span>
<span class="line"><span># can be queued and served with the RDB file as soon as the current child</span></span>
<span class="line"><span># producing the RDB file finishes its work. With diskless replication instead</span></span>
<span class="line"><span># once the transfer starts, new replicas arriving will be queued and a new</span></span>
<span class="line"><span># transfer will start when the current one terminates.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When diskless replication is used, the master waits a configurable amount of</span></span>
<span class="line"><span># time (in seconds) before starting the transfer in the hope that multiple</span></span>
<span class="line"><span># replicas will arrive and the transfer can be parallelized.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># With slow disks and fast (large bandwidth) networks, diskless replication</span></span>
<span class="line"><span># works better.</span></span>
<span class="line"><span>repl-diskless-sync no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When diskless replication is enabled, it is possible to configure the delay</span></span>
<span class="line"><span># the server waits in order to spawn the child that transfers the RDB via socket</span></span>
<span class="line"><span># to the replicas.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is important since once the transfer starts, it is not possible to serve</span></span>
<span class="line"><span># new replicas arriving, that will be queued for the next RDB transfer, so the</span></span>
<span class="line"><span># server waits a delay in order to let more replicas arrive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The delay is specified in seconds, and by default is 5 seconds. To disable</span></span>
<span class="line"><span># it entirely just set it to 0 seconds and the transfer will start ASAP.</span></span>
<span class="line"><span>repl-diskless-sync-delay 5</span></span>
<span class="line"><span></span></span>
<span class="line"><span># -----------------------------------------------------------------------------</span></span>
<span class="line"><span># WARNING: RDB diskless load is experimental. Since in this setup the replica</span></span>
<span class="line"><span># does not immediately store an RDB on disk, it may cause data loss during</span></span>
<span class="line"><span># failovers. RDB diskless load + Redis modules not handling I/O reads may also</span></span>
<span class="line"><span># cause Redis to abort in case of I/O errors during the initial synchronization</span></span>
<span class="line"><span># stage with the master. Use only if you know what you are doing.</span></span>
<span class="line"><span># -----------------------------------------------------------------------------</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Replica can load the RDB it reads from the replication link directly from the</span></span>
<span class="line"><span># socket, or store the RDB to a file and read that file after it was completely</span></span>
<span class="line"><span># received from the master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In many cases the disk is slower than the network, and storing and loading</span></span>
<span class="line"><span># the RDB file may increase replication time (and even increase the master&#39;s</span></span>
<span class="line"><span># Copy on Write memory and replica buffers).</span></span>
<span class="line"><span># However, parsing the RDB file directly from the socket may mean that we have</span></span>
<span class="line"><span># to flush the contents of the current database before the full rdb was</span></span>
<span class="line"><span># received. For this reason we have the following options:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># &quot;disabled&quot;    - Don&#39;t use diskless load (store the rdb file to the disk first)</span></span>
<span class="line"><span># &quot;on-empty-db&quot; - Use diskless load only when it is completely safe.</span></span>
<span class="line"><span># &quot;swapdb&quot;      - Keep a copy of the current db contents in RAM while parsing</span></span>
<span class="line"><span>#                 the data directly from the socket. note that this requires</span></span>
<span class="line"><span>#                 sufficient memory, if you don&#39;t have it, you risk an OOM kill.</span></span>
<span class="line"><span>repl-diskless-load disabled</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Replicas send PINGs to server in a predefined interval. It&#39;s possible to</span></span>
<span class="line"><span># change this interval with the repl_ping_replica_period option. The default</span></span>
<span class="line"><span># value is 10 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-ping-replica-period 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The following option sets the replication timeout for:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) Bulk transfer I/O during SYNC, from the point of view of replica.</span></span>
<span class="line"><span># 2) Master timeout from the point of view of replicas (data, pings).</span></span>
<span class="line"><span># 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is important to make sure that this value is greater than the value</span></span>
<span class="line"><span># specified for repl-ping-replica-period otherwise a timeout will be detected</span></span>
<span class="line"><span># every time there is low traffic between the master and the replica. The default</span></span>
<span class="line"><span># value is 60 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-timeout 60</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Disable TCP_NODELAY on the replica socket after SYNC?</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you select &quot;yes&quot; Redis will use a smaller number of TCP packets and</span></span>
<span class="line"><span># less bandwidth to send data to replicas. But this can add a delay for</span></span>
<span class="line"><span># the data to appear on the replica side, up to 40 milliseconds with</span></span>
<span class="line"><span># Linux kernels using a default configuration.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you select &quot;no&quot; the delay for data to appear on the replica side will</span></span>
<span class="line"><span># be reduced but more bandwidth will be used for replication.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default we optimize for low latency, but in very high traffic conditions</span></span>
<span class="line"><span># or when the master and replicas are many hops away, turning this to &quot;yes&quot; may</span></span>
<span class="line"><span># be a good idea.</span></span>
<span class="line"><span>repl-disable-tcp-nodelay no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the replication backlog size. The backlog is a buffer that accumulates</span></span>
<span class="line"><span># replica data when replicas are disconnected for some time, so that when a</span></span>
<span class="line"><span># replica wants to reconnect again, often a full resync is not needed, but a</span></span>
<span class="line"><span># partial resync is enough, just passing the portion of data the replica</span></span>
<span class="line"><span># missed while disconnected.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The bigger the replication backlog, the longer the replica can endure the</span></span>
<span class="line"><span># disconnect and later be able to perform a partial resynchronization.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The backlog is only allocated if there is at least one replica connected.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-backlog-size 1mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># After a master has no connected replicas for some time, the backlog will be</span></span>
<span class="line"><span># freed. The following option configures the amount of seconds that need to</span></span>
<span class="line"><span># elapse, starting from the time the last replica disconnected, for the backlog</span></span>
<span class="line"><span># buffer to be freed.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that replicas never free the backlog for timeout, since they may be</span></span>
<span class="line"><span># promoted to masters later, and should be able to correctly &quot;partially</span></span>
<span class="line"><span># resynchronize&quot; with other replicas: hence they should always accumulate backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A value of 0 means to never release the backlog.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># repl-backlog-ttl 3600</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The replica priority is an integer number published by Redis in the INFO</span></span>
<span class="line"><span># output. It is used by Redis Sentinel in order to select a replica to promote</span></span>
<span class="line"><span># into a master if the master is no longer working correctly.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A replica with a low priority number is considered better for promotion, so</span></span>
<span class="line"><span># for instance if there are three replicas with priority 10, 100, 25 Sentinel</span></span>
<span class="line"><span># will pick the one with priority 10, that is the lowest.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However a special priority of 0 marks the replica as not able to perform the</span></span>
<span class="line"><span># role of master, so a replica with priority of 0 will never be selected by</span></span>
<span class="line"><span># Redis Sentinel for promotion.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default the priority is 100.</span></span>
<span class="line"><span>replica-priority 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># -----------------------------------------------------------------------------</span></span>
<span class="line"><span># By default, Redis Sentinel includes all replicas in its reports. A replica</span></span>
<span class="line"><span># can be excluded from Redis Sentinel&#39;s announcements. An unannounced replica</span></span>
<span class="line"><span># will be ignored by the &#39;sentinel replicas &lt;master&gt;&#39; command and won&#39;t be</span></span>
<span class="line"><span># exposed to Redis Sentinel&#39;s clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This option does not change the behavior of replica-priority. Even with</span></span>
<span class="line"><span># replica-announced set to &#39;no&#39;, the replica can be promoted to master. To</span></span>
<span class="line"><span># prevent this behavior, set replica-priority to 0.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replica-announced yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># It is possible for a master to stop accepting writes if there are less than</span></span>
<span class="line"><span># N replicas connected, having a lag less or equal than M seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The N replicas need to be in &quot;online&quot; state.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The lag in seconds, that must be &lt;= the specified value, is calculated from</span></span>
<span class="line"><span># the last ping received from the replica, that is usually sent every second.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This option does not GUARANTEE that N replicas will accept the write, but</span></span>
<span class="line"><span># will limit the window of exposure for lost writes in case not enough replicas</span></span>
<span class="line"><span># are available, to the specified number of seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For example to require at least 3 replicas with a lag &lt;= 10 seconds use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># min-replicas-to-write 3</span></span>
<span class="line"><span># min-replicas-max-lag 10</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Setting one or the other to 0 disables the feature.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default min-replicas-to-write is set to 0 (feature disabled) and</span></span>
<span class="line"><span># min-replicas-max-lag is set to 10.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># A Redis master is able to list the address and port of the attached</span></span>
<span class="line"><span># replicas in different ways. For example the &quot;INFO replication&quot; section</span></span>
<span class="line"><span># offers this information, which is used, among other tools, by</span></span>
<span class="line"><span># Redis Sentinel in order to discover replica instances.</span></span>
<span class="line"><span># Another place where this info is available is in the output of the</span></span>
<span class="line"><span># &quot;ROLE&quot; command of a master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The listed IP address and port normally reported by a replica is</span></span>
<span class="line"><span># obtained in the following way:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   IP: The address is auto detected by checking the peer address</span></span>
<span class="line"><span>#   of the socket used by the replica to connect with the master.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   Port: The port is communicated by the replica during the replication</span></span>
<span class="line"><span>#   handshake, and is normally the port that the replica is using to</span></span>
<span class="line"><span>#   listen for connections.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However when port forwarding or Network Address Translation (NAT) is</span></span>
<span class="line"><span># used, the replica may actually be reachable via different IP and port</span></span>
<span class="line"><span># pairs. The following two options can be used by a replica in order to</span></span>
<span class="line"><span># report to its master a specific set of IP and port, so that both INFO</span></span>
<span class="line"><span># and ROLE will report those values.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There is no need to use both the options if you need to override just</span></span>
<span class="line"><span># the port or the IP address.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replica-announce-ip 5.5.5.5</span></span>
<span class="line"><span># replica-announce-port 1234</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################### KEYS TRACKING #################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis implements server assisted support for client side caching of values.</span></span>
<span class="line"><span># This is implemented using an invalidation table that remembers, using</span></span>
<span class="line"><span># a radix tree indexed by key name, what clients have which keys. In turn</span></span>
<span class="line"><span># this is used in order to send invalidation messages to clients. Please</span></span>
<span class="line"><span># check this page to understand more about the feature:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   https://redis.io/topics/client-side-caching</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When tracking is enabled for a client, all the read only queries are assumed</span></span>
<span class="line"><span># to be cached: this will force Redis to store information in the invalidation</span></span>
<span class="line"><span># table. When keys are modified, such information is flushed away, and</span></span>
<span class="line"><span># invalidation messages are sent to the clients. However if the workload is</span></span>
<span class="line"><span># heavily dominated by reads, Redis could use more and more memory in order</span></span>
<span class="line"><span># to track the keys fetched by many clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For this reason it is possible to configure a maximum fill value for the</span></span>
<span class="line"><span># invalidation table. By default it is set to 1M of keys, and once this limit</span></span>
<span class="line"><span># is reached, Redis will start to evict keys in the invalidation table</span></span>
<span class="line"><span># even if they were not modified, just to reclaim memory: this will in turn</span></span>
<span class="line"><span># force the clients to invalidate the cached values. Basically the table</span></span>
<span class="line"><span># maximum size is a trade off between the memory you want to spend server</span></span>
<span class="line"><span># side to track information about who cached what, and the ability of clients</span></span>
<span class="line"><span># to retain cached objects in memory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you set the value to 0, it means there are no limits, and Redis will</span></span>
<span class="line"><span># retain as many keys as needed in the invalidation table.</span></span>
<span class="line"><span># In the &quot;stats&quot; INFO section, you can find information about the number of</span></span>
<span class="line"><span># keys in the invalidation table at every given moment.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: when key tracking is used in broadcasting mode, no memory is used</span></span>
<span class="line"><span># in the server side so this setting is useless.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># tracking-table-max-keys 1000000</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## SECURITY ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Warning: since Redis is pretty fast, an outside user can try up to</span></span>
<span class="line"><span># 1 million passwords per second against a modern box. This means that you</span></span>
<span class="line"><span># should use very strong passwords, otherwise they will be very easy to break.</span></span>
<span class="line"><span># Note that because the password is really a shared secret between the client</span></span>
<span class="line"><span># and the server, and should not be memorized by any human, the password</span></span>
<span class="line"><span># can be easily a long string from /dev/urandom or whatever, so by using a</span></span>
<span class="line"><span># long and unguessable password no brute force attack will be possible.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis ACL users are defined in the following format:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   user &lt;username&gt; ... acl rules ...</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   user worker +@list +@connection ~jobs:* on &gt;ffa9203c493aa99</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The special username &quot;default&quot; is used for new connections. If this user</span></span>
<span class="line"><span># has the &quot;nopass&quot; rule, then new connections will be immediately authenticated</span></span>
<span class="line"><span># as the &quot;default&quot; user without the need of any password provided via the</span></span>
<span class="line"><span># AUTH command. Otherwise if the &quot;default&quot; user is not flagged with &quot;nopass&quot;</span></span>
<span class="line"><span># the connections will start in not authenticated state, and will require</span></span>
<span class="line"><span># AUTH (or the HELLO command AUTH option) in order to be authenticated and</span></span>
<span class="line"><span># start to work.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The ACL rules that describe what a user can do are the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  on           Enable the user: it is possible to authenticate as this user.</span></span>
<span class="line"><span>#  off          Disable the user: it&#39;s no longer possible to authenticate</span></span>
<span class="line"><span>#               with this user, however the already authenticated connections</span></span>
<span class="line"><span>#               will still work.</span></span>
<span class="line"><span>#  skip-sanitize-payload    RESTORE dump-payload sanitation is skipped.</span></span>
<span class="line"><span>#  sanitize-payload         RESTORE dump-payload is sanitized (default).</span></span>
<span class="line"><span>#  +&lt;command&gt;   Allow the execution of that command</span></span>
<span class="line"><span>#  -&lt;command&gt;   Disallow the execution of that command</span></span>
<span class="line"><span>#  +@&lt;category&gt; Allow the execution of all the commands in such category</span></span>
<span class="line"><span>#               where valid categories are like @admin, @set, @sortedset, ...</span></span>
<span class="line"><span>#               and so forth, see the full list in the server.c file where</span></span>
<span class="line"><span>#               the Redis command table is described and defined.</span></span>
<span class="line"><span>#               The special category @all means all the commands, both currently</span></span>
<span class="line"><span>#               present in the server, and those that will be loaded in the future</span></span>
<span class="line"><span>#               via modules.</span></span>
<span class="line"><span>#  +&lt;command&gt;|subcommand    Allow a specific subcommand of an otherwise</span></span>
<span class="line"><span>#                           disabled command. Note that this form is not</span></span>
<span class="line"><span>#                           allowed as negative like -DEBUG|SEGFAULT, but</span></span>
<span class="line"><span>#                           only additive starting with &quot;+&quot;.</span></span>
<span class="line"><span>#  allcommands  Alias for +@all. Note that it implies the ability to execute</span></span>
<span class="line"><span>#               all the future commands loaded via the modules system.</span></span>
<span class="line"><span>#  nocommands   Alias for -@all.</span></span>
<span class="line"><span>#  ~&lt;pattern&gt;   Add a pattern of keys that can be mentioned as part of</span></span>
<span class="line"><span>#               commands. For instance ~* allows all the keys. The pattern</span></span>
<span class="line"><span>#               is a glob-style pattern like the one of KEYS.</span></span>
<span class="line"><span>#               It is possible to specify multiple patterns.</span></span>
<span class="line"><span>#  allkeys      Alias for ~*</span></span>
<span class="line"><span>#  resetkeys    Flush the list of allowed keys patterns.</span></span>
<span class="line"><span>#  &amp;&lt;pattern&gt;   Add a glob-style pattern of Pub/Sub channels that can be</span></span>
<span class="line"><span>#               accessed by the user. It is possible to specify multiple channel</span></span>
<span class="line"><span>#               patterns.</span></span>
<span class="line"><span>#  allchannels  Alias for &amp;*</span></span>
<span class="line"><span>#  resetchannels            Flush the list of allowed channel patterns.</span></span>
<span class="line"><span>#  &gt;&lt;password&gt;  Add this password to the list of valid password for the user.</span></span>
<span class="line"><span>#               For example &gt;mypass will add &quot;mypass&quot; to the list.</span></span>
<span class="line"><span>#               This directive clears the &quot;nopass&quot; flag (see later).</span></span>
<span class="line"><span>#  &lt;&lt;password&gt;  Remove this password from the list of valid passwords.</span></span>
<span class="line"><span>#  nopass       All the set passwords of the user are removed, and the user</span></span>
<span class="line"><span>#               is flagged as requiring no password: it means that every</span></span>
<span class="line"><span>#               password will work against this user. If this directive is</span></span>
<span class="line"><span>#               used for the default user, every new connection will be</span></span>
<span class="line"><span>#               immediately authenticated with the default user without</span></span>
<span class="line"><span>#               any explicit AUTH command required. Note that the &quot;resetpass&quot;</span></span>
<span class="line"><span>#               directive will clear this condition.</span></span>
<span class="line"><span>#  resetpass    Flush the list of allowed passwords. Moreover removes the</span></span>
<span class="line"><span>#               &quot;nopass&quot; status. After &quot;resetpass&quot; the user has no associated</span></span>
<span class="line"><span>#               passwords and there is no way to authenticate without adding</span></span>
<span class="line"><span>#               some password (or setting it as &quot;nopass&quot; later).</span></span>
<span class="line"><span>#  reset        Performs the following actions: resetpass, resetkeys, off,</span></span>
<span class="line"><span>#               -@all. The user returns to the same state it has immediately</span></span>
<span class="line"><span>#               after its creation.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ACL rules can be specified in any order: for instance you can start with</span></span>
<span class="line"><span># passwords, then flags, or key patterns. However note that the additive</span></span>
<span class="line"><span># and subtractive rules will CHANGE MEANING depending on the ordering.</span></span>
<span class="line"><span># For instance see the following example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   user alice on +@all -DEBUG ~* &gt;somepassword</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This will allow &quot;alice&quot; to use all the commands with the exception of the</span></span>
<span class="line"><span># DEBUG command, since +@all added all the commands to the set of the commands</span></span>
<span class="line"><span># alice can use, and later DEBUG was removed. However if we invert the order</span></span>
<span class="line"><span># of two ACL rules the result will be different:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   user alice on -DEBUG +@all ~* &gt;somepassword</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Now DEBUG was removed when alice had yet no commands in the set of allowed</span></span>
<span class="line"><span># commands, later all the commands are added, so the user will be able to</span></span>
<span class="line"><span># execute everything.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Basically ACL rules are processed left-to-right.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For more information about ACL configuration please refer to</span></span>
<span class="line"><span># the Redis web site at https://redis.io/topics/acl</span></span>
<span class="line"><span></span></span>
<span class="line"><span># ACL LOG</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The ACL Log tracks failed commands and authentication events associated</span></span>
<span class="line"><span># with ACLs. The ACL Log is useful to troubleshoot failed commands blocked </span></span>
<span class="line"><span># by ACLs. The ACL Log is stored in memory. You can reclaim memory with </span></span>
<span class="line"><span># ACL LOG RESET. Define the maximum entry length of the ACL Log below.</span></span>
<span class="line"><span>acllog-max-len 128</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Using an external ACL file</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Instead of configuring users here in this file, it is possible to use</span></span>
<span class="line"><span># a stand-alone file just listing users. The two methods cannot be mixed:</span></span>
<span class="line"><span># if you configure users here and at the same time you activate the external</span></span>
<span class="line"><span># ACL file, the server will refuse to start.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The format of the external ACL user file is exactly the same as the</span></span>
<span class="line"><span># format that is used inside redis.conf to describe users.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># aclfile /etc/redis/users.acl</span></span>
<span class="line"><span></span></span>
<span class="line"><span># IMPORTANT NOTE: starting with Redis 6 &quot;requirepass&quot; is just a compatibility</span></span>
<span class="line"><span># layer on top of the new ACL system. The option effect will be just setting</span></span>
<span class="line"><span># the password for the default user. Clients will still authenticate using</span></span>
<span class="line"><span># AUTH &lt;password&gt; as usually, or more explicitly with AUTH default &lt;password&gt;</span></span>
<span class="line"><span># if they follow the new protocol: both will work.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The requirepass is not compatible with the aclfile option and the ACL LOAD</span></span>
<span class="line"><span># command, these will cause requirepass to be ignored.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>requirepass 956766</span></span>
<span class="line"><span></span></span>
<span class="line"><span># New users are initialized with restrictive permissions by default, via the</span></span>
<span class="line"><span># equivalent of this ACL rule &#39;off resetkeys -@all&#39;. Starting with Redis 6.2, it</span></span>
<span class="line"><span># is possible to manage access to Pub/Sub channels with ACL rules as well. The</span></span>
<span class="line"><span># default Pub/Sub channels permission for new users is controlled by the </span></span>
<span class="line"><span># acl-pubsub-default configuration directive, which accepts one of these values:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># allchannels: grants access to all Pub/Sub channels</span></span>
<span class="line"><span># resetchannels: revokes access to all Pub/Sub channels</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default</span></span>
<span class="line"><span># defaults to the &#39;allchannels&#39; permission.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Future compatibility note: it is very likely that in a future version of Redis</span></span>
<span class="line"><span># the directive&#39;s default of &#39;allchannels&#39; will be changed to &#39;resetchannels&#39; in</span></span>
<span class="line"><span># order to provide better out-of-the-box Pub/Sub security. Therefore, it is</span></span>
<span class="line"><span># recommended that you explicitly define Pub/Sub permissions for all users</span></span>
<span class="line"><span># rather than rely on implicit default values. Once you&#39;ve set explicit</span></span>
<span class="line"><span># Pub/Sub for all existing users, you should uncomment the following line.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># acl-pubsub-default resetchannels</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Command renaming (DEPRECATED).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># ------------------------------------------------------------------------</span></span>
<span class="line"><span># WARNING: avoid using this option if possible. Instead use ACLs to remove</span></span>
<span class="line"><span># commands from the default user, and put them only in some admin user you</span></span>
<span class="line"><span># create for administrative purposes.</span></span>
<span class="line"><span># ------------------------------------------------------------------------</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is possible to change the name of dangerous commands in a shared</span></span>
<span class="line"><span># environment. For instance the CONFIG command may be renamed into something</span></span>
<span class="line"><span># hard to guess so that it will still be available for internal-use tools</span></span>
<span class="line"><span># but not available for general clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is also possible to completely kill a command by renaming it into</span></span>
<span class="line"><span># an empty string:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># rename-command CONFIG &quot;&quot;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Please note that changing the name of commands that are logged into the</span></span>
<span class="line"><span># AOF file or transmitted to replicas may cause problems.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################### CLIENTS ####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set the max number of connected clients at the same time. By default</span></span>
<span class="line"><span># this limit is set to 10000 clients, however if the Redis server is not</span></span>
<span class="line"><span># able to configure the process file limit to allow for the specified limit</span></span>
<span class="line"><span># the max number of allowed clients is set to the current file limit</span></span>
<span class="line"><span># minus 32 (as Redis reserves a few file descriptors for internal uses).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Once the limit is reached Redis will close all the new connections sending</span></span>
<span class="line"><span># an error &#39;max number of clients reached&#39;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># IMPORTANT: When Redis Cluster is used, the max number of connections is also</span></span>
<span class="line"><span># shared with the cluster bus: every node in the cluster will use two</span></span>
<span class="line"><span># connections, one incoming and another outgoing. It is important to size the</span></span>
<span class="line"><span># limit accordingly in case of very large clusters.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxclients 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################## MEMORY MANAGEMENT ################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Set a memory usage limit to the specified amount of bytes.</span></span>
<span class="line"><span># When the memory limit is reached Redis will try to remove keys</span></span>
<span class="line"><span># according to the eviction policy selected (see maxmemory-policy).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If Redis can&#39;t remove keys according to the policy, or if the policy is</span></span>
<span class="line"><span># set to &#39;noeviction&#39;, Redis will start to reply with errors to commands</span></span>
<span class="line"><span># that would use more memory, like SET, LPUSH, and so on, and will continue</span></span>
<span class="line"><span># to reply to read-only commands like GET.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This option is usually useful when using Redis as an LRU or LFU cache, or to</span></span>
<span class="line"><span># set a hard memory limit for an instance (using the &#39;noeviction&#39; policy).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># WARNING: If you have replicas attached to an instance with maxmemory on,</span></span>
<span class="line"><span># the size of the output buffers needed to feed the replicas are subtracted</span></span>
<span class="line"><span># from the used memory count, so that network problems / resyncs will</span></span>
<span class="line"><span># not trigger a loop where keys are evicted, and in turn the output</span></span>
<span class="line"><span># buffer of replicas is full with DELs of keys evicted triggering the deletion</span></span>
<span class="line"><span># of more keys, and so forth until the database is completely emptied.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In short... if you have replicas attached it is suggested that you set a lower</span></span>
<span class="line"><span># limit for maxmemory so that there is some free RAM on the system for replica</span></span>
<span class="line"><span># output buffers (but this is not needed if the policy is &#39;noeviction&#39;).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory &lt;bytes&gt;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># MAXMEMORY POLICY: how Redis will select what to remove when maxmemory</span></span>
<span class="line"><span># is reached. You can select one from the following behaviors:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># volatile-lru -&gt; Evict using approximated LRU, only keys with an expire set.</span></span>
<span class="line"><span># allkeys-lru -&gt; Evict any key using approximated LRU.</span></span>
<span class="line"><span># volatile-lfu -&gt; Evict using approximated LFU, only keys with an expire set.</span></span>
<span class="line"><span># allkeys-lfu -&gt; Evict any key using approximated LFU.</span></span>
<span class="line"><span># volatile-random -&gt; Remove a random key having an expire set.</span></span>
<span class="line"><span># allkeys-random -&gt; Remove a random key, any key.</span></span>
<span class="line"><span># volatile-ttl -&gt; Remove the key with the nearest expire time (minor TTL)</span></span>
<span class="line"><span># noeviction -&gt; Don&#39;t evict anything, just return an error on write operations.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># LRU means Least Recently Used</span></span>
<span class="line"><span># LFU means Least Frequently Used</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Both LRU, LFU and volatile-ttl are implemented using approximated</span></span>
<span class="line"><span># randomized algorithms.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note: with any of the above policies, when there are no suitable keys for</span></span>
<span class="line"><span># eviction, Redis will return an error on write operations that require</span></span>
<span class="line"><span># more memory. These are usually commands that create new keys, add data or</span></span>
<span class="line"><span># modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,</span></span>
<span class="line"><span># SORT (due to the STORE argument), and EXEC (if the transaction includes any</span></span>
<span class="line"><span># command that requires memory).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory-policy noeviction</span></span>
<span class="line"><span></span></span>
<span class="line"><span># LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated</span></span>
<span class="line"><span># algorithms (in order to save memory), so you can tune it for speed or</span></span>
<span class="line"><span># accuracy. By default Redis will check five keys and pick the one that was</span></span>
<span class="line"><span># used least recently, you can change the sample size using the following</span></span>
<span class="line"><span># configuration directive.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default of 5 produces good enough results. 10 Approximates very closely</span></span>
<span class="line"><span># true LRU but costs more CPU. 3 is faster but not very accurate.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory-samples 5</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Eviction processing is designed to function well with the default setting.</span></span>
<span class="line"><span># If there is an unusually large amount of write traffic, this value may need to</span></span>
<span class="line"><span># be increased.  Decreasing this value may reduce latency at the risk of </span></span>
<span class="line"><span># eviction processing effectiveness</span></span>
<span class="line"><span>#   0 = minimum latency, 10 = default, 100 = process without regard to latency</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># maxmemory-eviction-tenacity 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Starting from Redis 5, by default a replica will ignore its maxmemory setting</span></span>
<span class="line"><span># (unless it is promoted to master after a failover or manually). It means</span></span>
<span class="line"><span># that the eviction of keys will be just handled by the master, sending the</span></span>
<span class="line"><span># DEL commands to the replica as keys evict in the master side.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This behavior ensures that masters and replicas stay consistent, and is usually</span></span>
<span class="line"><span># what you want, however if your replica is writable, or you want the replica</span></span>
<span class="line"><span># to have a different memory setting, and you are sure all the writes performed</span></span>
<span class="line"><span># to the replica are idempotent, then you may change this default (but be sure</span></span>
<span class="line"><span># to understand what you are doing).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that since the replica by default does not evict, it may end using more</span></span>
<span class="line"><span># memory than the one set via maxmemory (there are certain buffers that may</span></span>
<span class="line"><span># be larger on the replica, or data structures may sometimes take more memory</span></span>
<span class="line"><span># and so forth). So make sure you monitor your replicas and make sure they</span></span>
<span class="line"><span># have enough memory to never hit a real out-of-memory condition before the</span></span>
<span class="line"><span># master hits the configured maxmemory setting.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># replica-ignore-maxmemory yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis reclaims expired keys in two ways: upon access when those keys are</span></span>
<span class="line"><span># found to be expired, and also in background, in what is called the</span></span>
<span class="line"><span># &quot;active expire key&quot;. The key space is slowly and interactively scanned</span></span>
<span class="line"><span># looking for expired keys to reclaim, so that it is possible to free memory</span></span>
<span class="line"><span># of keys that are expired and will never be accessed again in a short time.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default effort of the expire cycle will try to avoid having more than</span></span>
<span class="line"><span># ten percent of expired keys still in memory, and will try to avoid consuming</span></span>
<span class="line"><span># more than 25% of total memory and to add latency to the system. However</span></span>
<span class="line"><span># it is possible to increase the expire &quot;effort&quot; that is normally set to</span></span>
<span class="line"><span># &quot;1&quot;, to a greater value, up to the value &quot;10&quot;. At its maximum value the</span></span>
<span class="line"><span># system will use more CPU, longer cycles (and technically may introduce</span></span>
<span class="line"><span># more latency), and will tolerate less already expired keys still present</span></span>
<span class="line"><span># in the system. It&#39;s a tradeoff between memory, CPU and latency.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># active-expire-effort 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################# LAZY FREEING ####################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis has two primitives to delete keys. One is called DEL and is a blocking</span></span>
<span class="line"><span># deletion of the object. It means that the server stops processing new commands</span></span>
<span class="line"><span># in order to reclaim all the memory associated with an object in a synchronous</span></span>
<span class="line"><span># way. If the key deleted is associated with a small object, the time needed</span></span>
<span class="line"><span># in order to execute the DEL command is very small and comparable to most other</span></span>
<span class="line"><span># O(1) or O(log_N) commands in Redis. However if the key is associated with an</span></span>
<span class="line"><span># aggregated value containing millions of elements, the server can block for</span></span>
<span class="line"><span># a long time (even seconds) in order to complete the operation.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For the above reasons Redis also offers non blocking deletion primitives</span></span>
<span class="line"><span># such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and</span></span>
<span class="line"><span># FLUSHDB commands, in order to reclaim memory in background. Those commands</span></span>
<span class="line"><span># are executed in constant time. Another thread will incrementally free the</span></span>
<span class="line"><span># object in the background as fast as possible.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.</span></span>
<span class="line"><span># It&#39;s up to the design of the application to understand when it is a good</span></span>
<span class="line"><span># idea to use one or the other. However the Redis server sometimes has to</span></span>
<span class="line"><span># delete keys or flush the whole database as a side effect of other operations.</span></span>
<span class="line"><span># Specifically Redis deletes objects independently of a user call in the</span></span>
<span class="line"><span># following scenarios:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) On eviction, because of the maxmemory and maxmemory policy configurations,</span></span>
<span class="line"><span>#    in order to make room for new data, without going over the specified</span></span>
<span class="line"><span>#    memory limit.</span></span>
<span class="line"><span># 2) Because of expire: when a key with an associated time to live (see the</span></span>
<span class="line"><span>#    EXPIRE command) must be deleted from memory.</span></span>
<span class="line"><span># 3) Because of a side effect of a command that stores data on a key that may</span></span>
<span class="line"><span>#    already exist. For example the RENAME command may delete the old key</span></span>
<span class="line"><span>#    content when it is replaced with another one. Similarly SUNIONSTORE</span></span>
<span class="line"><span>#    or SORT with STORE option may delete existing keys. The SET command</span></span>
<span class="line"><span>#    itself removes any old content of the specified key in order to replace</span></span>
<span class="line"><span>#    it with the specified string.</span></span>
<span class="line"><span># 4) During replication, when a replica performs a full resynchronization with</span></span>
<span class="line"><span>#    its master, the content of the whole database is removed in order to</span></span>
<span class="line"><span>#    load the RDB file just transferred.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In all the above cases the default is to delete objects in a blocking way,</span></span>
<span class="line"><span># like if DEL was called. However you can configure each case specifically</span></span>
<span class="line"><span># in order to instead release memory in a non-blocking way like if UNLINK</span></span>
<span class="line"><span># was called, using the following configuration directives.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>lazyfree-lazy-eviction no</span></span>
<span class="line"><span>lazyfree-lazy-expire no</span></span>
<span class="line"><span>lazyfree-lazy-server-del no</span></span>
<span class="line"><span>replica-lazy-flush no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># It is also possible, for the case when to replace the user code DEL calls</span></span>
<span class="line"><span># with UNLINK calls is not easy, to modify the default behavior of the DEL</span></span>
<span class="line"><span># command to act exactly like UNLINK, using the following configuration</span></span>
<span class="line"><span># directive:</span></span>
<span class="line"><span></span></span>
<span class="line"><span>lazyfree-lazy-user-del no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous</span></span>
<span class="line"><span># deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the</span></span>
<span class="line"><span># commands. When neither flag is passed, this directive will be used to determine</span></span>
<span class="line"><span># if the data should be deleted asynchronously.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>lazyfree-lazy-user-flush no</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ THREADED I/O #################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis is mostly single threaded, however there are certain threaded</span></span>
<span class="line"><span># operations such as UNLINK, slow I/O accesses and other things that are</span></span>
<span class="line"><span># performed on side threads.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Now it is also possible to handle Redis clients socket reads and writes</span></span>
<span class="line"><span># in different I/O threads. Since especially writing is so slow, normally</span></span>
<span class="line"><span># Redis users use pipelining in order to speed up the Redis performances per</span></span>
<span class="line"><span># core, and spawn multiple instances in order to scale more. Using I/O</span></span>
<span class="line"><span># threads it is possible to easily speedup two times Redis without resorting</span></span>
<span class="line"><span># to pipelining nor sharding of the instance.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default threading is disabled, we suggest enabling it only in machines</span></span>
<span class="line"><span># that have at least 4 or more cores, leaving at least one spare core.</span></span>
<span class="line"><span># Using more than 8 threads is unlikely to help much. We also recommend using</span></span>
<span class="line"><span># threaded I/O only if you actually have performance problems, with Redis</span></span>
<span class="line"><span># instances being able to use a quite big percentage of CPU time, otherwise</span></span>
<span class="line"><span># there is no point in using this feature.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># So for instance if you have a four cores boxes, try to use 2 or 3 I/O</span></span>
<span class="line"><span># threads, if you have a 8 cores, try to use 6 threads. In order to</span></span>
<span class="line"><span># enable I/O threads use the following configuration directive:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># io-threads 4</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Setting io-threads to 1 will just use the main thread as usual.</span></span>
<span class="line"><span># When I/O threads are enabled, we only use threads for writes, that is</span></span>
<span class="line"><span># to thread the write(2) syscall and transfer the client buffers to the</span></span>
<span class="line"><span># socket. However it is also possible to enable threading of reads and</span></span>
<span class="line"><span># protocol parsing using the following configuration directive, by setting</span></span>
<span class="line"><span># it to yes:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># io-threads-do-reads no</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Usually threading reads doesn&#39;t help much.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE 1: This configuration directive cannot be changed at runtime via</span></span>
<span class="line"><span># CONFIG SET. Aso this feature currently does not work when SSL is</span></span>
<span class="line"><span># enabled.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE 2: If you want to test the Redis speedup using redis-benchmark, make</span></span>
<span class="line"><span># sure you also run the benchmark itself in threaded mode, using the</span></span>
<span class="line"><span># --threads option to match the number of Redis threads, otherwise you&#39;ll not</span></span>
<span class="line"><span># be able to notice the improvements.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################ KERNEL OOM CONTROL ##############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># On Linux, it is possible to hint the kernel OOM killer on what processes</span></span>
<span class="line"><span># should be killed first when out of memory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Enabling this feature makes Redis actively control the oom_score_adj value</span></span>
<span class="line"><span># for all its processes, depending on their role. The default scores will</span></span>
<span class="line"><span># attempt to have background child processes killed before all others, and</span></span>
<span class="line"><span># replicas killed before masters.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis supports three options:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># no:       Don&#39;t make changes to oom-score-adj (default).</span></span>
<span class="line"><span># yes:      Alias to &quot;relative&quot; see below.</span></span>
<span class="line"><span># absolute: Values in oom-score-adj-values are written as is to the kernel.</span></span>
<span class="line"><span># relative: Values are used relative to the initial value of oom_score_adj when</span></span>
<span class="line"><span>#           the server starts and are then clamped to a range of -1000 to 1000.</span></span>
<span class="line"><span>#           Because typically the initial value is 0, they will often match the</span></span>
<span class="line"><span>#           absolute values.</span></span>
<span class="line"><span>oom-score-adj no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When oom-score-adj is used, this directive controls the specific values used</span></span>
<span class="line"><span># for master, replica and background child processes. Values range -2000 to</span></span>
<span class="line"><span># 2000 (higher means more likely to be killed).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)</span></span>
<span class="line"><span># can freely increase their value, but not decrease it below its initial</span></span>
<span class="line"><span># settings. This means that setting oom-score-adj to &quot;relative&quot; and setting the</span></span>
<span class="line"><span># oom-score-adj-values to positive values will always succeed.</span></span>
<span class="line"><span>oom-score-adj-values 0 200 800</span></span>
<span class="line"><span></span></span>
<span class="line"><span></span></span>
<span class="line"><span>#################### KERNEL transparent hugepage CONTROL ######################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Usually the kernel Transparent Huge Pages control is set to &quot;madvise&quot; or</span></span>
<span class="line"><span># or &quot;never&quot; by default (/sys/kernel/mm/transparent_hugepage/enabled), in which</span></span>
<span class="line"><span># case this config has no effect. On systems in which it is set to &quot;always&quot;,</span></span>
<span class="line"><span># redis will attempt to disable it specifically for the redis process in order</span></span>
<span class="line"><span># to avoid latency problems specifically with fork(2) and CoW.</span></span>
<span class="line"><span># If for some reason you prefer to keep it enabled, you can set this config to</span></span>
<span class="line"><span># &quot;no&quot; and the kernel global to &quot;always&quot;.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>disable-thp yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################## APPEND ONLY MODE ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis asynchronously dumps the dataset on disk. This mode is</span></span>
<span class="line"><span># good enough in many applications, but an issue with the Redis process or</span></span>
<span class="line"><span># a power outage may result into a few minutes of writes lost (depending on</span></span>
<span class="line"><span># the configured save points).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Append Only File is an alternative persistence mode that provides</span></span>
<span class="line"><span># much better durability. For instance using the default data fsync policy</span></span>
<span class="line"><span># (see later in the config file) Redis can lose just one second of writes in a</span></span>
<span class="line"><span># dramatic event like a server power outage, or a single write if something</span></span>
<span class="line"><span># wrong with the Redis process itself happens, but the operating system is</span></span>
<span class="line"><span># still running correctly.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># AOF and RDB persistence can be enabled at the same time without problems.</span></span>
<span class="line"><span># If the AOF is enabled on startup Redis will load the AOF, that is the file</span></span>
<span class="line"><span># with the better durability guarantees.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Please check https://redis.io/topics/persistence for more information.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>appendonly no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The name of the append only file (default: &quot;appendonly.aof&quot;)</span></span>
<span class="line"><span></span></span>
<span class="line"><span>appendfilename &quot;appendonly.aof&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The fsync() call tells the Operating System to actually write data on disk</span></span>
<span class="line"><span># instead of waiting for more data in the output buffer. Some OS will really flush</span></span>
<span class="line"><span># data on disk, some other OS will just try to do it ASAP.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis supports three different modes:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># no: don&#39;t fsync, just let the OS flush the data when it wants. Faster.</span></span>
<span class="line"><span># always: fsync after every write to the append only log. Slow, Safest.</span></span>
<span class="line"><span># everysec: fsync only one time every second. Compromise.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is &quot;everysec&quot;, as that&#39;s usually the right compromise between</span></span>
<span class="line"><span># speed and data safety. It&#39;s up to you to understand if you can relax this to</span></span>
<span class="line"><span># &quot;no&quot; that will let the operating system flush the output buffer when</span></span>
<span class="line"><span># it wants, for better performances (but if you can live with the idea of</span></span>
<span class="line"><span># some data loss consider the default persistence mode that&#39;s snapshotting),</span></span>
<span class="line"><span># or on the contrary, use &quot;always&quot; that&#39;s very slow but a bit safer than</span></span>
<span class="line"><span># everysec.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># More details please check the following article:</span></span>
<span class="line"><span># http://antirez.com/post/redis-persistence-demystified.html</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If unsure, use &quot;everysec&quot;.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># appendfsync always</span></span>
<span class="line"><span>appendfsync everysec</span></span>
<span class="line"><span># appendfsync no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When the AOF fsync policy is set to always or everysec, and a background</span></span>
<span class="line"><span># saving process (a background save or AOF log background rewriting) is</span></span>
<span class="line"><span># performing a lot of I/O against the disk, in some Linux configurations</span></span>
<span class="line"><span># Redis may block too long on the fsync() call. Note that there is no fix for</span></span>
<span class="line"><span># this currently, as even performing fsync in a different thread will block</span></span>
<span class="line"><span># our synchronous write(2) call.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In order to mitigate this problem it&#39;s possible to use the following option</span></span>
<span class="line"><span># that will prevent fsync() from being called in the main process while a</span></span>
<span class="line"><span># BGSAVE or BGREWRITEAOF is in progress.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This means that while another child is saving, the durability of Redis is</span></span>
<span class="line"><span># the same as &quot;appendfsync none&quot;. In practical terms, this means that it is</span></span>
<span class="line"><span># possible to lose up to 30 seconds of log in the worst scenario (with the</span></span>
<span class="line"><span># default Linux settings).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you have latency problems turn this to &quot;yes&quot;. Otherwise leave it as</span></span>
<span class="line"><span># &quot;no&quot; that is the safest pick from the point of view of durability.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>no-appendfsync-on-rewrite no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Automatic rewrite of the append only file.</span></span>
<span class="line"><span># Redis is able to automatically rewrite the log file implicitly calling</span></span>
<span class="line"><span># BGREWRITEAOF when the AOF log size grows by the specified percentage.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is how it works: Redis remembers the size of the AOF file after the</span></span>
<span class="line"><span># latest rewrite (if no rewrite has happened since the restart, the size of</span></span>
<span class="line"><span># the AOF at startup is used).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This base size is compared to the current size. If the current size is</span></span>
<span class="line"><span># bigger than the specified percentage, the rewrite is triggered. Also</span></span>
<span class="line"><span># you need to specify a minimal size for the AOF file to be rewritten, this</span></span>
<span class="line"><span># is useful to avoid rewriting the AOF file even if the percentage increase</span></span>
<span class="line"><span># is reached but it is still pretty small.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Specify a percentage of zero in order to disable the automatic AOF</span></span>
<span class="line"><span># rewrite feature.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>auto-aof-rewrite-percentage 100</span></span>
<span class="line"><span>auto-aof-rewrite-min-size 64mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># An AOF file may be found to be truncated at the end during the Redis</span></span>
<span class="line"><span># startup process, when the AOF data gets loaded back into memory.</span></span>
<span class="line"><span># This may happen when the system where Redis is running</span></span>
<span class="line"><span># crashes, especially when an ext4 filesystem is mounted without the</span></span>
<span class="line"><span># data=ordered option (however this can&#39;t happen when Redis itself</span></span>
<span class="line"><span># crashes or aborts but the operating system still works correctly).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Redis can either exit with an error when this happens, or load as much</span></span>
<span class="line"><span># data as possible (the default now) and start if the AOF file is found</span></span>
<span class="line"><span># to be truncated at the end. The following option controls this behavior.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If aof-load-truncated is set to yes, a truncated AOF file is loaded and</span></span>
<span class="line"><span># the Redis server starts emitting a log to inform the user of the event.</span></span>
<span class="line"><span># Otherwise if the option is set to no, the server aborts with an error</span></span>
<span class="line"><span># and refuses to start. When the option is set to no, the user is required</span></span>
<span class="line"><span># to fix the AOF file using the &quot;redis-check-aof&quot; utility before restarting</span></span>
<span class="line"><span># the server.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that if the AOF file will be found to be corrupted in the middle</span></span>
<span class="line"><span># the server will still exit with an error. This option only applies when</span></span>
<span class="line"><span># Redis will try to read more data from the AOF file but not enough bytes</span></span>
<span class="line"><span># will be found.</span></span>
<span class="line"><span>aof-load-truncated yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When rewriting the AOF file, Redis is able to use an RDB preamble in the</span></span>
<span class="line"><span># AOF file for faster rewrites and recoveries. When this option is turned</span></span>
<span class="line"><span># on the rewritten AOF file is composed of two different stanzas:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   [RDB file][AOF tail]</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When loading, Redis recognizes that the AOF file starts with the &quot;REDIS&quot;</span></span>
<span class="line"><span># string and loads the prefixed RDB file, then continues loading the AOF</span></span>
<span class="line"><span># tail.</span></span>
<span class="line"><span>aof-use-rdb-preamble yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ LUA SCRIPTING  ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Max execution time of a Lua script in milliseconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the maximum execution time is reached Redis will log that a script is</span></span>
<span class="line"><span># still in execution after the maximum allowed time and will start to</span></span>
<span class="line"><span># reply to queries with an error.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When a long running script exceeds the maximum execution time only the</span></span>
<span class="line"><span># SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be</span></span>
<span class="line"><span># used to stop a script that did not yet call any write commands. The second</span></span>
<span class="line"><span># is the only way to shut down the server in the case a write command was</span></span>
<span class="line"><span># already issued by the script but the user doesn&#39;t want to wait for the natural</span></span>
<span class="line"><span># termination of the script.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set it to 0 or a negative value for unlimited execution without warnings.</span></span>
<span class="line"><span>lua-time-limit 5000</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ REDIS CLUSTER  ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Normal Redis instances can&#39;t be part of a Redis Cluster; only nodes that are</span></span>
<span class="line"><span># started as cluster nodes can. In order to start a Redis instance as a</span></span>
<span class="line"><span># cluster node, enable cluster support by uncommenting the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-enabled yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Every cluster node has a cluster configuration file. This file is not</span></span>
<span class="line"><span># intended to be edited by hand. It is created and updated by Redis nodes.</span></span>
<span class="line"><span># Every Redis Cluster node requires a different cluster configuration file.</span></span>
<span class="line"><span># Make sure that instances running in the same system do not have</span></span>
<span class="line"><span># overlapping cluster configuration file names.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-config-file nodes-6379.conf</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Cluster node timeout is the amount of milliseconds a node must be unreachable</span></span>
<span class="line"><span># for it to be considered in failure state.</span></span>
<span class="line"><span># Most other internal time limits are a multiple of the node timeout.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-node-timeout 15000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># A replica of a failing master will avoid to start a failover if its data</span></span>
<span class="line"><span># looks too old.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There is no simple way for a replica to actually have an exact measure of</span></span>
<span class="line"><span># its &quot;data age&quot;, so the following two checks are performed:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1) If there are multiple replicas able to failover, they exchange messages</span></span>
<span class="line"><span>#    in order to try to give an advantage to the replica with the best</span></span>
<span class="line"><span>#    replication offset (more data from the master processed).</span></span>
<span class="line"><span>#    Replicas will try to get their rank by offset, and apply to the start</span></span>
<span class="line"><span>#    of the failover a delay proportional to their rank.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2) Every single replica computes the time of the last interaction with</span></span>
<span class="line"><span>#    its master. This can be the last ping or command received (if the master</span></span>
<span class="line"><span>#    is still in the &quot;connected&quot; state), or the time that elapsed since the</span></span>
<span class="line"><span>#    disconnection with the master (if the replication link is currently down).</span></span>
<span class="line"><span>#    If the last interaction is too old, the replica will not try to failover</span></span>
<span class="line"><span>#    at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The point &quot;2&quot; can be tuned by user. Specifically a replica will not perform</span></span>
<span class="line"><span># the failover if, since the last interaction with the master, the time</span></span>
<span class="line"><span># elapsed is greater than:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor</span></span>
<span class="line"><span># is 10, and assuming a default repl-ping-replica-period of 10 seconds, the</span></span>
<span class="line"><span># replica will not try to failover if it was not able to talk with the master</span></span>
<span class="line"><span># for longer than 310 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A large cluster-replica-validity-factor may allow replicas with too old data to failover</span></span>
<span class="line"><span># a master, while a too small value may prevent the cluster from being able to</span></span>
<span class="line"><span># elect a replica at all.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For maximum availability, it is possible to set the cluster-replica-validity-factor</span></span>
<span class="line"><span># to a value of 0, which means, that replicas will always try to failover the</span></span>
<span class="line"><span># master regardless of the last time they interacted with the master.</span></span>
<span class="line"><span># (However they&#39;ll always try to apply a delay proportional to their</span></span>
<span class="line"><span># offset rank).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Zero is the only value able to guarantee that when all the partitions heal</span></span>
<span class="line"><span># the cluster will always be able to continue.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-replica-validity-factor 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Cluster replicas are able to migrate to orphaned masters, that are masters</span></span>
<span class="line"><span># that are left without working replicas. This improves the cluster&#39;s ability</span></span>
<span class="line"><span># to resist failures, as otherwise an orphaned master can&#39;t be failed over</span></span>
<span class="line"><span># in case of failure if it has no working replicas.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Replicas migrate to orphaned masters only if there are still at least a</span></span>
<span class="line"><span># given number of other working replicas for their old master. This number</span></span>
<span class="line"><span># is the &quot;migration barrier&quot;. A migration barrier of 1 means that a replica</span></span>
<span class="line"><span># will migrate only if there is at least 1 other working replica for its master</span></span>
<span class="line"><span># and so forth. It usually reflects the number of replicas you want for every</span></span>
<span class="line"><span># master in your cluster.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Default is 1 (replicas migrate only if their masters remain with at least</span></span>
<span class="line"><span># one replica). To disable migration just set it to a very large value or</span></span>
<span class="line"><span># set cluster-allow-replica-migration to &#39;no&#39;.</span></span>
<span class="line"><span># A value of 0 can be set but is useful only for debugging and dangerous</span></span>
<span class="line"><span># in production.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-migration-barrier 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Turning off this option allows the use of less automatic cluster configuration.</span></span>
<span class="line"><span># It both disables migration to orphaned masters and migration from masters</span></span>
<span class="line"><span># that became empty.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Default is &#39;yes&#39; (allow automatic migrations).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-allow-replica-migration yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># By default Redis Cluster nodes stop accepting queries if they detect there</span></span>
<span class="line"><span># is at least one hash slot uncovered (no available node is serving it).</span></span>
<span class="line"><span># This way if the cluster is partially down (for example a range of hash slots</span></span>
<span class="line"><span># are no longer covered) the whole cluster eventually becomes unavailable.</span></span>
<span class="line"><span># It automatically becomes available again as soon as all the slots are covered.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># However sometimes you want the subset of the cluster which is working,</span></span>
<span class="line"><span># to continue to accept queries for the part of the key space that is still</span></span>
<span class="line"><span># covered. In order to do so, just set the cluster-require-full-coverage</span></span>
<span class="line"><span># option to no.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-require-full-coverage yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># This option, when set to yes, prevents replicas from trying to failover its</span></span>
<span class="line"><span># master during master failures. However the replica can still perform a</span></span>
<span class="line"><span># manual failover, if forced to do so.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is useful in different scenarios, especially in the case of multiple</span></span>
<span class="line"><span># data center operations, where we want one side to never be promoted if not</span></span>
<span class="line"><span># in the case of a total DC failure.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-replica-no-failover no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># This option, when set to yes, allows nodes to serve read traffic while</span></span>
<span class="line"><span># the cluster is in a down state, as long as it believes it owns the slots.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># This is useful for two cases.  The first case is for when an application </span></span>
<span class="line"><span># doesn&#39;t require consistency of data during node failures or network partitions.</span></span>
<span class="line"><span># One example of this is a cache, where as long as the node has the data it</span></span>
<span class="line"><span># should be able to serve it. </span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The second use case is for configurations that don&#39;t meet the recommended  </span></span>
<span class="line"><span># three shards but want to enable cluster mode and scale later. A </span></span>
<span class="line"><span># master outage in a 1 or 2 shard configuration causes a read/write outage to the</span></span>
<span class="line"><span># entire cluster without this option set, with it set there is only a write outage.</span></span>
<span class="line"><span># Without a quorum of masters, slot ownership will not change automatically. </span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-allow-reads-when-down no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In order to setup your cluster make sure to read the documentation</span></span>
<span class="line"><span># available at https://redis.io web site.</span></span>
<span class="line"><span></span></span>
<span class="line"><span>########################## CLUSTER DOCKER/NAT support  ########################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In certain deployments, Redis Cluster nodes address discovery fails, because</span></span>
<span class="line"><span># addresses are NAT-ted or because ports are forwarded (the typical case is</span></span>
<span class="line"><span># Docker and other containers).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In order to make Redis Cluster work in such environments, a static</span></span>
<span class="line"><span># configuration where each node knows its public address is needed. The</span></span>
<span class="line"><span># following four options are used for this scope, and are:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># * cluster-announce-ip</span></span>
<span class="line"><span># * cluster-announce-port</span></span>
<span class="line"><span># * cluster-announce-tls-port</span></span>
<span class="line"><span># * cluster-announce-bus-port</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Each instructs the node about its address, client ports (for connections</span></span>
<span class="line"><span># without and with TLS) and cluster message bus port. The information is then</span></span>
<span class="line"><span># published in the header of the bus packets so that other nodes will be able to</span></span>
<span class="line"><span># correctly map the address of the node publishing the information.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set</span></span>
<span class="line"><span># to zero, then cluster-announce-port refers to the TLS port. Note also that</span></span>
<span class="line"><span># cluster-announce-tls-port has no effect if cluster-tls is set to no.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If the above options are not used, the normal Redis Cluster auto-detection</span></span>
<span class="line"><span># will be used instead.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that when remapped, the bus port may not be at the fixed offset of</span></span>
<span class="line"><span># clients port + 10000, so you can specify any port and bus-port depending</span></span>
<span class="line"><span># on how they get remapped. If the bus-port is not set, a fixed offset of</span></span>
<span class="line"><span># 10000 will be used as usual.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Example:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># cluster-announce-ip 10.1.1.5</span></span>
<span class="line"><span># cluster-announce-tls-port 6379</span></span>
<span class="line"><span># cluster-announce-port 0</span></span>
<span class="line"><span># cluster-announce-bus-port 6380</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################## SLOW LOG ###################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The Redis Slow Log is a system to log queries that exceeded a specified</span></span>
<span class="line"><span># execution time. The execution time does not include the I/O operations</span></span>
<span class="line"><span># like talking with the client, sending the reply and so forth,</span></span>
<span class="line"><span># but just the time needed to actually execute the command (this is the only</span></span>
<span class="line"><span># stage of command execution where the thread is blocked and can not serve</span></span>
<span class="line"><span># other requests in the meantime).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># You can configure the slow log with two parameters: one tells Redis</span></span>
<span class="line"><span># what is the execution time, in microseconds, to exceed in order for the</span></span>
<span class="line"><span># command to get logged, and the other parameter is the length of the</span></span>
<span class="line"><span># slow log. When a new command is logged the oldest one is removed from the</span></span>
<span class="line"><span># queue of logged commands.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The following time is expressed in microseconds, so 1000000 is equivalent</span></span>
<span class="line"><span># to one second. Note that a negative number disables the slow log, while</span></span>
<span class="line"><span># a value of zero forces the logging of every command.</span></span>
<span class="line"><span>slowlog-log-slower-than 10000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># There is no limit to this length. Just be aware that it will consume memory.</span></span>
<span class="line"><span># You can reclaim memory used by the slow log with SLOWLOG RESET.</span></span>
<span class="line"><span>slowlog-max-len 128</span></span>
<span class="line"><span></span></span>
<span class="line"><span>################################ LATENCY MONITOR ##############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The Redis latency monitoring subsystem samples different operations</span></span>
<span class="line"><span># at runtime in order to collect data related to possible sources of</span></span>
<span class="line"><span># latency of a Redis instance.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Via the LATENCY command this information is available to the user that can</span></span>
<span class="line"><span># print graphs and obtain reports.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The system only logs operations that were performed in a time equal or</span></span>
<span class="line"><span># greater than the amount of milliseconds specified via the</span></span>
<span class="line"><span># latency-monitor-threshold configuration directive. When its value is set</span></span>
<span class="line"><span># to zero, the latency monitor is turned off.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default latency monitoring is disabled since it is mostly not needed</span></span>
<span class="line"><span># if you don&#39;t have latency issues, and collecting data has a performance</span></span>
<span class="line"><span># impact, that while very small, can be measured under big load. Latency</span></span>
<span class="line"><span># monitoring can easily be enabled at runtime using the command</span></span>
<span class="line"><span># &quot;CONFIG SET latency-monitor-threshold &lt;milliseconds&gt;&quot; if needed.</span></span>
<span class="line"><span>latency-monitor-threshold 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################# EVENT NOTIFICATION ##############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis can notify Pub/Sub clients about events happening in the key space.</span></span>
<span class="line"><span># This feature is documented at https://redis.io/topics/notifications</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># For instance if keyspace events notification is enabled, and a client</span></span>
<span class="line"><span># performs a DEL operation on key &quot;foo&quot; stored in the Database 0, two</span></span>
<span class="line"><span># messages will be published via Pub/Sub:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># PUBLISH __keyspace@0__:foo del</span></span>
<span class="line"><span># PUBLISH __keyevent@0__:del foo</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># It is possible to select the events that Redis will notify among a set</span></span>
<span class="line"><span># of classes. Every class is identified by a single character:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  K     Keyspace events, published with __keyspace@&lt;db&gt;__ prefix.</span></span>
<span class="line"><span>#  E     Keyevent events, published with __keyevent@&lt;db&gt;__ prefix.</span></span>
<span class="line"><span>#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...</span></span>
<span class="line"><span>#  $     String commands</span></span>
<span class="line"><span>#  l     List commands</span></span>
<span class="line"><span>#  s     Set commands</span></span>
<span class="line"><span>#  h     Hash commands</span></span>
<span class="line"><span>#  z     Sorted set commands</span></span>
<span class="line"><span>#  x     Expired events (events generated every time a key expires)</span></span>
<span class="line"><span>#  e     Evicted events (events generated when a key is evicted for maxmemory)</span></span>
<span class="line"><span>#  t     Stream commands</span></span>
<span class="line"><span>#  d     Module key type events</span></span>
<span class="line"><span>#  m     Key-miss events (Note: It is not included in the &#39;A&#39; class)</span></span>
<span class="line"><span>#  A     Alias for g$lshzxetd, so that the &quot;AKE&quot; string means all the events</span></span>
<span class="line"><span>#        (Except key-miss events which are excluded from &#39;A&#39; due to their</span></span>
<span class="line"><span>#         unique nature).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  The &quot;notify-keyspace-events&quot; takes as argument a string that is composed</span></span>
<span class="line"><span>#  of zero or multiple characters. The empty string means that notifications</span></span>
<span class="line"><span>#  are disabled.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  Example: to enable list and generic events, from the point of view of the</span></span>
<span class="line"><span>#           event name, use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  notify-keyspace-events Elg</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  Example 2: to get the stream of the expired keys subscribing to channel</span></span>
<span class="line"><span>#             name __keyevent@0__:expired use:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  notify-keyspace-events Ex</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#  By default all notifications are disabled because most users don&#39;t need</span></span>
<span class="line"><span>#  this feature and the feature has some overhead. Note that if you don&#39;t</span></span>
<span class="line"><span>#  specify at least one of K or E, no events will be delivered.</span></span>
<span class="line"><span>notify-keyspace-events &quot;&quot;</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################### GOPHER SERVER #################################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis contains an implementation of the Gopher protocol, as specified in</span></span>
<span class="line"><span># the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Gopher protocol was very popular in the late &#39;90s. It is an alternative</span></span>
<span class="line"><span># to the web, and the implementation both server and client side is so simple</span></span>
<span class="line"><span># that the Redis server has just 100 lines of code in order to implement this</span></span>
<span class="line"><span># support.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># What do you do with Gopher nowadays? Well Gopher never *really* died, and</span></span>
<span class="line"><span># lately there is a movement in order for the Gopher more hierarchical content</span></span>
<span class="line"><span># composed of just plain text documents to be resurrected. Some want a simpler</span></span>
<span class="line"><span># internet, others believe that the mainstream internet became too much</span></span>
<span class="line"><span># controlled, and it&#39;s cool to create an alternative space for people that</span></span>
<span class="line"><span># want a bit of fresh air.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol</span></span>
<span class="line"><span># as a gift.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># --- HOW IT WORKS? ---</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The Redis Gopher support uses the inline protocol of Redis, and specifically</span></span>
<span class="line"><span># two kinds of inline requests that were anyway illegal: an empty request</span></span>
<span class="line"><span># or any request that starts with &quot;/&quot; (there are no Redis commands starting</span></span>
<span class="line"><span># with such a slash). Normal RESP2/RESP3 requests are completely out of the</span></span>
<span class="line"><span># path of the Gopher protocol implementation and are served as usual as well.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you open a connection to Redis when Gopher is enabled and send it</span></span>
<span class="line"><span># a string like &quot;/foo&quot;, if there is a key named &quot;/foo&quot; it is served via the</span></span>
<span class="line"><span># Gopher protocol.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># In order to create a real Gopher &quot;hole&quot; (the name of a Gopher site in Gopher</span></span>
<span class="line"><span># talking), you likely need a script like the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   https://github.com/antirez/gopher2redis</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># --- SECURITY WARNING ---</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If you plan to put Redis on the internet in a publicly accessible address</span></span>
<span class="line"><span># to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.</span></span>
<span class="line"><span># Once a password is set:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   1. The Gopher server (when enabled, not by default) will still serve</span></span>
<span class="line"><span>#      content via Gopher.</span></span>
<span class="line"><span>#   2. However other commands cannot be called before the client will</span></span>
<span class="line"><span>#      authenticate.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># So use the &#39;requirepass&#39; option to protect your instance.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Note that Gopher is not currently supported when &#39;io-threads-do-reads&#39;</span></span>
<span class="line"><span># is enabled.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># To enable Gopher support, uncomment the following line and set the option</span></span>
<span class="line"><span># from no (the default) to yes.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># gopher-enabled no</span></span>
<span class="line"><span></span></span>
<span class="line"><span>############################### ADVANCED CONFIG ###############################</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Hashes are encoded using a memory efficient data structure when they have a</span></span>
<span class="line"><span># small number of entries, and the biggest entry does not exceed a given</span></span>
<span class="line"><span># threshold. These thresholds can be configured using the following directives.</span></span>
<span class="line"><span>hash-max-ziplist-entries 512</span></span>
<span class="line"><span>hash-max-ziplist-value 64</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Lists are also encoded in a special way to save a lot of space.</span></span>
<span class="line"><span># The number of entries allowed per internal list node can be specified</span></span>
<span class="line"><span># as a fixed maximum size or a maximum number of elements.</span></span>
<span class="line"><span># For a fixed maximum size, use -5 through -1, meaning:</span></span>
<span class="line"><span># -5: max size: 64 Kb  &lt;-- not recommended for normal workloads</span></span>
<span class="line"><span># -4: max size: 32 Kb  &lt;-- not recommended</span></span>
<span class="line"><span># -3: max size: 16 Kb  &lt;-- probably not recommended</span></span>
<span class="line"><span># -2: max size: 8 Kb   &lt;-- good</span></span>
<span class="line"><span># -1: max size: 4 Kb   &lt;-- good</span></span>
<span class="line"><span># Positive numbers mean store up to _exactly_ that number of elements</span></span>
<span class="line"><span># per list node.</span></span>
<span class="line"><span># The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),</span></span>
<span class="line"><span># but if your use case is unique, adjust the settings as necessary.</span></span>
<span class="line"><span>list-max-ziplist-size -2</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Lists may also be compressed.</span></span>
<span class="line"><span># Compress depth is the number of quicklist ziplist nodes from *each* side of</span></span>
<span class="line"><span># the list to *exclude* from compression.  The head and tail of the list</span></span>
<span class="line"><span># are always uncompressed for fast push/pop operations.  Settings are:</span></span>
<span class="line"><span># 0: disable all list compression</span></span>
<span class="line"><span># 1: depth 1 means &quot;don&#39;t start compressing until after 1 node into the list,</span></span>
<span class="line"><span>#    going from either the head or tail&quot;</span></span>
<span class="line"><span>#    So: [head]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[tail]</span></span>
<span class="line"><span>#    [head], [tail] will always be uncompressed; inner nodes will compress.</span></span>
<span class="line"><span># 2: [head]-&gt;[next]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[prev]-&gt;[tail]</span></span>
<span class="line"><span>#    2 here means: don&#39;t compress head or head-&gt;next or tail-&gt;prev or tail,</span></span>
<span class="line"><span>#    but compress all nodes between them.</span></span>
<span class="line"><span># 3: [head]-&gt;[next]-&gt;[next]-&gt;node-&gt;node-&gt;...-&gt;node-&gt;[prev]-&gt;[prev]-&gt;[tail]</span></span>
<span class="line"><span># etc.</span></span>
<span class="line"><span>list-compress-depth 0</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Sets have a special encoding in just one case: when a set is composed</span></span>
<span class="line"><span># of just strings that happen to be integers in radix 10 in the range</span></span>
<span class="line"><span># of 64 bit signed integers.</span></span>
<span class="line"><span># The following configuration setting sets the limit in the size of the</span></span>
<span class="line"><span># set in order to use this special memory saving encoding.</span></span>
<span class="line"><span>set-max-intset-entries 512</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Similarly to hashes and lists, sorted sets are also specially encoded in</span></span>
<span class="line"><span># order to save a lot of space. This encoding is only used when the length and</span></span>
<span class="line"><span># elements of a sorted set are below the following limits:</span></span>
<span class="line"><span>zset-max-ziplist-entries 128</span></span>
<span class="line"><span>zset-max-ziplist-value 64</span></span>
<span class="line"><span></span></span>
<span class="line"><span># HyperLogLog sparse representation bytes limit. The limit includes the</span></span>
<span class="line"><span># 16 bytes header. When a HyperLogLog using the sparse representation crosses</span></span>
<span class="line"><span># this limit, it is converted into the dense representation.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A value greater than 16000 is totally useless, since at that point the</span></span>
<span class="line"><span># dense representation is more memory efficient.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The suggested value is ~ 3000 in order to have the benefits of</span></span>
<span class="line"><span># the space efficient encoding without slowing down too much PFADD,</span></span>
<span class="line"><span># which is O(N) with the sparse encoding. The value can be raised to</span></span>
<span class="line"><span># ~ 10000 when CPU is not a concern, but space is, and the data set is</span></span>
<span class="line"><span># composed of many HyperLogLogs with cardinality in the 0 - 15000 range.</span></span>
<span class="line"><span>hll-sparse-max-bytes 3000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Streams macro node max size / items. The stream data structure is a radix</span></span>
<span class="line"><span># tree of big nodes that encode multiple items inside. Using this configuration</span></span>
<span class="line"><span># it is possible to configure how big a single node can be in bytes, and the</span></span>
<span class="line"><span># maximum number of items it may contain before switching to a new node when</span></span>
<span class="line"><span># appending new stream entries. If any of the following settings are set to</span></span>
<span class="line"><span># zero, the limit is ignored, so for instance it is possible to set just a</span></span>
<span class="line"><span># max entries limit by setting max-bytes to 0 and max-entries to the desired</span></span>
<span class="line"><span># value.</span></span>
<span class="line"><span>stream-node-max-bytes 4096</span></span>
<span class="line"><span>stream-node-max-entries 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in</span></span>
<span class="line"><span># order to help rehashing the main Redis hash table (the one mapping top-level</span></span>
<span class="line"><span># keys to values). The hash table implementation Redis uses (see dict.c)</span></span>
<span class="line"><span># performs a lazy rehashing: the more operations you run into a hash table</span></span>
<span class="line"><span># that is rehashing, the more rehashing &quot;steps&quot; are performed, so if the</span></span>
<span class="line"><span># server is idle the rehashing is never complete and some more memory is used</span></span>
<span class="line"><span># by the hash table.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default is to use this millisecond 10 times every second in order to</span></span>
<span class="line"><span># actively rehash the main dictionaries, freeing memory when possible.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># If unsure:</span></span>
<span class="line"><span># use &quot;activerehashing no&quot; if you have hard latency requirements and it is</span></span>
<span class="line"><span># not a good thing in your environment that Redis can reply from time to time</span></span>
<span class="line"><span># to queries with 2 milliseconds delay.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># use &quot;activerehashing yes&quot; if you don&#39;t have such hard requirements but</span></span>
<span class="line"><span># want to free memory asap when possible.</span></span>
<span class="line"><span>activerehashing yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># The client output buffer limits can be used to force disconnection of clients</span></span>
<span class="line"><span># that are not reading data from the server fast enough for some reason (a</span></span>
<span class="line"><span># common reason is that a Pub/Sub client can&#39;t consume messages as fast as the</span></span>
<span class="line"><span># publisher can produce them).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The limit can be set differently for the three different classes of clients:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># normal -&gt; normal clients including MONITOR clients</span></span>
<span class="line"><span># replica  -&gt; replica clients</span></span>
<span class="line"><span># pubsub -&gt; clients subscribed to at least one pubsub channel or pattern</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The syntax of every client-output-buffer-limit directive is the following:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># client-output-buffer-limit &lt;class&gt; &lt;hard limit&gt; &lt;soft limit&gt; &lt;soft seconds&gt;</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># A client is immediately disconnected once the hard limit is reached, or if</span></span>
<span class="line"><span># the soft limit is reached and remains reached for the specified number of</span></span>
<span class="line"><span># seconds (continuously).</span></span>
<span class="line"><span># So for instance if the hard limit is 32 megabytes and the soft limit is</span></span>
<span class="line"><span># 16 megabytes / 10 seconds, the client will get disconnected immediately</span></span>
<span class="line"><span># if the size of the output buffers reach 32 megabytes, but will also get</span></span>
<span class="line"><span># disconnected if the client reaches 16 megabytes and continuously overcomes</span></span>
<span class="line"><span># the limit for 10 seconds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default normal clients are not limited because they don&#39;t receive data</span></span>
<span class="line"><span># without asking (in a push way), but just after a request, so only</span></span>
<span class="line"><span># asynchronous clients may create a scenario where data is requested faster</span></span>
<span class="line"><span># than it can be read.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Instead there is a default limit for pubsub and replica clients, since</span></span>
<span class="line"><span># subscribers and replicas receive data in a push fashion.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Both the hard or the soft limit can be disabled by setting them to zero.</span></span>
<span class="line"><span>client-output-buffer-limit normal 0 0 0</span></span>
<span class="line"><span>client-output-buffer-limit replica 256mb 64mb 60</span></span>
<span class="line"><span>client-output-buffer-limit pubsub 32mb 8mb 60</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Client query buffers accumulate new commands. They are limited to a fixed</span></span>
<span class="line"><span># amount by default in order to avoid that a protocol desynchronization (for</span></span>
<span class="line"><span># instance due to a bug in the client) will lead to unbound memory usage in</span></span>
<span class="line"><span># the query buffer. However you can configure it here if you have very special</span></span>
<span class="line"><span># needs, such as huge multi/exec requests or alike.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># client-query-buffer-limit 1gb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In the Redis protocol, bulk requests, that are, elements representing single</span></span>
<span class="line"><span># strings, are normally limited to 512 mb. However you can change this limit</span></span>
<span class="line"><span># here, but must be 1mb or greater</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># proto-max-bulk-len 512mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis calls an internal function to perform many background tasks, like</span></span>
<span class="line"><span># closing connections of clients in timeout, purging expired keys that are</span></span>
<span class="line"><span># never requested, and so forth.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Not all tasks are performed with the same frequency, but Redis checks for</span></span>
<span class="line"><span># tasks to perform according to the specified &quot;hz&quot; value.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># By default &quot;hz&quot; is set to 10. Raising the value will use more CPU when</span></span>
<span class="line"><span># Redis is idle, but at the same time will make Redis more responsive when</span></span>
<span class="line"><span># there are many keys expiring at the same time, and timeouts may be</span></span>
<span class="line"><span># handled with more precision.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The range is between 1 and 500, however a value over 100 is usually not</span></span>
<span class="line"><span># a good idea. Most users should use the default of 10 and raise this up to</span></span>
<span class="line"><span># 100 only in environments where very low latency is required.</span></span>
<span class="line"><span>hz 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Normally it is useful to have an HZ value which is proportional to the</span></span>
<span class="line"><span># number of clients connected. This is useful in order, for instance, to</span></span>
<span class="line"><span># avoid too many clients are processed for each background task invocation</span></span>
<span class="line"><span># in order to avoid latency spikes.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Since the default HZ value by default is conservatively set to 10, Redis</span></span>
<span class="line"><span># offers, and enables by default, the ability to use an adaptive HZ value</span></span>
<span class="line"><span># which will temporarily raise when there are many connected clients.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># When dynamic HZ is enabled, the actual configured HZ will be used</span></span>
<span class="line"><span># as a baseline, but multiples of the configured HZ value will be actually</span></span>
<span class="line"><span># used as needed once more clients are connected. In this way an idle</span></span>
<span class="line"><span># instance will use very little CPU time while a busy instance will be</span></span>
<span class="line"><span># more responsive.</span></span>
<span class="line"><span>dynamic-hz yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When a child rewrites the AOF file, if the following option is enabled</span></span>
<span class="line"><span># the file will be fsync-ed every 32 MB of data generated. This is useful</span></span>
<span class="line"><span># in order to commit the file to the disk more incrementally and avoid</span></span>
<span class="line"><span># big latency spikes.</span></span>
<span class="line"><span>aof-rewrite-incremental-fsync yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># When redis saves RDB file, if the following option is enabled</span></span>
<span class="line"><span># the file will be fsync-ed every 32 MB of data generated. This is useful</span></span>
<span class="line"><span># in order to commit the file to the disk more incrementally and avoid</span></span>
<span class="line"><span># big latency spikes.</span></span>
<span class="line"><span>rdb-save-incremental-fsync yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good</span></span>
<span class="line"><span># idea to start with the default settings and only change them after investigating</span></span>
<span class="line"><span># how to improve the performances and how the keys LFU change over time, which</span></span>
<span class="line"><span># is possible to inspect via the OBJECT FREQ command.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># There are two tunable parameters in the Redis LFU implementation: the</span></span>
<span class="line"><span># counter logarithm factor and the counter decay time. It is important to</span></span>
<span class="line"><span># understand what the two parameters mean before changing them.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The LFU counter is just 8 bits per key, it&#39;s maximum value is 255, so Redis</span></span>
<span class="line"><span># uses a probabilistic increment with logarithmic behavior. Given the value</span></span>
<span class="line"><span># of the old counter, when a key is accessed, the counter is incremented in</span></span>
<span class="line"><span># this way:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1. A random number R between 0 and 1 is extracted.</span></span>
<span class="line"><span># 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).</span></span>
<span class="line"><span># 3. The counter is incremented only if R &lt; P.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default lfu-log-factor is 10. This is a table of how the frequency</span></span>
<span class="line"><span># counter changes with a different number of accesses with different</span></span>
<span class="line"><span># logarithmic factors:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 0      | 104        | 255        | 255        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 1      | 18         | 49         | 255        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 10     | 10         | 18         | 142        | 255        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span># | 100    | 8          | 11         | 49         | 143        | 255        |</span></span>
<span class="line"><span># +--------+------------+------------+------------+------------+------------+</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE: The above table was obtained by running the following commands:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span>#   redis-benchmark -n 1000000 incr foo</span></span>
<span class="line"><span>#   redis-cli object freq foo</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># NOTE 2: The counter initial value is 5 in order to give new objects a chance</span></span>
<span class="line"><span># to accumulate hits.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The counter decay time is the time, in minutes, that must elapse in order</span></span>
<span class="line"><span># for the key counter to be divided by two (or decremented if it has a value</span></span>
<span class="line"><span># less &lt;= 10).</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The default value for the lfu-decay-time is 1. A special value of 0 means to</span></span>
<span class="line"><span># decay the counter every time it happens to be scanned.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># lfu-log-factor 10</span></span>
<span class="line"><span># lfu-decay-time 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span>########################### ACTIVE DEFRAGMENTATION #######################</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># What is active defragmentation?</span></span>
<span class="line"><span># -------------------------------</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Active (online) defragmentation allows a Redis server to compact the</span></span>
<span class="line"><span># spaces left between small allocations and deallocations of data in memory,</span></span>
<span class="line"><span># thus allowing to reclaim back memory.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Fragmentation is a natural process that happens with every allocator (but</span></span>
<span class="line"><span># less so with Jemalloc, fortunately) and certain workloads. Normally a server</span></span>
<span class="line"><span># restart is needed in order to lower the fragmentation, or at least to flush</span></span>
<span class="line"><span># away all the data and create it again. However thanks to this feature</span></span>
<span class="line"><span># implemented by Oran Agra for Redis 4.0 this process can happen at runtime</span></span>
<span class="line"><span># in a &quot;hot&quot; way, while the server is running.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Basically when the fragmentation is over a certain level (see the</span></span>
<span class="line"><span># configuration options below) Redis will start to create new copies of the</span></span>
<span class="line"><span># values in contiguous memory regions by exploiting certain specific Jemalloc</span></span>
<span class="line"><span># features (in order to understand if an allocation is causing fragmentation</span></span>
<span class="line"><span># and to allocate it in a better place), and at the same time, will release the</span></span>
<span class="line"><span># old copies of the data. This process, repeated incrementally for all the keys</span></span>
<span class="line"><span># will cause the fragmentation to drop back to normal values.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Important things to understand:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 1. This feature is disabled by default, and only works if you compiled Redis</span></span>
<span class="line"><span>#    to use the copy of Jemalloc we ship with the source code of Redis.</span></span>
<span class="line"><span>#    This is the default with Linux builds.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 2. You never need to enable this feature if you don&#39;t have fragmentation</span></span>
<span class="line"><span>#    issues.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># 3. Once you experience fragmentation, you can enable this feature when</span></span>
<span class="line"><span>#    needed with the command &quot;CONFIG SET activedefrag yes&quot;.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># The configuration parameters are able to fine tune the behavior of the</span></span>
<span class="line"><span># defragmentation process. If you are not sure about what they mean it is</span></span>
<span class="line"><span># a good idea to leave the defaults untouched.</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Enable active defragmentation</span></span>
<span class="line"><span># activedefrag no</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimum amount of fragmentation waste to start active defrag</span></span>
<span class="line"><span># active-defrag-ignore-bytes 100mb</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimum percentage of fragmentation to start active defrag</span></span>
<span class="line"><span># active-defrag-threshold-lower 10</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximum percentage of fragmentation at which we use maximum effort</span></span>
<span class="line"><span># active-defrag-threshold-upper 100</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Minimal effort for defrag in CPU percentage, to be used when the lower</span></span>
<span class="line"><span># threshold is reached</span></span>
<span class="line"><span># active-defrag-cycle-min 1</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximal effort for defrag in CPU percentage, to be used when the upper</span></span>
<span class="line"><span># threshold is reached</span></span>
<span class="line"><span># active-defrag-cycle-max 25</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Maximum number of set/hash/zset/list fields that will be processed from</span></span>
<span class="line"><span># the main dictionary scan</span></span>
<span class="line"><span># active-defrag-max-scan-fields 1000</span></span>
<span class="line"><span></span></span>
<span class="line"><span># Jemalloc background thread for purging will be enabled by default</span></span>
<span class="line"><span>jemalloc-bg-thread yes</span></span>
<span class="line"><span></span></span>
<span class="line"><span># It is possible to pin different threads and processes of Redis to specific</span></span>
<span class="line"><span># CPUs in your system, in order to maximize the performances of the server.</span></span>
<span class="line"><span># This is useful both in order to pin different Redis threads in different</span></span>
<span class="line"><span># CPUs, but also in order to make sure that multiple Redis instances running</span></span>
<span class="line"><span># in the same host will be pinned to different CPUs.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Normally you can do this using the &quot;taskset&quot; command, however it is also</span></span>
<span class="line"><span># possible to do this via Redis configuration directly, both in Linux and FreeBSD.</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># You can pin the server/IO threads, bio threads, aof rewrite child process, and</span></span>
<span class="line"><span># the bgsave child process. The syntax to specify the cpu list is the same as</span></span>
<span class="line"><span># the taskset command:</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set redis server/io threads to cpu affinity 0,2,4,6:</span></span>
<span class="line"><span># server_cpulist 0-7:2</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set bio threads to cpu affinity 1,3:</span></span>
<span class="line"><span># bio_cpulist 1,3</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set aof rewrite child process to cpu affinity 8,9,10,11:</span></span>
<span class="line"><span># aof_rewrite_cpulist 8-11</span></span>
<span class="line"><span>#</span></span>
<span class="line"><span># Set bgsave child process to cpu affinity 1,10,11</span></span>
<span class="line"><span># bgsave_cpulist 1,10-11</span></span>
<span class="line"><span></span></span>
<span class="line"><span># In some cases redis will emit warnings and even refuse to start if it detects</span></span>
<span class="line"><span># that the system is in bad state, it is possible to suppress these warnings</span></span>
<span class="line"><span># by setting the following config which takes a space delimited list of warnings</span></span>
<span class="line"><span># to suppress</span></span>
<span class="line"><span>#</span></span></code></pre></div><p>今天先不写技术文了，水一下博客。</p><p>说说最近发生的事情吧，还是疫情原因，前段时间被封在宿舍，现在虽然能出去宿舍，但也是线上上课，快递和外卖也都不能拿，唉。</p><p>说一下我为什么会想要水博客吧，原因之一，最近一直在宿舍，补专业课，后端学习方面有些许搁置，再加上学了几天四级，最后却没报上四级，这个属实是有点让人难受。</p><p>还有就是我今早上没吃饭，提不起精力来写技术文了，现在只想去干饭，但还不到吃饭的时间，也还没下班，唉。</p><p>啊，好难受。</p><p>呜呜呜呜，再补一点，昨天中午（也就是10.30日中午），我被骗走了三十元巨款，没想到我有生之年还会被诈骗，可恶的骗子，呜呜呜呜呜呜，我的三十元巨款啊，呜呜呜呜呜呜呜。</p><p>呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜。</p><p>这几天过的好难受啊呜呜呜呜呜呜，快递也不能拿，还想再买点东西，都被告知快递不支持所在地区呜呜呜呜呜呜呜，我的生活啊，呜呜呜呜呜呜，被疫情搞得一团糟呜呜呜呜呜呜呜呜，可恶的疫情，赶紧见鬼去吧！</p><p>呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜
呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜呜。</p></div></div></main><footer class="VPDocFooter" data-v-6b87e69f data-v-48f9bb55><!--[--><!--]--><!----><!----></footer><!--[--><!--[--><!--[--><!--[--><!--]--><!----><!--]--><!--]--><!--]--></div></div></div><!--[--><!--]--></div></div><!----><!--[--><!--]--></div></div>
    <script>window.__VP_HASH_MAP__=JSON.parse("{\"blog_csdn_js中的箭头函数_＞.md\":\"YAhpwxwO\",\"blog_csdn_http小记1.md\":\"kPBwr6NW\",\"blog_csdn_560. 和为 k 的子数组.md\":\"oNe7BuCq\",\"about.md\":\"YgbP6nft\",\"blog_csdn_389. 找不同.md\":\"L2yHU_sg\",\"blog_csdn_2022年第一篇总结.md\":\"uWFlvrhw\",\"blog_csdn_integer.bitcount().md\":\"w_YwA3AH\",\"blog_csdn_failed to configure a datasource! ‘url‘ attribute is not specified and no embedded datasource could.md\":\"ATmVUk6n\",\"blog_csdn_could not resolve placeholder ‘coupon.user.name‘ in value “__coupon.user.name_“.md\":\"2MfWZpKX\",\"blog_csdn_could not find class _org.springframework.cloud.client.loadbalancer.loadbalancerproperties_.md\":\"weFa6Ykk\",\"blog_csdn_junit单元测试.md\":\"ngaiA8Nn\",\"blog_csdn_114. 二叉树展开为链表.md\":\"YNXbicRN\",\"blog_csdn_mybatisplus实现乐观锁（实战）.md\":\"3JZCEd3T\",\"blog_csdn_js中的布尔类型.md\":\"NiWOftkZ\",\"blog_csdn_199. 二叉树的右视图.md\":\"9w7BWnrq\",\"blog_csdn_feign.md\":\"yFC9Anxi\",\"blog_csdn_no feign client for loadbalancing defined. did you forget to include spring-cloud-starter-loadbalanc.md\":\"ce_9lmef\",\"blog_csdn_es6基本知识点.md\":\"w8IGSmUD\",\"blog_csdn_linux保存退出和不保存退出命令.md\":\"NyEwZM1Y\",\"blog_csdn_799. 最长连续不重复子序列 java.md\":\"xUBIkxKq\",\"blog_csdn_jsr303.md\":\"2iGXSSM7\",\"blog_csdn_arrays.fill()_arrays.equals()_http的请求方式.md\":\"PHc3-81d\",\"blog_csdn_189. 轮转数组.md\":\"EzoH4DT_\",\"blog_csdn_http小记2.md\":\"bbTSRE4B\",\"blog_csdn_maven右侧子项目maven变成灰色.md\":\"Elm_JHql\",\"blog_csdn_could not autowire. no beans of‘managerservice‘‘ type found. could not autowire. 
no beans of “manag.md\":\"mC-oBMQp\",\"blog_csdn_io_集合_泛型实现僵尸查询系统.md\":\"C1vHqa4P\",\"blog_csdn_oauth2.0.md\":\"UaB7PTaR\",\"blog_csdn_mybatis小记.md\":\"g41Ys3cP\",\"blog_csdn_mybatis-plus实战项目演示_自定义元数据对象处理器_ws.md\":\"7NOvFsDI\",\"blog_csdn_swift(2).md\":\"keVCjYRj\",\"blog_csdn_sentinel持久化规则.md\":\"T31B5CGo\",\"blog_csdn_spring的下载.md\":\"58E0LOyh\",\"blog_csdn_swiftui（1）.md\":\"yProIc_S\",\"blog_csdn_sso单点登录.md\":\"Bu5lUm_7\",\"blog_csdn_swiftui（2）.md\":\"d6EkWdX6\",\"blog_csdn_swiftui（3）.md\":\"-ceG1RKf\",\"blog_csdn_swift（4）.md\":\"uEx-Hd35\",\"blog_csdn_swiftui（4）.md\":\"je3zq7lp\",\"blog_csdn_swift（1）.md\":\"UP7Rhlr5\",\"blog_csdn_virtualbox.md\":\"qHi8bb7o\",\"blog_csdn_unable to serialize jobdatamap for insertion into database borg.apache.catalina.core.applicationpart.md\":\"7U7H1q0v\",\"blog_csdn_swift（5）.md\":\"HUpccfEe\",\"blog_csdn_tcp三次握手.md\":\"TX4cNzkP\",\"blog_csdn_swiftui（5）.md\":\"MCAHrzrO\",\"blog_csdn_xml declaratlon should precede all document content.md\":\"hc7qp3Gv\",\"blog_csdn_win11安装vmware中的镜像的下载.md\":\"UIpWgc2x\",\"blog_csdn_ps一些快捷键_一些基础知识点（mac版）.md\":\"mbpAiYz7\",\"blog_csdn_docker镜像加速器配置.md\":\"pd6-mmit\",\"blog_csdn_swiftui（6）.md\":\"8J7tHFgK\",\"blog_csdn_docker部署微服务.md\":\"TQmGdyJF\",\"blog_csdn_git在idea中的使用_终止端口进程mac.md\":\"-Uihz8uN\",\"blog_csdn_foreach，thymeleaf相关jar包的下载，spring，数据库的概念.md\":\"3G0kyO7w\",\"blog_csdn_vue学习随堂记录.md\":\"O7BBp0oy\",\"blog_csdn_http请求方式__过滤器与拦截器的区别.md\":\"pFACyiZR\",\"blog_csdn_http请求报错：406 not acceptable的解决办法.md\":\"lZqPLzjP\",\"blog_csdn_idea创建包时无法分层.md\":\"UN5kmv0O\",\"blog_csdn_gitlab提交项目log in with access 
token错误.md\":\"Rw4txhB4\",\"blog_csdn_idea意外退出mac.md\":\"ZYiPNy6F\",\"blog_csdn_idea右侧的maven项目下的tomcat7插件报红.md\":\"svnEZK0z\",\"blog_csdn_java实现qq自动发送无限条消息.md\":\"7FZUlHYV\",\"blog_csdn_java与c__中的交换方法.md\":\"HymlmL52\",\"blog_csdn_java中的_，＞＞_＜＜位运算.md\":\"TeTOteox\",\"blog_csdn_io实现登录注册功能_本周总结.md\":\"jUH8XSAf\",\"blog_csdn_idea所有历史版本下载.md\":\"goYSIsX2\",\"blog_csdn_idea显示左下角service.md\":\"o_Sk-Z1f\",\"blog_csdn_hosts文件位置mac.md\":\"l3hC0saF\",\"blog_csdn_java.lang.exceptionininitializererror.md\":\"Ckcfqwvr\",\"blog_csdn_gitlab-runner安装和部署项目.md\":\"wi7zL4xY\",\"blog_csdn_java.lang.unsupportedoperationexception与cleanmymac x.md\":\"wS4Qq2MS\",\"blog_csdn_java小记（1）.md\":\"_Ewpvsjs\",\"blog_csdn_java接口防刷机制.md\":\"sliPyuWv\",\"blog_csdn_java实现阿里云文件存储oss.md\":\"VQnQWO2C\",\"blog_csdn_easyexcel.md\":\"zQINRmFd\",\"blog_csdn_java笔记.md\":\"yIE40rW2\",\"blog_csdn_java碎碎碎碎碎碎.md\":\"UHHF2clV\",\"blog_csdn_js中children和childnodes的区别.md\":\"QiWReAxD\",\"blog_csdn_linux下搭建redis_设置密码.md\":\"KGemEQdy\",\"blog_csdn_java部分排序算法.md\":\"mB8SWyEg\",\"blog_csdn_linux安装tomcat（docker）.md\":\"N57qAhbw\",\"blog_csdn_linux开启端口.md\":\"Vti-GziU\",\"blog_csdn_mybatis-plus中的@select注解里面写sql语句的in.md\":\"T1BsnWlE\",\"blog_csdn_maven install could not resolve dependencies for project解决办法.md\":\"_qBjGkMP\",\"blog_csdn_js中_new date().md\":\"L-ENlKzt\",\"blog_csdn_mybatis-plus中的逻辑删除.md\":\"3pFrTcd6\",\"blog_csdn_nacos怎么修改密码（保姆教程）.md\":\"TQrLPkXI\",\"blog_csdn_python的安装(推荐).md\":\"jrbDU9d5\",\"blog_csdn_mysql课堂笔记 mac.md\":\"ayQai_qX\",\"blog_csdn_mysql一些小知识点.md\":\"YdFgmijI\",\"blog_csdn_redis.conf的一些配置_密码的设置（mac）_个人总结.md\":\"UV5sqr8f\",\"blog_csdn_quartz中jdbc.initialize-schema.md\":\"dz5ul94L\",\"blog_csdn_redis整合通过qq邮箱发送验证码.md\":\"jnJKfx8S\",\"blog_csdn_mybatis固定代码.md\":\"gdcq5IXA\",\"blog_csdn_springboot整合liquibase（补充）.md\":\"oL8WvX5T\",\"blog_csdn_springcloud跨域重复问题allow-origin header contains multiple values... 
but only one is allowed.md\":\"B1YiqUEV\",\"blog_csdn_个人简介.md\":\"M6EqUaDo\",\"blog_csdn_spring小记.md\":\"2hscPwrx\",\"blog_csdn_swift（3）.md\":\"pheNh2gO\",\"blog_csdn_yml基本语法与支持的数据格式.md\":\"TFsg-wqF\",\"blog_csdn_springmvc固定代码.md\":\"J1SwxuYJ\",\"blog_csdn_人生是一场盛大的遇见.md\":\"UeVYUtiW\",\"blog_csdn_修改mysql密码与mac中mysql的启动与终止.md\":\"KA9bbLNQ\",\"blog_csdn_使用arrays.aslist与不使用的区别.md\":\"-Ll3MkVL\",\"blog_csdn_关于多个项目使用同一个nacos的解决方法.md\":\"_YP2tk85\",\"blog_csdn_关于xcode中swiftui代码旁边的模拟机不见了.md\":\"YPP6Ywl4\",\"blog_csdn_创建springboot项目时改为国内网站.md\":\"w470ByIW\",\"blog_csdn_分页查询与集合分页查询与html基础知识.md\":\"O0afbm4y\",\"blog_csdn_关于gateway中lb失效.md\":\"QQHN8OHw\",\"blog_csdn_关于京造k6蓝牙在连接一次windows之后就没有再连回来mac.md\":\"vF78bb4H\",\"blog_csdn_springboot_springdata-jpa_thymeleaf项目实战.md\":\"hdZ-dEG5\",\"blog_csdn_写完项目后.md\":\"F-IBjrRs\",\"blog_csdn_关于mac上的所有东西都变小了.md\":\"BPTcT522\",\"blog_csdn_关于我写的循环遍历.md\":\"Y-Z6ba0d\",\"blog_csdn_几天的总结.md\":\"ZEhThs2z\",\"blog_csdn_分布式文件存储系统minio.md\":\"qfPZbaTX\",\"blog_csdn_剑指 offer 10- i. 斐波那契数列.md\":\"oFtAn1X1\",\"blog_csdn_剑指 offer 13. 机器人的运动范围.md\":\"KCGQ2vNj\",\"blog_csdn_优化------聊聊缓存.md\":\"1RfJUQ_E\",\"blog_csdn_剑指 offer 22. 链表中倒数第k个节点.md\":\"lqqJOW-i\",\"blog_csdn_反转字符串中的单词 iii.md\":\"-ubjQ8D7\",\"blog_csdn_剑指 offer 58 - i. 翻转单词顺序.md\":\"TgXrw3uC\",\"blog_csdn_剑指 offer 20. 表示数值的字符串.md\":\"JPyZ9TZW\",\"blog_csdn_剑指 offer 32 - iii. 
从上到下打印二叉树 iii.md\":\"nBB1iJUQ\",\"blog_csdn_如果你和我加在一起能让我变得更好，那我们就在一起，否则我就丢下你，自己往前走如果前途和爱情二选一，毫不犹豫选前途~.md\":\"X2CvnRzx\",\"blog_csdn_后端接收json格式的字符串出现json格式错误.md\":\"KczhSI3_\",\"blog_csdn_在服务器上搭建nacos集群---记录我的心酸历程.md\":\"RDMS-eso\",\"blog_csdn_在排序数组中查找元素的第一个和最后一个位置.md\":\"ItGW00tj\",\"blog_csdn_十六进制转八进制.md\":\"_tiUt9cG\",\"blog_csdn_回文日期java(蓝桥杯）_个人总结.md\":\"MQaTs20w\",\"blog_csdn_redis的redis.config文件配置与内容_10.30日之前的总结.md\":\"xXGvwm_i\",\"blog_csdn_如何更简洁查看接口返回的树状图信息.md\":\"MkXaeMfO\",\"blog_csdn_如何删除docker镜像与容器.md\":\"a5kZoq_H\",\"blog_csdn_在服务器上搭建gitlab.md\":\"GAUKEJm4\",\"blog_csdn_在服务器上搭建jenkins.md\":\"lf06mKYr\",\"blog_csdn_字符串中的第一个唯一字符.md\":\"9KFdfzMC\",\"blog_csdn_异步实现邮件发送.md\":\"Koik2VMW\",\"blog_csdn_我心狂野，我梦无岸.md\":\"KVmcylTW\",\"blog_csdn_局部异常处理.md\":\"3GteU7JF\",\"blog_csdn_将十进制数 （24!512） 表示成浮点规格化数，要求阶码4位(含符号），移码表示；尾数6位（含符号），用补码表示.md\":\"28tlApOf\",\"blog_csdn_微服务加载多个nacos配置文件.md\":\"_kS2HJDC\",\"blog_csdn_常用dos命令_关键字保留字_命名规范_基本数据类型_引用数据类型_基本数据类型转换_算术运算符需要注意的问题_以前笔记_部分运算符_方法重载_变量赋值_构造器的作用_package关键字.md\":\"PrBIdQnP\",\"blog_csdn_接口性能优化.md\":\"MGLmiPQK\",\"blog_csdn_推荐跨域配置.md\":\"EGd8ujGl\",\"blog_csdn_数据库基础知识1.md\":\"C4tu1S7F\",\"blog_csdn_数据库 范式.md\":\"_CwsVwyb\",\"blog_csdn_提交到远程仓库.md\":\"ky9KtPU8\",\"blog_csdn_数据库实验4作业.md\":\"OEFYkbdL\",\"blog_csdn_数据库实验报告（六）.md\":\"6Wzf16l3\",\"blog_csdn_数据库实验报告（十）.md\":\"DuaCuqAU\",\"blog_csdn_数据库基本知识2.md\":\"o97jHzlO\",\"blog_csdn_数据库实验7.md\":\"M8pU-q3d\",\"blog_csdn_提笔小叙@.md\":\"owkNdS_H\",\"blog_csdn_数据库原理与分析实验三.md\":\"2FvikDxI\",\"blog_csdn_数据库实验报告（五）.md\":\"AMbMBPVx\",\"blog_csdn_数据库实验八.md\":\"eth4K_y9\",\"blog_csdn_数据库往年试卷.md\":\"PgZoRFfW\",\"blog_csdn_数据库实验9.md\":\"kQXhEkN_\",\"blog_csdn_子类继承父类_object类中的主要结构_object类中tostring（）的使用_关键字final_string的常用方法_string 
stringbuffe_java中的日期时间.md\":\"VtFP4M76\",\"blog_csdn_数据库范式例题.md\":\"WM3vqwTj\",\"blog_csdn_数据库密码加密处理.md\":\"zLI99ATm\",\"blog_csdn_本周总结.md\":\"PRxK0V_J\",\"blog_csdn_每周总结：情绪管理，无效社交.md\":\"XJiXIqnH\",\"blog_csdn_整数拆分乘积最大.md\":\"JlwDqW1l\",\"blog_csdn_数据结构_java基础（1）_进制之间的转换.md\":\"GF2uYLO5\",\"blog_csdn_数据恢复与并发控制例题.md\":\"z01F_roq\",\"blog_csdn_浅聊docker.md\":\"ixi3p5Wp\",\"blog_csdn_泛型_io流基础知识_java-＞符号 lambda表达式.md\":\"LJKa1MEK\",\"blog_csdn_浅聊一下lambda表达式.md\":\"K5t_Gp0u\",\"blog_csdn_浅浅的计算机网络知识.md\":\"SXMnWiRE\",\"blog_csdn_浅聊一下nginx.md\":\"G3X9Yptg\",\"blog_csdn_浅聊一下内网穿透.md\":\"Mf-nrt5e\",\"blog_csdn_父工程在clean和install时报错，子工程不报错.md\":\"obeuMa5V\",\"blog_csdn_祝大家2022幸福安康.md\":\"3L_wJaCE\",\"blog_csdn_用户名验证（正则表达式）.md\":\"X8re7knp\",\"blog_csdn_窗口加载事件.md\":\"uUSToKjY\",\"blog_csdn_终止端口进程命令.md\":\"xt637TaI\",\"blog_csdn_蓝桥杯-数字三角形.md\":\"wrlBJjex\",\"blog_csdn_蓝桥杯-x图形.md\":\"HsQxIRQ0\",\"blog_csdn_蓝桥杯-乘积最大.md\":\"W56U3cZF\",\"blog_csdn_自己的工具类和分页查询.md\":\"bPvvSSZj\",\"blog_csdn_蓝桥杯-答疑.md\":\"-F6JE70j\",\"blog_csdn_试题 基础练习 01字串.md\":\"tOP3a4pL\",\"blog_csdn_计网小记-1.md\":\"zjUVQh0n\",\"blog_csdn_试题 基础练习 序列求和.md\":\"VhfgkLhg\",\"blog_csdn_试题 基础练习 圆的面积.md\":\"SLwcXUWs\",\"blog_csdn_试题 基础练习 fibonacci数列.md\":\"MfOqqMH4\",\"blog_csdn_近日小结（非技术文）.md\":\"cCoTXXFG\",\"blog_csdn_近日总结（12.21.md\":\"pZvJo6GP\",\"blog_csdn_近几日总结（5月8日）.md\":\"M5DFYkiL\",\"blog_csdn_通过location实现几秒后页面跳转.md\":\"oP5oC4Yb\",\"blog_csdn_除夕---总结.md\":\"wPoB5YGL\",\"blog_csdn_浅聊一下stream流.md\":\"ysLuBhS3\",\"blog_csdn_面经学习一.md\":\"oMmYfvFV\",\"blog_csdn_集合转数组.md\":\"AUX_2K8a\",\"blog_csdn_面经------锁.md\":\"G7btfq-z\",\"blog_csdn_面经学习三.md\":\"0y_ZfGeD\",\"index.md\":\"inez67Dc\",\"blog_csdn_验证回文串.md\":\"dZEj8KpD\",\"blog_csdn_请求方法_super_枚举_包装类_正则表达式_学习资料.md\":\"vf7EeD-E\"}");window.__VP_SITE_DATA__=JSON.parse("{\"lang\":\"zh-cn\",\"dir\":\"ltr\",\"title\":\"雾喔\",\"description\":\"雾喔的博客主题，基于 vitepress 
实现\",\"base\":\"/ysy-blog/\",\"head\":[],\"router\":{\"prefetchLinks\":true},\"appearance\":true,\"themeConfig\":{\"blog\":{\"pagesData\":[{\"route\":\"/about\",\"meta\":{\"top\":1,\"sticky\":10,\"recommend\":1,\"comment\":true,\"description\":\"作者的个人介绍\",\"descriptionHTML\":\" <span style=\\\"color:var(--description-font-color);\\\">作者的个人介绍</span> <ul  style=\\\"color:var(--vp-c-text-1)\\\"> <li>👷‍♂️ 方向：前端开发</li><li>🐱‍👤 github：<a href=\\\"https://github.com/wzz778\\\" target=\\\"_blank\\\" rel=\\\"nofollow\\\">https://github.com/wzz778</a></li> <li >📫 邮箱: <a href=\\\"mailto:15038727708@163.com\\\" target=\\\"_blank\\\">15038727708@163.com</a></li> <li>🚀 博客：<a href=\\\"https://zezhengyyds.gitee.io/aze-blog/\\\" target=\\\"_blank\\\" rel=\\\"nofollow\\\">https://zezhengyyds.gitee.io/aze-blog/</a></li><li>🛸 掘金：<a target=\\\"_blank\\\" href=\\\"https://juejin.cn/user/3004330270263432\\\" rel=\\\"nofollow\\\">https://juejin.cn/user/3004330270263432</a></li><li>🤳 介绍：一个积极向上的00后，喜欢折腾一些有趣的东西。</li><li>✨ 个签：乐观、积极、感恩、承担！</li> </ul> </code>\",\"title\":\"关于作者\",\"date\":\"2024-03-04 19:16:05\",\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/114. 二叉树展开为链表\",\"meta\":{\"title\":\"114. 二叉树展开为链表\",\"date\":\"2024-02-27 21:28:48\",\"tags\":[\"链表\",\"java\",\"数据结构\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/909113289665b1182798733a63a6a317.jpeg\",\"description\":\"简单介绍主题的由来和实现原理\",\"outline\":[2,3],\"tag\":[\"链表\",\"java\",\"数据结构\"]}},{\"route\":\"/blog/csdn/189. 轮转数组\",\"meta\":{\"title\":\"189. 轮转数组\",\"date\":\"2023-10-26 15:53:16\",\"tags\":[\"算法\",\"数据结构\",\"leetcode\"],\"tag\":[\"算法\",\"数据结构\",\"leetcode\"],\"cover\":\"https://img-blog.csdnimg.cn/9941f551822340fc86c42c6d8a8af5d7.png\"}},{\"route\":\"/blog/csdn/199. 二叉树的右视图\",\"meta\":{\"title\":\"199. 
二叉树的右视图\",\"date\":\"2024-02-27 21:21:24\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/6f8e6d18b5440e651d8726bc125e1b35.jpeg\"}},{\"route\":\"/blog/csdn/2022年第一篇总结\",\"meta\":{\"title\":\"2022年第一篇总结\",\"date\":\"2022-01-08 10:01:40\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/389. 找不同\",\"meta\":{\"title\":\"389. 找不同\",\"date\":\"2023-05-20 17:48:48\",\"tags\":[\"leetcode\",\"java\",\"javascript\"],\"tag\":[\"leetcode\",\"java\",\"javascript\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/560. 和为 K 的子数组\",\"meta\":{\"title\":\"560. 和为 K 的子数组\",\"date\":\"2023-11-22 16:43:05\",\"tags\":[\"算法\",\"数据结构\",\"leetcode\"],\"tag\":[\"算法\",\"数据结构\",\"leetcode\"],\"cover\":\"https://img-blog.csdnimg.cn/5f915bbb65574d1c8f5a072f6134a82a.png\"}},{\"route\":\"/blog/csdn/799. 最长连续不重复子序列 java\",\"meta\":{\"title\":\"799. 最长连续不重复子序列 java\",\"date\":\"2023-11-01 21:08:15\",\"outline\":[2,3],\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"https://img-blog.csdnimg.cn/5b5b3198901d4d09b5959580997ec135.jpeg\"}},{\"route\":\"/blog/csdn/Arrays.fill(),Arrays.equals(),http的请求方式\",\"meta\":{\"title\":\"Arrays.fill(),Arrays.equals(),http的请求方式\",\"date\":\"2022-05-15 10:48:36\",\"tags\":[\"java\",\"经验分享\"],\"tag\":[\"java\",\"经验分享\"],\"cover\":\"https://img-blog.csdnimg.cn/6694a2bd0fd2491c8bcca107bbed546e.png\"}},{\"route\":\"/blog/csdn/Could not autowire. No beans of‘ManagerService‘‘ type found. Could not autowire. No beans of “Manag\",\"meta\":{\"title\":\"Could not autowire. No beans of‘ManagerService‘‘ type found. Could not autowire. 
No beans of “Manag\",\"date\":\"2022-04-16 22:02:50\",\"tags\":[\"spring\",\"maven\",\"经验分享\"],\"tag\":[\"spring\",\"maven\",\"经验分享\"],\"cover\":\"https://img-blog.csdnimg.cn/4a5ef0e702414777a91d9fa117325430.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/Could not find class [org.springframework.cloud.client.loadbalancer.LoadBalancerProperties]\",\"meta\":{\"title\":\"Could not find class [org.springframework.cloud.client.loadbalancer.LoadBalancerProperties]\",\"date\":\"2023-07-16 10:34:07\",\"tags\":[\"java\",\"spring\",\"spring boot\",\"报错\"],\"tag\":[\"java\",\"spring\",\"spring boot\",\"报错\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/Could not resolve placeholder ‘coupon.user.name‘ in value “${coupon.user.name}“\",\"meta\":{\"title\":\"Could not resolve placeholder ‘coupon.user.name‘ in value “${coupon.user.name}“\",\"date\":\"2023-05-20 10:39:32\",\"tags\":[\"spring\",\"java\",\"spring boot\"],\"tag\":[\"spring\",\"java\",\"spring boot\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/ES6基本知识点\",\"meta\":{\"title\":\"ES6基本知识点\",\"date\":\"2023-07-10 21:08:08\",\"tags\":[\"es6\",\"前端\",\"ecmascript\"],\"tag\":[\"es6\",\"前端\",\"ecmascript\"],\"cover\":\"https://img-blog.csdnimg.cn/f1f8e1301b9e4794ab8691afd8599ab5.png\"}},{\"route\":\"/blog/csdn/Failed to configure a DataSource! 
‘url‘ attribute is not specified and no embedded datasource could\",\"meta\":{\"title\":\"???\",\"date\":\"2023-05-11 21:00:01\",\"tags\":[\"java\",\"数据库\",\"mysql\"],\"tag\":[\"java\",\"数据库\",\"mysql\"],\"cover\":\"https://img-blog.csdnimg.cn/483bc5f935b344c192e4ba42bbef42dd.png\"}},{\"route\":\"/blog/csdn/Feign\",\"meta\":{\"title\":\"Feign\",\"date\":\"2023-03-09 22:27:54\",\"tags\":[\"eureka\",\"java\",\"spring cloud\"],\"tag\":[\"eureka\",\"java\",\"spring cloud\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/8adc0f1a8fb3c1185c16355f47c90218.png\"}},{\"route\":\"/blog/csdn/HTTP小记1\",\"meta\":{\"title\":\"HTTP小记1\",\"date\":\"2023-12-21 10:07:27\",\"tags\":[\"http\",\"https\"],\"tag\":[\"http\",\"https\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/f4f54a2efc47b7fb0a32ab52bf403a05.png\"}},{\"route\":\"/blog/csdn/HTTP小记2\",\"meta\":{\"title\":\"HTTP小记2\",\"date\":\"2023-12-26 20:24:05\",\"tags\":[\"http\",\"网络协议\",\"网络\"],\"tag\":[\"http\",\"网络协议\",\"网络\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/fc3da7136eba41405be0944aef0f1ebf.png\"}},{\"route\":\"/blog/csdn/IO+集合+泛型实现僵尸查询系统\",\"meta\":{\"title\":\"IO+集合+泛型实现僵尸查询系统\",\"date\":\"2022-01-19 11:19:15\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/Integer.bitCount()\",\"meta\":{\"title\":\"Integer.bitCount()\",\"date\":\"2023-08-07 20:35:46\",\"tags\":[\"java基础\",\"算法\"],\"tag\":[\"java基础\",\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/JSR303\",\"meta\":{\"title\":\"JSR303\",\"date\":\"2023-07-18 08:50:09\",\"tags\":[\"java\",\"前端\",\"spring\"],\"tag\":[\"java\",\"前端\",\"spring\"],\"cover\":\"https://img-blog.csdnimg.cn/faddb0c2dbe14cc19a387b7fca993f94.png\"}},{\"route\":\"/blog/csdn/JS中的布尔类型\",\"meta\":{\"title\":\"JS中的布尔类型\",\"date\":\"2023-05-26 09:38:26\",\"tags\":[\"javascript\"],\"tag\":[\"javascript\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/JS中的箭头函数=＞\",\"meta\":{\"title\":\"JS中的箭头函数=＞\",\"date\":\"2023-07-05 
11:27:37\",\"tags\":[\"javascript\",\"前端\",\"开发语言\"],\"tag\":[\"javascript\",\"前端\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/Junit单元测试\",\"meta\":{\"title\":\"Junit单元测试\",\"date\":\"2023-11-06 21:26:27\",\"tags\":[\"junit\",\"单元测试\"],\"tag\":[\"junit\",\"单元测试\"],\"cover\":\"https://img-blog.csdnimg.cn/c3e78736dae04be588f695e915d6cbb0.png\"}},{\"route\":\"/blog/csdn/Linux保存退出和不保存退出命令\",\"meta\":{\"title\":\"Linux保存退出和不保存退出命令\",\"date\":\"2023-08-26 17:08:06\",\"tags\":[\"linux\",\"运维\",\"服务器\"],\"tag\":[\"linux\",\"运维\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/df72822fcf4c4d9f99414f31536cdc39.jpeg\"}},{\"route\":\"/blog/csdn/Maven右侧子项目maven变成灰色\",\"meta\":{\"title\":\"Maven右侧子项目maven变成灰色\",\"date\":\"2022-09-17 17:01:48\",\"tags\":[\"maven\",\"java\",\"开发语言\",\"经验分享\"],\"tag\":[\"maven\",\"java\",\"开发语言\",\"经验分享\"],\"cover\":\"https://img-blog.csdnimg.cn/7b81d9edee92409c970c2aab7045243e.png\"}},{\"route\":\"/blog/csdn/Mybatis-plus实战项目演示+自定义元数据对象处理器+ws\",\"meta\":{\"title\":\"Mybatis-plus实战项目演示+自定义元数据对象处理器+ws\",\"date\":\"2022-12-09 17:21:26\",\"tags\":[\"mybatis\",\"java\",\"数据库\",\"WebSocket\",\"元数据\"],\"tag\":[\"mybatis\",\"java\",\"数据库\",\"WebSocket\",\"元数据\"],\"description\":\"  global-config:\\n    db-config:\\n       设置实体类所对应的表的统一前缀\\n      table-prefix: t_\\n       设置统一的主键生成策略\\n   \",\"cover\":\"https://img-blog.csdnimg.cn/b273136714724aeab068e2c6768738a3.png\"}},{\"route\":\"/blog/csdn/MybatisPlus实现乐观锁（实战）\",\"meta\":{\"title\":\"MybatisPlus实现乐观锁（实战）\",\"date\":\"2022-09-15 16:30:17\",\"tags\":[\"java\",\"开发语言\",\"mybatis\",\"经验分享\",\"spring boot\"],\"tag\":[\"java\",\"开发语言\",\"mybatis\",\"经验分享\",\"spring boot\"],\"cover\":\"https://img-blog.csdnimg.cn/91c02712fbf24041b104d0ec6b177b8d.png\"}},{\"route\":\"/blog/csdn/Mybatis小记\",\"meta\":{\"title\":\"Mybatis小记\",\"date\":\"2023-08-28 
17:08:58\",\"tags\":[\"mybatis\",\"java\",\"开发语言\"],\"tag\":[\"mybatis\",\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/0c0603d5eb104ed98d37db674cf4c60f.jpeg\"}},{\"route\":\"/blog/csdn/No Feign Client for loadBalancing defined. Did you forget to include spring-cloud-starter-loadbalanc\",\"meta\":{\"title\":\"No Feign Client for loadBalancing defined. Did you forget to include spring-cloud-starter-loadbalanc\",\"date\":\"2023-05-19 20:49:08\",\"tags\":[\"java\",\"spring\",\"spring boot\"],\"tag\":[\"java\",\"spring\",\"spring boot\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/OAuth2.0\",\"meta\":{\"title\":\"OAuth2.0\",\"date\":\"2023-09-20 17:35:35\",\"tags\":[\"java\"],\"tag\":[\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/2a200005c2e04008b92193123a6ed5ef.jpeg\"}},{\"route\":\"/blog/csdn/PS一些快捷键+一些基础知识点（mac版）\",\"meta\":{\"title\":\"PS一些快捷键+一些基础知识点（mac版）\",\"date\":\"2022-06-11 19:54:39\",\"tags\":[\"经验分享\"],\"tag\":[\"经验分享\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/SSO单点登录\",\"meta\":{\"title\":\"SSO单点登录\",\"date\":\"2023-07-25 18:24:10\",\"tags\":[\"bootstrap\",\"前端\",\"html\"],\"tag\":[\"bootstrap\",\"前端\",\"html\"],\"description\":\"server.port=8080\\nsso.server.url=http://localhost:8080\\nspring.thymeleaf.cache=false\\n 检查模板是否存在，然后再呈现\\ns\",\"cover\":\"https://img-blog.csdnimg.cn/img_convert/9003a3782fc2449f9def0dbb04dbbe43.png\"}},{\"route\":\"/blog/csdn/Sentinel持久化规则\",\"meta\":{\"title\":\"Sentinel持久化规则\",\"date\":\"2023-03-26 09:25:01\",\"tags\":[\"sentinel\",\"java\",\"开发语言\"],\"tag\":[\"sentinel\",\"java\",\"开发语言\"],\"description\":\"    enabled: true  激活Sentinel对Feign的支持\\n```\\n在nacos中新增一个配置：\\n```\\n[\\n    {\\n        \\\"resource\\\": \\\"/rateLimi\",\"cover\":\"https://img-blog.csdnimg.cn/img_convert/bbc7e9c8966c66c45cae6090c3800900.png\"}},{\"route\":\"/blog/csdn/Spring的下载\",\"meta\":{\"title\":\"Spring的下载\",\"date\":\"2022-03-11 
16:48:29\",\"tags\":[\"spring\"],\"tag\":[\"spring\"],\"cover\":\"https://img-blog.csdnimg.cn/7a38be9b8aff44a9acd7c33ee00e2203.png?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/Swift(2)\",\"meta\":{\"title\":\"Swift(2)\",\"date\":\"2023-01-18 20:35:00\",\"tags\":[\"swift\",\"开发语言\",\"ios\"],\"tag\":[\"swift\",\"开发语言\",\"ios\"],\"cover\":\"https://img-blog.csdnimg.cn/e3ea333459c04cbb9d978738832ad21e.png\"}},{\"route\":\"/blog/csdn/SwiftUi（1）\",\"meta\":{\"title\":\"SwiftUi（1）\",\"date\":\"2023-02-14 08:59:50\",\"tags\":[\"swiftui\",\"swift\",\"ios\"],\"tag\":[\"swiftui\",\"swift\",\"ios\"],\"cover\":\"https://img-blog.csdnimg.cn/cb91c3b1fc024844a5c026981672a41d.png\"}},{\"route\":\"/blog/csdn/SwiftUi（2）\",\"meta\":{\"title\":\"SwiftUi（2）\",\"date\":\"2023-02-20 19:50:56\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/4558822f0cba45e9bd786b8c12929263.png\"}},{\"route\":\"/blog/csdn/SwiftUi（3）\",\"meta\":{\"title\":\"SwiftUi（3）\",\"date\":\"2023-02-20 19:51:29\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/83c32ca8837b4554a4c9718307709dec.png\"}},{\"route\":\"/blog/csdn/SwiftUi（4）\",\"meta\":{\"title\":\"SwiftUi（4）\",\"date\":\"2023-02-20 19:53:51\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/ddae354e199a43ecbc3f89d0d4bb7538.png\"}},{\"route\":\"/blog/csdn/SwiftUi（5）\",\"meta\":{\"title\":\"SwiftUi（5）\",\"date\":\"2023-02-24 14:48:57\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/21622d84a1254089a0f973f74d2a62af.png\"}},{\"route\":\"/blog/csdn/SwiftUi（6）\",\"meta\":{\"title\":\"SwiftUi（6）\",\"date\":\"2023-03-01 
08:08:44\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/5d729cd46a554916aa5c5c8543c20acb.png\"}},{\"route\":\"/blog/csdn/Swift（1）\",\"meta\":{\"title\":\"Swift（1）\",\"date\":\"2023-01-13 22:56:42\",\"tags\":[\"swift\",\"ios\",\"开发语言\"],\"tag\":[\"swift\",\"ios\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/df7865a462c0457a9ef4a1105c7007a1.png\"}},{\"route\":\"/blog/csdn/Swift（4）\",\"meta\":{\"title\":\"Swift（4）\",\"date\":\"2023-01-25 21:32:57\",\"tags\":[\"swift\",\"开发语言\",\"ios\"],\"tag\":[\"swift\",\"开发语言\",\"ios\"],\"cover\":\"https://img-blog.csdnimg.cn/38d63d77ea5144239ac02b2ad98e6eb2.png)![](https://img-blog.csdnimg.cn/6918597552ac4de9922cb9c421d78139.png\"}},{\"route\":\"/blog/csdn/Swift（5）\",\"meta\":{\"title\":\"Swift（5）\",\"date\":\"2023-02-13 10:39:38\",\"tags\":[\"swift\",\"开发语言\",\"ios\"],\"tag\":[\"swift\",\"开发语言\",\"ios\"],\"cover\":\"https://img-blog.csdnimg.cn/5531669dc41c498da3aaa4f3b7894b54.png\"}},{\"route\":\"/blog/csdn/TCP三次握手\",\"meta\":{\"title\":\"TCP三次握手\",\"date\":\"2023-11-13 22:16:33\",\"tags\":[\"tcp/ip\",\"网络\",\"网络协议\"],\"tag\":[\"tcp/ip\",\"网络\",\"网络协议\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/3a5c1f466e138547ffd4d3d68e74f70f.png\"}},{\"route\":\"/blog/csdn/Unable to serialize JobDataMap for insertion into database borg.apache.catalina.core.ApplicationPart\",\"meta\":{\"title\":\"Unable to serialize JobDataMap for insertion into database borg.apache.catalina.core.ApplicationPart\",\"date\":\"2023-10-18 16:46:12\",\"tags\":[\"数据库\",\"apache\"],\"tag\":[\"数据库\",\"apache\"],\"cover\":\"https://img-blog.csdnimg.cn/79e9b548498b43f3909fa58b110b302b.jpeg\"}},{\"route\":\"/blog/csdn/VirtualBox\",\"meta\":{\"title\":\"VirtualBox\",\"date\":\"2023-04-20 
09:24:07\",\"tags\":[\"linux\",\"运维\",\"服务器\"],\"tag\":[\"linux\",\"运维\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/1c1fe7cd0e1e4130a0e54bd41a570c6c.png\"}},{\"route\":\"/blog/csdn/Vue学习随堂记录\",\"meta\":{\"title\":\"Vue学习随堂记录\",\"date\":\"2023-07-17 08:51:13\",\"tags\":[\"vue.js\",\"前端\",\"javascript\"],\"tag\":[\"vue.js\",\"前端\",\"javascript\"],\"cover\":\"https://img-blog.csdnimg.cn/a6b5c2d7ac744bd6b2210b010e888b0d.png\"}},{\"route\":\"/blog/csdn/Win11安装VMware中的镜像的下载\",\"meta\":{\"title\":\"Win11安装VMware中的镜像的下载\",\"date\":\"2023-08-25 20:24:17\",\"tags\":[\"windows\"],\"tag\":[\"windows\"],\"cover\":\"https://img-blog.csdnimg.cn/8ccb5226339f45e6952c5cc301deee11.png\"}},{\"route\":\"/blog/csdn/Xml declaratlon should precede all document content\",\"meta\":{\"title\":\"Xml declaratlon should precede all document content\",\"date\":\"2022-03-24 09:05:41\",\"tags\":[\"spring\",\"tomcat\",\"maven\",\"spring\"],\"tag\":[\"spring\",\"tomcat\",\"maven\"],\"cover\":\"https://img-blog.csdnimg.cn/8c3f9b26acfa4d05a7960eb36029623c.png?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/docker部署微服务\",\"meta\":{\"title\":\"docker部署微服务\",\"date\":\"2023-11-23 21:01:02\",\"tags\":[\"eureka\",\"云原生\"],\"tag\":[\"eureka\",\"云原生\"],\"description\":\"docker --help\\n 查看指定命令的帮助\\ndocker [command] --help\\n 查看当前所拥有的镜像\\ndocker images\\n```\\n 拉取镜像\\n```\\n 拉取镜像，未指定版本\",\"cover\":\"https://img-blog.csdnimg.cn/img_convert/c60cd94e937a9beeb8ccf403ab444e02.png\"}},{\"route\":\"/blog/csdn/docker镜像加速器配置\",\"meta\":{\"title\":\"docker镜像加速器配置\",\"date\":\"2023-04-21 10:47:49\",\"tags\":[\"docker\",\"linux\",\"运维\"],\"tag\":[\"docker\",\"linux\",\"运维\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/f89e1203f60a3da7d0bbdef38d576b3b.png\"}},{\"route\":\"/blog/csdn/easyexcel\",\"meta\":{\"title\":\"easyexcel\",\"date\":\"2023-04-12 
11:20:40\",\"tags\":[\"java\",\"spring\",\"servlet\"],\"tag\":[\"java\",\"spring\",\"servlet\"],\"cover\":\"https://img-blog.csdnimg.cn/d8d0fdc2784b42f28172b9bbd795a4ee.png\"}},{\"route\":\"/blog/csdn/foreach，Thymeleaf相关jar包的下载，spring，数据库的概念\",\"meta\":{\"title\":\"foreach，Thymeleaf相关jar包的下载，spring，数据库的概念\",\"date\":\"2022-04-02 20:35:42\",\"tags\":[\"spring\",\"经验分享\",\"数据库开发\",\"其他\"],\"tag\":[\"spring\",\"经验分享\",\"数据库开发\",\"其他\"],\"cover\":\"https://img-blog.csdnimg.cn/10aabd71becb4b89a1f7307ecd0041bc.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/gitlab-runner安装和部署项目\",\"meta\":{\"title\":\"gitlab-runner安装和部署项目\",\"date\":\"2023-08-29 22:14:07\",\"tags\":[\"运维\",\"git\",\"linux\"],\"tag\":[\"运维\",\"git\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/35175344c67645d0a41af66b474d93a0.jpeg\"}},{\"route\":\"/blog/csdn/gitlab提交项目Log in with Access Token错误\",\"meta\":{\"title\":\"gitlab提交项目Log in with Access Token错误\",\"date\":\"2023-08-29 10:30:02\",\"tags\":[\"gitlab\"],\"tag\":[\"gitlab\"],\"cover\":\"https://img-blog.csdnimg.cn/7e381cb829af4dce8a8a63fa1b780f73.jpeg\"}},{\"route\":\"/blog/csdn/git在idea中的使用+终止端口进程mac\",\"meta\":{\"title\":\"git在idea中的使用+终止端口进程mac\",\"date\":\"2022-10-21 17:18:32\",\"tags\":[\"macos\",\"1024程序员节\"],\"tag\":[\"macos\",\"1024程序员节\"],\"cover\":\"https://img-blog.csdnimg.cn/4869b518f85c40aab30b82e251840fdb.png\"}},{\"route\":\"/blog/csdn/hosts文件位置mac\",\"meta\":{\"title\":\"hosts文件位置mac\",\"date\":\"2023-03-29 10:58:07\",\"tags\":[\"macos\"],\"tag\":[\"macos\"],\"cover\":\"https://img-blog.csdnimg.cn/9fb129bb6e2f407d98735fb41fb95e81.png\"}},{\"route\":\"/blog/csdn/http请求报错：406 Not Acceptable的解决办法\",\"meta\":{\"title\":\"http请求报错：406 Not Acceptable的解决办法\",\"date\":\"2023-09-30 
10:54:12\",\"tags\":[\"http\",\"网络协议\",\"网络\"],\"tag\":[\"http\",\"网络协议\",\"网络\"],\"cover\":\"https://img-blog.csdnimg.cn/c49bf0e1dc46442485b53c7da318b707.png\"}},{\"route\":\"/blog/csdn/http请求方式&&过滤器与拦截器的区别\",\"meta\":{\"title\":\"http请求方式&&过滤器与拦截器的区别\",\"date\":\"2023-08-28 16:45:30\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/3242272ae80b44d786f99c3904445c2c.jpeg\"}},{\"route\":\"/blog/csdn/idea创建包时无法分层\",\"meta\":{\"title\":\"idea创建包时无法分层\",\"date\":\"2023-09-21 20:20:18\",\"tags\":[\"idea\"],\"tag\":[\"idea\"],\"cover\":\"https://img-blog.csdnimg.cn/12d2943285e44fdbaa5b30e9e712ed4f.jpeg\"}},{\"route\":\"/blog/csdn/idea右侧的maven项目下的tomcat7插件报红\",\"meta\":{\"title\":\"idea右侧的maven项目下的tomcat7插件报红\",\"date\":\"2022-04-03 08:52:40\",\"tags\":[\"tomcat\",\"经验分享\",\"maven\",\"其他\"],\"tag\":[\"tomcat\",\"经验分享\",\"maven\",\"其他\"],\"cover\":\"https://img-blog.csdnimg.cn/c742f71d8d504c92a0d39f8cef1c57a5.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/idea意外退出mac\",\"meta\":{\"title\":\"idea意外退出mac\",\"date\":\"2023-09-07 11:26:45\",\"tags\":[\"intellij-idea\",\"java\",\"ide\"],\"tag\":[\"intellij-idea\",\"java\",\"ide\"],\"cover\":\"https://img-blog.csdnimg.cn/7ad16dcfd71345c6b333f17303eb16f0.png\"}},{\"route\":\"/blog/csdn/idea所有历史版本下载\",\"meta\":{\"title\":\"idea所有历史版本下载\",\"date\":\"2023-09-07 11:08:37\",\"tags\":[\"intellij-idea\",\"java\",\"ide\"],\"tag\":[\"intellij-idea\",\"java\",\"ide\"],\"cover\":\"https://img-blog.csdnimg.cn/690b2d08d6ca4a86a6c80aabd089482f.png\"}},{\"route\":\"/blog/csdn/idea显示左下角service\",\"meta\":{\"title\":\"idea显示左下角service\",\"date\":\"2023-07-10 17:28:25\",\"tags\":[\"idea\"],\"tag\":[\"idea\"],\"cover\":\"https://img-blog.csdnimg.cn/ffb8e98e049b405ba753113698616821.png\"}},{\"route\":\"/blog/csdn/io实现登录注册功能+本周总结\",\"meta\":{\"title\":\"io实现登录注册功能+本周总结\",\"date\":\"2022-01-16 
15:17:24\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java.lang.ExceptionInInitializerError\",\"meta\":{\"title\":\"java.lang.ExceptionInInitializerError\",\"date\":\"2022-03-19 11:27:43\",\"tags\":[\"spring\",\"经验分享\"],\"tag\":[\"spring\",\"经验分享\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java.lang.UnsupportedOperationException与CleanMyMac X\",\"meta\":{\"title\":\"java.lang.UnsupportedOperationException与CleanMyMac X\",\"date\":\"2022-10-12 20:17:02\",\"tags\":[\"java\",\"spring\",\"jdk\",\"idea\"],\"tag\":[\"java\",\"spring\",\"jdk\",\"idea\"],\"cover\":\"https://img-blog.csdnimg.cn/f5ff0083fa3a4a3a862fa774262a881b.png\"}},{\"route\":\"/blog/csdn/java与c++中的交换方法\",\"meta\":{\"title\":\"java与c++中的交换方法\",\"date\":\"2023-10-21 15:57:26\",\"tags\":[\"c++\",\"开发语言\"],\"tag\":[\"c++\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/7762f4d6b4fc4cfc82ff4e7156a1c58c.png\"}},{\"route\":\"/blog/csdn/java中的&，＞＞,＜＜位运算\",\"meta\":{\"title\":\"java中的&，＞＞,＜＜位运算\",\"date\":\"2023-08-09 11:00:05\",\"tags\":[\"java\",\"python\",\"算法\"],\"tag\":[\"java\",\"python\",\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java实现qq自动发送无限条消息\",\"meta\":{\"title\":\"java实现qq自动发送无限条消息\",\"date\":\"2023-07-20 21:57:24\",\"tags\":[\"java\",\"开发语言\",\"炸屏\"],\"tag\":[\"java\",\"开发语言\",\"炸屏\"],\"cover\":\"https://img-blog.csdnimg.cn/ae1f772795344c5d8c2cfacc79705150.png\"}},{\"route\":\"/blog/csdn/java实现阿里云文件存储OSS\",\"meta\":{\"title\":\"java实现阿里云文件存储OSS\",\"date\":\"2023-07-17 11:00:19\",\"tags\":[\"阿里云\",\"云计算\"],\"tag\":[\"阿里云\",\"云计算\"],\"cover\":\"https://img-blog.csdnimg.cn/9c4b52ad04d542efb74a5410b2d8bb1a.png\"}},{\"route\":\"/blog/csdn/java小记（1）\",\"meta\":{\"title\":\"java小记（1）\",\"date\":\"2024-02-27 17:42:22\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/a05480fa2a2e45d2a417f73560d598fb.png\"}},{\"route\":\"/blog/csdn/java接口防刷机制\",\"meta\":{\"title\":\"java接口防刷机制\",\"date\":\"2023-07-21 
20:53:40\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java碎碎碎碎碎碎\",\"meta\":{\"title\":\"java碎碎碎碎碎碎\",\"date\":\"2022-09-09 20:55:33\",\"tags\":[\"jvm\",\"java\"],\"tag\":[\"jvm\",\"java\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java笔记\",\"meta\":{\"title\":\"java笔记\",\"date\":\"2022-03-26 09:57:45\",\"tags\":[\"经验分享\",\"其他\"],\"tag\":[\"经验分享\",\"其他\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/java部分排序算法\",\"meta\":{\"title\":\"java部分排序算法\",\"date\":\"2022-09-23 10:36:16\",\"tags\":[\"排序算法\",\"算法\"],\"tag\":[\"排序算法\",\"算法\"],\"cover\":\"https://img-blog.csdnimg.cn/47944437d6ed4e9da33faedabafd4420.png\"}},{\"route\":\"/blog/csdn/js中+new Date()\",\"meta\":{\"title\":\"js中+new Date()\",\"date\":\"2023-05-31 16:03:16\",\"tags\":[\"javascript\",\"开发语言\",\"ecmascript\"],\"tag\":[\"javascript\",\"开发语言\",\"ecmascript\"],\"cover\":\"https://img-blog.csdnimg.cn/cb5c88f3523c4f2b82a8e1d34bb49b0f.png\"}},{\"route\":\"/blog/csdn/js中children和childNodes的区别\",\"meta\":{\"title\":\"js中children和childNodes的区别\",\"date\":\"2023-05-17 09:25:23\",\"tags\":[\"javascript\",\"html\",\"前端\"],\"tag\":[\"javascript\",\"html\",\"前端\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/linux下搭建redis+设置密码\",\"meta\":{\"title\":\"linux下搭建redis+设置密码\",\"date\":\"2022-06-16 19:32:26\",\"tags\":[\"服务器\",\"linux\"],\"tag\":[\"服务器\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/85bcb1f3eaa14c9789ed9f54fb2fb433.png\"}},{\"route\":\"/blog/csdn/linux安装tomcat（docker）\",\"meta\":{\"title\":\"linux安装tomcat（docker）\",\"date\":\"2023-04-01 16:17:27\",\"tags\":[\"docker\",\"tomcat\",\"linux\"],\"tag\":[\"docker\",\"tomcat\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/4dec3d4273b6f7e828e7ebd8bc786711.png\"}},{\"route\":\"/blog/csdn/linux开启端口\",\"meta\":{\"title\":\"linux开启端口\",\"date\":\"2023-08-29 
22:23:15\",\"tags\":[\"linux\",\"windows\",\"服务器\"],\"tag\":[\"linux\",\"windows\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/84bc2c435f884e91926a138daa438e66.jpeg\"}},{\"route\":\"/blog/csdn/maven install Could not resolve dependencies for project解决办法\",\"meta\":{\"title\":\"maven install Could not resolve dependencies for project解决办法\",\"date\":\"2023-04-26 22:11:55\",\"tags\":[\"java\",\"mysql\"],\"tag\":[\"java\",\"mysql\"],\"cover\":\"https://img-blog.csdnimg.cn/eda5ce1825e6432a86be0c091ebe572e.png\"}},{\"route\":\"/blog/csdn/mybatis-plus中的@Select注解里面写sql语句的in\",\"meta\":{\"title\":\"mybatis-plus中的@Select注解里面写sql语句的in\",\"date\":\"2023-08-01 20:46:10\",\"tags\":[\"mybatis\",\"sql\",\"windows\"],\"tag\":[\"mybatis\",\"sql\",\"windows\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/mybatis-plus中的逻辑删除\",\"meta\":{\"title\":\"mybatis-plus中的逻辑删除\",\"date\":\"2023-07-16 18:03:43\",\"tags\":[\"java\",\"数据库\",\"开发语言\"],\"tag\":[\"java\",\"数据库\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/03c04f2418d4429bb2b8b9318b14d8e1.png\"}},{\"route\":\"/blog/csdn/mybatis固定代码\",\"meta\":{\"title\":\"mybatis固定代码\",\"date\":\"2022-03-24 09:13:55\",\"tags\":[\"spring\",\"数据库开发\"],\"tag\":[\"spring\",\"数据库开发\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/mysql一些小知识点\",\"meta\":{\"title\":\"mysql一些小知识点\",\"date\":\"2022-12-27 17:10:40\",\"tags\":[\"mysql\",\"数据库\",\"java\"],\"tag\":[\"mysql\",\"数据库\",\"java\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/mysql课堂笔记 mac\",\"meta\":{\"title\":\"mysql课堂笔记 mac\",\"date\":\"2023-09-09 10:36:13\",\"tags\":[\"mysql\",\"笔记\",\"macos\"],\"tag\":[\"mysql\",\"笔记\",\"macos\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/055038761a50e79b656cb58dbd785896.png\"}},{\"route\":\"/blog/csdn/nacos怎么修改密码（保姆教程）\",\"meta\":{\"title\":\"nacos怎么修改密码（保姆教程）\",\"date\":\"2023-04-11 
16:59:20\",\"tags\":[\"java\",\"spring\",\"开发语言\"],\"tag\":[\"java\",\"spring\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/6f94e9ec4b96431ea3b1f218abe07d72.png\"}},{\"route\":\"/blog/csdn/python的安装(推荐)\",\"meta\":{\"title\":\"python的安装(推荐)\",\"date\":\"2023-08-28 16:54:43\",\"tags\":[\"python\",\"pytorch\"],\"tag\":[\"python\",\"pytorch\"],\"cover\":\"https://img-blog.csdnimg.cn/7cfc5a6e975245d18ea6c8764c237230.jpeg\"}},{\"route\":\"/blog/csdn/quartz中jdbc.initialize-schema\",\"meta\":{\"title\":\"quartz中jdbc.initialize-schema\",\"date\":\"2023-10-17 21:48:30\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/3f64047ea5c747dda8b2686aebe1f6c5.jpeg\"}},{\"route\":\"/blog/csdn/redis.conf的一些配置+密码的设置（mac）+个人总结\",\"meta\":{\"title\":\"redis.conf的一些配置+密码的设置（mac）+个人总结\",\"date\":\"2022-06-18 12:15:29\",\"tags\":[\"redis\",\"数据库\",\"java\"],\"tag\":[\"redis\",\"数据库\",\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/be9d09c724a8428d95d95096d72c7461.png\"}},{\"route\":\"/blog/csdn/redis整合通过QQ邮箱发送验证码\",\"meta\":{\"title\":\"redis整合通过QQ邮箱发送验证码\",\"date\":\"2023-06-06 09:29:51\",\"tags\":[\"redis\",\"java\",\"数据库\"],\"tag\":[\"redis\",\"java\",\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/c158ef4e6c0b4bd28e216a360780c7fd.png\"}},{\"route\":\"/blog/csdn/redis的redis.config文件配置与内容+10.30日之前的总结\",\"meta\":{\"title\":\"redis的redis.config文件配置与内容+10.30日之前的总结\",\"date\":\"2022-10-30 10:16:56\",\"tags\":[\"经验分享\"],\"tag\":[\"经验分享\"],\"description\":\"requirepass 956766\\nmaxclients 10000\\n Note that in order to read the configuration file, Redis must b\",\"cover\":\"\"}},{\"route\":\"/blog/csdn/springboot+springdata-jpa+thymeleaf项目实战\",\"meta\":{\"title\":\"springboot+springdata jpa+thymeleaf项目实战\",\"date\":\"2022-07-16 22:12:12\",\"tags\":[\"java\",\"大数据\",\"spring\",\"spring boot\",\"后端\"],\"tag\":[\"java\",\"大数据\",\"spring\",\"spring boot\",\"后端\"],\"description\":\"server.port=8080\\n THYMELEAF (ThymeleafAutoConfiguration)\\n 开启模板缓存（默认值： true 
）\\nspring.thymeleaf.cache=\",\"cover\":\"https://img-blog.csdnimg.cn/7faf23748cb64f6a9146c08fbb98eca1.png\"}},{\"route\":\"/blog/csdn/springboot整合liquibase（补充）\",\"meta\":{\"title\":\"springboot整合liquibase（补充）\",\"date\":\"2022-10-03 09:43:09\",\"tags\":[\"spring boot\",\"java\",\"缓存\",\"经验分享\",\"数据库\"],\"tag\":[\"spring boot\",\"java\",\"缓存\",\"经验分享\",\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/ec6db025cc9345aca2c288d400250d8e.png\"}},{\"route\":\"/blog/csdn/springcloud跨域重复问题Allow-Origin header contains multiple values... but only one is allowed\",\"meta\":{\"title\":\"springcloud跨域重复问题Allow-Origin header contains multiple values... but only one is allowed\",\"date\":\"2023-10-12 11:43:38\",\"tags\":[\"spring cloud\",\"spring\",\"后端\"],\"tag\":[\"spring cloud\",\"spring\",\"后端\"],\"cover\":\"https://img-blog.csdnimg.cn/662665fed5e64754b4bdb45b570162e2.png\"}},{\"route\":\"/blog/csdn/springmvc固定代码\",\"meta\":{\"title\":\"springmvc固定代码\",\"date\":\"2022-03-24 09:40:53\",\"tags\":[\"spring\"],\"tag\":[\"spring\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/spring小记\",\"meta\":{\"title\":\"spring小记\",\"date\":\"2023-08-28 17:10:50\",\"tags\":[\"spring\",\"java\",\"后端\"],\"tag\":[\"spring\",\"java\",\"后端\"],\"cover\":\"https://img-blog.csdnimg.cn/514cc7f742d541b78852b2e02ed5aa62.jpeg\"}},{\"route\":\"/blog/csdn/swift（3）\",\"meta\":{\"title\":\"swift（3）\",\"date\":\"2023-01-21 17:48:04\",\"tags\":[\"swift\",\"开发语言\",\"ios\"],\"tag\":[\"swift\",\"开发语言\",\"ios\"],\"cover\":\"https://img-blog.csdnimg.cn/87c36b17f586447095c260cae427f54b.png\"}},{\"route\":\"/blog/csdn/yml基本语法与支持的数据格式\",\"meta\":{\"title\":\"yml基本语法与支持的数据格式\",\"date\":\"2023-08-28 16:47:47\",\"tags\":[\"yml\",\"springboot\",\"java\"],\"tag\":[\"yml\",\"springboot\",\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/b60688b2c88244e6af04dc0316a61063.jpeg\"}},{\"route\":\"/blog/csdn/个人简介\",\"meta\":{\"title\":\"个人简介\",\"date\":\"2023-04-07 
20:16:49\",\"tags\":[\"总结\"],\"tag\":[\"总结\"],\"cover\":\"https://img-blog.csdnimg.cn/e229848df40040b59598b2d5f04612de.png\"}},{\"route\":\"/blog/csdn/人生是一场盛大的遇见\",\"meta\":{\"title\":\"人生是一场盛大的遇见\",\"date\":\"2021-11-27 20:52:27\",\"tags\":[\"安全\",\"其他\"],\"tag\":[\"安全\",\"其他\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/优化------聊聊缓存\",\"meta\":{\"title\":\"优化------聊聊缓存\",\"date\":\"2022-11-03 16:47:14\",\"tags\":[\"git\",\"java\",\"github\",\"redis\",\"intellij-idea\"],\"tag\":[\"git\",\"java\",\"github\",\"redis\",\"intellij-idea\"],\"cover\":\"https://img-blog.csdnimg.cn/d22971946db540b4a2984eeff5294ea7.png\"}},{\"route\":\"/blog/csdn/使用Arrays.asList与不使用的区别\",\"meta\":{\"title\":\"使用Arrays.asList与不使用的区别\",\"date\":\"2023-11-21 17:43:07\",\"tags\":[\"leetcode\",\"算法\",\"职场和发展\"],\"tag\":[\"leetcode\",\"算法\",\"职场和发展\"],\"cover\":\"https://img-blog.csdnimg.cn/e619cf41112b4324a1932901757276e3.png\"}},{\"route\":\"/blog/csdn/修改mysql密码与mac中mysql的启动与终止\",\"meta\":{\"title\":\"修改mysql密码与mac中mysql的启动与终止\",\"date\":\"2023-05-30 15:22:46\",\"tags\":[\"mysql\",\"数据库\",\"java\"],\"tag\":[\"mysql\",\"数据库\",\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/60d992ffbf744d99aef2b9e278327f8d.png\"}},{\"route\":\"/blog/csdn/关于Xcode中SwiftUi代码旁边的模拟机不见了\",\"meta\":{\"title\":\"关于Xcode中SwiftUi代码旁边的模拟机不见了\",\"date\":\"2023-02-24 21:08:37\",\"tags\":[\"swiftui\",\"ios\",\"swift\"],\"tag\":[\"swiftui\",\"ios\",\"swift\"],\"cover\":\"https://img-blog.csdnimg.cn/57d3d5b12b234d2aa5b19cecfdaef530.png\"}},{\"route\":\"/blog/csdn/关于gateway中lb失效\",\"meta\":{\"title\":\"关于gateway中lb失效\",\"date\":\"2023-07-16 15:57:58\",\"tags\":[\"gateway\"],\"tag\":[\"gateway\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/关于mac上的所有东西都变小了\",\"meta\":{\"title\":\"关于mac上的所有东西都变小了\",\"date\":\"2022-12-12 
02:45:00\",\"tags\":[\"mac\",\"macos\"],\"tag\":[\"mac\",\"macos\"],\"cover\":\"https://img-blog.csdnimg.cn/bb97593cb82146ecaa3c212ca594e0a2.png\"}},{\"route\":\"/blog/csdn/关于京造k6蓝牙在连接一次windows之后就没有再连回来mac\",\"meta\":{\"title\":\"关于京造k6蓝牙在连接一次windows之后就没有再连回来mac\",\"date\":\"2023-05-29 22:33:40\",\"tags\":[\"macos\"],\"tag\":[\"macos\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/关于多个项目使用同一个nacos的解决方法\",\"meta\":{\"title\":\"关于多个项目使用同一个nacos的解决方法\",\"date\":\"2023-05-23 17:26:24\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/ba76f00b018043d8850212a2eb8cdaea.png\"}},{\"route\":\"/blog/csdn/关于我写的循环遍历\",\"meta\":{\"title\":\"关于我写的循环遍历\",\"date\":\"2022-08-01 10:42:38\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/9325e88b63fa42d0b2f9e8145a15fe20.png\"}},{\"route\":\"/blog/csdn/写完项目后\",\"meta\":{\"title\":\"写完项目后\",\"date\":\"2022-03-05 22:03:16\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/几天的总结\",\"meta\":{\"title\":\"几天的总结\",\"date\":\"2022-02-14 14:02:24\",\"tags\":[\"其他\"],\"tag\":[\"其他\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/分布式文件存储系统minio\",\"meta\":{\"title\":\"分布式文件存储系统minio\",\"date\":\"2022-11-23 15:28:17\",\"tags\":[\"数据库\",\"minio\",\"java\"],\"tag\":[\"数据库\",\"minio\",\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/3f017ab7096f4d8dbbeb7bc5e1d4766f.png)![](https://img-blog.csdnimg.cn/3f017ab7096f4d8dbbeb7bc5e1d4766f.png)![](https://img-blog.csdnimg.cn/3f017ab7096f4d8dbbeb7bc5e1d4766f.png)![](https://img-blog.csdnimg.cn/3f017ab7096f4d8dbbeb7bc5e1d4766f.png\"}},{\"route\":\"/blog/csdn/分页查询与集合分页查询与html基础知识\",\"meta\":{\"title\":\"分页查询与集合分页查询与html基础知识\",\"date\":\"2022-08-04 16:38:18\",\"tags\":[\"java\",\"servlet\",\"数据库\",\"经验分享\",\"spring boot\"],\"tag\":[\"java\",\"servlet\",\"数据库\",\"经验分享\",\"spring 
boot\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/创建springboot项目时改为国内网站\",\"meta\":{\"title\":\"创建springboot项目时改为国内网站\",\"date\":\"2022-04-09 21:11:10\",\"tags\":[\"spring\"],\"tag\":[\"spring\"],\"cover\":\"https://img-blog.csdnimg.cn/59f3143f3eeb4d98a56ecde86aeaf448.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/剑指 Offer 10- I. 斐波那契数列\",\"meta\":{\"title\":\"剑指 Offer 10- I. 斐波那契数列\",\"date\":\"2023-08-07 09:48:44\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/剑指 Offer 13. 机器人的运动范围\",\"meta\":{\"title\":\"剑指 Offer 13. 机器人的运动范围\",\"date\":\"2023-08-07 15:47:51\",\"tags\":[\"机器人\"],\"tag\":[\"机器人\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/剑指 Offer 20. 表示数值的字符串\",\"meta\":{\"title\":\"剑指 Offer 20. 表示数值的字符串\",\"date\":\"2023-08-08 10:59:09\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/剑指 Offer 22. 链表中倒数第k个节点\",\"meta\":{\"title\":\"剑指 Offer 22. 链表中倒数第k个节点\",\"date\":\"2023-08-09 09:23:44\",\"tags\":[\"链表\",\"数据结构\"],\"tag\":[\"链表\",\"数据结构\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/剑指 Offer 32 - III. 从上到下打印二叉树 III\",\"meta\":{\"title\":\"剑指 Offer 32 - III. 从上到下打印二叉树 III\",\"date\":\"2023-08-24 22:00:14\",\"tags\":[\"算法\",\"数据结构\"],\"tag\":[\"算法\",\"数据结构\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/剑指 Offer 58 - I. 翻转单词顺序\",\"meta\":{\"title\":\"剑指 Offer 58 - I. 
翻转单词顺序\",\"date\":\"2023-09-06 21:32:29\",\"tags\":[\"java\",\"前端\",\"javascript\"],\"tag\":[\"java\",\"前端\",\"javascript\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/十六进制转八进制\",\"meta\":{\"title\":\"十六进制转八进制\",\"date\":\"2023-04-06 16:05:13\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/反转字符串中的单词 III\",\"meta\":{\"title\":\"反转字符串中的单词 III\",\"date\":\"2023-05-19 09:27:42\",\"tags\":[\"leetcode\",\"java\",\"算法\"],\"tag\":[\"leetcode\",\"java\",\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/后端接收json格式的字符串出现json格式错误\",\"meta\":{\"title\":\"后端接收json格式的字符串出现json格式错误\",\"date\":\"2023-08-01 17:46:03\",\"tags\":[\"json\"],\"tag\":[\"json\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/回文日期java(蓝桥杯）+个人总结\",\"meta\":{\"title\":\"回文日期java(蓝桥杯）+个人总结\",\"date\":\"2022-06-25 00:09:29\",\"tags\":[\"蓝桥杯\",\"职场和发展\"],\"tag\":[\"蓝桥杯\",\"职场和发展\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/在排序数组中查找元素的第一个和最后一个位置\",\"meta\":{\"title\":\"在排序数组中查找元素的第一个和最后一个位置\",\"date\":\"2022-04-03 23:40:50\",\"tags\":[\"经验分享\",\"java\",\"算法\",\"其他\"],\"tag\":[\"经验分享\",\"java\",\"算法\",\"其他\"],\"cover\":\"https://img-blog.csdnimg.cn/38bc583d09014c989f7a0357984d5a9d.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/在服务器上搭建Jenkins\",\"meta\":{\"title\":\"在服务器上搭建Jenkins\",\"date\":\"2023-08-26 17:17:17\",\"tags\":[\"服务器\",\"jenkins\",\"运维\"],\"tag\":[\"服务器\",\"jenkins\",\"运维\"],\"cover\":\"https://img-blog.csdnimg.cn/3f915ab3ebc1431b83a94866d503ff08.jpeg\"}},{\"route\":\"/blog/csdn/在服务器上搭建gitlab\",\"meta\":{\"title\":\"在服务器上搭建gitlab\",\"date\":\"2023-08-06 16:02:57\",\"tags\":[\"服务器\",\"gitlab\",\"linux\"],\"tag\":[\"服务器\",\"gitlab\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/e254e0647e126c8ffdcb50d9c3de008f.png\"}},{\"route\":\"/blog/csdn/在服务器上搭建nacos集群---记录我的心酸历程\",\"meta\":{\"title\":\"在服务器上搭建nacos集群---记录我的心酸历程\",\"date\":\"2023-03-22 
11:32:30\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/da20e365c46545a39cc7ac1dbff0b460.png\"}},{\"route\":\"/blog/csdn/如何删除docker镜像与容器\",\"meta\":{\"title\":\"如何删除docker镜像与容器\",\"date\":\"2023-04-21 15:59:07\",\"tags\":[\"docker\",\"容器\",\"运维\"],\"tag\":[\"docker\",\"容器\",\"运维\"],\"cover\":\"https://img-blog.csdnimg.cn/fe2290cd7eac48ecbb3e2324e048e266.png\"}},{\"route\":\"/blog/csdn/如何更简洁查看接口返回的树状图信息\",\"meta\":{\"title\":\"如何更简洁查看接口返回的树状图信息\",\"date\":\"2023-07-15 21:44:13\",\"tags\":[\"javascript\",\"前端\"],\"tag\":[\"javascript\",\"前端\"],\"cover\":\"https://img-blog.csdnimg.cn/3429e0ad85984259974a8272d9f21c85.png\"}},{\"route\":\"/blog/csdn/如果你和我加在一起能让我变得更好，那我们就在一起，否则我就丢下你，自己往前走如果前途和爱情二选一，毫不犹豫选前途~\",\"meta\":{\"title\":\"如果你和我加在一起能让我变得更好，那我们就在一起，否则我就丢下你，自己往前走如果前途和爱情二选一，毫不犹豫选前途~\",\"date\":\"2023-08-26 10:22:23\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/子类继承父类+Object类中的主要结构+Object类中toString（）的使用+关键字final+String的常用方法+String StringBuffe+java中的日期时间\",\"meta\":{\"title\":\"子类继承父类+Object类中的主要结构+Object类中toString（）的使用+关键字final+String的常用方法+String StringBuffe+java中的日期时间\",\"date\":\"2022-08-25 22:59:47\",\"tags\":[\"java\"],\"tag\":[\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/337f1eac976f4e1dbf3f3d6a0b74df64.png\"}},{\"route\":\"/blog/csdn/字符串中的第一个唯一字符\",\"meta\":{\"title\":\"字符串中的第一个唯一字符\",\"date\":\"2023-05-20 17:25:22\",\"tags\":[\"leetcode\",\"java\",\"算法\"],\"tag\":[\"leetcode\",\"java\",\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/将十进制数 （24!512） 表示成浮点规格化数，要求阶码4位(含符号），移码表示；尾数6位（含符号），用补码表示\",\"meta\":{\"title\":\"将十进制数 （24/512） 表示成浮点规格化数，要求阶码4位(含符号），移码表示；尾数6位（含符号），用补码表示\",\"date\":\"2023-06-22 15:52:03\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/局部异常处理\",\"meta\":{\"title\":\"局部异常处理\",\"date\":\"2023-07-22 
15:15:23\",\"tags\":[\"java\",\"开发语言\",\"异常处理\",\"异常\",\"局部异常\",\"处理\"],\"tag\":[\"java\",\"开发语言\",\"异常处理\",\"异常\",\"局部异常\",\"处理\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/常用dos命令+关键字保留字+命名规范+基本数据类型+引用数据类型+基本数据类型转换+算术运算符需要注意的问题+以前笔记+部分运算符+方法重载+变量赋值+构造器的作用+package关键字\",\"meta\":{\"title\":\"常用dos命令+关键字保留字+命名规范+基本数据类型+引用数据类型+基本数据类型转换+算术运算符需要注意的问题+以前笔记+部分运算符+方法重载+变量赋值+构造器的作用+package关键字\",\"date\":\"2022-08-18 18:18:44\",\"tags\":[\"java\"],\"tag\":[\"java\"],\"cover\":\"https://img-blog.csdnimg.cn/c393a69947744360baf4533d20f4e239.png\"}},{\"route\":\"/blog/csdn/异步实现邮件发送\",\"meta\":{\"title\":\"异步实现邮件发送\",\"date\":\"2023-08-01 18:25:00\",\"tags\":[\"网易\",\"邮件发送\",\"java\",\"java实现邮件发送\",\"异步\"],\"tag\":[\"网易\",\"邮件发送\",\"java\",\"java实现邮件发送\",\"异步\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/微服务加载多个nacos配置文件\",\"meta\":{\"title\":\"微服务加载多个nacos配置文件\",\"date\":\"2023-05-23 20:29:25\",\"tags\":[\"微服务\",\"java\",\"架构\"],\"tag\":[\"微服务\",\"java\",\"架构\"],\"cover\":\"https://img-blog.csdnimg.cn/adc88db893d246ef9a1e05a5a7b29acb.png\"}},{\"route\":\"/blog/csdn/我心狂野，我梦无岸\",\"meta\":{\"title\":\"我心狂野，我梦无岸\",\"date\":\"2022-02-24 23:23:48\",\"tags\":[\"其他\"],\"tag\":[\"其他\"],\"cover\":\"https://img-blog.csdnimg.cn/816e4b57413c422c86492a368d354845.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/接口性能优化\",\"meta\":{\"title\":\"接口性能优化\",\"date\":\"2023-09-18 20:11:49\",\"tags\":[\"缓存\"],\"tag\":[\"缓存\"],\"cover\":\"https://img-blog.csdnimg.cn/ac579ae539dc43b1bd80278aeb1c0b1d.jpeg\"}},{\"route\":\"/blog/csdn/推荐跨域配置\",\"meta\":{\"title\":\"推荐跨域配置\",\"date\":\"2023-07-16 10:53:32\",\"tags\":[\"配置\",\"跨域\"],\"tag\":[\"配置\",\"跨域\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/提交到远程仓库\",\"meta\":{\"title\":\"提交到远程仓库\",\"date\":\"2022-04-23 
21:37:49\",\"tags\":[\"spring\",\"tomcat\",\"maven\",\"java\",\"github\"],\"tag\":[\"spring\",\"tomcat\",\"maven\",\"java\",\"github\"],\"cover\":\"https://img-blog.csdnimg.cn/567445f6bc444b28852a51617fcc2935.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16)![在这里插入图片描述](https://img-blog.csdnimg.cn/f608606e0c234caa8cd6e4668bb070f1.jpg?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/提笔小叙@\",\"meta\":{\"title\":\"提笔小叙@\",\"date\":\"2022-04-28 20:55:16\",\"tags\":[\"总结（非技术文）\"],\"tag\":[\"总结（非技术文）\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/数据库 范式\",\"meta\":{\"title\":\"数据库 范式\",\"date\":\"2023-12-31 11:01:28\",\"tags\":[\"数据库\",\"前端\",\"服务器\"],\"tag\":[\"数据库\",\"前端\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/761b00522b0844d4b1a5dd580e06c099.jpeg\"}},{\"route\":\"/blog/csdn/数据库原理与分析实验三\",\"meta\":{\"title\":\"数据库原理与分析实验三\",\"date\":\"2023-09-25 22:33:17\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/1b9ca34093554438a021e6640299ee22.jpeg\"}},{\"route\":\"/blog/csdn/数据库基本知识2\",\"meta\":{\"title\":\"数据库基本知识2\",\"date\":\"2024-01-05 15:05:10\",\"tags\":[\"数据库\",\"oracle\",\"服务器\"],\"tag\":[\"数据库\",\"oracle\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/b5281eb20983456ab3ce78630f6efc39.png\"}},{\"route\":\"/blog/csdn/数据库基础知识1\",\"meta\":{\"title\":\"数据库基础知识1\",\"date\":\"2024-01-04 11:53:11\",\"tags\":[\"数据库\",\"oracle\"],\"tag\":[\"数据库\",\"oracle\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/31106149a9f0c27aac15561905fa1dd6.png\"}},{\"route\":\"/blog/csdn/数据库实验4作业\",\"meta\":{\"title\":\"数据库实验4作业\",\"date\":\"2023-10-20 10:56:16\",\"tags\":[\"数据库\",\"oracle\",\"sql\"],\"tag\":[\"数据库\",\"oracle\",\"sql\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/数据库实验7\",\"meta\":{\"title\":\"数据库实验7\",\"date\":\"2023-11-24 
11:37:22\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/4dc52241b56a4ede918fd44bcde51fd9.gif\"}},{\"route\":\"/blog/csdn/数据库实验9\",\"meta\":{\"title\":\"数据库实验9\",\"date\":\"2023-12-15 10:53:04\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/549a727910804460a0051c84ef7a0d41.gif\"}},{\"route\":\"/blog/csdn/数据库实验八\",\"meta\":{\"title\":\"数据库实验八\",\"date\":\"2023-12-08 10:58:44\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/036773653b4b4e73b846a6f9af275c6f.gif\"}},{\"route\":\"/blog/csdn/数据库实验报告（五）\",\"meta\":{\"title\":\"数据库实验报告（五）\",\"date\":\"2023-11-03 10:38:52\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/8713875133134adfb21d6aa12d7aa862.jpeg\"}},{\"route\":\"/blog/csdn/数据库实验报告（六）\",\"meta\":{\"title\":\"数据库实验报告（六）\",\"date\":\"2023-11-17 13:06:05\",\"tags\":[\"sql\",\"数据库\"],\"tag\":[\"sql\",\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/9b1e2ba0e335438ebff213c74a26c781.gif\"}},{\"route\":\"/blog/csdn/数据库实验报告（十）\",\"meta\":{\"title\":\"数据库实验报告（十）\",\"date\":\"2023-12-29 11:09:13\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/8c1ad0adaccf40978b2fea79046ca7ee.png\"}},{\"route\":\"/blog/csdn/数据库密码加密处理\",\"meta\":{\"title\":\"数据库密码加密处理\",\"date\":\"2023-07-22 15:32:30\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/数据库往年试卷\",\"meta\":{\"title\":\"数据库往年试卷\",\"date\":\"2024-01-16 20:15:11\",\"tags\":[\"数据库\"],\"tag\":[\"数据库\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/4a7558fef7b498f6483fec912abf9042.png\"}},{\"route\":\"/blog/csdn/数据库范式例题\",\"meta\":{\"title\":\"数据库范式例题\",\"date\":\"2024-01-16 
20:17:10\",\"tags\":[\"数据库\",\"服务器\",\"linux\"],\"tag\":[\"数据库\",\"服务器\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/daa2cc834ba74697a14984a1e2326822.png\"}},{\"route\":\"/blog/csdn/数据恢复与并发控制例题\",\"meta\":{\"title\":\"数据恢复与并发控制例题\",\"date\":\"2024-01-07 08:58:17\",\"tags\":[\"数据库\",\"冲突可串行化\",\"冲突可串行化调度\",\"数据恢复\"],\"tag\":[\"数据库\",\"冲突可串行化\",\"冲突可串行化调度\",\"数据恢复\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/5ad3c8d8bdbeeef8f18382b3139e4890.png\"}},{\"route\":\"/blog/csdn/数据结构+java基础（1）+进制之间的转换\",\"meta\":{\"title\":\"数据结构+java基础（1）+进制之间的转换\",\"date\":\"2022-08-13 22:10:08\",\"tags\":[\"java\",\"经验分享\",\"数据结构\",\"推荐算法\"],\"tag\":[\"java\",\"经验分享\",\"数据结构\",\"推荐算法\"],\"cover\":\"https://img-blog.csdnimg.cn/883b04bcd4ce416c9a988166d6aa5fd2.png\"}},{\"route\":\"/blog/csdn/整数拆分乘积最大\",\"meta\":{\"title\":\"整数拆分乘积最大\",\"date\":\"2023-08-28 17:05:27\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/3b727256571f4e689a2734d1b7fc350b.jpeg\"}},{\"route\":\"/blog/csdn/本周总结\",\"meta\":{\"title\":\"本周总结\",\"date\":\"2021-12-05 14:46:08\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/每周总结：情绪管理，无效社交\",\"meta\":{\"title\":\"每周总结：情绪管理，无效社交\",\"date\":\"2021-12-11 23:17:29\",\"tags\":null,\"tag\":[],\"cover\":\"\"}},{\"route\":\"/blog/csdn/泛型+IO流基础知识+java-＞符号 lambda表达式\",\"meta\":{\"title\":\"泛型+IO流基础知识+java-＞符号 lambda表达式\",\"date\":\"2022-09-02 00:00:00\",\"tags\":[\"java\",\"经验分享\"],\"tag\":[\"java\",\"经验分享\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/浅浅的计算机网络知识\",\"meta\":{\"title\":\"浅浅的计算机网络知识\",\"date\":\"2022-12-01 17:07:48\",\"tags\":[\"网络\",\"tcp/ip\",\"udp\"],\"tag\":[\"网络\",\"tcp/ip\",\"udp\"],\"cover\":\"https://img-blog.csdnimg.cn/9e470f98ed904787a9ad8369779c4334.png\"}},{\"route\":\"/blog/csdn/浅聊Docker\",\"meta\":{\"title\":\"浅聊Docker\",\"date\":\"2023-03-30 
17:29:51\",\"tags\":[\"docker\",\"运维\",\"linux\"],\"tag\":[\"docker\",\"运维\",\"linux\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/138334cff93260b2605998cd3dc7d86c.png\"}},{\"route\":\"/blog/csdn/浅聊一下Lambda表达式\",\"meta\":{\"title\":\"浅聊一下Lambda表达式\",\"date\":\"2022-10-06 19:57:28\",\"tags\":[\"java\",\"jvm\",\"开发语言\"],\"tag\":[\"java\",\"jvm\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/浅聊一下Nginx\",\"meta\":{\"title\":\"浅聊一下Nginx\",\"date\":\"2022-11-11 15:46:21\",\"tags\":[\"nginx\",\"运维\",\"服务器\"],\"tag\":[\"nginx\",\"运维\",\"服务器\"],\"cover\":\"https://img-blog.csdnimg.cn/f4d6942f6fed40f19eba02d956e69bcc.png\"}},{\"route\":\"/blog/csdn/浅聊一下Stream流\",\"meta\":{\"title\":\"浅聊一下Stream流\",\"date\":\"2022-10-13 21:53:26\",\"tags\":[\"java\",\"jvm\",\"Stream\",\"jdk\"],\"tag\":[\"java\",\"jvm\",\"Stream\",\"jdk\"],\"cover\":\"https://img-blog.csdnimg.cn/dae6e485671746a39a8413302480f6a4.png\"}},{\"route\":\"/blog/csdn/浅聊一下内网穿透\",\"meta\":{\"title\":\"浅聊一下内网穿透\",\"date\":\"2023-07-26 09:40:15\",\"tags\":[\"内网穿透\"],\"tag\":[\"内网穿透\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/父工程在clean和install时报错，子工程不报错\",\"meta\":{\"title\":\"父工程在clean和install时报错，子工程不报错\",\"date\":\"2023-04-22 11:21:39\",\"tags\":[\"git\"],\"tag\":[\"git\"],\"cover\":\"https://img-blog.csdnimg.cn/01b1bdeca3ed49b9a15cb08d6f14970d.png\"}},{\"route\":\"/blog/csdn/用户名验证（正则表达式）\",\"meta\":{\"title\":\"用户名验证（正则表达式）\",\"date\":\"2023-06-03 22:40:52\",\"tags\":[\"正则表达式\",\"数学建模\"],\"tag\":[\"正则表达式\",\"数学建模\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/祝大家2022幸福安康\",\"meta\":{\"title\":\"祝大家2022幸福安康\",\"date\":\"2022-01-01 10:06:52\",\"tags\":null,\"tag\":[],\"cover\":\"https://img-blog.csdnimg.cn/201190a1cfe34db78f3369ba997a64cf.png?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA6Zu-5ZaU,size_20,color_FFFFFF,t_70,g_se,x_16\"}},{\"route\":\"/blog/csdn/窗口加载事件\",\"meta\":{\"title\":\"窗口加载事件\",\"date\":\"2023-06-03 
17:21:08\",\"tags\":[\"javascript\",\"html\",\"css\"],\"tag\":[\"javascript\",\"html\",\"css\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/终止端口进程命令\",\"meta\":{\"title\":\"终止端口进程命令\",\"date\":\"2023-07-26 09:13:10\",\"tags\":[\"终止端口\",\"windows\",\"mac\",\"PID\",\"端口\",\"终止\"],\"tag\":[\"终止端口\",\"windows\",\"mac\",\"PID\",\"端口\",\"终止\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/自己的工具类和分页查询\",\"meta\":{\"title\":\"自己的工具类和分页查询\",\"date\":\"2022-11-18 15:39:35\",\"tags\":[\"java\",\"开发语言\",\"macos\",\"数据结构\"],\"tag\":[\"java\",\"开发语言\",\"macos\",\"数据结构\"],\"cover\":\"https://img-blog.csdnimg.cn/fc59842dfa464d2c804c0bac38b4f3d3.png\"}},{\"route\":\"/blog/csdn/蓝桥杯-X图形\",\"meta\":{\"title\":\"蓝桥杯-X图形\",\"date\":\"2024-02-12 20:13:37\",\"tags\":[\"算法\"],\"tag\":[\"算法\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/4c8bed3354844d3d9d370fd5f6a4241a.gif\"}},{\"route\":\"/blog/csdn/蓝桥杯-乘积最大\",\"meta\":{\"title\":\"蓝桥杯-乘积最大\",\"date\":\"2024-02-25 17:29:12\",\"tags\":[\"蓝桥杯\",\"算法\",\"职场和发展\"],\"tag\":[\"蓝桥杯\",\"算法\",\"职场和发展\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/2e9342dcaecd4ff08505e9d19aa49862.png\"}},{\"route\":\"/blog/csdn/蓝桥杯-数字三角形\",\"meta\":{\"title\":\"蓝桥杯-数字三角形\",\"date\":\"2024-02-25 17:47:15\",\"tags\":[\"蓝桥杯\",\"职场和发展\"],\"tag\":[\"蓝桥杯\",\"职场和发展\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/22c2d1b6d4b14ceaac8aa8f8e085137c.png\"}},{\"route\":\"/blog/csdn/蓝桥杯-答疑\",\"meta\":{\"title\":\"蓝桥杯-答疑\",\"date\":\"2024-02-25 16:32:20\",\"tags\":[\"蓝桥杯\",\"职场和发展\"],\"tag\":[\"蓝桥杯\",\"职场和发展\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/e7295491368e4176bf3e32cf109f26db.png\"}},{\"route\":\"/blog/csdn/计网小记-1\",\"meta\":{\"title\":\"计网小记-1\",\"date\":\"2024-01-18 15:36:38\",\"tags\":[\"网络\"],\"tag\":[\"网络\"],\"cover\":\"https://img-blog.csdnimg.cn/img_convert/1c11edb716666a02a75ee4ec2e9f9ae8.png\"}},{\"route\":\"/blog/csdn/试题 基础练习 01字串\",\"meta\":{\"title\":\"试题 基础练习 01字串\",\"date\":\"2023-04-07 
16:52:33\",\"tags\":[\"java\",\"算法\",\"开发语言\"],\"tag\":[\"java\",\"算法\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/试题 基础练习 Fibonacci数列\",\"meta\":{\"title\":\"试题 基础练习 Fibonacci数列\",\"date\":\"2023-04-07 20:31:29\",\"tags\":[\"算法\",\"蓝桥杯\",\"职场和发展\"],\"tag\":[\"算法\",\"蓝桥杯\",\"职场和发展\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/试题 基础练习 圆的面积\",\"meta\":{\"title\":\"试题 基础练习 圆的面积\",\"date\":\"2023-04-07 20:44:10\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/328633307dfd4b6aa52b53202f239e49.png\"}},{\"route\":\"/blog/csdn/试题 基础练习 序列求和\",\"meta\":{\"title\":\"试题 基础练习 序列求和\",\"date\":\"2023-04-07 21:06:29\",\"tags\":[\"java\",\"算法\"],\"tag\":[\"java\",\"算法\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/请求方法+super+枚举+包装类+正则表达式+学习资料\",\"meta\":{\"title\":\"请求方法+super+枚举+包装类+正则表达式+学习资料\",\"date\":\"2022-08-14 10:58:40\",\"tags\":[\"java\",\"开发语言\",\"spring\",\"tomcat\",\"maven\"],\"tag\":[\"java\",\"开发语言\",\"spring\",\"tomcat\",\"maven\"],\"cover\":\"https://img-blog.csdnimg.cn/b761f19d55564e9194f17dbf900397be.png\"}},{\"route\":\"/blog/csdn/近几日总结（5月8日）\",\"meta\":{\"title\":\"近几日总结（5月8日）\",\"date\":\"2022-05-08 16:54:55\",\"tags\":[\"经验分享\",\"java\",\"spring\",\"maven\",\"总结（非技术文）\"],\"tag\":[\"经验分享\",\"java\",\"spring\",\"maven\",\"总结（非技术文）\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/近日小结（非技术文）\",\"meta\":{\"title\":\"近日小结（非技术文）\",\"date\":\"2022-07-03 13:53:47\",\"tags\":[\"经验分享\"],\"tag\":[\"经验分享\"],\"cover\":\"https://img-blog.csdnimg.cn/3b6c21c9e0464c24afc535304685d2b0.png\"}},{\"route\":\"/blog/csdn/近日总结（12.21\",\"meta\":{\"title\":\"近日总结（12.21\",\"date\":\"2023-12-21 11:35:43\",\"tags\":[\"总结\"],\"tag\":[\"总结\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/通过location实现几秒后页面跳转\",\"meta\":{\"title\":\"通过location实现几秒后页面跳转\",\"date\":\"2023-06-03 
19:56:25\",\"tags\":[\"数学建模\"],\"tag\":[\"数学建模\"],\"cover\":\"https://img-blog.csdnimg.cn/05d3fc13abe5420da0b6e45fe857f29a.png\"}},{\"route\":\"/blog/csdn/除夕---总结\",\"meta\":{\"title\":\"除夕---总结\",\"date\":\"2024-02-09 16:13:42\",\"tags\":[\"总结\"],\"tag\":[\"总结\"],\"cover\":\"https://img-blog.csdnimg.cn/direct/a5fb02aa2f844595b8d191280c41ec12.png\"}},{\"route\":\"/blog/csdn/集合转数组\",\"meta\":{\"title\":\"集合转数组\",\"date\":\"2023-08-08 09:39:42\",\"tags\":[\"windows\"],\"tag\":[\"windows\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/面经------锁\",\"meta\":{\"title\":\"面经------锁\",\"date\":\"2023-09-11 21:06:13\",\"tags\":[\"学习\"],\"tag\":[\"学习\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/面经学习一\",\"meta\":{\"title\":\"面经学习一\",\"date\":\"2023-09-07 22:07:08\",\"tags\":[\"java\",\"学习\",\"开发语言\"],\"tag\":[\"java\",\"学习\",\"开发语言\"],\"cover\":\"\"}},{\"route\":\"/blog/csdn/面经学习三\",\"meta\":{\"title\":\"面经学习三\",\"date\":\"2023-09-15 19:39:35\",\"tags\":[\"java\",\"开发语言\"],\"tag\":[\"java\",\"开发语言\"],\"cover\":\"https://img-blog.csdnimg.cn/d58a0a3d17e340b4bb339323e77c02d6.jpeg\"}},{\"route\":\"/blog/csdn/验证回文串\",\"meta\":{\"title\":\"验证回文串\",\"date\":\"2023-05-18 17:26:30\",\"tags\":[\"leetcode\",\"算法\",\"职场和发展\"],\"tag\":[\"leetcode\",\"算法\",\"职场和发展\"],\"cover\":\"https://img-blog.csdnimg.cn/61e95b9b627946a7be213a10c53a2d5d.png\"}}],\"footer\":{\"copyright\":\"MIT License | 雾喔\"},\"themeColor\":\"vp-default\",\"author\":\"雾喔\",\"friend\":[{\"nickname\":\"雾喔\",\"des\":\"你的指尖用于改变世界的力量\",\"avatar\":\"https://img.cdn.sugarat.top/mdImg/MTY3NDk5NTE2NzAzMA==674995167030\",\"url\":\"https://sugarat.top\"},{\"nickname\":\"Vitepress\",\"des\":\"Vite & Vue Powered Static Site 
Generator\",\"avatar\":\"https://vitepress.dev/vitepress-logo-large.webp\",\"url\":\"https://vitepress.dev/\"}],\"mermaid\":true},\"sidebar\":[{\"text\":\"\",\"items\":[]}],\"lastUpdatedText\":\"上次更新于\",\"logo\":\"/logo.png\",\"nav\":[{\"text\":\"首页\",\"link\":\"/\"},{\"text\":\"关于作者\",\"link\":\"https://sugarat.top/aboutme.html\"}],\"socialLinks\":[{\"icon\":\"github\",\"link\":\"https://gitee.com/yang-saiya\"}]},\"locales\":{},\"scrollOffset\":90,\"cleanUrls\":false}");</script>
    
  </body>
</html>