<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <meta charset="utf-8">
  <title>算法总结 | Hexo</title>
  <meta name="keywords" content="">
  <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="第二章 线性表线性表：表内数据类型相同，有限序列 本章将以总结的形式展现： 2.1 顺序表与链式表的区别     顺序表 链式表     存取 随机存取 顺序存取   结构 顺序存储（连续） 随机存储（不连续）   空间分配 静态存储（可以动态分配） 动态存储   操作 查找 O(1) ,插入和删除O（n） 查找 O(n) ,插入和删除O（1）   缺点 插入删除不便，长度不可以改变 查找速度慢，">
<meta property="og:type" content="article">
<meta property="og:title" content="数据结构">
<meta property="og:url" content="http://yoursite.com/2021/03/12/%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84/index.html">
<meta property="og:site_name" content="Hexo">
<meta property="og:description" content="第二章 线性表线性表：表内数据类型相同，有限序列 本章将以总结的形式展现： 2.1 顺序表与链式表的区别     顺序表 链式表     存取 随机存取 顺序存取   结构 顺序存储（连续） 随机存储（不连续）   空间分配 静态存储（可以动态分配） 动态存储   操作 查找 O(1) ,插入和删除O（n） 查找 O(n) ,插入和删除O（1）   缺点 插入删除不便，长度不可以改变 查找速度慢，">
<meta property="og:locale" content="en_US">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/171951-630686.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/171844-136893.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/203103-111988.jpeg">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/171839-101678.jpeg">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/200119-510473.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202103/05/113954-515448.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/203146-263519.jpeg">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/153239-688589.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/153248-836462.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/153259-101399.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202103/11/113055-306258.jpeg">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/151817-474283.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/02/103950-168345.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/02/105245-140913.png">
<meta property="og:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/19/153313-134836.png">
<meta property="article:published_time" content="2021-03-11T16:00:00.000Z">
<meta property="article:modified_time" content="2021-03-11T07:19:09.038Z">
<meta property="article:author" content="MOLU">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://gitee.com/moluggg/image/raw/master/img/202102/23/171951-630686.png">


<link rel="icon" href="/img/avatar.jpg">

<link href="/css/style.css?v=1.1.0" rel="stylesheet">

<link href="/css/hl_theme/github.css?v=1.1.0" rel="stylesheet">

<link href="https://cdn.jsdelivr.net/npm/animate.css@4.1.0/animate.min.css" rel="stylesheet">

<script src="https://cdn.jsdelivr.net/npm/jquery@3.5.1/dist/jquery.min.js"></script>
<script src="/js/titleTip.js?v=1.1.0" ></script>

<script src="https://cdn.jsdelivr.net/npm/highlightjs@9.16.2/highlight.pack.min.js"></script>
<script>
    // highlight.js v9 API: run syntax highlighting over all <pre><code> blocks when the page loads.
    hljs.initHighlightingOnLoad();
</script>

<script src="https://cdn.jsdelivr.net/npm/nprogress@0.2.0/nprogress.min.js"></script>



<script src="https://cdn.jsdelivr.net/npm/jquery.cookie@1.4.1/jquery.cookie.min.js" ></script>

<script src="/js/iconfont.js?v=1.1.0" ></script>

<meta name="generator" content="Hexo 5.0.0"></head>
<body>
<!-- Theme option values read by client-side scripts; moved inside <body> — content is not valid between </head> and <body> (browsers reparent it into <body> anyway, so the effective DOM is unchanged) -->
<div style="display: none">
  <input class="theme_disqus_on" value="false">
  <input class="theme_preload_comment" value="">
  <input class="theme_blog_path" value="">
  <input id="theme_shortcut" value="true">
</div>
<aside class="nav">
    <div class="nav-left">
        <a href="/" class="avatar_target">
    <img class="avatar" src="/img/avatar.jpg" alt="MOLU 的头像">
</a>
<div class="author">
    <span>MOLU</span>
</div>

<div class="icon">
    
        
        <a title="rss" href="/atom.xml" target="_blank">
            
                <i class="iconfont icon-rss"></i>
            
        </a>
        
    
        
        <a title="github" href="https://github.com/molu-ggg" target="_blank">
            
                <i class="iconfont icon-github"></i>
            
        </a>
        
    
        
    
        
    
        
        <a title="email" href="mailto:2572876783@qq.com" target="_blank">
            
                <i class="iconfont icon-email"></i>
            
        </a>
        
    
</div>




<ul>
    <li><div class="all active" data-rel="All">All<small>(35)</small></div></li>
    
        
            
            <li><div data-rel="AI论文">AI论文<small>(5)</small></div>
                
            </li>
            
        
    
        
            
            <li><div data-rel="专业知识">专业知识<small>(2)</small></div>
                
            </li>
            
        
    
        
            
            <li><div data-rel="数学">数学<small>(2)</small></div>
                
            </li>
            
        
    
        
            
            <li><div data-rel="机器学习">机器学习<small>(12)</small></div>
                
            </li>
            
        
    
        
            
            <li><div data-rel="经典算法">经典算法<small>(10)</small></div>
                
            </li>
            
        
    
</ul>
<div class="left-bottom">
    <div class="menus">
    
    
    
    
    </div>
    <div><a class="about  hasFriend  site_url"  href="/about">About</a><a style="width: 50%"  class="friends">Friends</a></div>
</div>
<input type="hidden" id="yelog_site_posts_number" value="35">

<div style="display: none">
    <span id="busuanzi_value_site_uv"></span>
    <span id="busuanzi_value_site_pv"></span>
</div>

    </div>
    <div class="nav-right">
        <div class="friends-area">
    <div class="friends-title">
        Links
        <i class="iconfont icon-left"></i>
    </div>
    <div class="friends-content">
        <ul>
            
            <li><a target="_blank" href="http://yelog.org/">叶落阁</a></li>
            
        </ul>
    </div>
</div>
        <div class="title-list">
    <div class="right-top">
        <div id="default-panel">
            <i class="iconfont icon-search" data-title="搜索 快捷键 i"></i>
            <div class="right-title">All</div>
            <i class="iconfont icon-file-tree" data-title="切换到大纲视图 快捷键 w"></i>
        </div>
        <div id="search-panel">
            <i class="iconfont icon-left" data-title="返回"></i>
            <input id="local-search-input" aria-label="搜索">
            <label class="border-line" for="local-search-input"></label>
            <i class="iconfont icon-case-sensitive" data-title="大小写敏感"></i>
            <i class="iconfont icon-tag" data-title="标签"></i>
        </div>
        <div id="outline-panel" style="display: none">
            <div class="right-title">大纲</div>
            <i class="iconfont icon-list" data-title="切换到文章列表"></i>
        </div>
    </div>

    <div class="tags-list">
    <input id="tag-search" aria-label="筛选标签">
    <div class="tag-wrapper">
        <!-- <li> requires a list parent; <ul> added for valid markup (descendant selectors like ".tag-wrapper li" still match) -->
        <ul>
            <li class="article-tag-list-item">
                <i class="iconfont icon-tag"></i><a>50</a>
            </li>
            <li class="article-tag-list-item">
                <i class="iconfont icon-tag"></i><a>AI</a>
            </li>
            <li class="article-tag-list-item">
                <i class="iconfont icon-tag"></i><a>数据结构，最短路径，图</a>
            </li>
            <li class="article-tag-list-item">
                <i class="iconfont icon-tag"></i><a>机器学习</a>
            </li>
            <li class="article-tag-list-item">
                <i class="iconfont icon-tag"></i><a>相似度计算</a>
            </li>
        </ul>
    </div>

</div>

    
    <div id="local-search-result">

    </div>
    
    <nav id="title-list-nav">
        
        <a id="top" class="All 经典算法 "
           href="/2020/07/20/b_leetcode/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="leetcode &amp; 蓝桥">leetcode &amp; 蓝桥</span>
            <span class="post-date" title="2020-07-20 00:00:00">2020/07/20</span>
        </a>
        
        <a class="All AI论文 "
           href="/2020/07/10/d_GAT/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="GAT">GAT</span>
            <span class="post-date" title="2020-07-10 08:50:20">2020/07/10</span>
        </a>
        
        <a  class="All 专业知识 "
           href="/2021/03/12/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%BB%84%E6%88%90%E5%8E%9F%E7%90%86/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="计算机组成原理">计算机组成原理</span>
            <span class="post-date" title="2021-03-12 00:00:00">2021/03/12</span>
        </a>
        
        <a  class="All 专业知识 "
           href="/2021/03/12/%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="数据结构">数据结构</span>
            <span class="post-date" title="2021-03-12 00:00:00">2021/03/12</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/12/05/b_%E5%8A%A8%E6%80%81%E8%A7%84%E5%88%92%E4%B8%8E%E8%AE%B0%E5%BF%86%E5%8C%96%E6%90%9C%E7%B4%A2/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="动态规划记忆化">动态规划记忆化</span>
            <span class="post-date" title="2020-12-05 00:00:00">2020/12/05</span>
        </a>
        
        <a  class="All AI论文 "
           href="/2020/07/25/d_300-paperDAT/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="DAT for recommender">DAT for recommender</span>
            <span class="post-date" title="2020-07-25 08:50:20">2020/07/25</span>
        </a>
        
        <a  class="All 数学 "
           href="/2020/07/20/a_%E7%AE%97%E6%B3%95/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="算法总结">算法总结</span>
            <span class="post-date" title="2020-07-20 00:00:00">2020/07/20</span>
        </a>
        
        <a  class="All 数学 "
           href="/2020/07/20/a_%E6%95%B0%E5%AD%A6/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="数学">数学</span>
            <span class="post-date" title="2020-07-20 00:00:00">2020/07/20</span>
        </a>
        
        <a  class="All "
           href="/2020/07/05/%E5%AF%B9%E9%9A%90%E7%A7%98%E7%9A%84%E8%A7%92%E8%90%BD%E7%94%B5%E8%A7%86%E5%89%A7%E7%9A%84%E6%84%9F%E6%82%9F%E4%BB%A5%E5%8F%8A%E8%AE%A4%E8%AF%86/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="对隐秘的角落电视剧的感悟以及认识">对隐秘的角落电视剧的感悟以及认识</span>
            <span class="post-date" title="2020-07-05 00:00:00">2020/07/05</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/07/04/a_%E6%A8%A1%E6%9D%BF%E5%BA%93/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="模板库">模板库</span>
            <span class="post-date" title="2020-07-04 08:50:20">2020/07/04</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2020/07/04/a_%E7%99%BE%E5%BA%A6%E6%8A%80%E6%9C%AF%E8%AE%A4%E8%AF%81/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="百度技术认证">百度技术认证</span>
            <span class="post-date" title="2020-07-04 08:50:20">2020/07/04</span>
        </a>
        
        <a  class="All AI论文 "
           href="/2020/07/01/d_GCN%E8%AE%BA%E6%96%87%E8%A7%A3%E8%AF%BB/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="GCN">GCN</span>
            <span class="post-date" title="2020-07-01 08:50:20">2020/07/01</span>
        </a>
        
        <a  class="All AI论文 "
           href="/2020/06/04/d_word2vec_node2vec/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="Word2vec &amp; Node2vec">Word2vec &amp; Node2vec</span>
            <span class="post-date" title="2020-06-04 08:50:20">2020/06/04</span>
        </a>
        
        <a  class="All AI论文 "
           href="/2020/05/05/d_GAN/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="GAN">GAN</span>
            <span class="post-date" title="2020-05-05 08:50:20">2020/05/05</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2020/05/04/d_Deepwalk/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="DEEPWALK">DEEPWALK</span>
            <span class="post-date" title="2020-05-04 08:50:20">2020/05/04</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/02/15/b_%E8%B4%AA%E5%BF%83/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="贪心与动态规划">贪心与动态规划</span>
            <span class="post-date" title="2020-02-15 00:00:00">2020/02/15</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2020/02/12/a_2020/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="机器学习零散知识记录">机器学习零散知识记录</span>
            <span class="post-date" title="2020-02-12 00:00:00">2020/02/12</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/02/12/b_%E8%83%8C%E5%8C%85%E9%97%AE%E9%A2%98/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="动态规划——背包问题">动态规划——背包问题</span>
            <span class="post-date" title="2020-02-12 00:00:00">2020/02/12</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/02/03/b_%E8%93%9D%E6%A1%A5%E6%9D%AF%E7%BB%83%E4%B9%A0%E9%A2%98/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="蓝桥杯">蓝桥杯</span>
            <span class="post-date" title="2020-02-03 00:00:00">2020/02/03</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2020/01/20/c_%E5%86%B3%E7%AD%96%E6%A0%91/"
           data-tag="50"
           data-author="" >
            <span class="post-title" title="决策树总结">决策树总结</span>
            <span class="post-date" title="2020-01-20 00:00:00">2020/01/20</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2020/01/12/b_%E5%85%A8%E6%8E%92%E5%88%97/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="全排列">全排列</span>
            <span class="post-date" title="2020-01-12 00:00:00">2020/01/12</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2019/12/02/b_%E6%9C%89%E7%A9%B7%E8%87%AA%E5%8A%A8%E6%9C%BA/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="有穷自动机">有穷自动机</span>
            <span class="post-date" title="2019-12-02 00:00:00">2019/12/02</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2019/11/20/b_%E9%94%99%E6%8E%92%E5%85%AC%E5%BC%8F/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="错排公式">错排公式</span>
            <span class="post-date" title="2019-11-20 00:00:00">2019/11/20</span>
        </a>
        
        <a  class="All "
           href="/2019/10/29/a_%E5%BC%97%E6%B4%9B%E4%BC%8A%E5%BE%B7/"
           data-tag="数据结构，最短路径，图"
           data-author="" >
            <span class="post-title" title="最短路径之弗洛伊德算法">最短路径之弗洛伊德算法</span>
            <span class="post-date" title="2019-10-29 00:00:00">2019/10/29</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/08/04/c_BPR/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="贝叶斯">贝叶斯</span>
            <span class="post-date" title="2019-08-04 08:50:20">2019/08/04</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/08/02/c_matrix/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="矩阵分解与正则化">矩阵分解与正则化</span>
            <span class="post-date" title="2019-08-02 08:25:59">2019/08/02</span>
        </a>
        
        <a  class="All "
           href="/2019/07/28/c_coordination1/"
           data-tag="AI,机器学习,相似度计算"
           data-author="" >
            <span class="post-title" title="协同过滤-上">协同过滤-上</span>
            <span class="post-date" title="2019-07-28 08:25:59">2019/07/28</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/07/26/c_cnn/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="cnn">cnn</span>
            <span class="post-date" title="2019-07-26 18:54:51">2019/07/26</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/07/22/c_coordination2/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="协同过滤—下">协同过滤—下</span>
            <span class="post-date" title="2019-07-22 18:54:26">2019/07/22</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/07/22/c_kmeans/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="K-means算法">K-means算法</span>
            <span class="post-date" title="2019-07-22 18:53:07">2019/07/22</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/07/21/c_nerve2/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="神经网络-下">神经网络-下</span>
            <span class="post-date" title="2019-07-21 11:37:18">2019/07/21</span>
        </a>
        
        <a  class="All "
           href="/2019/07/20/c_nerve1/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="神经网络—上">神经网络—上</span>
            <span class="post-date" title="2019-07-20 17:29:10">2019/07/20</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/06/22/c_gradient/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="线性回归与梯度下降算法">线性回归与梯度下降算法</span>
            <span class="post-date" title="2019-06-22 08:26:36">2019/06/22</span>
        </a>
        
        <a  class="All 机器学习 "
           href="/2019/06/19/c_flyback/"
           data-tag="AI,机器学习"
           data-author="" >
            <span class="post-title" title="线性回归">线性回归</span>
            <span class="post-date" title="2019-06-19 00:00:00">2019/06/19</span>
        </a>
        
        <a  class="All 经典算法 "
           href="/2019/05/20/b_%E8%A7%86%E9%A2%91%E7%BB%8F%E5%85%B8%E7%AE%97%E6%B3%95/"
           data-tag=""
           data-author="" >
            <span class="post-title" title="简单入门算法">简单入门算法</span>
            <span class="post-date" title="2019-05-20 00:00:00">2019/05/20</span>
        </a>
        
        <div id="no-item-tips">

        </div>
    </nav>
    <div id="outline-list">
    </div>
</div>
    </div>
    <div class="hide-list">
        <div class="semicircle" data-title="切换全屏 快捷键 s">
            <div class="brackets first">&lt;</div>
            <div class="brackets">&gt;</div>
        </div>
    </div>
</aside>
<div id="post">
    <div class="pjax">
        <article id="post-a_算法" class="article article-type-post" itemscope itemprop="blogPost">
    
        <h1 class="article-title">算法总结</h1>
    
    <div class="article-meta">
        
        
        
        <span class="book">
            <i class="iconfont icon-category"></i>
            
            
            <a  data-rel="数学">数学</a>
            
        </span>
        
        
    </div>
    <div class="article-meta">
        
            Created At : <time class="date" title='Updated At: 2020-08-14 20:21:51'>2020-07-20 00:00</time>
        
    </div>
    <div class="article-meta">
        
        
        <span id="busuanzi_container_page_pv">
            Views 👀 :<span id="busuanzi_value_page_pv">
                <span class="count-comment">
                    <span class="spinner">
                      <div class="cube1"></div>
                      <div class="cube2"></div>
                    </span>
                </span>
            </span>
        </span>
        
        
    </div>
    
    <div class="toc-ref">
    
        <ol class="toc"><li class="toc-item toc-level-1"><a class="toc-link" href="#1-%E6%9C%80%E7%9F%AD%E8%B7%AF%E5%BE%84"><span class="toc-text">1.最短路径</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#100-2-%E6%95%B0%E5%80%BC%E6%A6%82%E7%8E%87%E7%AE%97%E6%B3%95%E5%92%8C%E8%88%8D%E4%BC%8D%E5%BE%B7%E7%AE%97%E6%B3%95%E3%80%82"><span class="toc-text">:100: 2.数值概率算法和舍伍德算法。</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#3-%E7%81%B0%E8%89%B2%E9%A2%84%E6%B5%8B"><span class="toc-text">3.灰色预测</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#4-%E8%81%9A%E7%B1%BB%E7%AE%97%E6%B3%95"><span class="toc-text">4.聚类算法</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B"><span class="toc-text">5.蒙特卡洛</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#6-%E6%A8%A1%E6%8B%9F%E9%80%80%E7%81%AB"><span class="toc-text">6.模拟退火</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#7-%E9%81%97%E4%BC%A0%E7%AE%97%E6%B3%95"><span class="toc-text">7.遗传算法</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#8-%E7%B2%92%E5%AD%90%E7%BE%A4%E7%AE%97%E6%B3%95"><span class="toc-text">8.粒子群算法</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#9-%E7%A7%8D%E7%BE%A4%E7%AB%9E%E4%BA%89%EF%BC%9A"><span class="toc-text">9.种群竞争：</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#10-%E6%8E%92%E9%98%9F%E8%AE%BA%EF%BC%9A"><span class="toc-text">10.排队论：</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#11-%E5%B1%82%E6%AC%A1%E5%88%86%E6%9E%90%EF%BC%9A"><span class="toc-text">11.层次分析：</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#12-%E5%A4%9A%E5%85%83%E5%9B%9E%E5%BD%92%EF%BC%9A"><span class="toc-text">12.多元回归：</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" 
href="#13-%E4%B8%BB%E6%88%90%E5%88%86%E5%88%86%E6%9E%90%EF%BC%9A"><span class="toc-text">13.主成分分析：</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#14-%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C"><span class="toc-text">14.神经网络</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%EF%BC%882-2-1%EF%BC%89%E7%BB%93%E6%9E%84"><span class="toc-text">神经网络（2,2,1）结构</span></a></li></ol></li></ol>
    
<style>
    /* Hide these left-column switch controls on this page (presumably the theme's
       sidebar/column toggles — confirm against the theme's stylesheet). */
    .left-col .switch-btn,
    .left-col .switch-area {
        display: none;
    }
    /* Collapse the TOC below level 3: hide level-4 expand icons and nested lists. */
    .toc-level-4 i,
    .toc-level-4 ol {
        display: none !important;
    }
</style>
</div>
    
    <div class="article-entry" itemprop="articleBody">
      
        <h1 id="1-最短路径"><a href="#1-最短路径" class="headerlink" title="1.最短路径"></a>1.最短路径</h1><p><strong>有向图起始点尅是到各点的最短距离：</strong></p>
<p>类似：这种图或者邻接矩阵的形式，可以得到某一点a向各个节点的最短路径</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/120821-168754.jpeg" alt="img"></p>
<p>算法描述：</p>
<ol>
<li><p>初始化，以该节点更新与有向连接的邻结点的距离进行更新，不能通过的，两者之间距离为正无穷、</p>
</li>
<li><p>找出最短路的结点b，确定该路径ab是a到b的最短路，并将b作为下一节点，继续重复算法一直到结束。</p>
</li>
<li>以该节点更新与有向连接的邻结点的距离进行更新，$dis[a][y]=min(dis[x][y],dis[x][b]+dis[b][y])$</li>
<li>重复23过程一直到结束</li>
</ol>
<pre><code class="lang-c++">#include&lt;fstream&gt;
#define MaxNum 765432100
using namespace std;

ifstream fin(&quot;Dijkstra.in&quot;);
ofstream fout(&quot;Dijkstra.out&quot;);

int Map[501][501];
bool is_arrived[501];
int Dist[501],From[501],Stack[501];
int p,q,k,Path,Source,Vertex,Temp,SetCard;

int FindMin()
&amp;#123;
int p,Temp=0,Minm=MaxNum;
for(p=1;p&lt;=Vertex;p++)
if ((Dist[p]&lt;Minm)&amp;&amp;(!is_arrived[p]))
&amp;#123;
Minm=Dist[p];
Temp=p;
&amp;#125;
return Temp;
&amp;#125;
int main()
&amp;#123;
memset(is_arrived,0,sizeof(is_arrived));

fin &gt;&gt; Source &gt;&gt; Vertex;
for(p=1;p&lt;=Vertex;p++)
for(q=1;q&lt;=Vertex;q++)
&amp;#123;
fin &gt;&gt; Map[p][q];
if (Map[p][q]==0) Map[p][q]=MaxNum;
&amp;#125;
for(p=1;p&lt;=Vertex;p++)
&amp;#123;
Dist[p]=Map[Source][p];
if (Dist[p]!=MaxNum) 
From[p]=Source;
else 
From[p]=p;
&amp;#125;

is_arrived[Source]=true;
SetCard=1;
do
&amp;#123;
Temp=FindMin();
if (Temp!=0)
&amp;#123;
SetCard=SetCard+1;
is_arrived[Temp]=true;
for(p=1;p&lt;=Vertex;p++)
if ((Dist[p]&gt;Dist[Temp]+Map[Temp][p])&amp;&amp;(!is_arrived[p]))
&amp;#123;
Dist[p]=Dist[Temp]+Map[Temp][p];
From[p]=Temp;
&amp;#125;
&amp;#125;
else
break;
&amp;#125;
while (SetCard!=Vertex);

for(p=1;p&lt;=Vertex;p++)
if(p!=Source)
&amp;#123;
fout &lt;&lt; &quot;========================\n&quot;;
fout &lt;&lt; &quot;Source:&quot; &lt;&lt; Source &lt;&lt; &quot;\nTarget:&quot; &lt;&lt; p &lt;&lt; &#39;\n&#39;;
if (Dist[p]==MaxNum)
&amp;#123;
fout &lt;&lt; &quot;Distance:&quot; &lt;&lt; &quot;Infinity\n&quot;;
fout &lt;&lt; &quot;Path:No Way!&quot;;
&amp;#125;
else
&amp;#123; 
fout &lt;&lt; &quot;Distance:&quot; &lt;&lt; Dist[p] &lt;&lt; &#39;\n&#39;;
k=1;
Path=p;
while (From[Path]!=Path)
&amp;#123;
Stack[k]=Path;
Path=From[Path];
k=k+1;
&amp;#125;
fout &lt;&lt; &quot;Path:&quot; &lt;&lt; Source;
for(q=k-1;q&gt;=1;q--)
fout &lt;&lt; &quot;--&gt;&quot; &lt;&lt; Stack[q];
&amp;#125;
fout &lt;&lt; &quot;\n========================\n\n&quot;;
&amp;#125;

fin.close();
fout.close();
return 0;
&amp;#125;
</code></pre>
<h1 id="100-2-数值概率算法和舍伍德算法。"><a href="#100-2-数值概率算法和舍伍德算法。" class="headerlink" title=":100: 2.数值概率算法和舍伍德算法。"></a>:100: 2.数值概率算法和舍伍德算法。</h1><h1 id="3-灰色预测"><a href="#3-灰色预测" class="headerlink" title="3.灰色预测"></a>3.灰色预测</h1><p><strong>这是像线性回归一下，建立起一个预测模型，然后进行预测未知数据</strong></p>
<p><strong>什么情况下使用灰色预测模型？</strong></p>
<p><strong>数据量小，预测的周期短，预测的长度短</strong></p>
<p>分为GM(1,1)，GM(1,m)，GM(n,m)分别用于一个自变量一个因变量，多个自变量一个因变量，多个自变量多个因变量</p>
<p>这里有两个程序，有一个简单版本，有一个复杂版本，复杂版本请参考github</p>
<pre><code class="lang-matlab">function []=greymodel(y)
% 本程序主要用来计算根据灰色理论建立的模型的预测值。
% 应用的数学模型是 GM(1,1)。
% 原始数据的处理方法是一次累加法。
y=input(&#39;请输入数据 &#39;);
n=length(y);
yy=ones(n,1);
yy(1)=y(1);
for i=2:n
    yy(i)=yy(i-1)+y(i);
end
B=ones(n-1,2);
for i=1:(n-1)
    B(i,1)=-(yy(i)+yy(i+1))/2;
    B(i,2)=1;
end
BT=B&#39;;
for j=1:n-1
    YN(j)=y(j+1);
end
YN=YN&#39;;
A=inv(BT*B)*BT*YN;
a=A(1);
u=A(2);
t=u/a;
i=1:n+2;
yys(i+1)=(y(1)-t).*exp(-a.*i)+t;
yys(1)=y(1);
for j=n+2:-1:2
    ys(j)=yys(j)-yys(j-1);
end
x=1:n;
xs=2:n+2;
yn=ys(2:n+2);
plot(x,y,&#39;^r&#39;,xs,yn,&#39;*-b&#39;);
det=0;

sum1=0;
sumpe=0;
for i=1:n
    sumpe=sumpe+y(i);
end
pe=sumpe/n;
for i=1:n;
    sum1=sum1+(y(i)-pe).^2;
end
s1=sqrt(sum1/n);
sumce=0;
for i=2:n
    sumce=sumce+(y(i)-yn(i));
end
ce=sumce/(n-1);
sum2=0;
for i=2:n;
    sum2=sum2+(y(i)-yn(i)-ce).^2;
end
s2=sqrt(sum2/(n-1));
c=(s2)/(s1);
disp([&#39;后验差比值为：&#39;,num2str(c)]);
if c&lt;0.35
    disp(&#39;系统预测精度好&#39;)
else if c&lt;0.5
        disp(&#39;系统预测精度合格&#39;)
    else if c&lt;0.65
            disp(&#39;系统预测精度勉强&#39;)
        else
            disp(&#39;系统预测精度不合格&#39;)
        end
    end
end

disp([&#39;下个拟合值为 &#39;,num2str(ys(n+1))]);
disp([&#39;再下个拟合值为&#39;,num2str(ys(n+2))]);
</code></pre>
<h1 id="4-聚类算法"><a href="#4-聚类算法" class="headerlink" title="4.聚类算法"></a>4.聚类算法</h1><p><strong>就是将没有分过类的数据，根据数据表现的规律，分为k类，我们这里用kmeans，</strong></p>
<p>比如将数据分成4类：，这里的样本以前都是黑色的，我们认为设定k=4，就是分为4类以后得到的分类结果</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/23/183011-843982.png" alt="image-20200723183009694"></p>
<pre><code class="lang-python">from numpy import *
import time
import matplotlib.pyplot as plt


# calculate Euclidean distance
def euclDistance(vector1, vector2):
    return sqrt(sum(power(vector2 - vector1, 2)))

# init centroids with random samples
def initCentroids(dataSet, k):
    numSamples, dim = dataSet.shape
    centroids = zeros((k, dim))
    for i in range(k):
        index = int(random.uniform(0, numSamples))
        centroids[i, :] = dataSet[index, :]
    return centroids

# k-means cluster
def kmeans(dataSet, k):
    numSamples = dataSet.shape[0]
    # first column stores which cluster this sample belongs to,
    # second column stores the error between this sample and its centroid
    clusterAssment = mat(zeros((numSamples, 2)))
    clusterChanged = True

    # step 1: init centroids
    centroids = initCentroids(dataSet, k)

    while clusterChanged:
        clusterChanged = False
        # for each sample
        for i in xrange(numSamples):
            minDist  = 100000.0
            minIndex = 0
            # for each centroid
            # step 2: find the centroid who is closest
            for j in range(k):
                distance = euclDistance(centroids[j, :], dataSet[i, :])
                if distance &lt; minDist:
                    minDist  = distance
                    minIndex = j

            # step 3: update its cluster
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
                clusterAssment[i, :] = minIndex, minDist**2

        # step 4: update centroids
        for j in range(k):
            pointsInCluster = dataSet[nonzero(clusterAssment[:, 0].A == j)[0]]
            centroids[j, :] = mean(pointsInCluster, axis = 0)

    print &#39;Congratulations, cluster complete!&#39;
    return centroids, clusterAssment

# show your cluster only available with 2-D data
def showCluster(dataSet, k, centroids, clusterAssment):
    numSamples, dim = dataSet.shape
    if dim != 2:
        print &quot;Sorry! I can not draw because the dimension of your data is not 2!&quot;
        return 1

    mark = [&#39;or&#39;, &#39;ob&#39;, &#39;og&#39;, &#39;ok&#39;, &#39;^r&#39;, &#39;+r&#39;, &#39;sr&#39;, &#39;dr&#39;, &#39;&lt;r&#39;, &#39;pr&#39;]
    if k &gt; len(mark):
        print &quot;Sorry! Your k is too large! please contact Zouxy&quot;
        return 1

    # draw all samples
    for i in xrange(numSamples):
        markIndex = int(clusterAssment[i, 0])
        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])

    mark = [&#39;Dr&#39;, &#39;Db&#39;, &#39;Dg&#39;, &#39;Dk&#39;, &#39;^b&#39;, &#39;+b&#39;, &#39;sb&#39;, &#39;db&#39;, &#39;&lt;b&#39;, &#39;pb&#39;]
    # draw the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)

    plt.show()

#测试代码：


from numpy import *
import time
import matplotlib.pyplot as plt

# step 1: load data
print &quot;step 1: load data...&quot;
dataSet = []
##################################修改##########################
fileIn = open(&#39;E:/Python/Machine Learning in Action/testSet.txt&#39;)
for line in fileIn.readlines():
    lineArr = line.strip().split(&#39;\t&#39;)
    dataSet.append([float(lineArr[0]), float(lineArr[1])])

# step 2: clustering...
print &quot;step 2: clustering...&quot;
dataSet = mat(dataSet)
k = 4
centroids, clusterAssment = kmeans(dataSet, k)

# step 3: show the result
print &quot;step 3: show the result...&quot;
showCluster(dataSet, k, centroids, clusterAssment)
</code></pre>
<p>数据文件长这个样子</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/23/183142-836169.png" alt="在这里插入图片描述"></p>
<h1 id="5-蒙特卡洛"><a href="#5-蒙特卡洛" class="headerlink" title="5.蒙特卡洛"></a>5.蒙特卡洛</h1><h1 id="6-模拟退火"><a href="#6-模拟退火" class="headerlink" title="6.模拟退火"></a>6.模拟退火</h1><p><strong>功能：</strong></p>
<p><strong>是一种搜索方法，寻找全局最优值（寻找最小值或者最大值）</strong></p>
<p><strong>例题：</strong></p>
<p><strong>TSP最短路径</strong></p>
<p>基本思想：</p>
<p>是贪心算法，在搜索过程中加入随机扰动，如果找到一个更接近最优值的点，接受该解。否则，以一定的概率接受一个比当前解要差的解，防止陷入局部最优解，<strong>做到更加接近最优值。</strong>模拟退火过程，初始温度最高，随着搜索的进行，温度逐渐降低，内能减小，搜索范围趋于稳定。</p>
<p>算法步骤：（以最小值为例）</p>
<p><strong>Step1.</strong>  设置初始温度$t_{k}=t_{max}$,当前迭代步数$k=0$ ，起点$x=x_{0}$</p>
<p><strong>Step2.</strong>  得到新解 $x_{new }= x+\Delta x$    其中 $\Delta x$在[dmin,dmax]之间</p>
<p><strong>Step3.</strong>  $dE = f(x_{new})- f(x)$ ，其中$f(x)$是优化目标</p>
<p><strong>Step4.</strong>  若$dE &lt;0 $ 则接受该点，否则，以概率$\exp (-dE/(k T))$接受该点；并降温。</p>
<blockquote>
<p><strong>Step4.</strong>  若$dE &gt;0 $ 则接受该点，否则，以概率$\exp (dE/(k T))$接受该点(最大值)</p>
</blockquote>
<p>这样不断重复步骤2、3、4，直到温度降到阈值温度为止</p>
<p><img src="E:\typora图片\image-20200814202148832.png" alt="image-20200814202148832"></p>
<pre><code class="lang-matlab">function main
clc
clear;

%范围
section_l = 0;
section_h = 9;

%绘制函数图像
draw_base();

%初始温度，停止温度与降温系数
tmp = 1e5;
tmp_min = 1e-3;
alpha = 0.98;

%生成初始随机解
x_old = (section_h - section_l) * rand() + section_l;
x_new = x_old;
s_old = val(x_old);
s_new = s_old;

text_lt = text(0, 0, &#39;Init&#39;);

%计数器
counter = 0;

%退火的主体部分，一个循环
while(tmp &gt; tmp_min)  %阈值温度
  %随机扰动
  delta = (rand() - 0.5) * 3;
  x_new = x_old + delta;
  %扰动的值小于一半的区间范围时，可以用这种办法防止新值超出区间范围
  if(x_new &lt; section_l || x_new &gt; section_h)
    x_new = x_new - 2 * delta;
  end

  s_new = val(x_new);

  %求差值，这里是找最大值而非最小值，所以不是s_new - s_old    3
  dE = s_old - s_new;

  %判断，是否接受该点    4 
  j = judge(dE, tmp); 
  if(j)
    s_old = s_new;
    x_old = x_new;
  end

  %只有当dE &lt; 0的情况下才降温 
  if(dE &lt; 0)
    delete(text_lt);
    hold on, scatter(x_old, s_old);
    hold on, text_lt = text(0.3, 21, [&#39;tmp: &#39;, num2str(tmp)]);
    pause(0.01);
    %上面是绘图的代码，下面降温
    tmp = tmp * alpha;
  else
    counter = counter + 1;
  end

  %当接受更差的解的概率太小时，若又长时间找不到更优的解，那么退出循环，结束算法
  if(counter &gt; 10000)
    break;
  end

end
end
function [y] = val(x)
    y = x + 10 * sin(5 * x) + 7 * cos(4 * x);
end

function draw_base()
    X = 0: 0.01:9;
    M = val(X);
    plot(X, M);
end
function [y] = judge(dE, t)
  if(dE &lt; 0)
    y = 1;
  else
    d = exp(-(dE / t));
    if(d &gt; rand)
      y = 1;
    else
      y = 0;
    end
  end
end
</code></pre>
<p><a target="_blank" rel="noopener" href="https://zhuanlan.zhihu.com/p/33184423">https://zhuanlan.zhihu.com/p/33184423</a></p>
<p>优缺点：</p>
<p>温度管理问题是一个难以处理的问题，实际应用中，由于必须考虑计算复杂度的切实可行性等问题，很容易受到参数的影响</p>
<p>具有局部搜索能力强、运行时间短的优点</p>
<p>例题： 110警车配置</p>
<p>思路：图论问题，简化问题，将图结点保留，边信息保留，由图转化成邻接矩阵的问题</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/111234-101527.png" alt="image-20200726111233676"></p>
<h1 id="7-遗传算法"><a href="#7-遗传算法" class="headerlink" title="7.遗传算法"></a>7.遗传算法</h1><p><strong>功能：也是寻找最优值方案的算法</strong></p>
<p>所以求最大值的过程就转化成一个“袋鼠跳”的过程。</p>
<p>下面介绍介绍“袋鼠跳”的几种方式。</p>
<ul>
<li>爬山算法：一只袋鼠朝着比现在高的地方跳去。它找到了不远处的最高的山峰。但是这座山不一定是最高峰。这就是爬山算法，它不能保证局部最优值就是全局最优值。</li>
<li>模拟退火：袋鼠喝醉了。它随机地跳了很长时间。这期间，它可能走向高处，也可能踏入平地。但是，它渐渐清醒了并朝最高峰跳去。这就是模拟退火算法。</li>
<li>遗传算法：有很多袋鼠，它们降落到喜玛拉雅山脉的任意地方。这些袋鼠并不知道它们的任务是寻找珠穆朗玛峰。但每过几年，就在一些海拔高度较低的地方射杀一些袋鼠。于是，不断有袋鼠死于海拔较低的地方，而越是在海拔高的袋鼠越是能活得更久，也越有机会生儿育女。就这样经过许多年，这些袋鼠们竟然都不自觉地聚拢到了一个个的山峰上，可是在所有的袋鼠中，只有聚拢到珠穆朗玛峰的袋鼠被带回了美丽的澳洲。</li>
</ul>
<p><a target="_blank" rel="noopener" href="https://blog.csdn.net/JX_Cesare/article/details/81268771">https://blog.csdn.net/JX_Cesare/article/details/81268771</a></p>
<p><a target="_blank" rel="noopener" href="https://www.jianshu.com/p/ae5157c26af9">https://www.jianshu.com/p/ae5157c26af9</a></p>
<p>基本思想：适者生存（接近最优值的），劣者淘汰（每隔一段时间淘汰掉一些结点）</p>
<p>并遵循遗传中的基因遗传，变异，交配产生后代</p>
<p>h函数还是没有太看懂</p>
<p>算法步骤：</p>
<ol>
<li><p>编码（设计基因片段）</p>
</li>
<li><p>设计适应度函数，一个目标函数</p>
</li>
<li>选择（轮盘赌：根据自身的优势把所有种群放在0-1之间选择，随机竞争，最佳保留等算法）</li>
<li>遗传（染色体交叉）基因片段的交换</li>
<li>变异</li>
</ol>
<p>轮盘赌：</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/25/175801-996578.png" alt="image-20200725175759649"></p>
<pre><code class="lang-matlab">function main()
clear;
clc;
%种群大小
popsize=100;
%二进制编码长度
chromlength=10;
%交叉概率
pc = 0.6;
%变异概率
pm = 0.001;
%初始种群
pop = initpop(popsize,chromlength);

for i = 1:100
    %计算适应度值（函数值）
    objvalue = cal_objvalue(pop);
    fitvalue = objvalue;
    %选择操作
    newpop = selection(pop,fitvalue);
    %交叉操作
    newpop = crossover(newpop,pc);
    %变异操作
    newpop = mutation(newpop,pm);
    %更新种群
    pop = newpop;
    %寻找最优解
    [bestindividual,bestfit] = best(pop,fitvalue);
    x2 = binary2decimal(bestindividual);
    x1 = binary2decimal(newpop);
    y1 = cal_objvalue(newpop);
    if mod(i,10) == 0
        figure;
        fplot(&#39;10*sin(5*x)+7*abs(x-5)+10&#39;,[0 10]);
        hold on;
        plot(x1,y1,&#39;*&#39;);
        title([&#39;迭代次数为n=&#39; num2str(i)]);
        %plot(x1,y1,&#39;*&#39;);
    end
end
fprintf(&#39;The best X is ---&gt;&gt;%5.2f\n&#39;,x2);
fprintf(&#39;The best Y is ---&gt;&gt;%5.2f\n&#39;,bestfit);
</code></pre>
<h1 id="8-粒子群算法"><a href="#8-粒子群算法" class="headerlink" title="8.粒子群算法"></a>8.粒子群算法</h1><h1 id="9-种群竞争："><a href="#9-种群竞争：" class="headerlink" title="9.种群竞争："></a>9.种群竞争：</h1><p>也是一种搜索最优解的办法</p>
<h1 id="10-排队论："><a href="#10-排队论：" class="headerlink" title="10.排队论："></a>10.排队论：</h1><p>（比如超市购物体验上，随时可能需要排队）研究排队问题，就是要把排队的时间控制到一定的程度内，在<strong>服务质量的提高和成本的降低之间取得平衡</strong>，找到最适当的解。</p>
<p><a target="_blank" rel="noopener" href="https://blog.csdn.net/qq547276542/article/details/77689099?utm_medium=distribute.pc_aggpage_search_result.none-task-blog-2~all~first_rank_v2~rank_v25-1-77689099.nonecase">入门链接</a></p>
<h1 id="11-层次分析："><a href="#11-层次分析：" class="headerlink" title="11.层次分析："></a>11.层次分析：</h1><p><a target="_blank" rel="noopener" href="https://zhuanlan.zhihu.com/p/38207837">https://zhuanlan.zhihu.com/p/38207837</a></p>
<p>一般用不太上，主要是一些数据分析类要做的问题</p>
<p><a target="_blank" rel="noopener" href="https://www.bilibili.com/video/BV1KE411q7Wh?from=search&amp;seid=493957873109193507">https://www.bilibili.com/video/BV1KE411q7Wh?from=search&amp;seid=493957873109193507</a></p>
<p><strong>做决策，综合各方面进行选择。比如：</strong></p>
<p><strong>日常生活中有很多的决策问题。决策是指在面临多种方案时需要依据一定的标准选择某一种方案。日常生活中有许多决策的问题。比如：</strong></p>
<ol>
<li><strong>买钢笔，一般要依据质量、颜色、实用性、价格、外形等方面的因素选择某一支钢笔。</strong></li>
<li><strong>假期旅游，是去风光秀丽的苏州，还是去迷人的北戴河，或者是去山水甲天下的桂林，那一般会依据景色、费用、食宿条件、旅途等因素来算着去哪个地方。</strong></li>
</ol>
<p><strong>面临各种各样的方案，要进行比较、判断、评价、直至最后的决策。这个过程中都是一些主观的因素，这些因素可能由于个人情况的不同，有相应不同的比重</strong></p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202008/09/180535-36405.jpeg" alt="img"></p>
<h1 id="12-多元回归："><a href="#12-多元回归：" class="headerlink" title="12.多元回归："></a>12.多元回归：</h1><p>多个影响因素之间的线性回归</p>
<h1 id="13-主成分分析："><a href="#13-主成分分析：" class="headerlink" title="13.主成分分析："></a>13.主成分分析：</h1><p><strong>就是对一堆数据进行降维表示。比如，探索人的身高x1，体重x2，智力x3，耐力x4等共十几个影响因素与考试成绩y之间的关系，由于影响因素太多，我们需要找出上面十几个因素的线性组合，形成几个新的影响因素，为：</strong></p>
<p><strong>x = k1x1+ k2x2+ k3x3…..knxn</strong>  </p>
<p><strong>使得这几个新的影响因素与y关系很强</strong></p>
<p><strong>通过主成分分析:</strong></p>
<div class="table-container">
<table>
<thead>
<tr>
<th><strong>新影响因素</strong></th>
<th><strong>比率</strong></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>xk</strong></td>
<td><strong>89%</strong></td>
<td></td>
</tr>
<tr>
<td><strong>xk+1</strong></td>
<td><strong>7%</strong></td>
<td></td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</tbody>
</table>
</div>
<p><strong>注：后面的比率表示与y关系有多大，递减排序</strong></p>
<p><strong>可以提取到一个或者几个（根据实际需求选择前几个）作为新的影响因素完成后续的任务。</strong></p>
<p>对于二维数据而言：</p>
<div class="table-container">
<table>
<thead>
<tr>
<th><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/113949-282365.png" alt="image-20200726113919926"></th>
<th><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/113942-122991.png" alt="image-20200726112600558"></th>
</tr>
</thead>
<tbody>
<tr>
<td>原始数据</td>
<td>将原始数据减去质心后的数据分布</td>
</tr>
</tbody>
</table>
</div>
<p>这样处理后的数据满足 $\bar{X}=\bar{Y}=0$，便于以下的计算</p>
<p>x,y轴上的方差与协方差公式：</p>
<script type="math/tex; mode=display">
\begin{array}{c}
s^{2}(X)=\frac{\sum_{i=1}^{n}\left(X_{i}-\bar{X}\right)^{2}}{n-1} \\
s^{2}\left(D_{1}\right)=18.97 \\
s^{2}\left(D_{2}\right)=3.13 \\
\operatorname{cov}(X, Y)=\frac{\sum_{i-1}^{n}\left(X_{i}-\bar{X}\right)\left(Y_{i}-\bar{Y}\right)}{n-1} \\
\operatorname{cov}\left(D_{1}, D_{2}\right)=6.49
\end{array}</script><p><strong>实际上可以表示为：</strong></p>
<script type="math/tex; mode=display">
\begin{aligned}
C=\left[\begin{array}{ll}
s^{2}(X) & \operatorname{cov}(X Y) \\
\operatorname{cov}(X Y) & s^{2}(Y)
\end{array}\right]=\left[\begin{array}{cc}
18.97 & 6.49 \\
6.49 & 3.13
\end{array}\right] \\
&=\frac{1}{6-1} A^{T} A
\end{aligned}</script><p>其中，$A  $是以质心为（0,0）的处理后的数据 ，形状为 4*2</p>
<p> 我们的目标就是重新寻找一个方向，使得这些点让在这个方向上分散得最开。</p>
<p>也就是说，在这个方向上，点到中心原点的方差最大。</p>
<script type="math/tex; mode=display">
\begin{array}{c}
S=\vec{a} \cdot \vec{v}=|a||v| \cos \theta=|a| \cos \theta \\
s^{2}=\sum_{i=1}^{n} \frac{S^{2}}{n-1}=\frac{\vec{v} \cdot \vec{A}^{T} \cdot\left(\vec{v} \cdot \vec{A}^{T}\right)^{T}}{n-1} \\
=\vec{v} \cdot \frac{\vec{A}^{T} \vec{A}}{n-1} \cdot \vec{v}^{T} \\
=\vec{v} C \vec{v}^{T}
\end{array}</script><p>构造 目标函数以及 约束函数，利用拉格朗日定理,求解：</p>
<script type="math/tex; mode=display">
\begin{array}{l}
J=s^{2}=\vec{v} C \vec{v}^{T} \\
\text {s.t.} \vec{v} \cdot \vec{v}^{T}=1 \\
F(\vec{v})=\vec{v} C \vec{v}^{T}-\lambda\left(1-\vec{v} \cdot \vec{v}^{T}\right)
\end{array}</script><p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/114938-645529.png" alt="image-20200726114911807"></p>
<p>求最值之后会得到一个特征值公式：</p>
<script type="math/tex; mode=display">
\begin{array}{c}
\frac{\partial F(\vec{v})}{\partial \vec{v}}=0 \\
2 C v^{T}-2 \lambda v^{T}=0 \\
C v^{T}=\lambda v^{T}
\end{array}</script><p>然后特征值分解会得到两个相互独立的向量（线性代数有求解过程）</p>
<div class="table-container">
<table>
<thead>
<tr>
<th><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/115216-408997.png" alt="image-20200726115215898"></th>
<th><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/115223-541960.png" alt="image-20200726115155432"></th>
</tr>
</thead>
<tbody>
<tr>
<td>求解的两个向量</td>
<td>表示在图中</td>
</tr>
</tbody>
</table>
</div>
<p>$\lambda$的含义：代入到公式（2），此时 $Cv=\lambda v$，代入可得：由于$\lambda$是一个常数，可以提出来，且$v^{T}v=1$</p>
<p>所以  $s^{2}= \lambda$</p>
<p>最后得到的两个方向：信息集中到了一个方向里面，因此主成分分析降维技术，将信息少的维度省去</p>
<p><img src="E:\typora图片\image-20200726115717782.png" alt="image-20200726115717782"></p>
<p>例题：</p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/115943-499677.jpeg" alt="img"></p>
<pre><code class="lang-matlab">clc
clear all
A=xlsread(&#39;这里换成你自己的数据根目录，例如：D:\资料库区\大三上\HUAWEI\MATLAB\主成分分析.xls&#39;,&#39;B3:I17&#39;);
%得到的数据矩阵的行数和列数
a=size(A,1);
b=size(A,2);
%数据的标准化处理:得到标准化后的矩阵SA
for i=1:b
    SA(:,i)=(A(:,i)-mean(A(:,i)))/std(A(:,i));
end
%计算系数矩阵:CM
CM=corrcoef(SA);
%计算CM的特征值和特征向量
[V,D]=eig(CM);
%将特征值按降序排列到DS中
for j=1:b
    DS(j,1)=D(b+1-j,b+1-j);
end
%计算贡献率
for i=1:b
    DS(i,2)=DS(i,1)/sum(DS(:,1));%单个贡献率
    DS(i,3)=sum(DS(1:i,1))/sum(DS(:,1));%累计贡献率
end
%假定主成分的信息保留率
T=0.9;
for k=1:b
    if DS(k,3) &gt;= T
        com_num=k;
        break;
    end
end
%提取主成分的特征向量
for j=1:com_num
    PV(:,j)=V(:,b+1-j);
end
%计算主成分得分
new_score=SA*PV;
for i=1:a
    total_score(i,1)=sum(new_score(i,:));
    total_score(i,2)=i;
end
%强主成分得分与总分放到同一个矩阵中
result_report=[new_score,total_score];
%按总分降序排列
result_report=sortrows(result_report,-4);
%输出结果
disp(&#39;特征值、贡献率、累计贡献率：&#39;)
DS
disp(&#39;信息保留率T对应的主成分数与特征向量：&#39;)
com_num
PV
disp(&#39;主成分得分及排序（按第4列的总分进行降序排列，前3列为个各成分得分，第5列为企业编号）&#39;)
result_report

可以用下方数据训练一下
</code></pre>
<h1 id="14-神经网络"><a href="#14-神经网络" class="headerlink" title="14.神经网络"></a>14.神经网络</h1><p><strong>神经网络的普通应用：回归与分类，可以完成预测；线性回归可以完成的，神经网络就可以完成。</strong></p>
<h2 id="神经网络（2-2-1）结构"><a href="#神经网络（2-2-1）结构" class="headerlink" title="神经网络（2,2,1）结构"></a>神经网络（2,2,1）结构</h2><pre><code class="lang-python">import numpy as np
#这是一层神经网络的构造：
def sigmoid(x):
  # Our activation function: f(x) = 1 / (1 + e^(-x))
  return 1 / (1 + np.exp(-x))

class Neuron:
  def __init__(self, weights, bias):
    self.weights = weights
    self.bias = bias

  def feedforward(self, inputs):
    # Weight inputs, add bias, then use the activation function
    total = np.dot(self.weights, inputs) + self.bias
    return sigmoid(total)

weights = np.array([0, 1]) # w1 = 0, w2 = 1
bias = 4                   # b = 4
n = Neuron(weights, bias)

x = np.array([2, 3])       # x1 = 2, x2 = 3
print(n.feedforward(x))    # 0.9990889488055994
</code></pre>
<pre><code>0.9990889488055994
</code></pre><pre><code class="lang-python">import numpy as np

# ... code from previous section here

class OurNeuralNetwork:
  &#39;&#39;&#39;
  A neural network with:
    - 2 inputs
    - a hidden layer with 2 neurons (h1, h2)
    - an output layer with 1 neuron (o1)
  Each neuron has the same weights and bias:
    - w = [0, 1]
    - b = 0
  &#39;&#39;&#39;
  def __init__(self):
    weights = np.array([0, 1])
    bias = 0

    # The Neuron class here is from the previous section
    self.h1 = Neuron(weights, bias)
    self.h2 = Neuron(weights, bias)
    self.o1 = Neuron(weights, bias) #初始化

  def feedforward(self, x):
    out_h1 = self.h1.feedforward(x) #前向传播
    out_h2 = self.h2.feedforward(x)

    # The inputs for o1 are the outputs from h1 and h2
    out_o1 = self.o1.feedforward(np.array([out_h1, out_h2]))

    return out_o1

network = OurNeuralNetwork()
x = np.array([2, 3])
print(network.feedforward(x)) # 0.7216325609518421
</code></pre>
<pre><code>0.7216325609518421
</code></pre><p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/121228-640748.png" alt="image-20200725163502982"></p>
<pre><code class="lang-python">import numpy as np

def sigmoid(x):
  # Sigmoid activation function: f(x) = 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))

def deriv_sigmoid(x):
# Derivative of sigmoid: f&#39;(x) = f(x) * (1 - f(x))
    fx = sigmoid(x)
    return fx * (1 - fx)

def mse_loss(y_true, y_pred):
  # y_true and y_pred are numpy arrays of the same length.
    return ((y_true - y_pred) ** 2).mean()

class OurNeuralNetwork: #这个神经网络只是用于有2两个输入，一个隐藏层（含有2个结点），一个输出的简单的神经网络

    def __init__(self):
    # 权重，Weights
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        self.w3 = np.random.normal()
        self.w4 = np.random.normal()
        self.w5 = np.random.normal()
        self.w6 = np.random.normal()

        # 截距项，Biases
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()

    def feedforward(self, x):
        # x is a numpy array with 2 elements.
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
        return o1

    def train(self, data, all_y_trues): #l=里面涉及到反向梯度更新传播算法
        &#39;&#39;&#39;
        - data is a (n x 2) numpy array, n = # of samples in the dataset.
        - all_y_trues is a numpy array with n elements.
          Elements in all_y_trues correspond to those in data.
        &#39;&#39;&#39;
        learn_rate = 0.1
        epochs = 1000 # number of times to loop through the entire dataset

        for epoch in range(epochs):

            for x, y_true in zip(data, all_y_trues):


                # --- Do a feedforward (we&#39;ll need these values later) 每一次的前向传播，可以用feedforward代替
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)

                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)

                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1
                &#39;&#39;&#39;
                h1,h2,o1 =OurNeuralNetwork.feedforward(data)   #类的使用还不太会
                y_pred = o1
                &#39;&#39;&#39;

                # --- Calculate partial derivatives.
                # --- Naming: d_L_d_w1 represents &quot;partial L / partial w1&quot;
                d_L_d_ypred = -2 * (y_true - y_pred)

                # Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)

                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)

                # Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)

                # Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)

                # --- Update weights and biases
                # Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1

                # Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2

                # Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3

          # --- Calculate total loss at the end of each epoch
        if epoch % 10 == 0:
            y_preds = np.apply_along_axis(self.feedforward, 1, data)
            loss = mse_loss(all_y_trues, y_preds)
            print(1)
            print(&quot;Epoch %d loss: %.3f&quot; % (epoch, loss))

# Define dataset
data = np.array([
  [-2, -1],  # Alice
  [25, 6],   # Bob
  [17, 4],   # Charlie
  [-15, -6], # Diana
])
all_y_trues = np.array([
  1, # Alice
  0, # Bob
  0, # Charlie
  1, # Diana
])

# Train our neural network!
network = OurNeuralNetwork()
print(network.train(data, all_y_trues))
</code></pre>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202007/26/121233-970393.png" alt="image-20200725163508715"></p>
<p><img src="E:\typora图片\image-20200801093306281.png" alt="image-20200801093306281"></p>
<p><img src="E:\typora图片\image-20200801093317595.png" alt="image-20200801093317595"></p>
<p><img src="https://gitee.com/moluggg/image/raw/master/img/202008/01/093649-72979.png" alt="image-20200801093451450"></p>
<p>LINGO 做规划</p>

      
       <hr><span style="font-style: italic;color: gray;"> 转载请注明来源，欢迎对文章中的引用来源进行考证，欢迎指出任何有错误或不够清晰的表达。可以在下面评论区评论，也可以邮件至 2572876783@qq.com </span>
    </div>
</article>


<p>
    <a  class="dashang" onclick="dashangToggle()">💰</a>
</p>






    
        <!-- MathJax配置，可通过单美元符号书写行内公式等 -->
<script type="text/x-mathjax-config">
    // MathJax v2 configuration for this page.
    MathJax.Hub.Config({
    // Output renderer settings (HTML-CSS output processor).
    "HTML-CSS": {
        preferredFont: "TeX",
        availableFonts: ["STIX","TeX"],
        // Allow automatic line breaking of long equations.
        linebreaks: { automatic:true },
        // Typeset fewer equations per chunk on mobile to keep the page responsive.
        EqnChunk: (MathJax.Hub.Browser.isMobile ? 10 : 50)
    },
    // Input processor: how TeX is detected in the page source.
    tex2jax: {
        // Single dollar signs are enabled as inline-math delimiters.
        inlineMath: [ ["$", "$"], ["\\(","\\)"] ],
        // \$ escapes a literal dollar sign.
        processEscapes: true,
        ignoreClass: "tex2jax_ignore|dno",
        // Never scan code/pre blocks for math (avoids mangling code listings).
        skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
    },
    // TeX input-language options.
    TeX: {
        // Automatic AMS-style equation numbering.
        equationNumbers: { autoNumber: "AMS" },
        // Render undefined macros in red instead of failing.
        noUndefined: { attributes: { mathcolor: "red", mathbackground: "#FFEEEE", mathsize: "90%" } },
        Macros: { href: "{}" }
    },
    // Suppress the status messages in the lower-left corner.
    messageStyle: "none"
    });
</script>
<!-- 给MathJax元素添加has-jax class -->
<script type="text/x-mathjax-config">
    MathJax.Hub.Queue(function() {
        var all = MathJax.Hub.getAllJax(), i;
        for(i=0; i < all.length; i += 1) {
            all[i].SourceElement().parentNode.className += ' has-jax';
        }
    });
</script>
<!-- 通过连接CDN加载MathJax的js代码 -->
<script type="text/javascript" async
        src="//cdn.jsdelivr.net/npm/mathjax@2.7.8/unpacked/MathJax.js?config=TeX-MML-AM_CHTML">
</script>
<input type="hidden" id="MathJax-js"
        value="//cdn.jsdelivr.net/npm/mathjax@2.7.8/unpacked/MathJax.js?config=TeX-MML-AM_CHTML">
    




    </div>
    <div class="copyright">
        <p class="footer-entry">
    ©2016-2020 MOLU
</p>
<p class="footer-entry">Built with <a href="https://hexo.io/" target="_blank">Hexo</a> and <a href="https://github.com/yelog/hexo-theme-3-hexo" target="_blank">3-hexo</a> theme</p>

    </div>
    <div class="full-toc">
        <button class="full" data-title="切换全屏 快捷键 s"><span class="min "></span></button>
<a class="" id="rocket" ></a>

    </div>
</div>

<div class="hide_box" onclick="dashangToggle()"></div>
<div class="shang_box">
    <a class="shang_close"  onclick="dashangToggle()">×</a>
    <div class="shang_tit">
        <p>Help us with donation</p>
    </div>
    <div class="shang_payimg">
        <div class="pay_img">
            <img src="/img/alipay.jpg" class="alipay" title="扫码支持" alt="支付宝付款二维码">
            <img src="/img/weixin.jpg" class="weixin" title="扫码支持" alt="微信付款二维码">
        </div>
    </div>
    <div class="shang_payselect">
        <span><label><input type="radio" name="pay" checked value="alipay">alipay</label></span><span><label><input type="radio" name="pay" value="weixin">weixin</label></span>
    </div>
</div>


</body>
<script src="/js/jquery.pjax.js?v=1.1.0" ></script>

<script src="/js/script.js?v=1.1.0" ></script>
<script>
    var img_resize = 'default';
    // Per-article initialization hook, invoked by the theme after each
    // (pjax) page load. Relies on jQuery ($) provided by the theme scripts.
    function initArticle() {
        /* Apply the theme's table style to tables rendered inside posts. */
        
            $("#post .pjax table").addClass("green_title");
        

        /* Donation widget: swap the displayed QR image when the payment
           method radio (alipay / weixin) selection changes. */
        
        $("input[name=pay]").on("click", function () {
            if($("input[name=pay]:checked").val()=="weixin"){
                $(".shang_box .shang_payimg .pay_img").addClass("weixin_img");
            } else {
                $(".shang_box .shang_payimg .pay_img").removeClass("weixin_img");
            }
        })
        

        /* Line numbers for highlighted code blocks (disabled by theme config). */
        

        /* Page-view counter (busuanzi). */
        
        $.getScript("//busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js");
        

        /* Code highlighting: align the line-number gutter's line-height
           with the highlighted code's line-height. */
        $('.pre-numbering').css('line-height',$('.has-numbering').css('line-height'));

        
        
    }

    /* Toggle visibility of the donation dialog and its backdrop overlay. */
    
    function dashangToggle() {
        $(".shang_box").fadeToggle();
        $(".hide_box").fadeToggle();
    }
    

</script>

<!--加入行号的高亮代码块样式-->

<!--自定义样式设置-->
<style>
    /* Sidebar/navigation width for the default (wide) viewport. */
    
    .nav {
        width: 542px;
    }
    .nav.fullscreen {
        margin-left: -542px;
    }
    .nav-left {
        width: 120px;
    }
    
    /* Narrower sidebar on medium screens. */
    @media screen and (max-width: 1468px) {
        .nav {
            width: 492px;
        }
        .nav.fullscreen {
            margin-left: -492px;
        }
        .nav-left {
            width: 100px;
        }
    }
    
    /* Tablet: sidebar is hidden by default (negative margin) and
       shown when the fullscreen class is toggled. */
    @media screen and (max-width: 1024px) {
        .nav {
            width: 492px;
            margin-left: -492px
        }
        .nav.fullscreen {
            margin-left: 0;
        }
    }
    
    /* Phone: sidebar spans the full viewport width. */
    @media screen and (max-width: 426px) {
        .nav {
            width: 100%;
        }
        .nav-left {
            width: 100%;
        }
    }
    
    /* Post-title color in the article list / search results. */
    .nav-right .title-list nav a .post-title, .nav-right .title-list #local-search-result a .post-title {
        color: #383636;
    }
    
    /* Post-date color in the article list / search results. */
    .nav-right .title-list nav a .post-date, .nav-right .title-list #local-search-result a .post-date {
        color: #5e5e5f;
    }
    
    /* Hover highlight for list / search-result entries. */
    .nav-right nav a.hover, #local-search-result a.hover{
        background-color: #e2e0e0;
    }
    
    

    /* List styles (none configured by the theme). */
    

    /* Background image styles (none configured). */
    
    


    /* Blockquote styles (none configured). */
    

    /* Article-list background image (none configured). */
    

    
</style>







</html>
