@book{数学基础2018汪芳庭,
  author       = {汪芳庭},
  title        = {数学基础},
  publisher    = {高等教育出版社},
  year         = {2018},
  month        = oct,
  address      = {北京},
  edition      = {2},
}

@book{数理逻辑2010汪芳庭,
  author       = {汪芳庭},
  title        = {数理逻辑},
  publisher    = {中国科学技术大学出版社},
  year         = {2010},
  address      = {合肥},
  edition      = {2},
}

@book{kleene2002mathematical,
  author       = {Kleene, Stephen Cole},
  title        = {Mathematical Logic},
  publisher    = {Courier Corporation},
  year         = {2002},
}

@article{10.1145/362686.362692,
  author       = {Bloom, Burton H.},
  title        = {Space/Time Trade-Offs in Hash Coding with Allowable Errors},
  journal      = {Commun. ACM},
  year         = {1970},
  month        = jul,
  volume       = {13},
  number       = {7},
  pages        = {422--426},
  numpages     = {5},
  issn         = {0001-0782},
  doi          = {10.1145/362686.362692},
  url          = {https://doi.org/10.1145/362686.362692},
  publisher    = {Association for Computing Machinery},
  address      = {New York, NY, USA},
  issue_date   = {July 1970},
  abstract     = {In this paper trade-offs among certain computational factors in hash coding are analyzed. The paradigm problem considered is that of testing a series of messages one-by-one for membership in a given set of messages. Two new hash-coding methods are examined and compared with a particular conventional hash-coding method. The computational factors considered are the size of the hash area (space), the time required to identify a message as a nonmember of the given set (reject time), and an allowable error frequency. The new methods are intended to reduce the amount of space required to contain the hash-coded information from that associated with conventional methods. The reduction in space is accomplished by exploiting the possibility that a small fraction of errors of commission may be tolerable in some applications, in particular, applications in which a large amount of data is involved and a core resident hash area is consequently not feasible using conventional methods. In such applications, it is envisaged that overall performance could be improved by using a smaller core resident hash area in conjunction with the new methods and, when necessary, by using some secondary and perhaps time-consuming test to ``catch'' the small fraction of errors associated with the new methods. An example is discussed which illustrates possible areas of application for the new methods. Analysis of the paradigm problem demonstrates that allowing a small number of test messages to be falsely identified as members of the given set will permit a much smaller hash area to be used without increasing reject time.},
  keywords     = {storage layout, scatter storage, retrieval efficiency, retrieval trade-offs, searching, storage efficiency, hash coding, hash addressing},
}

@book{概率论与数理统计2009陈希孺,
  author       = {陈希孺},
  title        = {概率论与数理统计},
  publisher    = {中国科学技术大学出版社},
  year         = {2009},
  month        = feb,
  address      = {合肥},
  edition      = {2},
}

@book{机器学习2016周志华,
  author       = {周志华},
  title        = {机器学习},
  publisher    = {清华大学出版社},
  year         = {2016},
  month        = jan,
  edition      = {1},
  address      = {北京},
}

@book{西瓜书,
  author       = {周志华},
  title        = {机器学习},
  publisher    = {清华大学出版社},
  year         = {2016},
  month        = jan,
  edition      = {1},
  address      = {北京},
  internal-note = {Duplicate of entry 机器学习2016周志华 (same work under a second key); kept because the key may be cited — consider consolidating citations onto one key, or using biblatex ids = {...} aliasing},
}

@book{统计学习方法2012李航,
  author       = {李航},
  title        = {统计学习方法},
  publisher    = {清华大学出版社},
  year         = {2012},
  month        = mar,
  edition      = {1},
  address      = {北京},
}

@book{常庚哲2012数学分析教程,
  author       = {常庚哲 and 史济怀},
  title        = {数学分析教程.上册},
  publisher    = {中国科学技术大学出版社},
  year         = {2012},
  month        = aug,
  edition      = {3},
  address      = {合肥},
  isbn         = {978-7-312-03009-3},
}
