{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## Datawhale组队学习Pandas\n",
    "## 第二章 pandas基础\n",
    "## 第二次打卡"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirror.baidu.com/pypi/simple/\n",
      "Collecting pandas==1.1.5\n",
      "\u001b[?25l  Downloading https://mirror.baidu.com/pypi/packages/fd/70/e8eee0cbddf926bf51958c7d6a86bc69167c300fa2ba8e592330a2377d1b/pandas-1.1.5-cp37-cp37m-manylinux1_x86_64.whl (9.5MB)\n",
      "\u001b[K     |████████████████████████████████| 9.5MB 14.3MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas==1.1.5) (2.8.0)\n",
      "Requirement already satisfied: pytz>=2017.2 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas==1.1.5) (2019.3)\n",
      "Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas==1.1.5) (1.16.4)\n",
      "Requirement already satisfied: six>=1.5 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas==1.1.5) (1.15.0)\n",
      "Installing collected packages: pandas\n",
      "  Found existing installation: pandas 0.23.4\n",
      "    Uninstalling pandas-0.23.4:\n",
      "      Successfully uninstalled pandas-0.23.4\n",
      "Successfully installed pandas-1.1.5\n",
      "Looking in indexes: https://mirror.baidu.com/pypi/simple/\n",
      "Requirement already satisfied: xlrd==1.2.0 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (1.2.0)\n",
      "Archive:  data/data65130/data.zip\n",
      "  inflating: data/airport.csv        \n",
      "  inflating: data/audit.csv          \n",
      "  inflating: data/car.csv            \n",
      "  inflating: data/chocolate.csv      \n",
      "  inflating: data/color.xlsx         \n",
      "  inflating: data/company.csv        \n",
      "  inflating: data/Diamonds.csv       \n",
      "  inflating: data/drugs.csv          \n",
      "  inflating: data/fruit.csv          \n",
      "  inflating: data/house_info.xls     \n",
      "  inflating: data/learn_pandas.csv   \n",
      "  inflating: data/missing_chi.csv    \n",
      "  inflating: data/my_csv.csv         \n",
      "  inflating: data/my_csv_saved.csv   \n",
      "  inflating: data/my_excel.xlsx      \n",
      "  inflating: data/my_excel_saved.xlsx  \n",
      "  inflating: data/my_table.txt       \n",
      "  inflating: data/my_table_special_sep.txt  \n",
      "  inflating: data/my_txt_saved.txt   \n",
      "  inflating: data/pokemon.csv        \n",
      "  inflating: data/script.csv         \n",
      "  inflating: data/solar.csv          \n",
      "   creating: data/us_report/\n",
      "  inflating: data/us_report/04-12-2020.csv  \n",
      "  inflating: data/us_report/04-13-2020.csv  \n",
      "  inflating: data/us_report/04-14-2020.csv  \n",
      "  inflating: data/us_report/04-15-2020.csv  \n",
      "  inflating: data/us_report/04-16-2020.csv  \n",
      "  inflating: data/us_report/04-17-2020.csv  \n",
      "  inflating: data/us_report/04-18-2020.csv  \n",
      "  inflating: data/us_report/04-19-2020.csv  \n",
      "  inflating: data/us_report/04-20-2020.csv  \n",
      "  inflating: data/us_report/04-21-2020.csv  \n",
      "  inflating: data/us_report/04-22-2020.csv  \n",
      "  inflating: data/us_report/04-23-2020.csv  \n",
      "  inflating: data/us_report/04-24-2020.csv  \n",
      "  inflating: data/us_report/04-25-2020.csv  \n",
      "  inflating: data/us_report/04-26-2020.csv  \n",
      "  inflating: data/us_report/04-27-2020.csv  \n",
      "  inflating: data/us_report/04-28-2020.csv  \n",
      "  inflating: data/us_report/04-29-2020.csv  \n",
      "  inflating: data/us_report/04-30-2020.csv  \n",
      "  inflating: data/us_report/05-01-2020.csv  \n",
      "  inflating: data/us_report/05-02-2020.csv  \n",
      "  inflating: data/us_report/05-03-2020.csv  \n",
      "  inflating: data/us_report/05-04-2020.csv  \n",
      "  inflating: data/us_report/05-05-2020.csv  \n",
      "  inflating: data/us_report/05-06-2020.csv  \n",
      "  inflating: data/us_report/05-07-2020.csv  \n",
      "  inflating: data/us_report/05-08-2020.csv  \n",
      "  inflating: data/us_report/05-09-2020.csv  \n",
      "  inflating: data/us_report/05-10-2020.csv  \n",
      "  inflating: data/us_report/05-11-2020.csv  \n",
      "  inflating: data/us_report/05-12-2020.csv  \n",
      "  inflating: data/us_report/05-13-2020.csv  \n",
      "  inflating: data/us_report/05-14-2020.csv  \n",
      "  inflating: data/us_report/05-15-2020.csv  \n",
      "  inflating: data/us_report/05-16-2020.csv  \n",
      "  inflating: data/us_report/05-17-2020.csv  \n",
      "  inflating: data/us_report/05-18-2020.csv  \n",
      "  inflating: data/us_report/05-19-2020.csv  \n",
      "  inflating: data/us_report/05-20-2020.csv  \n",
      "  inflating: data/us_report/05-21-2020.csv  \n",
      "  inflating: data/us_report/05-22-2020.csv  \n",
      "  inflating: data/us_report/05-23-2020.csv  \n",
      "  inflating: data/us_report/05-24-2020.csv  \n",
      "  inflating: data/us_report/05-25-2020.csv  \n",
      "  inflating: data/us_report/05-26-2020.csv  \n",
      "  inflating: data/us_report/05-27-2020.csv  \n",
      "  inflating: data/us_report/05-28-2020.csv  \n",
      "  inflating: data/us_report/05-29-2020.csv  \n",
      "  inflating: data/us_report/05-30-2020.csv  \n",
      "  inflating: data/us_report/05-31-2020.csv  \n",
      "  inflating: data/us_report/06-01-2020.csv  \n",
      "  inflating: data/us_report/06-02-2020.csv  \n",
      "  inflating: data/us_report/06-03-2020.csv  \n",
      "  inflating: data/us_report/06-04-2020.csv  \n",
      "  inflating: data/us_report/06-05-2020.csv  \n",
      "  inflating: data/us_report/06-06-2020.csv  \n",
      "  inflating: data/us_report/06-07-2020.csv  \n",
      "  inflating: data/us_report/06-08-2020.csv  \n",
      "  inflating: data/us_report/06-09-2020.csv  \n",
      "  inflating: data/us_report/06-10-2020.csv  \n",
      "  inflating: data/us_report/06-11-2020.csv  \n",
      "  inflating: data/us_report/06-12-2020.csv  \n",
      "  inflating: data/us_report/06-13-2020.csv  \n",
      "  inflating: data/us_report/06-14-2020.csv  \n",
      "  inflating: data/us_report/06-15-2020.csv  \n",
      "  inflating: data/us_report/06-16-2020.csv  \n",
      "  inflating: data/us_report/06-17-2020.csv  \n",
      "  inflating: data/us_report/06-18-2020.csv  \n",
      "  inflating: data/us_report/06-19-2020.csv  \n",
      "  inflating: data/us_report/06-20-2020.csv  \n",
      "  inflating: data/us_report/06-21-2020.csv  \n",
      "  inflating: data/us_report/06-22-2020.csv  \n",
      "  inflating: data/us_report/06-23-2020.csv  \n",
      "  inflating: data/us_report/06-24-2020.csv  \n",
      "  inflating: data/us_report/06-25-2020.csv  \n",
      "  inflating: data/us_report/06-26-2020.csv  \n",
      "  inflating: data/us_report/06-27-2020.csv  \n",
      "  inflating: data/us_report/06-28-2020.csv  \n",
      "  inflating: data/us_report/06-29-2020.csv  \n",
      "  inflating: data/us_report/06-30-2020.csv  \n",
      "  inflating: data/us_report/07-01-2020.csv  \n",
      "  inflating: data/us_report/07-02-2020.csv  \n",
      "  inflating: data/us_report/07-03-2020.csv  \n",
      "  inflating: data/us_report/07-04-2020.csv  \n",
      "  inflating: data/us_report/07-05-2020.csv  \n",
      "  inflating: data/us_report/07-06-2020.csv  \n",
      "  inflating: data/us_report/07-07-2020.csv  \n",
      "  inflating: data/us_report/07-08-2020.csv  \n",
      "  inflating: data/us_report/07-09-2020.csv  \n",
      "  inflating: data/us_report/07-10-2020.csv  \n",
      "  inflating: data/us_report/07-11-2020.csv  \n",
      "  inflating: data/us_report/07-12-2020.csv  \n",
      "  inflating: data/us_report/07-13-2020.csv  \n",
      "  inflating: data/us_report/07-14-2020.csv  \n",
      "  inflating: data/us_report/07-15-2020.csv  \n",
      "  inflating: data/us_report/07-16-2020.csv  \n",
      "  inflating: data/us_report/07-17-2020.csv  \n",
      "  inflating: data/us_report/07-18-2020.csv  \n",
      "  inflating: data/us_report/07-19-2020.csv  \n",
      "  inflating: data/us_report/07-20-2020.csv  \n",
      "  inflating: data/us_report/07-21-2020.csv  \n",
      "  inflating: data/us_report/07-22-2020.csv  \n",
      "  inflating: data/us_report/07-23-2020.csv  \n",
      "  inflating: data/us_report/07-24-2020.csv  \n",
      "  inflating: data/us_report/07-25-2020.csv  \n",
      "  inflating: data/us_report/07-26-2020.csv  \n",
      "  inflating: data/us_report/07-27-2020.csv  \n",
      "  inflating: data/us_report/07-28-2020.csv  \n",
      "  inflating: data/us_report/07-29-2020.csv  \n",
      "  inflating: data/us_report/07-30-2020.csv  \n",
      "  inflating: data/us_report/07-31-2020.csv  \n",
      "  inflating: data/us_report/08-01-2020.csv  \n",
      "  inflating: data/us_report/08-02-2020.csv  \n",
      "  inflating: data/us_report/08-03-2020.csv  \n",
      "  inflating: data/us_report/08-04-2020.csv  \n",
      "  inflating: data/us_report/08-05-2020.csv  \n",
      "  inflating: data/us_report/08-06-2020.csv  \n",
      "  inflating: data/us_report/08-07-2020.csv  \n",
      "  inflating: data/us_report/08-08-2020.csv  \n",
      "  inflating: data/us_report/08-09-2020.csv  \n",
      "  inflating: data/us_report/08-10-2020.csv  \n",
      "  inflating: data/us_report/08-11-2020.csv  \n",
      "  inflating: data/us_report/08-12-2020.csv  \n",
      "  inflating: data/us_report/08-13-2020.csv  \n",
      "  inflating: data/us_report/08-14-2020.csv  \n",
      "  inflating: data/us_report/08-15-2020.csv  \n",
      "  inflating: data/us_report/08-16-2020.csv  \n",
      "  inflating: data/us_report/08-17-2020.csv  \n",
      "  inflating: data/us_report/08-18-2020.csv  \n",
      "  inflating: data/us_report/08-19-2020.csv  \n",
      "  inflating: data/us_report/08-20-2020.csv  \n",
      "  inflating: data/us_report/08-21-2020.csv  \n",
      "  inflating: data/us_report/08-22-2020.csv  \n",
      "  inflating: data/us_report/08-23-2020.csv  \n",
      "  inflating: data/us_report/08-24-2020.csv  \n",
      "  inflating: data/us_report/08-25-2020.csv  \n",
      "  inflating: data/us_report/08-26-2020.csv  \n",
      "  inflating: data/us_report/08-27-2020.csv  \n",
      "  inflating: data/us_report/08-28-2020.csv  \n",
      "  inflating: data/us_report/08-29-2020.csv  \n",
      "  inflating: data/us_report/08-30-2020.csv  \n",
      "  inflating: data/us_report/08-31-2020.csv  \n",
      "  inflating: data/us_report/09-01-2020.csv  \n",
      "  inflating: data/us_report/09-02-2020.csv  \n",
      "  inflating: data/us_report/09-03-2020.csv  \n",
      "  inflating: data/us_report/09-04-2020.csv  \n",
      "  inflating: data/us_report/09-05-2020.csv  \n",
      "  inflating: data/us_report/09-06-2020.csv  \n",
      "  inflating: data/us_report/09-07-2020.csv  \n",
      "  inflating: data/us_report/09-08-2020.csv  \n",
      "  inflating: data/us_report/09-09-2020.csv  \n",
      "  inflating: data/us_report/09-10-2020.csv  \n",
      "  inflating: data/us_report/09-11-2020.csv  \n",
      "  inflating: data/us_report/09-12-2020.csv  \n",
      "  inflating: data/us_report/09-13-2020.csv  \n",
      "  inflating: data/us_report/09-14-2020.csv  \n",
      "  inflating: data/us_report/09-15-2020.csv  \n",
      "  inflating: data/us_report/09-16-2020.csv  \n",
      "  inflating: data/us_report/09-17-2020.csv  \n",
      "  inflating: data/us_report/09-18-2020.csv  \n",
      "  inflating: data/us_report/09-19-2020.csv  \n",
      "  inflating: data/us_report/09-20-2020.csv  \n",
      "  inflating: data/us_report/09-21-2020.csv  \n",
      "  inflating: data/us_report/09-22-2020.csv  \n",
      "  inflating: data/us_report/09-23-2020.csv  \n",
      "  inflating: data/us_report/09-24-2020.csv  \n",
      "  inflating: data/us_report/09-25-2020.csv  \n",
      "  inflating: data/us_report/09-26-2020.csv  \n",
      "  inflating: data/us_report/09-27-2020.csv  \n",
      "  inflating: data/us_report/09-28-2020.csv  \n",
      "  inflating: data/us_report/09-29-2020.csv  \n",
      "  inflating: data/us_report/09-30-2020.csv  \n",
      "  inflating: data/us_report/10-01-2020.csv  \n",
      "  inflating: data/us_report/10-02-2020.csv  \n",
      "  inflating: data/us_report/10-03-2020.csv  \n",
      "  inflating: data/us_report/10-04-2020.csv  \n",
      "  inflating: data/us_report/10-05-2020.csv  \n",
      "  inflating: data/us_report/10-06-2020.csv  \n",
      "  inflating: data/us_report/10-07-2020.csv  \n",
      "  inflating: data/us_report/10-08-2020.csv  \n",
      "  inflating: data/us_report/10-09-2020.csv  \n",
      "  inflating: data/us_report/10-10-2020.csv  \n",
      "  inflating: data/us_report/10-11-2020.csv  \n",
      "  inflating: data/us_report/10-12-2020.csv  \n",
      "  inflating: data/us_report/10-13-2020.csv  \n",
      "  inflating: data/us_report/10-14-2020.csv  \n",
      "  inflating: data/us_report/10-15-2020.csv  \n",
      "  inflating: data/us_report/10-16-2020.csv  \n",
      "  inflating: data/us_report/10-17-2020.csv  \n",
      "  inflating: data/us_report/10-18-2020.csv  \n",
      "  inflating: data/us_report/10-19-2020.csv  \n",
      "  inflating: data/us_report/10-20-2020.csv  \n",
      "  inflating: data/us_report/10-21-2020.csv  \n",
      "  inflating: data/us_report/10-22-2020.csv  \n",
      "  inflating: data/us_report/10-23-2020.csv  \n",
      "  inflating: data/us_report/10-24-2020.csv  \n",
      "  inflating: data/us_report/10-25-2020.csv  \n",
      "  inflating: data/us_report/10-26-2020.csv  \n",
      "  inflating: data/us_report/10-27-2020.csv  \n",
      "  inflating: data/us_report/10-28-2020.csv  \n",
      "  inflating: data/us_report/10-29-2020.csv  \n",
      "  inflating: data/us_report/10-30-2020.csv  \n",
      "  inflating: data/us_report/10-31-2020.csv  \n",
      "  inflating: data/us_report/11-01-2020.csv  \n",
      "  inflating: data/us_report/11-02-2020.csv  \n",
      "  inflating: data/us_report/11-03-2020.csv  \n",
      "  inflating: data/us_report/11-04-2020.csv  \n",
      "  inflating: data/us_report/11-05-2020.csv  \n",
      "  inflating: data/us_report/11-06-2020.csv  \n",
      "  inflating: data/us_report/11-07-2020.csv  \n",
      "  inflating: data/us_report/11-08-2020.csv  \n",
      "  inflating: data/us_report/11-09-2020.csv  \n",
      "  inflating: data/us_report/11-10-2020.csv  \n",
      "  inflating: data/us_report/11-11-2020.csv  \n",
      "  inflating: data/us_report/11-12-2020.csv  \n",
      "  inflating: data/us_report/11-13-2020.csv  \n",
      "  inflating: data/us_report/11-14-2020.csv  \n",
      "  inflating: data/us_report/11-15-2020.csv  \n",
      "  inflating: data/us_report/11-16-2020.csv  \n"
     ]
    }
   ],
   "source": [
    "!pip install pandas==1.1.5\r\n",
    "!pip install xlrd==1.2.0\r\n",
    "!unzip data/data65130/data.zip -d data/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "在开始学习前，请保证 pandas 的版本号不低于1.1.4，否则请务必升级！\n",
    "\n",
    "## 一、文件的读取和写入\n",
    "### 1. 文件读取"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "`pandas`可以读取的文件格式有很多，这里主要介绍读取`csv, excel, txt`文件。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange</td>\n",
       "      <td>2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon</td>\n",
       "      <td>2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3    col4      col5\n",
       "0     2    a   1.4   apple  2020/1/1\n",
       "1     3    b   3.4  banana  2020/1/2\n",
       "2     6    c   2.5  orange  2020/1/5\n",
       "3     5    d   3.2   lemon  2020/1/7"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_csv = pd.read_csv('data/my_csv.csv')\n",
    "df_csv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple 2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana 2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange 2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon 2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3             col4\n",
       "0     2    a   1.4   apple 2020/1/1\n",
       "1     3    b   3.4  banana 2020/1/2\n",
       "2     6    c   2.5  orange 2020/1/5\n",
       "3     5    d   3.2   lemon 2020/1/7"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_txt = pd.read_table('data/my_table.txt')\n",
    "df_txt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange</td>\n",
       "      <td>2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon</td>\n",
       "      <td>2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3    col4      col5\n",
       "0     2    a   1.4   apple  2020/1/1\n",
       "1     3    b   3.4  banana  2020/1/2\n",
       "2     6    c   2.5  orange  2020/1/5\n",
       "3     5    d   3.2   lemon  2020/1/7"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_excel = pd.read_excel('data/my_excel.xlsx')\n",
    "df_excel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange</td>\n",
       "      <td>2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon</td>\n",
       "      <td>2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3    col4      col5\n",
       "0     2    a   1.4   apple  2020/1/1\n",
       "1     3    b   3.4  banana  2020/1/2\n",
       "2     6    c   2.5  orange  2020/1/5\n",
       "3     5    d   3.2   lemon  2020/1/7"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_excel = pd.read_excel('data/my_excel.xlsx')\r\n",
    "df_excel"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "这里有一些常用的公共参数，`header=None`表示第一行不作为列名，`index_col`表示把某一列或几列作为索引，索引的内容将会在第三章进行详述，`usecols`表示读取列的集合，默认读取所有的列，`parse_dates`表示需要转化为时间的列，关于时间序列的有关内容将在第十章讲解，`nrows`表示读取的数据行数。上面这些参数在上述的三个函数里都可以使用。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>col1</td>\n",
       "      <td>col2</td>\n",
       "      <td>col3</td>\n",
       "      <td>col4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple 2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana 2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange 2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon 2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      0     1     2                3\n",
       "0  col1  col2  col3             col4\n",
       "1     2     a   1.4   apple 2020/1/1\n",
       "2     3     b   3.4  banana 2020/1/2\n",
       "3     6     c   2.5  orange 2020/1/5\n",
       "4     5     d   3.2   lemon 2020/1/7"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_table('data/my_table.txt', header=None)\r\n",
     "# 相比于原来的结果：header=None 使列名变为默认的 0 1 2 3，原表头 col1~col4 被当作第一行数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "使用\n",
     "```\n",
     "pd.read_table?\n",
     "```\n",
     "来详细查看官方文档：\n",
    "```\n",
    "pd.read_table(\n",
    "    filepath_or_buffer: Union[str, pathlib.Path, IO[~AnyStr]],\n",
    "    sep='\\t',\n",
    "    delimiter=None,\n",
    "    header='infer',\n",
    "    names=None,\n",
    "    index_col=None,\n",
    "    usecols=None,\n",
    "    squeeze=False,\n",
    "    prefix=None,\n",
    "    mangle_dupe_cols=True,\n",
    "    dtype=None,\n",
    "    engine=None,\n",
    "    converters=None,\n",
    "    true_values=None,\n",
    "    false_values=None,\n",
    "    skipinitialspace=False,\n",
    "    skiprows=None,\n",
    "    skipfooter=0,\n",
    "    nrows=None,\n",
    "    na_values=None,\n",
    "    keep_default_na=True,\n",
    "    na_filter=True,\n",
    "    verbose=False,\n",
    "    skip_blank_lines=True,\n",
    "    parse_dates=False,\n",
    "    infer_datetime_format=False,\n",
    "    keep_date_col=False,\n",
    "    date_parser=None,\n",
    "    dayfirst=False,\n",
    "    cache_dates=True,\n",
    "    iterator=False,\n",
    "    chunksize=None,\n",
    "    compression='infer',\n",
    "    thousands=None,\n",
    "    decimal: str = '.',\n",
    "    lineterminator=None,\n",
    "    quotechar='\"',\n",
    "    quoting=0,\n",
    "    doublequote=True,\n",
    "    escapechar=None,\n",
    "    comment=None,\n",
    "    encoding=None,\n",
    "    dialect=None,\n",
    "    error_bad_lines=True,\n",
    "    warn_bad_lines=True,\n",
    "    delim_whitespace=False,\n",
    "    low_memory=True,\n",
    "    memory_map=False,\n",
    "    float_precision=None,\n",
    ")\n",
    "Docstring:\n",
    "Read general delimited file into DataFrame.\n",
    "\n",
    "Also supports optionally iterating or breaking of the file\n",
    "into chunks.\n",
    "\n",
    "Additional help can be found in the online docs for\n",
    "`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "filepath_or_buffer : str, path object or file-like object\n",
    "    Any valid string path is acceptable. The string could be a URL. Valid\n",
    "    URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is\n",
    "    expected. A local file could be: file://localhost/path/to/table.csv.\n",
    "\n",
    "    If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n",
    "\n",
    "    By file-like object, we refer to objects with a ``read()`` method, such as\n",
    "    a file handler (e.g. via builtin ``open`` function) or ``StringIO``.\n",
    "sep : str, default '\\\\t' (tab-stop)\n",
    "    Delimiter to use. If sep is None, the C engine cannot automatically detect\n",
    "    the separator, but the Python parsing engine can, meaning the latter will\n",
    "    be used and automatically detect the separator by Python's builtin sniffer\n",
    "    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n",
    "    different from ``'\\s+'`` will be interpreted as regular expressions and\n",
    "    will also force the use of the Python parsing engine. Note that regex\n",
    "    delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.\n",
    "delimiter : str, default ``None``\n",
    "    Alias for sep.\n",
    "header : int, list of int, default 'infer'\n",
    "    Row number(s) to use as the column names, and the start of the\n",
    "    data.  Default behavior is to infer the column names: if no names\n",
    "    are passed the behavior is identical to ``header=0`` and column\n",
    "    names are inferred from the first line of the file, if column\n",
    "    names are passed explicitly then the behavior is identical to\n",
    "    ``header=None``. Explicitly pass ``header=0`` to be able to\n",
    "    replace existing names. The header can be a list of integers that\n",
    "    specify row locations for a multi-index on the columns\n",
    "    e.g. [0,1,3]. Intervening rows that are not specified will be\n",
    "    skipped (e.g. 2 in this example is skipped). Note that this\n",
    "    parameter ignores commented lines and empty lines if\n",
    "    ``skip_blank_lines=True``, so ``header=0`` denotes the first line of\n",
    "    data rather than the first line of the file.\n",
    "names : array-like, optional\n",
    "    List of column names to use. If the file contains a header row,\n",
    "    then you should explicitly pass ``header=0`` to override the column names.\n",
    "    Duplicates in this list are not allowed.\n",
    "index_col : int, str, sequence of int / str, or False, default ``None``\n",
    "  Column(s) to use as the row labels of the ``DataFrame``, either given as\n",
    "  string name or column index. If a sequence of int / str is given, a\n",
    "  MultiIndex is used.\n",
    "\n",
    "  Note: ``index_col=False`` can be used to force pandas to *not* use the first\n",
    "  column as the index, e.g. when you have a malformed file with delimiters at\n",
    "  the end of each line.\n",
    "usecols : list-like or callable, optional\n",
    "    Return a subset of the columns. If list-like, all elements must either\n",
    "    be positional (i.e. integer indices into the document columns) or strings\n",
    "    that correspond to column names provided either by the user in `names` or\n",
    "    inferred from the document header row(s). For example, a valid list-like\n",
    "    `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.\n",
    "    Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n",
    "    To instantiate a DataFrame from ``data`` with element order preserved use\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\n",
    "    in ``['foo', 'bar']`` order or\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n",
    "    for ``['bar', 'foo']`` order.\n",
    "\n",
    "    If callable, the callable function will be evaluated against the column\n",
    "    names, returning names where the callable function evaluates to True. An\n",
    "    example of a valid callable argument would be ``lambda x: x.upper() in\n",
    "    ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n",
    "    parsing time and lower memory usage.\n",
    "squeeze : bool, default False\n",
    "    If the parsed data only contains one column then return a Series.\n",
    "prefix : str, optional\n",
    "    Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\n",
    "mangle_dupe_cols : bool, default True\n",
    "    Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n",
    "    'X'...'X'. Passing in False will cause data to be overwritten if there\n",
    "    are duplicate names in the columns.\n",
    "dtype : Type name or dict of column -> type, optional\n",
    "    Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32,\n",
    "    'c': 'Int64'}\n",
    "    Use `str` or `object` together with suitable `na_values` settings\n",
    "    to preserve and not interpret dtype.\n",
    "    If converters are specified, they will be applied INSTEAD\n",
    "    of dtype conversion.\n",
    "engine : {'c', 'python'}, optional\n",
    "    Parser engine to use. The C engine is faster while the python engine is\n",
    "    currently more feature-complete.\n",
    "converters : dict, optional\n",
    "    Dict of functions for converting values in certain columns. Keys can either\n",
    "    be integers or column labels.\n",
    "true_values : list, optional\n",
    "    Values to consider as True.\n",
    "false_values : list, optional\n",
    "    Values to consider as False.\n",
    "skipinitialspace : bool, default False\n",
    "    Skip spaces after delimiter.\n",
    "skiprows : list-like, int or callable, optional\n",
    "    Line numbers to skip (0-indexed) or number of lines to skip (int)\n",
    "    at the start of the file.\n",
    "\n",
    "    If callable, the callable function will be evaluated against the row\n",
    "    indices, returning True if the row should be skipped and False otherwise.\n",
    "    An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\n",
    "skipfooter : int, default 0\n",
    "    Number of lines at bottom of file to skip (Unsupported with engine='c').\n",
    "nrows : int, optional\n",
    "    Number of rows of file to read. Useful for reading pieces of large files.\n",
    "na_values : scalar, str, list-like, or dict, optional\n",
    "    Additional strings to recognize as NA/NaN. If dict passed, specific\n",
    "    per-column NA values.  By default the following values are interpreted as\n",
    "    NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',\n",
    "    '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a',\n",
    "    'nan', 'null'.\n",
    "keep_default_na : bool, default True\n",
    "    Whether or not to include the default NaN values when parsing the data.\n",
    "    Depending on whether `na_values` is passed in, the behavior is as follows:\n",
    "\n",
    "    * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n",
    "      is appended to the default NaN values used for parsing.\n",
    "    * If `keep_default_na` is True, and `na_values` are not specified, only\n",
    "      the default NaN values are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are specified, only\n",
    "      the NaN values specified `na_values` are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are not specified, no\n",
    "      strings will be parsed as NaN.\n",
    "\n",
    "    Note that if `na_filter` is passed in as False, the `keep_default_na` and\n",
    "    `na_values` parameters will be ignored.\n",
    "na_filter : bool, default True\n",
    "    Detect missing value markers (empty strings and the value of na_values). In\n",
    "    data without any NAs, passing na_filter=False can improve the performance\n",
    "    of reading a large file.\n",
    "verbose : bool, default False\n",
    "    Indicate number of NA values placed in non-numeric columns.\n",
    "skip_blank_lines : bool, default True\n",
    "    If True, skip over blank lines rather than interpreting as NaN values.\n",
    "parse_dates : bool or list of int or names or list of lists or dict, default False\n",
    "    The behavior is as follows:\n",
    "\n",
    "    * boolean. If True -> try parsing the index.\n",
    "    * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n",
    "      each as a separate date column.\n",
    "    * list of lists. e.g.  If [[1, 3]] -> combine columns 1 and 3 and parse as\n",
    "      a single date column.\n",
    "    * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call\n",
    "      result 'foo'\n",
    "\n",
    "    If a column or index cannot be represented as an array of datetimes,\n",
    "    say because of an unparseable value or a mixture of timezones, the column\n",
    "    or index will be returned unaltered as an object data type. For\n",
    "    non-standard datetime parsing, use ``pd.to_datetime`` after\n",
    "    ``pd.read_csv``. To parse an index or column with a mixture of timezones,\n",
    "    specify ``date_parser`` to be a partially-applied\n",
    "    :func:`pandas.to_datetime` with ``utc=True``. See\n",
    "    :ref:`io.csv.mixed_timezones` for more.\n",
    "\n",
    "    Note: A fast-path exists for iso8601-formatted dates.\n",
    "infer_datetime_format : bool, default False\n",
    "    If True and `parse_dates` is enabled, pandas will attempt to infer the\n",
    "    format of the datetime strings in the columns, and if it can be inferred,\n",
    "    switch to a faster method of parsing them. In some cases this can increase\n",
    "    the parsing speed by 5-10x.\n",
    "keep_date_col : bool, default False\n",
    "    If True and `parse_dates` specifies combining multiple columns then\n",
    "    keep the original columns.\n",
    "date_parser : function, optional\n",
    "    Function to use for converting a sequence of string columns to an array of\n",
    "    datetime instances. The default uses ``dateutil.parser.parser`` to do the\n",
    "    conversion. Pandas will try to call `date_parser` in three different ways,\n",
    "    advancing to the next if an exception occurs: 1) Pass one or more arrays\n",
    "    (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n",
    "    string values from the columns defined by `parse_dates` into a single array\n",
    "    and pass that; and 3) call `date_parser` once for each row using one or\n",
    "    more strings (corresponding to the columns defined by `parse_dates`) as\n",
    "    arguments.\n",
    "dayfirst : bool, default False\n",
    "    DD/MM format dates, international and European format.\n",
    "cache_dates : bool, default True\n",
    "    If True, use a cache of unique, converted dates to apply the datetime\n",
    "    conversion. May produce significant speed-up when parsing duplicate\n",
    "    date strings, especially ones with timezone offsets.\n",
    "\n",
    "    .. versionadded:: 0.25.0\n",
    "iterator : bool, default False\n",
    "    Return TextFileReader object for iteration or getting chunks with\n",
    "    ``get_chunk()``.\n",
    "chunksize : int, optional\n",
    "    Return TextFileReader object for iteration.\n",
    "    See the `IO Tools docs\n",
    "    <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n",
    "    for more information on ``iterator`` and ``chunksize``.\n",
    "compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n",
    "    For on-the-fly decompression of on-disk data. If 'infer' and\n",
    "    `filepath_or_buffer` is path-like, then detect compression from the\n",
    "    following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n",
    "    decompression). If using 'zip', the ZIP file must contain only one data\n",
    "    file to be read in. Set to None for no decompression.\n",
    "thousands : str, optional\n",
    "    Thousands separator.\n",
    "decimal : str, default '.'\n",
    "    Character to recognize as decimal point (e.g. use ',' for European data).\n",
    "lineterminator : str (length 1), optional\n",
    "    Character to break file into lines. Only valid with C parser.\n",
    "quotechar : str (length 1), optional\n",
    "    The character used to denote the start and end of a quoted item. Quoted\n",
    "    items can include the delimiter and it will be ignored.\n",
    "quoting : int or csv.QUOTE_* instance, default 0\n",
    "    Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n",
    "    QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\n",
    "doublequote : bool, default ``True``\n",
    "   When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate\n",
    "   whether or not to interpret two consecutive quotechar elements INSIDE a\n",
    "   field as a single ``quotechar`` element.\n",
    "escapechar : str (length 1), optional\n",
    "    One-character string used to escape other characters.\n",
    "comment : str, optional\n",
    "    Indicates remainder of line should not be parsed. If found at the beginning\n",
    "    of a line, the line will be ignored altogether. This parameter must be a\n",
    "    single character. Like empty lines (as long as ``skip_blank_lines=True``),\n",
    "    fully commented lines are ignored by the parameter `header` but not by\n",
    "    `skiprows`. For example, if ``comment='#'``, parsing\n",
    "    ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being\n",
    "    treated as the header.\n",
    "encoding : str, optional\n",
    "    Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python\n",
    "    standard encodings\n",
    "    <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .\n",
    "dialect : str or csv.Dialect, optional\n",
    "    If provided, this parameter will override values (default or not) for the\n",
    "    following parameters: `delimiter`, `doublequote`, `escapechar`,\n",
    "    `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n",
    "    override values, a ParserWarning will be issued. See csv.Dialect\n",
    "    documentation for more details.\n",
    "error_bad_lines : bool, default True\n",
    "    Lines with too many fields (e.g. a csv line with too many commas) will by\n",
    "    default cause an exception to be raised, and no DataFrame will be returned.\n",
    "    If False, then these \"bad lines\" will dropped from the DataFrame that is\n",
    "    returned.\n",
    "warn_bad_lines : bool, default True\n",
    "    If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n",
    "    \"bad line\" will be output.\n",
    "delim_whitespace : bool, default False\n",
    "    Specifies whether or not whitespace (e.g. ``' '`` or ``'    '``) will be\n",
    "    used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n",
    "    is set to True, nothing should be passed in for the ``delimiter``\n",
    "    parameter.\n",
    "low_memory : bool, default True\n",
    "    Internally process the file in chunks, resulting in lower memory use\n",
    "    while parsing, but possibly mixed type inference.  To ensure no mixed\n",
    "    types either set False, or specify the type with the `dtype` parameter.\n",
    "    Note that the entire file is read into a single DataFrame regardless,\n",
    "    use the `chunksize` or `iterator` parameter to return the data in chunks.\n",
    "    (Only valid with C parser).\n",
    "memory_map : bool, default False\n",
    "    If a filepath is provided for `filepath_or_buffer`, map the file object\n",
    "    directly onto memory and access the data directly from there. Using this\n",
    "    option can improve performance because there is no longer any I/O overhead.\n",
    "float_precision : str, optional\n",
    "    Specifies which converter the C engine should use for floating-point\n",
    "    values. The options are `None` for the ordinary converter,\n",
    "    `high` for the high-precision converter, and `round_trip` for the\n",
    "    round-trip converter.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame or TextParser\n",
    "    A comma-separated values (csv) file is returned as two-dimensional\n",
    "    data structure with labeled axes.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n",
    "read_csv : Read a comma-separated values (csv) file into DataFrame.\n",
    "read_fwf : Read a table of fixed-width formatted lines into DataFrame.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> pd.read_table('data.csv')  # doctest: +SKIP\n",
    "```\n",
    "这个文档很长，下面挑出其中几个有意思的参数来看\n",
    "分隔符: 默认是tab分隔\n",
    "```\n",
    "sep : str, default '\\\\t' (tab-stop)\n",
    "    Delimiter to use. If sep is None, the C engine cannot automatically detect\n",
    "    the separator, but the Python parsing engine can, meaning the latter will\n",
    "    be used and automatically detect the separator by Python's builtin sniffer\n",
    "    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n",
    "    different from ``'\\s+'`` will be interpreted as regular expressions and\n",
    "    will also force the use of the Python parsing engine. Note that regex\n",
    "    delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.\n",
    "```\n",
    "\n",
    "表头：默认值为 'infer'，即自动推断列名；当 ``skip_blank_lines=True`` 时，header 参数会忽略注释行和空行，因此 ``header=0`` 指的是数据的第一行而非文件的第一行\n",
    "```\n",
    "header : int, list of int, default 'infer'\n",
    "    Row number(s) to use as the column names, and the start of the\n",
    "    data.  Default behavior is to infer the column names: if no names\n",
    "    are passed the behavior is identical to ``header=0`` and column\n",
    "    names are inferred from the first line of the file, if column\n",
    "    names are passed explicitly then the behavior is identical to\n",
    "    ``header=None``. Explicitly pass ``header=0`` to be able to\n",
    "    replace existing names. The header can be a list of integers that\n",
    "    specify row locations for a multi-index on the columns\n",
    "    e.g. [0,1,3]. Intervening rows that are not specified will be\n",
    "    skipped (e.g. 2 in this example is skipped). Note that this\n",
    "    parameter ignores commented lines and empty lines if\n",
    "    ``skip_blank_lines=True``, so ``header=0`` denotes the first line of\n",
    "    data rather than the first line of the file.\n",
    "```\n",
    "low_memory 节省内存：内部按块（chunk）处理文件，从而降低解析时的内存占用，但代价是可能产生混合类型推断；若要避免混合类型，应设为 False 或用 `dtype` 显式指定类型。注意无论如何整个文件最终仍被读入一个单独的 DataFrame（仅对 C 解析器有效）\n",
    "```\n",
    "low_memory : bool, default True\n",
    "    Internally process the file in chunks, resulting in lower memory use\n",
    "    while parsing, but possibly mixed type inference.  To ensure no mixed\n",
    "    types either set False, or specify the type with the `dtype` parameter.\n",
    "    Note that the entire file is read into a single DataFrame regardless,\n",
    "    use the `chunksize` or `iterator` parameter to return the data in chunks.\n",
    "    (Only valid with C parser).\n",
    "```\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <th>a</th>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <th>b</th>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020/1/2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <th>c</th>\n",
       "      <td>2.5</td>\n",
       "      <td>orange</td>\n",
       "      <td>2020/1/5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <th>d</th>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon</td>\n",
       "      <td>2020/1/7</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "           col3    col4      col5\n",
       "col1 col2                        \n",
       "2    a      1.4   apple  2020/1/1\n",
       "3    b      3.4  banana  2020/1/2\n",
       "6    c      2.5  orange  2020/1/5\n",
       "5    d      3.2   lemon  2020/1/7"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_csv('data/my_csv.csv', index_col=['col1', 'col2'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "\n",
    "```\n",
    "pd.read_csv(\n",
    "    filepath_or_buffer: Union[str, pathlib.Path, IO[~AnyStr]],\n",
    "    sep=',',\n",
    "    delimiter=None,\n",
    "    header='infer',\n",
    "    names=None,\n",
    "    index_col=None,\n",
    "    usecols=None,\n",
    "    squeeze=False,\n",
    "    prefix=None,\n",
    "    mangle_dupe_cols=True,\n",
    "    dtype=None,\n",
    "    engine=None,\n",
    "    converters=None,\n",
    "    true_values=None,\n",
    "    false_values=None,\n",
    "    skipinitialspace=False,\n",
    "    skiprows=None,\n",
    "    skipfooter=0,\n",
    "    nrows=None,\n",
    "    na_values=None,\n",
    "    keep_default_na=True,\n",
    "    na_filter=True,\n",
    "    verbose=False,\n",
    "    skip_blank_lines=True,\n",
    "    parse_dates=False,\n",
    "    infer_datetime_format=False,\n",
    "    keep_date_col=False,\n",
    "    date_parser=None,\n",
    "    dayfirst=False,\n",
    "    cache_dates=True,\n",
    "    iterator=False,\n",
    "    chunksize=None,\n",
    "    compression='infer',\n",
    "    thousands=None,\n",
    "    decimal: str = '.',\n",
    "    lineterminator=None,\n",
    "    quotechar='\"',\n",
    "    quoting=0,\n",
    "    doublequote=True,\n",
    "    escapechar=None,\n",
    "    comment=None,\n",
    "    encoding=None,\n",
    "    dialect=None,\n",
    "    error_bad_lines=True,\n",
    "    warn_bad_lines=True,\n",
    "    delim_whitespace=False,\n",
    "    low_memory=True,\n",
    "    memory_map=False,\n",
    "    float_precision=None,\n",
    ")\n",
    "Docstring:\n",
    "Read a comma-separated values (csv) file into DataFrame.\n",
    "\n",
    "Also supports optionally iterating or breaking of the file\n",
    "into chunks.\n",
    "\n",
    "Additional help can be found in the online docs for\n",
    "`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "filepath_or_buffer : str, path object or file-like object\n",
    "    Any valid string path is acceptable. The string could be a URL. Valid\n",
    "    URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is\n",
    "    expected. A local file could be: file://localhost/path/to/table.csv.\n",
    "\n",
    "    If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n",
    "\n",
    "    By file-like object, we refer to objects with a ``read()`` method, such as\n",
    "    a file handler (e.g. via builtin ``open`` function) or ``StringIO``.\n",
    "sep : str, default ','\n",
    "    Delimiter to use. If sep is None, the C engine cannot automatically detect\n",
    "    the separator, but the Python parsing engine can, meaning the latter will\n",
    "    be used and automatically detect the separator by Python's builtin sniffer\n",
    "    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n",
    "    different from ``'\\s+'`` will be interpreted as regular expressions and\n",
    "    will also force the use of the Python parsing engine. Note that regex\n",
    "    delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.\n",
    "delimiter : str, default ``None``\n",
    "    Alias for sep.\n",
    "header : int, list of int, default 'infer'\n",
    "    Row number(s) to use as the column names, and the start of the\n",
    "    data.  Default behavior is to infer the column names: if no names\n",
    "    are passed the behavior is identical to ``header=0`` and column\n",
    "    names are inferred from the first line of the file, if column\n",
    "    names are passed explicitly then the behavior is identical to\n",
    "    ``header=None``. Explicitly pass ``header=0`` to be able to\n",
    "    replace existing names. The header can be a list of integers that\n",
    "    specify row locations for a multi-index on the columns\n",
    "    e.g. [0,1,3]. Intervening rows that are not specified will be\n",
    "    skipped (e.g. 2 in this example is skipped). Note that this\n",
    "    parameter ignores commented lines and empty lines if\n",
    "    ``skip_blank_lines=True``, so ``header=0`` denotes the first line of\n",
    "    data rather than the first line of the file.\n",
    "names : array-like, optional\n",
    "    List of column names to use. If the file contains a header row,\n",
    "    then you should explicitly pass ``header=0`` to override the column names.\n",
    "    Duplicates in this list are not allowed.\n",
    "index_col : int, str, sequence of int / str, or False, default ``None``\n",
    "  Column(s) to use as the row labels of the ``DataFrame``, either given as\n",
    "  string name or column index. If a sequence of int / str is given, a\n",
    "  MultiIndex is used.\n",
    "\n",
    "  Note: ``index_col=False`` can be used to force pandas to *not* use the first\n",
    "  column as the index, e.g. when you have a malformed file with delimiters at\n",
    "  the end of each line.\n",
    "usecols : list-like or callable, optional\n",
    "    Return a subset of the columns. If list-like, all elements must either\n",
    "    be positional (i.e. integer indices into the document columns) or strings\n",
    "    that correspond to column names provided either by the user in `names` or\n",
    "    inferred from the document header row(s). For example, a valid list-like\n",
    "    `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.\n",
    "    Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n",
    "    To instantiate a DataFrame from ``data`` with element order preserved use\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\n",
    "    in ``['foo', 'bar']`` order or\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n",
    "    for ``['bar', 'foo']`` order.\n",
    "\n",
    "    If callable, the callable function will be evaluated against the column\n",
    "    names, returning names where the callable function evaluates to True. An\n",
    "    example of a valid callable argument would be ``lambda x: x.upper() in\n",
    "    ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n",
    "    parsing time and lower memory usage.\n",
    "squeeze : bool, default False\n",
    "    If the parsed data only contains one column then return a Series.\n",
    "prefix : str, optional\n",
    "    Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\n",
    "mangle_dupe_cols : bool, default True\n",
    "    Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n",
    "    'X'...'X'. Passing in False will cause data to be overwritten if there\n",
    "    are duplicate names in the columns.\n",
    "dtype : Type name or dict of column -> type, optional\n",
    "    Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32,\n",
    "    'c': 'Int64'}\n",
    "    Use `str` or `object` together with suitable `na_values` settings\n",
    "    to preserve and not interpret dtype.\n",
    "    If converters are specified, they will be applied INSTEAD\n",
    "    of dtype conversion.\n",
    "engine : {'c', 'python'}, optional\n",
    "    Parser engine to use. The C engine is faster while the python engine is\n",
    "    currently more feature-complete.\n",
    "converters : dict, optional\n",
    "    Dict of functions for converting values in certain columns. Keys can either\n",
    "    be integers or column labels.\n",
    "true_values : list, optional\n",
    "    Values to consider as True.\n",
    "false_values : list, optional\n",
    "    Values to consider as False.\n",
    "skipinitialspace : bool, default False\n",
    "    Skip spaces after delimiter.\n",
    "skiprows : list-like, int or callable, optional\n",
    "    Line numbers to skip (0-indexed) or number of lines to skip (int)\n",
    "    at the start of the file.\n",
    "\n",
    "    If callable, the callable function will be evaluated against the row\n",
    "    indices, returning True if the row should be skipped and False otherwise.\n",
    "    An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\n",
    "skipfooter : int, default 0\n",
    "    Number of lines at bottom of file to skip (Unsupported with engine='c').\n",
    "nrows : int, optional\n",
    "    Number of rows of file to read. Useful for reading pieces of large files.\n",
    "na_values : scalar, str, list-like, or dict, optional\n",
    "    Additional strings to recognize as NA/NaN. If dict passed, specific\n",
    "    per-column NA values.  By default the following values are interpreted as\n",
    "    NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',\n",
    "    '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a',\n",
    "    'nan', 'null'.\n",
    "keep_default_na : bool, default True\n",
    "    Whether or not to include the default NaN values when parsing the data.\n",
    "    Depending on whether `na_values` is passed in, the behavior is as follows:\n",
    "\n",
    "    * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n",
    "      is appended to the default NaN values used for parsing.\n",
    "    * If `keep_default_na` is True, and `na_values` are not specified, only\n",
    "      the default NaN values are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are specified, only\n",
    "      the NaN values specified `na_values` are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are not specified, no\n",
    "      strings will be parsed as NaN.\n",
    "\n",
    "    Note that if `na_filter` is passed in as False, the `keep_default_na` and\n",
    "    `na_values` parameters will be ignored.\n",
    "na_filter : bool, default True\n",
    "    Detect missing value markers (empty strings and the value of na_values). In\n",
    "    data without any NAs, passing na_filter=False can improve the performance\n",
    "    of reading a large file.\n",
    "verbose : bool, default False\n",
    "    Indicate number of NA values placed in non-numeric columns.\n",
    "skip_blank_lines : bool, default True\n",
    "    If True, skip over blank lines rather than interpreting as NaN values.\n",
    "parse_dates : bool or list of int or names or list of lists or dict, default False\n",
    "    The behavior is as follows:\n",
    "\n",
    "    * boolean. If True -> try parsing the index.\n",
    "    * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n",
    "      each as a separate date column.\n",
    "    * list of lists. e.g.  If [[1, 3]] -> combine columns 1 and 3 and parse as\n",
    "      a single date column.\n",
    "    * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call\n",
    "      result 'foo'\n",
    "\n",
    "    If a column or index cannot be represented as an array of datetimes,\n",
    "    say because of an unparseable value or a mixture of timezones, the column\n",
    "    or index will be returned unaltered as an object data type. For\n",
    "    non-standard datetime parsing, use ``pd.to_datetime`` after\n",
    "    ``pd.read_csv``. To parse an index or column with a mixture of timezones,\n",
    "    specify ``date_parser`` to be a partially-applied\n",
    "    :func:`pandas.to_datetime` with ``utc=True``. See\n",
    "    :ref:`io.csv.mixed_timezones` for more.\n",
    "\n",
    "    Note: A fast-path exists for iso8601-formatted dates.\n",
    "infer_datetime_format : bool, default False\n",
    "    If True and `parse_dates` is enabled, pandas will attempt to infer the\n",
    "    format of the datetime strings in the columns, and if it can be inferred,\n",
    "    switch to a faster method of parsing them. In some cases this can increase\n",
    "    the parsing speed by 5-10x.\n",
    "keep_date_col : bool, default False\n",
    "    If True and `parse_dates` specifies combining multiple columns then\n",
    "    keep the original columns.\n",
    "date_parser : function, optional\n",
    "    Function to use for converting a sequence of string columns to an array of\n",
    "    datetime instances. The default uses ``dateutil.parser.parser`` to do the\n",
    "    conversion. Pandas will try to call `date_parser` in three different ways,\n",
    "    advancing to the next if an exception occurs: 1) Pass one or more arrays\n",
    "    (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n",
    "    string values from the columns defined by `parse_dates` into a single array\n",
    "    and pass that; and 3) call `date_parser` once for each row using one or\n",
    "    more strings (corresponding to the columns defined by `parse_dates`) as\n",
    "    arguments.\n",
    "dayfirst : bool, default False\n",
    "    DD/MM format dates, international and European format.\n",
    "cache_dates : bool, default True\n",
    "    If True, use a cache of unique, converted dates to apply the datetime\n",
    "    conversion. May produce significant speed-up when parsing duplicate\n",
    "    date strings, especially ones with timezone offsets.\n",
    "\n",
    "    .. versionadded:: 0.25.0\n",
    "iterator : bool, default False\n",
    "    Return TextFileReader object for iteration or getting chunks with\n",
    "    ``get_chunk()``.\n",
    "chunksize : int, optional\n",
    "    Return TextFileReader object for iteration.\n",
    "    See the `IO Tools docs\n",
    "    <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n",
    "    for more information on ``iterator`` and ``chunksize``.\n",
    "compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n",
    "    For on-the-fly decompression of on-disk data. If 'infer' and\n",
    "    `filepath_or_buffer` is path-like, then detect compression from the\n",
    "    following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n",
    "    decompression). If using 'zip', the ZIP file must contain only one data\n",
    "    file to be read in. Set to None for no decompression.\n",
    "thousands : str, optional\n",
    "    Thousands separator.\n",
    "decimal : str, default '.'\n",
    "    Character to recognize as decimal point (e.g. use ',' for European data).\n",
    "lineterminator : str (length 1), optional\n",
    "    Character to break file into lines. Only valid with C parser.\n",
    "quotechar : str (length 1), optional\n",
    "    The character used to denote the start and end of a quoted item. Quoted\n",
    "    items can include the delimiter and it will be ignored.\n",
    "quoting : int or csv.QUOTE_* instance, default 0\n",
    "    Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n",
    "    QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\n",
    "doublequote : bool, default ``True``\n",
    "   When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate\n",
    "   whether or not to interpret two consecutive quotechar elements INSIDE a\n",
    "   field as a single ``quotechar`` element.\n",
    "escapechar : str (length 1), optional\n",
    "    One-character string used to escape other characters.\n",
    "comment : str, optional\n",
    "    Indicates remainder of line should not be parsed. If found at the beginning\n",
    "    of a line, the line will be ignored altogether. This parameter must be a\n",
    "    single character. Like empty lines (as long as ``skip_blank_lines=True``),\n",
    "    fully commented lines are ignored by the parameter `header` but not by\n",
    "    `skiprows`. For example, if ``comment='#'``, parsing\n",
    "    ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being\n",
    "    treated as the header.\n",
    "encoding : str, optional\n",
    "    Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python\n",
    "    standard encodings\n",
    "    <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .\n",
    "dialect : str or csv.Dialect, optional\n",
    "    If provided, this parameter will override values (default or not) for the\n",
    "    following parameters: `delimiter`, `doublequote`, `escapechar`,\n",
    "    `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n",
    "    override values, a ParserWarning will be issued. See csv.Dialect\n",
    "    documentation for more details.\n",
    "error_bad_lines : bool, default True\n",
    "    Lines with too many fields (e.g. a csv line with too many commas) will by\n",
    "    default cause an exception to be raised, and no DataFrame will be returned.\n",
    "    If False, then these \"bad lines\" will dropped from the DataFrame that is\n",
    "    returned.\n",
    "warn_bad_lines : bool, default True\n",
    "    If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n",
    "    \"bad line\" will be output.\n",
    "delim_whitespace : bool, default False\n",
    "    Specifies whether or not whitespace (e.g. ``' '`` or ``'    '``) will be\n",
    "    used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n",
    "    is set to True, nothing should be passed in for the ``delimiter``\n",
    "    parameter.\n",
    "low_memory : bool, default True\n",
    "    Internally process the file in chunks, resulting in lower memory use\n",
    "    while parsing, but possibly mixed type inference.  To ensure no mixed\n",
    "    types either set False, or specify the type with the `dtype` parameter.\n",
    "    Note that the entire file is read into a single DataFrame regardless,\n",
    "    use the `chunksize` or `iterator` parameter to return the data in chunks.\n",
    "    (Only valid with C parser).\n",
    "memory_map : bool, default False\n",
    "    If a filepath is provided for `filepath_or_buffer`, map the file object\n",
    "    directly onto memory and access the data directly from there. Using this\n",
    "    option can improve performance because there is no longer any I/O overhead.\n",
    "float_precision : str, optional\n",
    "    Specifies which converter the C engine should use for floating-point\n",
    "    values. The options are `None` for the ordinary converter,\n",
    "    `high` for the high-precision converter, and `round_trip` for the\n",
    "    round-trip converter.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame or TextParser\n",
    "    A comma-separated values (csv) file is returned as two-dimensional\n",
    "    data structure with labeled axes.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n",
    "read_csv : Read a comma-separated values (csv) file into DataFrame.\n",
    "read_fwf : Read a table of fixed-width formatted lines into DataFrame.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> pd.read_csv('data.csv')  # doctest: +SKIP\n",
    "```\n",
    "分隔符：长度超过一个字符且不是 '\\s+' 的分隔符将会被当作 regular expressions 处理\n",
    "```\n",
    "Delimiter to use. If sep is None, the C engine cannot automatically detect\n",
    "    the separator, but the Python parsing engine can, meaning the latter will\n",
    "    be used and automatically detect the separator by Python's builtin sniffer\n",
    "    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n",
    "    different from ``'\\s+'`` will be interpreted as regular expressions and\n",
    "    will also force the use of the Python parsing engine. Note that regex\n",
    "    delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.\n",
    "```\n",
    "居然还能直接读取压缩文件：\n",
    "```\n",
    "compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n",
    "    For on-the-fly decompression of on-disk data. If 'infer' and\n",
    "    `filepath_or_buffer` is path-like, then detect compression from the\n",
    "    following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n",
    "    decompression). If using 'zip', the ZIP file must contain only one data\n",
    "    file to be read in. Set to None for no decompression.\n",
    "```\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2\n",
       "0     2    a\n",
       "1     3    b\n",
       "2     6    c\n",
       "3     5    d"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_table('data/my_table.txt', usecols=['col1', 'col2'])\r\n",
    "'''\r\n",
    "只返回部分数据的参数\r\n",
    "usecols : list-like or callable, optional\r\n",
    "    Return a subset of the columns. If list-like, all elements must either\r\n",
    "    be positional (i.e. integer indices into the document columns) or strings\r\n",
    "    that correspond to column names provided either by the user in `names` or\r\n",
    "    inferred from the document header row(s). For example, a valid list-like\r\n",
    "    `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.\r\n",
    "    Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\r\n",
    "    To instantiate a DataFrame from ``data`` with element order preserved use\r\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\r\n",
    "    in ``['foo', 'bar']`` order or\r\n",
    "    ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\r\n",
    "    for ``['bar', 'foo']`` order.\r\n",
    "\r\n",
    "    If callable, the callable function will be evaluated against the column\r\n",
    "    names, returning names where the callable function evaluates to True. An\r\n",
    "    example of a valid callable argument would be ``lambda x: x.upper() in\r\n",
    "    ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\r\n",
    "    parsing time and lower memory usage.\r\n",
    "    \r\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020-01-01</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020-01-02</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>6</td>\n",
       "      <td>c</td>\n",
       "      <td>2.5</td>\n",
       "      <td>orange</td>\n",
       "      <td>2020-01-05</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>d</td>\n",
       "      <td>3.2</td>\n",
       "      <td>lemon</td>\n",
       "      <td>2020-01-07</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3    col4       col5\n",
       "0     2    a   1.4   apple 2020-01-01\n",
       "1     3    b   3.4  banana 2020-01-02\n",
       "2     6    c   2.5  orange 2020-01-05\n",
       "3     5    d   3.2   lemon 2020-01-07"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_csv('data/my_csv.csv', parse_dates=['col5'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "      <th>col3</th>\n",
       "      <th>col4</th>\n",
       "      <th>col5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>2</td>\n",
       "      <td>a</td>\n",
       "      <td>1.4</td>\n",
       "      <td>apple</td>\n",
       "      <td>2020/1/1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3</td>\n",
       "      <td>b</td>\n",
       "      <td>3.4</td>\n",
       "      <td>banana</td>\n",
       "      <td>2020/1/2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   col1 col2  col3    col4      col5\n",
       "0     2    a   1.4   apple  2020/1/1\n",
       "1     3    b   3.4  banana  2020/1/2"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_excel('data/my_excel.xlsx', nrows=2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "\n",
    "来看看官方文档\n",
    "```\n",
    "pd.read_excel?\n",
    "```\n",
    "得到:\n",
    "```\n",
    "Signature:\n",
    "pd.read_excel(\n",
    "    io,\n",
    "    sheet_name=0,\n",
    "    header=0,\n",
    "    names=None,\n",
    "    index_col=None,\n",
    "    usecols=None,\n",
    "    squeeze=False,\n",
    "    dtype=None,\n",
    "    engine=None,\n",
    "    converters=None,\n",
    "    true_values=None,\n",
    "    false_values=None,\n",
    "    skiprows=None,\n",
    "    nrows=None,\n",
    "    na_values=None,\n",
    "    keep_default_na=True,\n",
    "    na_filter=True,\n",
    "    verbose=False,\n",
    "    parse_dates=False,\n",
    "    date_parser=None,\n",
    "    thousands=None,\n",
    "    comment=None,\n",
    "    skipfooter=0,\n",
    "    convert_float=True,\n",
    "    mangle_dupe_cols=True,\n",
    ")\n",
    "Docstring:\n",
    "Read an Excel file into a pandas DataFrame.\n",
    "\n",
    "Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions\n",
    "read from a local filesystem or URL. Supports an option to read\n",
    "a single sheet or a list of sheets.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object\n",
    "    Any valid string path is acceptable. The string could be a URL. Valid\n",
    "    URL schemes include http, ftp, s3, and file. For file URLs, a host is\n",
    "    expected. A local file could be: ``file://localhost/path/to/table.xlsx``.\n",
    "\n",
    "    If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n",
    "\n",
    "    By file-like object, we refer to objects with a ``read()`` method,\n",
    "    such as a file handler (e.g. via builtin ``open`` function)\n",
    "    or ``StringIO``.\n",
    "sheet_name : str, int, list, or None, default 0\n",
    "    Strings are used for sheet names. Integers are used in zero-indexed\n",
    "    sheet positions. Lists of strings/integers are used to request\n",
    "    multiple sheets. Specify None to get all sheets.\n",
    "\n",
    "    Available cases:\n",
    "\n",
    "    * Defaults to ``0``: 1st sheet as a `DataFrame`\n",
    "    * ``1``: 2nd sheet as a `DataFrame`\n",
    "    * ``\"Sheet1\"``: Load sheet with name \"Sheet1\"\n",
    "    * ``[0, 1, \"Sheet5\"]``: Load first, second and sheet named \"Sheet5\"\n",
    "      as a dict of `DataFrame`\n",
    "    * None: All sheets.\n",
    "\n",
    "header : int, list of int, default 0\n",
    "    Row (0-indexed) to use for the column labels of the parsed\n",
    "    DataFrame. If a list of integers is passed those row positions will\n",
    "    be combined into a ``MultiIndex``. Use None if there is no header.\n",
    "names : array-like, default None\n",
    "    List of column names to use. If file contains no header row,\n",
    "    then you should explicitly pass header=None.\n",
    "index_col : int, list of int, default None\n",
    "    Column (0-indexed) to use as the row labels of the DataFrame.\n",
    "    Pass None if there is no such column.  If a list is passed,\n",
    "    those columns will be combined into a ``MultiIndex``.  If a\n",
    "    subset of data is selected with ``usecols``, index_col\n",
    "    is based on the subset.\n",
    "usecols : int, str, list-like, or callable default None\n",
    "    * If None, then parse all columns.\n",
    "    * If str, then indicates comma separated list of Excel column letters\n",
    "      and column ranges (e.g. \"A:E\" or \"A,C,E:F\"). Ranges are inclusive of\n",
    "      both sides.\n",
    "    * If list of int, then indicates list of column numbers to be parsed.\n",
    "    * If list of string, then indicates list of column names to be parsed.\n",
    "\n",
    "      .. versionadded:: 0.24.0\n",
    "\n",
    "    * If callable, then evaluate each column name against it and parse the\n",
    "      column if the callable returns ``True``.\n",
    "\n",
    "    Returns a subset of the columns according to behavior above.\n",
    "\n",
    "      .. versionadded:: 0.24.0\n",
    "\n",
    "squeeze : bool, default False\n",
    "    If the parsed data only contains one column then return a Series.\n",
    "dtype : Type name or dict of column -> type, default None\n",
    "    Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n",
    "    Use `object` to preserve data as stored in Excel and not interpret dtype.\n",
    "    If converters are specified, they will be applied INSTEAD\n",
    "    of dtype conversion.\n",
    "engine : str, default None\n",
    "    If io is not a buffer or path, this must be set to identify io.\n",
    "    Supported engines: \"xlrd\", \"openpyxl\", \"odf\", \"pyxlsb\", default \"xlrd\".\n",
    "    Engine compatibility :\n",
    "    - \"xlrd\" supports most old/new Excel file formats.\n",
    "    - \"openpyxl\" supports newer Excel file formats.\n",
    "    - \"odf\" supports OpenDocument file formats (.odf, .ods, .odt).\n",
    "    - \"pyxlsb\" supports Binary Excel files.\n",
    "converters : dict, default None\n",
    "    Dict of functions for converting values in certain columns. Keys can\n",
    "    either be integers or column labels, values are functions that take one\n",
    "    input argument, the Excel cell content, and return the transformed\n",
    "    content.\n",
    "true_values : list, default None\n",
    "    Values to consider as True.\n",
    "false_values : list, default None\n",
    "    Values to consider as False.\n",
    "skiprows : list-like\n",
    "    Rows to skip at the beginning (0-indexed).\n",
    "nrows : int, default None\n",
    "    Number of rows to parse.\n",
    "\n",
    "    .. versionadded:: 0.23.0\n",
    "\n",
    "na_values : scalar, str, list-like, or dict, default None\n",
    "    Additional strings to recognize as NA/NaN. If dict passed, specific\n",
    "    per-column NA values. By default the following values are interpreted\n",
    "    as NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',\n",
    "    '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a',\n",
    "    'nan', 'null'.\n",
    "keep_default_na : bool, default True\n",
    "    Whether or not to include the default NaN values when parsing the data.\n",
    "    Depending on whether `na_values` is passed in, the behavior is as follows:\n",
    "\n",
    "    * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n",
    "      is appended to the default NaN values used for parsing.\n",
    "    * If `keep_default_na` is True, and `na_values` are not specified, only\n",
    "      the default NaN values are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are specified, only\n",
    "      the NaN values specified `na_values` are used for parsing.\n",
    "    * If `keep_default_na` is False, and `na_values` are not specified, no\n",
    "      strings will be parsed as NaN.\n",
    "\n",
    "    Note that if `na_filter` is passed in as False, the `keep_default_na` and\n",
    "    `na_values` parameters will be ignored.\n",
    "na_filter : bool, default True\n",
    "    Detect missing value markers (empty strings and the value of na_values). In\n",
    "    data without any NAs, passing na_filter=False can improve the performance\n",
    "    of reading a large file.\n",
    "verbose : bool, default False\n",
    "    Indicate number of NA values placed in non-numeric columns.\n",
    "parse_dates : bool, list-like, or dict, default False\n",
    "    The behavior is as follows:\n",
    "\n",
    "    * bool. If True -> try parsing the index.\n",
    "    * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n",
    "      each as a separate date column.\n",
    "    * list of lists. e.g.  If [[1, 3]] -> combine columns 1 and 3 and parse as\n",
    "      a single date column.\n",
    "    * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call\n",
    "      result 'foo'\n",
    "\n",
    "    If a column or index contains an unparseable date, the entire column or\n",
    "    index will be returned unaltered as an object data type. If you don`t want to\n",
    "    parse some cells as date just change their type in Excel to \"Text\".\n",
    "    For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.\n",
    "\n",
    "    Note: A fast-path exists for iso8601-formatted dates.\n",
    "date_parser : function, optional\n",
    "    Function to use for converting a sequence of string columns to an array of\n",
    "    datetime instances. The default uses ``dateutil.parser.parser`` to do the\n",
    "    conversion. Pandas will try to call `date_parser` in three different ways,\n",
    "    advancing to the next if an exception occurs: 1) Pass one or more arrays\n",
    "    (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n",
    "    string values from the columns defined by `parse_dates` into a single array\n",
    "    and pass that; and 3) call `date_parser` once for each row using one or\n",
    "    more strings (corresponding to the columns defined by `parse_dates`) as\n",
    "    arguments.\n",
    "thousands : str, default None\n",
    "    Thousands separator for parsing string columns to numeric.  Note that\n",
    "    this parameter is only necessary for columns stored as TEXT in Excel,\n",
    "    any numeric columns will automatically be parsed, regardless of display\n",
    "    format.\n",
    "comment : str, default None\n",
    "    Comments out remainder of line. Pass a character or characters to this\n",
    "    argument to indicate comments in the input file. Any data between the\n",
    "    comment string and the end of the current line is ignored.\n",
    "skipfooter : int, default 0\n",
    "    Rows at the end to skip (0-indexed).\n",
    "convert_float : bool, default True\n",
    "    Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric\n",
    "    data will be read in as floats: Excel stores all numbers as floats\n",
    "    internally.\n",
    "mangle_dupe_cols : bool, default True\n",
    "    Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n",
    "    'X'...'X'. Passing in False will cause data to be overwritten if there\n",
    "    are duplicate names in the columns.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame or dict of DataFrames\n",
    "    DataFrame from the passed in Excel file. See notes in sheet_name\n",
    "    argument for more information on when a dict of DataFrames is returned.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.to_excel : Write DataFrame to an Excel file.\n",
    "DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n",
    "read_csv : Read a comma-separated values (csv) file into DataFrame.\n",
    "read_fwf : Read a table of fixed-width formatted lines into DataFrame.\n",
    "\n",
    "Examples\n",
    "--------\n",
    "The file can be read using the file name as string or an open file object:\n",
    "\n",
    ">>> pd.read_excel('tmp.xlsx', index_col=0)  # doctest: +SKIP\n",
    "       Name  Value\n",
    "0   string1      1\n",
    "1   string2      2\n",
    "2  #Comment      3\n",
    "\n",
    ">>> pd.read_excel(open('tmp.xlsx', 'rb'),\n",
    "...               sheet_name='Sheet3')  # doctest: +SKIP\n",
    "   Unnamed: 0      Name  Value\n",
    "0           0   string1      1\n",
    "1           1   string2      2\n",
    "2           2  #Comment      3\n",
    "\n",
    "Index and header can be specified via the `index_col` and `header` arguments\n",
    "\n",
    ">>> pd.read_excel('tmp.xlsx', index_col=None, header=None)  # doctest: +SKIP\n",
    "     0         1      2\n",
    "0  NaN      Name  Value\n",
    "1  0.0   string1      1\n",
    "2  1.0   string2      2\n",
    "3  2.0  #Comment      3\n",
    "\n",
    "Column types are inferred but can be explicitly specified\n",
    "\n",
    ">>> pd.read_excel('tmp.xlsx', index_col=0,\n",
    "...               dtype={'Name': str, 'Value': float})  # doctest: +SKIP\n",
    "       Name  Value\n",
    "0   string1    1.0\n",
    "1   string2    2.0\n",
    "2  #Comment    3.0\n",
    "\n",
    "True, False, and NA values, and thousands separators have defaults,\n",
    "but can be explicitly specified, too. Supply the values you would like\n",
    "as strings or lists of strings!\n",
    "\n",
    ">>> pd.read_excel('tmp.xlsx', index_col=0,\n",
    "...               na_values=['string1', 'string2'])  # doctest: +SKIP\n",
    "       Name  Value\n",
    "0       NaN      1\n",
    "1       NaN      2\n",
    "2  #Comment      3\n",
    "\n",
    "Comment lines in the excel input file can be skipped using the `comment` kwarg\n",
    "\n",
    ">>> pd.read_excel('tmp.xlsx', index_col=0, comment='#')  # doctest: +SKIP\n",
    "      Name  Value\n",
    "0  string1    1.0\n",
    "1  string2    2.0\n",
    "2     None    NaN\n",
    "```\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "在读取`txt`文件时，经常遇到分隔符非空格的情况，`read_table`有一个分隔符参数`sep`，它使得用户可以自定义分隔符号，进行`txt`数据的读取。例如，下面读取的表以`||||`为分隔符："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1 |||| col2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>TS |||| This is an apple.</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>GQ |||| My name is Bob.</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>WT |||| Well done!</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>PT |||| May I help you?</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              col1 |||| col2\n",
       "0  TS |||| This is an apple.\n",
       "1    GQ |||| My name is Bob.\n",
       "2         WT |||| Well done!\n",
       "3    PT |||| May I help you?"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_table('data/my_table_special_sep.txt')\r\n",
    "#读取的就很乱"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上面的结果显然不是理想的，这时可以使用`sep`，同时需要指定引擎为`python`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col1</th>\n",
       "      <th>col2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>TS</td>\n",
       "      <td>This is an apple.</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>GQ</td>\n",
       "      <td>My name is Bob.</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>WT</td>\n",
       "      <td>Well done!</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>PT</td>\n",
       "      <td>May I help you?</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "  col1               col2\n",
       "0   TS  This is an apple.\n",
       "1   GQ    My name is Bob.\n",
       "2   WT         Well done!\n",
       "3   PT    May I help you?"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.read_table('data/my_table_special_sep.txt', sep=' \\|\\|\\|\\| ', engine='python')\r\n",
    "'''\r\n",
    "engine有C和python可选，C快一点，python功能强一点\r\n",
    "sep : str, default '\\\\t' (tab-stop)\r\n",
    "    Delimiter to use. If sep is None, the C engine cannot automatically detect\r\n",
    "    the separator, but the Python parsing engine can, meaning the latter will\r\n",
    "    be used and automatically detect the separator by Python's builtin sniffer\r\n",
    "    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\r\n",
    "    different from ``'\\s+'`` will be interpreted as regular expressions and\r\n",
    "    will also force the use of the Python parsing engine. Note that regex\r\n",
    "    delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.\r\n",
    "engine : {'c', 'python'}, optional\r\n",
    "    Parser engine to use. The C engine is faster while the python engine is\r\n",
    "    currently more feature-complete.\r\n",
    "'''\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【WARNING】`sep`是正则参数\n",
    "\n",
    "在使用`read_table`的时候需要注意，参数`sep`中使用的是正则表达式，因此需要对`|`进行转义变成`\\|`，否则无法读取到正确的结果。有关正则表达式的基本内容可以参考第八章或者其他相关资料。\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "### 2. 数据写入\n",
    "\n",
    "一般在数据写入中，最常用的操作是把`index`设置为`False`，特别当索引没有特殊意义的时候，这样的行为能把索引在保存的时候去除。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "df_csv.to_csv('data/my_csv_saved.csv', index=False)\n",
    "df_excel.to_excel('data/my_excel_saved.xlsx', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "`pandas`中没有定义`to_table`函数，但是`to_csv`可以保存为`txt`文件，并且允许自定义分隔符，常用制表符`\\t`分隔："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "df_txt.to_csv('data/my_txt_saved.txt', sep='\\t', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "如果想要把表格快速转换为`markdown`和`latex`语言，可以使用`to_markdown`和`to_latex`函数，此处需要安装`tabulate`包。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "|    |   col1 | col2   |   col3 | col4   | col5     |\n",
      "|---:|-------:|:-------|-------:|:-------|:---------|\n",
      "|  0 |      2 | a      |    1.4 | apple  | 2020/1/1 |\n",
      "|  1 |      3 | b      |    3.4 | banana | 2020/1/2 |\n",
      "|  2 |      6 | c      |    2.5 | orange | 2020/1/5 |\n",
      "|  3 |      5 | d      |    3.2 | lemon  | 2020/1/7 |\n"
     ]
    }
   ],
   "source": [
    "print(df_csv.to_markdown())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\begin{tabular}{lrlrll}\n",
      "\\toprule\n",
      "{} &  col1 & col2 &  col3 &    col4 &      col5 \\\\\n",
      "\\midrule\n",
      "0 &     2 &    a &   1.4 &   apple &  2020/1/1 \\\\\n",
      "1 &     3 &    b &   3.4 &  banana &  2020/1/2 \\\\\n",
      "2 &     6 &    c &   2.5 &  orange &  2020/1/5 \\\\\n",
      "3 &     5 &    d &   3.2 &   lemon &  2020/1/7 \\\\\n",
      "\\bottomrule\n",
      "\\end{tabular}\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(df_csv.to_latex())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 二、基本数据结构\n",
    "`pandas`中具有两种基本的数据存储结构，存储一维`values`的`Series`和存储二维`values`的`DataFrame`，在这两种结构上定义了很多的属性和方法。\n",
    "\n",
    "### 1. Series\n",
    "`Series`一般由四个部分组成，分别是序列的值`data`、索引`index`、存储类型`dtype`、序列的名字`name`。其中，索引也可以指定它的名字，默认为空。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "my_idx\n",
       "id1              100\n",
       "20                 a\n",
       "third    {'dic1': 5}\n",
       "Name: my_name, dtype: object"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series(data = [100, 'a', {'dic1':5}],\n",
    "              index = pd.Index(['id1', 20, 'third'], name='my_idx'),\n",
    "              dtype = 'object',\n",
    "              name = 'my_name')#index的name和name的区别\n",
    "s"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【NOTE】`object`类型\n",
    "\n",
    "`object`代表了一种混合类型，正如上面的例子中存储了整数、字符串以及`Python`的字典数据结构。此外，目前`pandas`把纯字符串序列也默认认为是一种`object`类型的序列，但它也可以用`string`类型存储，文本序列的内容会在第八章中讨论。\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "对于这些属性，可以通过 . 的方式来获取："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([100, 'a', {'dic1': 5}], dtype=object)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['id1', 20, 'third'], dtype='object', name='my_idx')"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dtype('O')"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.dtype"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'my_name'"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.name"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "利用`.shape`可以获取序列的长度："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3,)"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "索引是`pandas`中最重要的概念之一，它将在第三章中被详细地讨论。如果想要取出单个索引对应的值，可以通过`[index_item]`取出。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "看一看参考文档\n",
    "```\n",
    "pd.Series?\n",
    "```\n",
    "\n",
     "关于label出现了一个词hashable，这个词的意思是可哈希的，大概就是不可变的，像是元组，字符串一样  \n",
     "copy参数表示是否拷贝数据（原数据）\n",
    "```\n",
    "pd.Series(\n",
    "    data=None,\n",
    "    index=None,\n",
    "    dtype=None,\n",
    "    name=None,\n",
    "    copy=False,\n",
    "    fastpath=False,\n",
    ")\n",
    "Docstring:     \n",
    "One-dimensional ndarray with axis labels (including time series).\n",
    "\n",
    "Labels need not be unique but must be a hashable type. The object\n",
    "supports both integer- and label-based indexing and provides a host of\n",
    "methods for performing operations involving the index. Statistical\n",
    "methods from ndarray have been overridden to automatically exclude\n",
    "missing data (currently represented as NaN).\n",
    "\n",
    "Operations between Series (+, -, /, *, **) align values based on their\n",
    "associated index values-- they need not be the same length. The result\n",
    "index will be the sorted union of the two indexes.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "data : array-like, Iterable, dict, or scalar value\n",
    "    Contains data stored in Series.\n",
    "\n",
    "    .. versionchanged:: 0.23.0\n",
    "       If data is a dict, argument order is maintained for Python 3.6\n",
    "       and later.\n",
    "\n",
    "index : array-like or Index (1d)\n",
    "    Values must be hashable and have the same length as `data`.\n",
    "    Non-unique index values are allowed. Will default to\n",
    "    RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index\n",
    "    sequence are used, the index will override the keys found in the\n",
    "    dict.\n",
    "dtype : str, numpy.dtype, or ExtensionDtype, optional\n",
    "    Data type for the output Series. If not specified, this will be\n",
    "    inferred from `data`.\n",
    "    See the :ref:`user guide <basics.dtypes>` for more usages.\n",
    "name : str, optional\n",
    "    The name to give to the Series.\n",
    "copy : bool, default False\n",
    "    Copy input data.\n",
    "```\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 2. DataFrame\n",
    "`DataFrame`在`Series`的基础上增加了列索引，一个数据框可以由二维的`data`与行列索引来构造："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col_0</th>\n",
       "      <th>col_1</th>\n",
       "      <th>col_2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>row_0</th>\n",
       "      <td>1</td>\n",
       "      <td>a</td>\n",
       "      <td>1.2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_1</th>\n",
       "      <td>2</td>\n",
       "      <td>b</td>\n",
       "      <td>2.2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_2</th>\n",
       "      <td>3</td>\n",
       "      <td>c</td>\n",
       "      <td>3.2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       col_0 col_1  col_2\n",
       "row_0      1     a    1.2\n",
       "row_1      2     b    2.2\n",
       "row_2      3     c    3.2"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data = [[1, 'a', 1.2], [2, 'b', 2.2], [3, 'c', 3.2]]\n",
    "df = pd.DataFrame(data = data,\n",
    "                  index = ['row_%d'%i for i in range(3)],\n",
    "                  columns=['col_0', 'col_1', 'col_2'])\n",
    "df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "但一般而言，更多的时候会采用从列索引名到数据的映射来构造数据框，同时再加上行索引："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col_0</th>\n",
       "      <th>col_1</th>\n",
       "      <th>col_2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>row_0</th>\n",
       "      <td>1</td>\n",
       "      <td>a</td>\n",
       "      <td>1.2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_1</th>\n",
       "      <td>2</td>\n",
       "      <td>b</td>\n",
       "      <td>2.2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_2</th>\n",
       "      <td>3</td>\n",
       "      <td>c</td>\n",
       "      <td>3.2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       col_0 col_1  col_2\n",
       "row_0      1     a    1.2\n",
       "row_1      2     b    2.2\n",
       "row_2      3     c    3.2"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.DataFrame(data = {'col_0': [1,2,3],\n",
    "                          'col_1':list('abc'),\n",
    "                          'col_2': [1.2, 2.2, 3.2]},\n",
    "                  index = ['row_%d'%i for i in range(3)])\n",
    "df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "由于这种映射关系，在`DataFrame`中可以用`[col_name]`与`[col_list]`来取出相应的列与由多个列组成的表，结果分别为`Series`和`DataFrame`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "row_0    1\n",
       "row_1    2\n",
       "row_2    3\n",
       "Name: col_0, dtype: int64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['col_0']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>col_0</th>\n",
       "      <th>col_1</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>row_0</th>\n",
       "      <td>1</td>\n",
       "      <td>a</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_1</th>\n",
       "      <td>2</td>\n",
       "      <td>b</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>row_2</th>\n",
       "      <td>3</td>\n",
       "      <td>c</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       col_0 col_1\n",
       "row_0      1     a\n",
       "row_1      2     b\n",
       "row_2      3     c"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df[['col_0', 'col_1']]#list"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "与`Series`类似，在数据框中同样可以取出相应的属性："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1, 'a', 1.2],\n",
       "       [2, 'b', 2.2],\n",
       "       [3, 'c', 3.2]], dtype=object)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['row_0', 'row_1', 'row_2'], dtype='object')"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['col_0', 'col_1', 'col_2'], dtype='object')"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "col_0      int64\n",
       "col_1     object\n",
       "col_2    float64\n",
       "dtype: object"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.dtypes # 返回的是值为相应列数据类型的Series"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3, 3)"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "通过`.T`可以把`DataFrame`进行转置："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>row_0</th>\n",
       "      <th>row_1</th>\n",
       "      <th>row_2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>col_0</th>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>col_1</th>\n",
       "      <td>a</td>\n",
       "      <td>b</td>\n",
       "      <td>c</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>col_2</th>\n",
       "      <td>1.2</td>\n",
       "      <td>2.2</td>\n",
       "      <td>3.2</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      row_0 row_1 row_2\n",
       "col_0     1     2     3\n",
       "col_1     a     b     c\n",
       "col_2   1.2   2.2   3.2"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.T"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "看看官方文档\n",
    "```\n",
    "pd.DataFrame?\n",
    "```\n",
     "如果没有提供列标签，会默认自动生成 RangeIndex：Will default to\n",
    "    RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n",
    "```\n",
    "pd.DataFrame(\n",
    "    data=None,\n",
    "    index: Union[Collection, NoneType] = None,\n",
    "    columns: Union[Collection, NoneType] = None,\n",
    "    dtype: Union[ForwardRef('ExtensionDtype'), str, numpy.dtype, Type[Union[str, float, int, complex, bool]], NoneType] = None,\n",
    "    copy: bool = False,\n",
    ")\n",
    "Docstring:     \n",
    "Two-dimensional, size-mutable, potentially heterogeneous tabular data.\n",
    "\n",
    "Data structure also contains labeled axes (rows and columns).\n",
    "Arithmetic operations align on both row and column labels. Can be\n",
    "thought of as a dict-like container for Series objects. The primary\n",
    "pandas data structure.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame\n",
    "    Dict can contain Series, arrays, constants, or list-like objects.\n",
    "\n",
    "    .. versionchanged:: 0.23.0\n",
    "       If data is a dict, column order follows insertion-order for\n",
    "       Python 3.6 and later.\n",
    "\n",
    "    .. versionchanged:: 0.25.0\n",
    "       If data is a list of dicts, column order follows insertion-order\n",
    "       for Python 3.6 and later.\n",
    "\n",
    "index : Index or array-like\n",
    "    Index to use for resulting frame. Will default to RangeIndex if\n",
    "    no indexing information part of input data and no index provided.\n",
    "columns : Index or array-like\n",
    "    Column labels to use for resulting frame. Will default to\n",
    "    RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n",
    "dtype : dtype, default None\n",
    "    Data type to force. Only a single dtype is allowed. If None, infer.\n",
    "copy : bool, default False\n",
    "    Copy data from inputs. Only affects DataFrame / 2d ndarray input.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.from_records : Constructor from tuples, also record arrays.\n",
    "DataFrame.from_dict : From dicts of Series, arrays, or dicts.\n",
    "read_csv : Read a comma-separated values (csv) file into DataFrame.\n",
    "read_table : Read general delimited file into DataFrame.\n",
    "read_clipboard : Read text from clipboard into DataFrame.\n",
    "\n",
    "Examples\n",
    "--------\n",
    "Constructing DataFrame from a dictionary.\n",
    "\n",
    ">>> d = {'col1': [1, 2], 'col2': [3, 4]}\n",
    ">>> df = pd.DataFrame(data=d)\n",
    ">>> df\n",
    "   col1  col2\n",
    "0     1     3\n",
    "1     2     4\n",
    "\n",
    "Notice that the inferred dtype is int64.\n",
    "\n",
    ">>> df.dtypes\n",
    "col1    int64\n",
    "col2    int64\n",
    "dtype: object\n",
    "\n",
    "To enforce a single dtype:\n",
    "\n",
    ">>> df = pd.DataFrame(data=d, dtype=np.int8)\n",
    ">>> df.dtypes\n",
    "col1    int8\n",
    "col2    int8\n",
    "dtype: object\n",
    "\n",
    "Constructing DataFrame from numpy ndarray:\n",
    "\n",
    ">>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n",
    "...                    columns=['a', 'b', 'c'])\n",
    ">>> df2\n",
    "   a  b  c\n",
    "0  1  2  3\n",
    "1  4  5  6\n",
    "2  7  8  9\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 三、常用基本函数\n",
    "为了进行举例说明，在接下来的部分和其余章节都将会使用一份`learn_pandas.csv`的虚拟数据集，它记录了四所学校学生的体测个人信息。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['School', 'Grade', 'Name', 'Gender', 'Height', 'Weight', 'Transfer',\n",
       "       'Test_Number', 'Test_Date', 'Time_Record'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.read_csv('data/learn_pandas.csv')\n",
    "df.columns"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上述列名依次代表学校、年级、姓名、性别、身高、体重、是否为转系生、体测场次、测试时间、1000米成绩，本章只需使用其中的前七列。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "df = df[df.columns[:7]]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 1. 汇总函数\n",
    "`head, tail`函数分别表示返回表或者序列的前`n`行和后`n`行，其中`n`默认为5："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>School</th>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th>Gender</th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "      <th>Transfer</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Shanghai Jiao Tong University</td>\n",
       "      <td>Freshman</td>\n",
       "      <td>Gaopeng Yang</td>\n",
       "      <td>Female</td>\n",
       "      <td>158.9</td>\n",
       "      <td>46.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Peking University</td>\n",
       "      <td>Freshman</td>\n",
       "      <td>Changqiang You</td>\n",
       "      <td>Male</td>\n",
       "      <td>166.5</td>\n",
       "      <td>70.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                          School     Grade            Name  Gender  Height  \\\n",
       "0  Shanghai Jiao Tong University  Freshman    Gaopeng Yang  Female   158.9   \n",
       "1              Peking University  Freshman  Changqiang You    Male   166.5   \n",
       "\n",
       "   Weight Transfer  \n",
       "0    46.0        N  \n",
       "1    70.0        N  "
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.head(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>School</th>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th>Gender</th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "      <th>Transfer</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>195</th>\n",
       "      <td>Fudan University</td>\n",
       "      <td>Junior</td>\n",
       "      <td>Xiaojuan Sun</td>\n",
       "      <td>Female</td>\n",
       "      <td>153.9</td>\n",
       "      <td>46.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>196</th>\n",
       "      <td>Tsinghua University</td>\n",
       "      <td>Senior</td>\n",
       "      <td>Li Zhao</td>\n",
       "      <td>Female</td>\n",
       "      <td>160.9</td>\n",
       "      <td>50.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>197</th>\n",
       "      <td>Shanghai Jiao Tong University</td>\n",
       "      <td>Senior</td>\n",
       "      <td>Chengqiang Chu</td>\n",
       "      <td>Female</td>\n",
       "      <td>153.9</td>\n",
       "      <td>45.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>198</th>\n",
       "      <td>Shanghai Jiao Tong University</td>\n",
       "      <td>Senior</td>\n",
       "      <td>Chengmei Shen</td>\n",
       "      <td>Male</td>\n",
       "      <td>175.3</td>\n",
       "      <td>71.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>199</th>\n",
       "      <td>Tsinghua University</td>\n",
       "      <td>Sophomore</td>\n",
       "      <td>Chunpeng Lv</td>\n",
       "      <td>Male</td>\n",
       "      <td>155.7</td>\n",
       "      <td>51.0</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                            School      Grade            Name  Gender  Height  \\\n",
       "195               Fudan University     Junior    Xiaojuan Sun  Female   153.9   \n",
       "196            Tsinghua University     Senior         Li Zhao  Female   160.9   \n",
       "197  Shanghai Jiao Tong University     Senior  Chengqiang Chu  Female   153.9   \n",
       "198  Shanghai Jiao Tong University     Senior   Chengmei Shen    Male   175.3   \n",
       "199            Tsinghua University  Sophomore     Chunpeng Lv    Male   155.7   \n",
       "\n",
       "     Weight Transfer  \n",
       "195    46.0        N  \n",
       "196    50.0        N  \n",
       "197    45.0        N  \n",
       "198    71.0        N  \n",
       "199    51.0        N  "
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.tail()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "`info, describe`分别返回表的信息概况和表中数值列对应的主要统计量："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 200 entries, 0 to 199\n",
      "Data columns (total 7 columns):\n",
      " #   Column    Non-Null Count  Dtype  \n",
      "---  ------    --------------  -----  \n",
      " 0   School    200 non-null    object \n",
      " 1   Grade     200 non-null    object \n",
      " 2   Name      200 non-null    object \n",
      " 3   Gender    200 non-null    object \n",
      " 4   Height    183 non-null    float64\n",
      " 5   Weight    189 non-null    float64\n",
      " 6   Transfer  188 non-null    object \n",
      "dtypes: float64(2), object(5)\n",
      "memory usage: 11.1+ KB\n"
     ]
    }
   ],
   "source": [
    "df.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>183.000000</td>\n",
       "      <td>189.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>163.218033</td>\n",
       "      <td>55.015873</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>8.608879</td>\n",
       "      <td>12.824294</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>145.400000</td>\n",
       "      <td>34.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>157.150000</td>\n",
       "      <td>46.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>161.900000</td>\n",
       "      <td>51.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>167.500000</td>\n",
       "      <td>65.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>193.900000</td>\n",
       "      <td>89.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "           Height      Weight\n",
       "count  183.000000  189.000000\n",
       "mean   163.218033   55.015873\n",
       "std      8.608879   12.824294\n",
       "min    145.400000   34.000000\n",
       "25%    157.150000   46.000000\n",
       "50%    161.900000   51.000000\n",
       "75%    167.500000   65.000000\n",
       "max    193.900000   89.000000"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【NOTE】更全面的数据汇总\n",
    "\n",
    "`info, describe`只能实现较少信息的展示，如果想要对一份数据集进行全面且有效的观察，特别是在列较多的情况下，推荐使用[pandas-profiling](https://pandas-profiling.github.io/pandas-profiling/docs/)包，它将在第十一章被再次提到。\n",
    "\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "### 2. 特征统计函数\n",
    "在`Series`和`DataFrame`上定义了许多统计函数，最常见的是`sum, mean, median, var, std, max, min`。例如，选出身高和体重列进行演示："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    163.218033\n",
       "Weight     55.015873\n",
       "dtype: float64"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo = df[['Height', 'Weight']]\n",
    "df_demo.mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "关于平均数的官方文档\n",
    "```\n",
    "df.mean?\n",
    "```\n",
     "skipna表示计算时跳过缺失值（NA/null），默认为True\n",
     "numeric_only默认是None，此时会先尝试对所有列计算，若失败则退回到只使用数值列\n",
    "\n",
    "```\n",
    "Signature: df.mean(axis=None, skipna=None, level=None, numeric_only=None, **kwargs)\n",
    "Docstring:\n",
    "Return the mean of the values for the requested axis.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "axis : {index (0), columns (1)}\n",
    "    Axis for the function to be applied on.\n",
    "skipna : bool, default True\n",
    "    Exclude NA/null values when computing the result.\n",
    "level : int or level name, default None\n",
    "    If the axis is a MultiIndex (hierarchical), count along a\n",
    "    particular level, collapsing into a Series.\n",
    "numeric_only : bool, default None\n",
    "    Include only float, int, boolean columns. If None, will attempt to use\n",
    "    everything, then use only numeric data. Not implemented for Series.\n",
    "**kwargs\n",
    "    Additional keyword arguments to be passed to the function.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "Series or DataFrame (if level specified)\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    193.9\n",
       "Weight     89.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.max()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "此外，需要介绍的是`quantile, count, idxmax`这三个函数，它们分别返回的是分位数、非缺失值个数、最大值对应的索引："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    167.5\n",
       "Weight     65.0\n",
       "Name: 0.75, dtype: float64"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.quantile(0.75)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    183\n",
       "Weight    189\n",
       "dtype: int64"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.count()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    193\n",
       "Weight      2\n",
       "dtype: int64"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.idxmax() # idxmin是对应的函数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上面这些所有的函数，由于操作后返回的是标量，所以又称为聚合函数，它们有一个公共参数`axis`，默认为0代表逐列聚合，如果设置为1则表示逐行聚合："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    102.45\n",
       "1    118.25\n",
       "2    138.95\n",
       "3     41.00\n",
       "4    124.00\n",
       "dtype: float64"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.mean(axis=1).head() # 在这个数据集上体重和身高的均值并没有意义"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 3. 唯一值函数\n",
    "对序列使用`unique`和`nunique`可以分别得到其唯一值组成的列表和唯一值的个数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array(['Shanghai Jiao Tong University', 'Peking University',\n",
       "       'Fudan University', 'Tsinghua University'], dtype=object)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['School'].unique()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "pd.unique?\n",
    "```\n",
    "对于时间戳的例子还是很有趣的：\n",
    "```\n",
     ">>> pd.unique(pd.Series([pd.Timestamp('20160101'),\n",
     "...                      pd.Timestamp('20160101')]))\n",
     "array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')\n",
    "```\n",
    "```\n",
    "Signature: pd.unique(values)\n",
    "Docstring:\n",
    "Hash table-based unique. Uniques are returned in order\n",
    "of appearance. This does NOT sort.\n",
    "\n",
    "Significantly faster than numpy.unique. Includes NA values.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "values : 1d array-like\n",
    "\n",
    "Returns\n",
    "-------\n",
    "numpy.ndarray or ExtensionArray\n",
    "\n",
    "    The return can be:\n",
    "\n",
    "    * Index : when the input is an Index\n",
    "    * Categorical : when the input is a Categorical dtype\n",
    "    * ndarray : when the input is a Series/ndarray\n",
    "\n",
    "    Return numpy.ndarray or ExtensionArray.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "Index.unique : Return unique values from an Index.\n",
    "Series.unique : Return unique values of Series object.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> pd.unique(pd.Series([2, 1, 3, 3]))\n",
    "array([2, 1, 3])\n",
    "\n",
    ">>> pd.unique(pd.Series([2] + [1] * 5))\n",
    "array([2, 1])\n",
    "\n",
    ">>> pd.unique(pd.Series([pd.Timestamp('20160101'),\n",
    "...                     pd.Timestamp('20160101')]))\n",
    "array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')\n",
    "\n",
    ">>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),\n",
    "...                      pd.Timestamp('20160101', tz='US/Eastern')]))\n",
    "array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],\n",
    "      dtype=object)\n",
    "\n",
    ">>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),\n",
    "...                     pd.Timestamp('20160101', tz='US/Eastern')]))\n",
    "DatetimeIndex(['2016-01-01 00:00:00-05:00'],\n",
    "...           dtype='datetime64[ns, US/Eastern]', freq=None)\n",
    "\n",
    ">>> pd.unique(list('baabc'))\n",
    "array(['b', 'a', 'c'], dtype=object)\n",
    "\n",
    "An unordered Categorical will return categories in the\n",
    "order of appearance.\n",
    "\n",
    ">>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))\n",
    "[b, a, c]\n",
    "Categories (3, object): [b, a, c]\n",
    "\n",
    ">>> pd.unique(pd.Series(pd.Categorical(list('baabc'),\n",
    "...                                    categories=list('abc'))))\n",
    "[b, a, c]\n",
    "Categories (3, object): [b, a, c]\n",
    "\n",
    "An ordered Categorical preserves the category ordering.\n",
    "\n",
    ">>> pd.unique(pd.Series(pd.Categorical(list('baabc'),\n",
    "...                                    categories=list('abc'),\n",
    "...                                    ordered=True)))\n",
    "[b, a, c]\n",
    "Categories (3, object): [a < b < c]\n",
    "\n",
    "An array of tuples\n",
    "\n",
    ">>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])\n",
    "array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "4"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['School'].nunique()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "`value_counts`可以得到唯一值和其对应出现的频数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Tsinghua University              69\n",
       "Shanghai Jiao Tong University    57\n",
       "Fudan University                 40\n",
       "Peking University                34\n",
       "Name: School, dtype: int64"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['School'].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "如果想要观察多个列组合的唯一值，可以使用`drop_duplicates`。其中的关键参数是`keep`，默认值`first`表示每个组合保留第一次出现的所在行，`last`表示保留最后一次出现的所在行，`False`表示把所有重复组合所在的行剔除。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Gender</th>\n",
       "      <th>Transfer</th>\n",
       "      <th>Name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Female</td>\n",
       "      <td>N</td>\n",
       "      <td>Gaopeng Yang</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Male</td>\n",
       "      <td>N</td>\n",
       "      <td>Changqiang You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>Female</td>\n",
       "      <td>NaN</td>\n",
       "      <td>Peng You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>Male</td>\n",
       "      <td>NaN</td>\n",
       "      <td>Xiaopeng Shen</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>36</th>\n",
       "      <td>Male</td>\n",
       "      <td>Y</td>\n",
       "      <td>Xiaojuan Qin</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>43</th>\n",
       "      <td>Female</td>\n",
       "      <td>Y</td>\n",
       "      <td>Gaoli Feng</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    Gender Transfer            Name\n",
       "0   Female        N    Gaopeng Yang\n",
       "1     Male        N  Changqiang You\n",
       "12  Female      NaN        Peng You\n",
       "21    Male      NaN   Xiaopeng Shen\n",
       "36    Male        Y    Xiaojuan Qin\n",
       "43  Female        Y      Gaoli Feng"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo = df[['Gender','Transfer','Name']]\n",
    "df_demo.drop_duplicates(['Gender', 'Transfer'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Gender</th>\n",
       "      <th>Transfer</th>\n",
       "      <th>Name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>147</th>\n",
       "      <td>Male</td>\n",
       "      <td>NaN</td>\n",
       "      <td>Juan You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>150</th>\n",
       "      <td>Male</td>\n",
       "      <td>Y</td>\n",
       "      <td>Chengpeng You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>169</th>\n",
       "      <td>Female</td>\n",
       "      <td>Y</td>\n",
       "      <td>Chengquan Qin</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>194</th>\n",
       "      <td>Female</td>\n",
       "      <td>NaN</td>\n",
       "      <td>Yanmei Qian</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>197</th>\n",
       "      <td>Female</td>\n",
       "      <td>N</td>\n",
       "      <td>Chengqiang Chu</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>199</th>\n",
       "      <td>Male</td>\n",
       "      <td>N</td>\n",
       "      <td>Chunpeng Lv</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     Gender Transfer            Name\n",
       "147    Male      NaN        Juan You\n",
       "150    Male        Y   Chengpeng You\n",
       "169  Female        Y   Chengquan Qin\n",
       "194  Female      NaN     Yanmei Qian\n",
       "197  Female        N  Chengqiang Chu\n",
       "199    Male        N     Chunpeng Lv"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.drop_duplicates(['Gender', 'Transfer'], keep='last')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Gender</th>\n",
       "      <th>Transfer</th>\n",
       "      <th>Name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Female</td>\n",
       "      <td>N</td>\n",
       "      <td>Gaopeng Yang</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Male</td>\n",
       "      <td>N</td>\n",
       "      <td>Changqiang You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>Male</td>\n",
       "      <td>N</td>\n",
       "      <td>Mei Sun</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Male</td>\n",
       "      <td>N</td>\n",
       "      <td>Gaojuan You</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>Female</td>\n",
       "      <td>N</td>\n",
       "      <td>Xiaoli Qian</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   Gender Transfer            Name\n",
       "0  Female        N    Gaopeng Yang\n",
       "1    Male        N  Changqiang You\n",
       "2    Male        N         Mei Sun\n",
       "4    Male        N     Gaojuan You\n",
       "5  Female        N     Xiaoli Qian"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.drop_duplicates(['Name', 'Gender'], keep=False).head() # 保留只出现过一次的性别和姓名组合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    Shanghai Jiao Tong University\n",
       "1                Peking University\n",
       "3                 Fudan University\n",
       "5              Tsinghua University\n",
       "Name: School, dtype: object"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['School'].drop_duplicates() # 在Series上也可以使用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "此外，`duplicated`和`drop_duplicates`的功能类似，但前者返回了是否为唯一值的布尔列表，其`keep`参数与后者一致。其返回的序列，把重复元素设为`True`，否则为`False`。 `drop_duplicates`等价于把`duplicated`为`True`的对应行剔除。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    False\n",
       "1    False\n",
       "2     True\n",
       "3     True\n",
       "4     True\n",
       "dtype: bool"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.duplicated(['Gender', 'Transfer']).head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    False\n",
       "1    False\n",
       "2     True\n",
       "3    False\n",
       "4     True\n",
       "Name: School, dtype: bool"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['School'].duplicated().head() # 在Series上也可以使用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 4. 替换函数\n",
    "一般而言，替换操作是针对某一个列进行的，因此下面的例子都以`Series`举例。`pandas`中的替换函数可以归纳为三类：映射替换、逻辑替换、数值替换。其中映射替换包含`replace`方法、第八章中的`str.replace`方法以及第九章中的`cat.codes`方法，此处介绍`replace`的用法。\n",
    "\n",
    "在`replace`中，可以通过字典构造，或者传入两个列表来进行替换："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    0\n",
       "1    1\n",
       "2    1\n",
       "3    0\n",
       "4    1\n",
       "Name: Gender, dtype: int64"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['Gender'].replace({'Female':0, 'Male':1}).head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    0\n",
       "1    1\n",
       "2    1\n",
       "3    0\n",
       "4    1\n",
       "Name: Gender, dtype: int64"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df['Gender'].replace(['Female', 'Male'], [0, 1]).head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "另外，`replace`还有一种特殊的方向替换，指定`method`参数为`ffill`则为用前面一个最近的未被替换的值进行替换，`bfill`则使用后面最近的未被替换的值进行替换。从下面的例子可以看到，它们的结果是不同的："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    a\n",
       "1    a\n",
       "2    b\n",
       "3    b\n",
       "4    b\n",
       "5    b\n",
       "6    a\n",
       "dtype: object"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series(['a', 1, 'b', 2, 1, 1, 'a'])\n",
    "s.replace([1, 2], method='ffill')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    a\n",
       "1    b\n",
       "2    b\n",
       "3    a\n",
       "4    a\n",
       "5    a\n",
       "6    a\n",
       "dtype: object"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.replace([1, 2], method='bfill')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "pd.DataFrame.replace?\n",
    "```\n",
    "参数很多，其中关键的有：\n",
    "method : {'pad', 'ffill', 'bfill', `None`}\n",
    "    The method to use when for replacement, when `to_replace` is a\n",
    "    scalar, list or tuple and `value` is ``None``.    \n",
     "pad是默认值，与ffill等价，即向前填充（forward fill，用前面最近的未被替换值填充）  \n",
     "bfill即向后填充（backward fill，用后面最近的未被替换值填充）  \n",
    "None 啥都不做  \n",
    "regex : bool or same types as `to_replace`, default False  \n",
    "    Whether to interpret `to_replace` and/or `value` as regular  \n",
    "    expressions. If this is ``True`` then `to_replace` *must* be a  \n",
    "    string. Alternatively, this could be a regular expression or a  \n",
    "    list, dict, or array of regular expressions in which case  \n",
    "    `to_replace` must be ``None``.  \n",
     "regex这个参数是说选择True以后 to_replace必须是字符串（默认有很多可以选）\n",
    "```\n",
    "Signature: pd.DataFrame.replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad')\n",
    "Docstring:\n",
    "Replace values given in `to_replace` with `value`.\n",
    "\n",
    "Values of the DataFrame are replaced with other values dynamically.\n",
    "This differs from updating with ``.loc`` or ``.iloc``, which require\n",
    "you to specify a location to update with some value.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "to_replace : str, regex, list, dict, Series, int, float, or None\n",
    "    How to find the values that will be replaced.\n",
    "\n",
    "    * numeric, str or regex:\n",
    "\n",
    "        - numeric: numeric values equal to `to_replace` will be\n",
    "          replaced with `value`\n",
    "        - str: string exactly matching `to_replace` will be replaced\n",
    "          with `value`\n",
    "        - regex: regexs matching `to_replace` will be replaced with\n",
    "          `value`\n",
    "\n",
    "    * list of str, regex, or numeric:\n",
    "\n",
    "        - First, if `to_replace` and `value` are both lists, they\n",
    "          **must** be the same length.\n",
    "        - Second, if ``regex=True`` then all of the strings in **both**\n",
    "          lists will be interpreted as regexs otherwise they will match\n",
    "          directly. This doesn't matter much for `value` since there\n",
    "          are only a few possible substitution regexes you can use.\n",
    "        - str, regex and numeric rules apply as above.\n",
    "\n",
    "    * dict:\n",
    "\n",
    "        - Dicts can be used to specify different replacement values\n",
    "          for different existing values. For example,\n",
    "          ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n",
    "          'y' with 'z'. To use a dict in this way the `value`\n",
    "          parameter should be `None`.\n",
    "        - For a DataFrame a dict can specify that different values\n",
    "          should be replaced in different columns. For example,\n",
    "          ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n",
    "          and the value 'z' in column 'b' and replaces these values\n",
    "          with whatever is specified in `value`. The `value` parameter\n",
    "          should not be ``None`` in this case. You can treat this as a\n",
    "          special case of passing two lists except that you are\n",
    "          specifying the column to search in.\n",
    "        - For a DataFrame nested dictionaries, e.g.,\n",
    "          ``{'a': {'b': np.nan}}``, are read as follows: look in column\n",
    "          'a' for the value 'b' and replace it with NaN. The `value`\n",
    "          parameter should be ``None`` to use a nested dict in this\n",
    "          way. You can nest regular expressions as well. Note that\n",
    "          column names (the top-level dictionary keys in a nested\n",
    "          dictionary) **cannot** be regular expressions.\n",
    "\n",
    "    * None:\n",
    "\n",
    "        - This means that the `regex` argument must be a string,\n",
    "          compiled regular expression, or list, dict, ndarray or\n",
    "          Series of such elements. If `value` is also ``None`` then\n",
    "          this **must** be a nested dictionary or Series.\n",
    "\n",
    "    See the examples section for examples of each of these.\n",
    "value : scalar, dict, list, str, regex, default None\n",
    "    Value to replace any values matching `to_replace` with.\n",
    "    For a DataFrame a dict of values can be used to specify which\n",
    "    value to use for each column (columns not in the dict will not be\n",
    "    filled). Regular expressions, strings and lists or dicts of such\n",
    "    objects are also allowed.\n",
    "inplace : bool, default False\n",
    "    If True, in place. Note: this will modify any\n",
    "    other views on this object (e.g. a column from a DataFrame).\n",
    "    Returns the caller if this is True.\n",
    "limit : int, default None\n",
    "    Maximum size gap to forward or backward fill.\n",
    "regex : bool or same types as `to_replace`, default False\n",
    "    Whether to interpret `to_replace` and/or `value` as regular\n",
    "    expressions. If this is ``True`` then `to_replace` *must* be a\n",
    "    string. Alternatively, this could be a regular expression or a\n",
    "    list, dict, or array of regular expressions in which case\n",
    "    `to_replace` must be ``None``.\n",
    "method : {'pad', 'ffill', 'bfill', `None`}\n",
    "    The method to use when for replacement, when `to_replace` is a\n",
    "    scalar, list or tuple and `value` is ``None``.\n",
    "\n",
    "    .. versionchanged:: 0.23.0\n",
    "        Added to DataFrame.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame\n",
    "    Object after replacement.\n",
    "\n",
    "Raises\n",
    "------\n",
    "AssertionError\n",
    "    * If `regex` is not a ``bool`` and `to_replace` is not\n",
    "      ``None``.\n",
    "\n",
    "TypeError\n",
    "    * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n",
    "    * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n",
    "      ``dict``, ``ndarray``, or ``Series``\n",
    "    * If `to_replace` is ``None`` and `regex` is not compilable\n",
    "      into a regular expression or is a list, dict, ndarray, or\n",
    "      Series.\n",
    "    * When replacing multiple ``bool`` or ``datetime64`` objects and\n",
    "      the arguments to `to_replace` does not match the type of the\n",
    "      value being replaced\n",
    "\n",
    "ValueError\n",
    "    * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n",
    "      `value` but they are not the same length.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.fillna : Fill NA values.\n",
    "DataFrame.where : Replace values based on boolean condition.\n",
    "Series.str.replace : Simple string replacement.\n",
    "\n",
    "Notes\n",
    "-----\n",
    "* Regex substitution is performed under the hood with ``re.sub``. The\n",
    "  rules for substitution for ``re.sub`` are the same.\n",
    "* Regular expressions will only substitute on strings, meaning you\n",
    "  cannot provide, for example, a regular expression matching floating\n",
    "  point numbers and expect the columns in your frame that have a\n",
    "  numeric dtype to be matched. However, if those floating point\n",
    "  numbers *are* strings, then you can do this.\n",
    "* This method has *a lot* of options. You are encouraged to experiment\n",
    "  and play with this method to gain intuition about how it works.\n",
    "* When dict is used as the `to_replace` value, it is like\n",
    "  key(s) in the dict are the to_replace part and\n",
    "  value(s) in the dict are the value parameter.\n",
    "\n",
    "Examples\n",
    "--------\n",
    "\n",
    "**Scalar `to_replace` and `value`**\n",
    "\n",
    ">>> s = pd.Series([0, 1, 2, 3, 4])\n",
    ">>> s.replace(0, 5)\n",
    "0    5\n",
    "1    1\n",
    "2    2\n",
    "3    3\n",
    "4    4\n",
    "dtype: int64\n",
    "\n",
    ">>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n",
    "...                    'B': [5, 6, 7, 8, 9],\n",
    "...                    'C': ['a', 'b', 'c', 'd', 'e']})\n",
    ">>> df.replace(0, 5)\n",
    "   A  B  C\n",
    "0  5  5  a\n",
    "1  1  6  b\n",
    "2  2  7  c\n",
    "3  3  8  d\n",
    "4  4  9  e\n",
    "\n",
    "**List-like `to_replace`**\n",
    "\n",
    ">>> df.replace([0, 1, 2, 3], 4)\n",
    "   A  B  C\n",
    "0  4  5  a\n",
    "1  4  6  b\n",
    "2  4  7  c\n",
    "3  4  8  d\n",
    "4  4  9  e\n",
    "\n",
    ">>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n",
    "   A  B  C\n",
    "0  4  5  a\n",
    "1  3  6  b\n",
    "2  2  7  c\n",
    "3  1  8  d\n",
    "4  4  9  e\n",
    "\n",
    ">>> s.replace([1, 2], method='bfill')\n",
    "0    0\n",
    "1    3\n",
    "2    3\n",
    "3    3\n",
    "4    4\n",
    "dtype: int64\n",
    "\n",
    "**dict-like `to_replace`**\n",
    "\n",
    ">>> df.replace({0: 10, 1: 100})\n",
    "     A  B  C\n",
    "0   10  5  a\n",
    "1  100  6  b\n",
    "2    2  7  c\n",
    "3    3  8  d\n",
    "4    4  9  e\n",
    "\n",
    ">>> df.replace({'A': 0, 'B': 5}, 100)\n",
    "     A    B  C\n",
    "0  100  100  a\n",
    "1    1    6  b\n",
    "2    2    7  c\n",
    "3    3    8  d\n",
    "4    4    9  e\n",
    "\n",
    ">>> df.replace({'A': {0: 100, 4: 400}})\n",
    "     A  B  C\n",
    "0  100  5  a\n",
    "1    1  6  b\n",
    "2    2  7  c\n",
    "3    3  8  d\n",
    "4  400  9  e\n",
    "\n",
    "**Regular expression `to_replace`**\n",
    "\n",
    ">>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n",
    "...                    'B': ['abc', 'bar', 'xyz']})\n",
    ">>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n",
    "      A    B\n",
    "0   new  abc\n",
    "1   foo  new\n",
    "2  bait  xyz\n",
    "\n",
    ">>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n",
    "      A    B\n",
    "0   new  abc\n",
    "1   foo  bar\n",
    "2  bait  xyz\n",
    "\n",
    ">>> df.replace(regex=r'^ba.$', value='new')\n",
    "      A    B\n",
    "0   new  abc\n",
    "1   foo  new\n",
    "2  bait  xyz\n",
    "\n",
    ">>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n",
    "      A    B\n",
    "0   new  abc\n",
    "1   xyz  new\n",
    "2  bait  xyz\n",
    "\n",
    ">>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n",
    "      A    B\n",
    "0   new  abc\n",
    "1   new  new\n",
    "2  bait  xyz\n",
    "\n",
    "Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n",
    "the data types in the `to_replace` parameter must match the data\n",
    "type of the value being replaced:\n",
    "\n",
    ">>> df = pd.DataFrame({'A': [True, False, True],\n",
    "...                    'B': [False, True, False]})\n",
    ">>> df.replace({'a string': 'new value', True: False})  # raises\n",
    "Traceback (most recent call last):\n",
    "    ...\n",
    "TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n",
    "\n",
    "This raises a ``TypeError`` because one of the ``dict`` keys is not of\n",
    "the correct type for replacement.\n",
    "\n",
    "Compare the behavior of ``s.replace({'a': None})`` and\n",
    "``s.replace('a', None)`` to understand the peculiarities\n",
    "of the `to_replace` parameter:\n",
    "\n",
    ">>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n",
    "\n",
    "When one uses a dict as the `to_replace` value, it is like the\n",
    "value(s) in the dict are equal to the `value` parameter.\n",
    "``s.replace({'a': None})`` is equivalent to\n",
    "``s.replace(to_replace={'a': None}, value=None, method=None)``:\n",
    "\n",
    ">>> s.replace({'a': None})\n",
    "0      10\n",
    "1    None\n",
    "2    None\n",
    "3       b\n",
    "4    None\n",
    "dtype: object\n",
    "\n",
    "When ``value=None`` and `to_replace` is a scalar, list or\n",
    "tuple, `replace` uses the method parameter (default 'pad') to do the\n",
    "replacement. So this is why the 'a' values are being replaced by 10\n",
    "in rows 1 and 2 and 'b' in row 4 in this case.\n",
    "The command ``s.replace('a', None)`` is actually equivalent to\n",
    "``s.replace(to_replace='a', value=None, method='pad')``:\n",
    "\n",
    ">>> s.replace('a', None)\n",
    "0    10\n",
    "1    10\n",
    "2    10\n",
    "3     b\n",
    "4     b\n",
    "dtype: object\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【WARNING】正则替换请使用`str.replace`\n",
    "\n",
    "虽然对于`replace`而言可以使用正则替换，但是当前版本下对于`string`类型的正则替换还存在`bug`，因此如有此需求，请选择`str.replace`进行替换操作，具体的方式将在第八章中讲解。\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "逻辑替换包括了`where`和`mask`，这两个函数是完全对称的：`where`函数在传入条件为`False`的对应行进行替换，而`mask`在传入条件为`True`的对应行进行替换，当不指定替换值时，替换为缺失值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    -1.0\n",
       "1     NaN\n",
       "2     NaN\n",
       "3   -50.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series([-1, 1.2345, 100, -50])\n",
    "s.where(s<0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     -1.0\n",
       "1    100.0\n",
       "2    100.0\n",
       "3    -50.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.where(s<0, 100)\r\n",
    "#后一个替换值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0         NaN\n",
       "1      1.2345\n",
       "2    100.0000\n",
       "3         NaN\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.mask(s<0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    -50.0000\n",
       "1      1.2345\n",
       "2    100.0000\n",
       "3    -50.0000\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.mask(s<0, -50)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "需要注意的是，传入的条件只需是与被调用的`Series`索引一致的布尔序列即可："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    -50.0000\n",
       "1      1.2345\n",
       "2    100.0000\n",
       "3    -50.0000\n",
       "dtype: float64"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s_condition= pd.Series([True,False,False,True],index=s.index)\n",
    "s.mask(s_condition, -50)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "数值替换包含了`round, abs, clip`方法，它们分别表示取整、取绝对值和截断："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     -1.00\n",
       "1      1.23\n",
       "2    100.00\n",
       "3    -50.00\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series([-1, 1.2345, 100, -50])\n",
    "s.round(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0      1.0000\n",
       "1      1.2345\n",
       "2    100.0000\n",
       "3     50.0000\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.abs()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     0.0000\n",
       "1     1.2345\n",
       "2    60.0000\n",
       "3     0.0000\n",
       "dtype: float64"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "s.clip(0, 50) # 前两个参数分别表示下界和上界（lower, upper）\r\n",
    "#a=s.clip(0,50)\r\n",
    "#a.where(a!=50,60)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "pd.Series.clip?\n",
    "```\n",
     "inplace：是否直接在原数据上进行修改\n",
    "inplace : bool, default False\n",
    "    Whether to perform the operation in place on the data.\n",
    "```\n",
    "Signature: pd.Series.clip(self: ~FrameOrSeries, lower=None, upper=None, axis=None, inplace: bool = False, *args, **kwargs) -> ~FrameOrSeries\n",
    "Docstring:\n",
    "Trim values at input threshold(s).\n",
    "\n",
    "Assigns values outside boundary to boundary values. Thresholds\n",
    "can be singular values or array like, and in the latter case\n",
    "the clipping is performed element-wise in the specified axis.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "lower : float or array_like, default None\n",
    "    Minimum threshold value. All values below this\n",
    "    threshold will be set to it.\n",
    "upper : float or array_like, default None\n",
    "    Maximum threshold value. All values above this\n",
    "    threshold will be set to it.\n",
    "axis : int or str axis name, optional\n",
    "    Align object with lower and upper along the given axis.\n",
    "inplace : bool, default False\n",
    "    Whether to perform the operation in place on the data.\n",
    "*args, **kwargs\n",
    "    Additional keywords have no effect but might be accepted\n",
    "    for compatibility with numpy.\n",
    "\n",
    "Returns\n",
    "-------\n",
    "Series or DataFrame\n",
    "    Same type as calling object with the values outside the\n",
    "    clip boundaries replaced.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "Series.clip : Trim values at input threshold in series.\n",
    "DataFrame.clip : Trim values at input threshold in dataframe.\n",
    "numpy.clip : Clip (limit) the values in an array.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n",
    ">>> df = pd.DataFrame(data)\n",
    ">>> df\n",
    "   col_0  col_1\n",
    "0      9     -2\n",
    "1     -3     -7\n",
    "2      0      6\n",
    "3     -1      8\n",
    "4      5     -5\n",
    "\n",
    "Clips per column using lower and upper thresholds:\n",
    "\n",
    ">>> df.clip(-4, 6)\n",
    "   col_0  col_1\n",
    "0      6     -2\n",
    "1     -3     -4\n",
    "2      0      6\n",
    "3     -1      6\n",
    "4      5     -4\n",
    "\n",
    "Clips using specific lower and upper thresholds per column element:\n",
    "\n",
    ">>> t = pd.Series([2, -4, -1, 6, 3])\n",
    ">>> t\n",
    "0    2\n",
    "1   -4\n",
    "2   -1\n",
    "3    6\n",
    "4    3\n",
    "dtype: int64\n",
    "\n",
    ">>> df.clip(t, t + 4, axis=0)\n",
    "   col_0  col_1\n",
    "0      6      2\n",
    "1     -3     -4\n",
    "2      0      3\n",
    "3      6      8\n",
    "4      5      3\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【练一练】\n",
    "\n",
    "在 clip 中，超过边界的只能截断为边界值，如果要把超出边界的替换为自定义的值，应当如何做？  \n",
     "分两步：先用`clip`把超出边界的值截断为边界值，再用`where`把边界值替换成自定义的值\n",
    "```\n",
    "a=s.clip(0,50)\n",
    "a.where(a!=50,60)\n",
    "```\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "### 5. 排序函数\n",
    "排序共有两种方式，其一为值排序，其二为索引排序，对应的函数是`sort_values`和`sort_index`。\n",
    "\n",
    "为了演示排序函数，下面先利用`set_index`方法把年级和姓名两列作为索引，多级索引的内容和索引设置的方法将在第三章进行详细讲解。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"2\" valign=\"top\">Freshman</th>\n",
       "      <th>Gaopeng Yang</th>\n",
       "      <td>158.9</td>\n",
       "      <td>46.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Changqiang You</th>\n",
       "      <td>166.5</td>\n",
       "      <td>70.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Senior</th>\n",
       "      <th>Mei Sun</th>\n",
       "      <td>188.9</td>\n",
       "      <td>89.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                         Height  Weight\n",
       "Grade    Name                          \n",
       "Freshman Gaopeng Yang     158.9    46.0\n",
       "         Changqiang You   166.5    70.0\n",
       "Senior   Mei Sun          188.9    89.0"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo = df[['Grade', 'Name', 'Height', 'Weight']].set_index(['Grade','Name'])\n",
    "df_demo.head(3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "对身高进行排序，默认参数`ascending=True`为升序："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>Junior</th>\n",
       "      <th>Xiaoli Chu</th>\n",
       "      <td>145.4</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Senior</th>\n",
       "      <th>Gaomei Lv</th>\n",
       "      <td>147.3</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Sophomore</th>\n",
       "      <th>Peng Han</th>\n",
       "      <td>147.8</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Senior</th>\n",
       "      <th>Changli Lv</th>\n",
       "      <td>148.7</td>\n",
       "      <td>41.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Sophomore</th>\n",
       "      <th>Changjuan You</th>\n",
       "      <td>150.5</td>\n",
       "      <td>40.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                         Height  Weight\n",
       "Grade     Name                         \n",
       "Junior    Xiaoli Chu      145.4    34.0\n",
       "Senior    Gaomei Lv       147.3    34.0\n",
       "Sophomore Peng Han        147.8    34.0\n",
       "Senior    Changli Lv      148.7    41.0\n",
       "Sophomore Changjuan You   150.5    40.0"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.sort_values('Height').head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"3\" valign=\"top\">Senior</th>\n",
       "      <th>Xiaoqiang Qin</th>\n",
       "      <td>193.9</td>\n",
       "      <td>79.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Mei Sun</th>\n",
       "      <td>188.9</td>\n",
       "      <td>89.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Gaoli Zhao</th>\n",
       "      <td>186.5</td>\n",
       "      <td>83.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Freshman</th>\n",
       "      <th>Qiang Han</th>\n",
       "      <td>185.3</td>\n",
       "      <td>87.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Senior</th>\n",
       "      <th>Qiang Zheng</th>\n",
       "      <td>183.9</td>\n",
       "      <td>87.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                        Height  Weight\n",
       "Grade    Name                         \n",
       "Senior   Xiaoqiang Qin   193.9    79.0\n",
       "         Mei Sun         188.9    89.0\n",
       "         Gaoli Zhao      186.5    83.0\n",
       "Freshman Qiang Han       185.3    87.0\n",
       "Senior   Qiang Zheng     183.9    87.0"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.sort_values('Height', ascending=False).head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "在排序中，经常遇到多列排序的问题，比如在体重相同的情况下，对身高进行排序，并且保持身高降序排列，体重升序排列："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>Sophomore</th>\n",
       "      <th>Peng Han</th>\n",
       "      <td>147.8</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Senior</th>\n",
       "      <th>Gaomei Lv</th>\n",
       "      <td>147.3</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Junior</th>\n",
       "      <th>Xiaoli Chu</th>\n",
       "      <td>145.4</td>\n",
       "      <td>34.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Sophomore</th>\n",
       "      <th>Qiang Zhou</th>\n",
       "      <td>150.5</td>\n",
       "      <td>36.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Freshman</th>\n",
       "      <th>Yanqiang Xu</th>\n",
       "      <td>152.4</td>\n",
       "      <td>38.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                       Height  Weight\n",
       "Grade     Name                       \n",
       "Sophomore Peng Han      147.8    34.0\n",
       "Senior    Gaomei Lv     147.3    34.0\n",
       "Junior    Xiaoli Chu    145.4    34.0\n",
       "Sophomore Qiang Zhou    150.5    36.0\n",
       "Freshman  Yanqiang Xu   152.4    38.0"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.sort_values(['Weight','Height'],ascending=[True,False]).head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "pd.DataFrame.sort_values?\n",
    "```\n",
    "升序降序：ascending : bool or list of bool, default True\n",
    "     Sort ascending vs. descending. Specify list for multiple sort\n",
    "     orders.  If this is a list of bools, must match the length of\n",
    "     the by.\n",
    "     \n",
    "把nan放在哪儿：na_position : {'first', 'last'}, default 'last'\n",
    "     Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n",
    "     end.\n",
    "\n",
    "```\n",
    "Signature: pd.DataFrame.sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last', ignore_index=False, key: Union[Callable[[ForwardRef('Series')], Union[ForwardRef('Series'), ~AnyArrayLike]], NoneType] = None)\n",
    "Docstring:\n",
    "Sort by the values along either axis.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "        by : str or list of str\n",
    "            Name or list of names to sort by.\n",
    "\n",
    "            - if `axis` is 0 or `'index'` then `by` may contain index\n",
    "              levels and/or column labels.\n",
    "            - if `axis` is 1 or `'columns'` then `by` may contain column\n",
    "              levels and/or index labels.\n",
    "\n",
    "            .. versionchanged:: 0.23.0\n",
    "\n",
    "               Allow specifying index or column level names.\n",
    "axis : {0 or 'index', 1 or 'columns'}, default 0\n",
    "     Axis to be sorted.\n",
    "ascending : bool or list of bool, default True\n",
    "     Sort ascending vs. descending. Specify list for multiple sort\n",
    "     orders.  If this is a list of bools, must match the length of\n",
    "     the by.\n",
    "inplace : bool, default False\n",
    "     If True, perform operation in-place.\n",
    "kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n",
    "     Choice of sorting algorithm. See also ndarray.np.sort for more\n",
    "     information.  `mergesort` is the only stable algorithm. For\n",
    "     DataFrames, this option is only applied when sorting on a single\n",
    "     column or label.\n",
    "na_position : {'first', 'last'}, default 'last'\n",
    "     Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n",
    "     end.\n",
    "ignore_index : bool, default False\n",
    "     If True, the resulting axis will be labeled 0, 1, …, n - 1.\n",
    "\n",
    "     .. versionadded:: 1.0.0\n",
    "\n",
    "key : callable, optional\n",
    "    Apply the key function to the values\n",
    "    before sorting. This is similar to the `key` argument in the\n",
    "    builtin :meth:`sorted` function, with the notable difference that\n",
    "    this `key` function should be *vectorized*. It should expect a\n",
    "    ``Series`` and return a Series with the same shape as the input.\n",
    "    It will be applied to each column in `by` independently.\n",
    "\n",
    "    .. versionadded:: 1.1.0\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame or None\n",
    "    DataFrame with sorted values if inplace=False, None otherwise.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "DataFrame.sort_index : Sort a DataFrame by the index.\n",
    "Series.sort_values : Similar method for a Series.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> df = pd.DataFrame({\n",
    "...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n",
    "...     'col2': [2, 1, 9, 8, 7, 4],\n",
    "...     'col3': [0, 1, 9, 4, 2, 3],\n",
    "...     'col4': ['a', 'B', 'c', 'D', 'e', 'F']\n",
    "... })\n",
    ">>> df\n",
    "  col1  col2  col3 col4\n",
    "0    A     2     0    a\n",
    "1    A     1     1    B\n",
    "2    B     9     9    c\n",
    "3  NaN     8     4    D\n",
    "4    D     7     2    e\n",
    "5    C     4     3    F\n",
    "\n",
    "Sort by col1\n",
    "\n",
    ">>> df.sort_values(by=['col1'])\n",
    "  col1  col2  col3 col4\n",
    "0    A     2     0    a\n",
    "1    A     1     1    B\n",
    "2    B     9     9    c\n",
    "5    C     4     3    F\n",
    "4    D     7     2    e\n",
    "3  NaN     8     4    D\n",
    "\n",
    "Sort by multiple columns\n",
    "\n",
    ">>> df.sort_values(by=['col1', 'col2'])\n",
    "  col1  col2  col3 col4\n",
    "1    A     1     1    B\n",
    "0    A     2     0    a\n",
    "2    B     9     9    c\n",
    "5    C     4     3    F\n",
    "4    D     7     2    e\n",
    "3  NaN     8     4    D\n",
    "\n",
    "Sort Descending\n",
    "\n",
    ">>> df.sort_values(by='col1', ascending=False)\n",
    "  col1  col2  col3 col4\n",
    "4    D     7     2    e\n",
    "5    C     4     3    F\n",
    "2    B     9     9    c\n",
    "0    A     2     0    a\n",
    "1    A     1     1    B\n",
    "3  NaN     8     4    D\n",
    "\n",
    "Putting NAs first\n",
    "\n",
    ">>> df.sort_values(by='col1', ascending=False, na_position='first')\n",
    "  col1  col2  col3 col4\n",
    "3  NaN     8     4    D\n",
    "4    D     7     2    e\n",
    "5    C     4     3    F\n",
    "2    B     9     9    c\n",
    "0    A     2     0    a\n",
    "1    A     1     1    B\n",
    "\n",
    "Sorting with a key function\n",
    "\n",
    ">>> df.sort_values(by='col4', key=lambda col: col.str.lower())\n",
    "   col1  col2  col3 col4\n",
    "0    A     2     0    a\n",
    "1    A     1     1    B\n",
    "2    B     9     9    c\n",
    "3  NaN     8     4    D\n",
    "4    D     7     2    e\n",
    "5    C     4     3    F\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "索引排序的用法和值排序完全一致，只不过元素的值在索引中，此时需要指定索引层的名字或者层号，用参数`level`表示。另外，需要注意的是字符串的排列顺序由字母顺序决定。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>Height</th>\n",
       "      <th>Weight</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Grade</th>\n",
       "      <th>Name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"5\" valign=\"top\">Freshman</th>\n",
       "      <th>Yanquan Wang</th>\n",
       "      <td>163.5</td>\n",
       "      <td>55.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Yanqiang Xu</th>\n",
       "      <td>152.4</td>\n",
       "      <td>38.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Yanqiang Feng</th>\n",
       "      <td>162.3</td>\n",
       "      <td>51.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Yanpeng Lv</th>\n",
       "      <td>NaN</td>\n",
       "      <td>65.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Yanli Zhang</th>\n",
       "      <td>165.1</td>\n",
       "      <td>52.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                        Height  Weight\n",
       "Grade    Name                         \n",
       "Freshman Yanquan Wang    163.5    55.0\n",
       "         Yanqiang Xu     152.4    38.0\n",
       "         Yanqiang Feng   162.3    51.0\n",
       "         Yanpeng Lv        NaN    65.0\n",
       "         Yanli Zhang     165.1    52.0"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.sort_index(level=['Grade','Name'],ascending=[True,False]).head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "pd.DataFrame.sort_index?\n",
    "```\n",
    "相比于`sort_values`，多了\n",
    "\n",
    "sort_remaining : bool, default True\n",
    "    If True and sorting by level and index is multilevel, sort by other\n",
    "    levels too (in order) after sorting by specified level.\n",
    "    \n",
    "    \n",
    " 即先按指定的索引层级排序，再依次按其余层级排序\n",
    "```\n",
    "Signature: pd.DataFrame.sort_index(self, axis=0, level=None, ascending: bool = True, inplace: bool = False, kind: str = 'quicksort', na_position: str = 'last', sort_remaining: bool = True, ignore_index: bool = False, key: Union[Callable[[ForwardRef('Index')], Union[ForwardRef('Index'), ~AnyArrayLike]], NoneType] = None)\n",
    "Docstring:\n",
    "Sort object by labels (along an axis).\n",
    "\n",
    "Returns a new DataFrame sorted by label if `inplace` argument is\n",
    "``False``, otherwise updates the original DataFrame and returns None.\n",
    "\n",
    "Parameters\n",
    "----------\n",
    "axis : {0 or 'index', 1 or 'columns'}, default 0\n",
    "    The axis along which to sort.  The value 0 identifies the rows,\n",
    "    and 1 identifies the columns.\n",
    "level : int or level name or list of ints or list of level names\n",
    "    If not None, sort on values in specified index level(s).\n",
    "ascending : bool or list of bools, default True\n",
    "    Sort ascending vs. descending. When the index is a MultiIndex the\n",
    "    sort direction can be controlled for each level individually.\n",
    "inplace : bool, default False\n",
    "    If True, perform operation in-place.\n",
    "kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n",
    "    Choice of sorting algorithm. See also ndarray.np.sort for more\n",
    "    information.  `mergesort` is the only stable algorithm. For\n",
    "    DataFrames, this option is only applied when sorting on a single\n",
    "    column or label.\n",
    "na_position : {'first', 'last'}, default 'last'\n",
    "    Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n",
    "    Not implemented for MultiIndex.\n",
    "sort_remaining : bool, default True\n",
    "    If True and sorting by level and index is multilevel, sort by other\n",
    "    levels too (in order) after sorting by specified level.\n",
    "ignore_index : bool, default False\n",
    "    If True, the resulting axis will be labeled 0, 1, …, n - 1.\n",
    "\n",
    "    .. versionadded:: 1.0.0\n",
    "\n",
    "key : callable, optional\n",
    "    If not None, apply the key function to the index values\n",
    "    before sorting. This is similar to the `key` argument in the\n",
    "    builtin :meth:`sorted` function, with the notable difference that\n",
    "    this `key` function should be *vectorized*. It should expect an\n",
    "    ``Index`` and return an ``Index`` of the same shape. For MultiIndex\n",
    "    inputs, the key is applied *per level*.\n",
    "\n",
    "    .. versionadded:: 1.1.0\n",
    "\n",
    "Returns\n",
    "-------\n",
    "DataFrame\n",
    "    The original DataFrame sorted by the labels.\n",
    "\n",
    "See Also\n",
    "--------\n",
    "Series.sort_index : Sort Series by the index.\n",
    "DataFrame.sort_values : Sort DataFrame by the value.\n",
    "Series.sort_values : Sort Series by the value.\n",
    "\n",
    "Examples\n",
    "--------\n",
    ">>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],\n",
    "...                   columns=['A'])\n",
    ">>> df.sort_index()\n",
    "     A\n",
    "1    4\n",
    "29   2\n",
    "100  1\n",
    "150  5\n",
    "234  3\n",
    "\n",
    "By default, it sorts in ascending order, to sort in descending order,\n",
    "use ``ascending=False``\n",
    "\n",
    ">>> df.sort_index(ascending=False)\n",
    "     A\n",
    "234  3\n",
    "150  5\n",
    "100  1\n",
    "29   2\n",
    "1    4\n",
    "\n",
    "A key function can be specified which is applied to the index before\n",
    "sorting. For a ``MultiIndex`` this is applied to each level separately.\n",
    "\n",
    ">>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])\n",
    ">>> df.sort_index(key=lambda x: x.str.lower())\n",
    "   a\n",
    "A  1\n",
    "b  2\n",
    "C  3\n",
    "d  4\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 6. apply方法\n",
    "`apply`方法常用于`DataFrame`的行迭代或者列迭代，它的`axis`含义与第2小节中的统计聚合函数一致，`apply`的参数往往是一个以序列为输入的函数。例如对于`.mean()`，使用`apply`可以如下地写出："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    163.218033\n",
       "Weight     55.015873\n",
       "dtype: float64"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo = df[['Height', 'Weight']]\n",
    "def my_mean(x):\n",
    "     res = x.mean()\n",
    "     return res\n",
    "df_demo.apply(my_mean)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "同样的，可以利用`lambda`表达式使得书写简洁，这里的`x`就指代被调用的`df_demo`表中逐个输入的序列："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height    163.218033\n",
       "Weight     55.015873\n",
       "dtype: float64"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.apply(lambda x:x.mean())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "若指定`axis=1`，那么每次传入函数的就是行元素组成的`Series`，其结果与之前的逐行均值结果一致。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    102.45\n",
       "1    118.25\n",
       "2    138.95\n",
       "3     41.00\n",
       "4    124.00\n",
       "dtype: float64"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.apply(lambda x:x.mean(), axis=1).head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "这里再举一个例子：`mad`函数返回的是一个序列中偏离该序列均值的绝对值大小的均值，例如序列1,3,7,10中，均值为5.25，每一个元素偏离的绝对值为4.25,2.25,1.75,4.75，这个偏离序列的均值为3.25。现在利用`apply`计算身高和体重的`mad`指标：\n",
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height     6.707229\n",
       "Weight    10.391870\n",
       "dtype: float64"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.apply(lambda x:(x-x.mean()).abs().mean())\r\n",
    "#lambda函数显式计算了偏离值，然后取绝对值，然后取均值"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "这与使用内置的`mad`函数计算结果一致："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Height     6.707229\n",
       "Weight    10.391870\n",
       "dtype: float64"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_demo.mad()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【WARNING】谨慎使用`apply`\n",
    "\n",
    "得益于传入自定义函数的处理，`apply`的自由度很高，但这是以性能为代价的。一般而言，使用`pandas`的内置函数处理和`apply`来处理同一个任务，其速度会相差较多，因此只有在确实存在自定义需求的情境下才考虑使用`apply`。\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "## 四、窗口对象\n",
    "`pandas`中有3类窗口，分别是滑动窗口`rolling`、扩张窗口`expanding`以及指数加权窗口`ewm`。需要说明的是，以日期偏置为窗口大小的滑动窗口将在第十章讨论，指数加权窗口见本章练习。\n",
    "\n",
    "### 1. 滑窗对象\n",
    "要使用滑窗函数，就必须先要对一个序列使用`.rolling`得到滑窗对象，其最重要的参数为窗口大小`window`。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Rolling [window=3,center=False,axis=0]"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series([1,2,3,4,5])\n",
    "roller = s.rolling(window = 3)\n",
    "roller"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "在得到了滑窗对象后，能够使用相应的聚合函数进行计算，需要注意的是窗口包含当前行所在的元素，例如在第四个位置进行均值运算时，应当计算(2+3+4)/3，而不是(1+2+3)/3："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    NaN\n",
       "1    NaN\n",
       "2    2.0\n",
       "3    3.0\n",
       "4    4.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "roller.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     NaN\n",
       "1     NaN\n",
       "2     6.0\n",
       "3     9.0\n",
       "4    12.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "roller.sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "对于滑动相关系数或滑动协方差的计算，可以如下写出："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     NaN\n",
       "1     NaN\n",
       "2     2.5\n",
       "3     7.0\n",
       "4    12.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s2 = pd.Series([1,2,6,16,30])\n",
    "roller.cov(s2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0         NaN\n",
       "1         NaN\n",
       "2    0.944911\n",
       "3    0.970725\n",
       "4    0.995402\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "roller.corr(s2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "此外，还支持使用`apply`传入自定义函数，其传入值是对应窗口的`Series`，例如上述的均值函数可以等效表示："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    NaN\n",
       "1    NaN\n",
       "2    2.0\n",
       "3    3.0\n",
       "4    4.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "roller.apply(lambda x:x.mean())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "`shift, diff, pct_change`是一组类滑窗函数，它们的公共参数为`periods=n`，默认为1，分别表示取向前第`n`个元素的值、与向前第`n`个元素做差（与`Numpy`中不同，后者表示`n`阶差分）、与向前第`n`个元素相比计算增长率。这里的`n`可以为负，表示反方向的类似操作。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    NaN\n",
       "1    NaN\n",
       "2    1.0\n",
       "3    3.0\n",
       "4    6.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series([1,3,6,10,15])\n",
    "s.shift(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     NaN\n",
       "1     NaN\n",
       "2     NaN\n",
       "3     9.0\n",
       "4    12.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.diff(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0         NaN\n",
       "1    2.000000\n",
       "2    1.000000\n",
       "3    0.666667\n",
       "4    0.500000\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.pct_change()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     3.0\n",
       "1     6.0\n",
       "2    10.0\n",
       "3    15.0\n",
       "4     NaN\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.shift(-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0   -5.0\n",
       "1   -7.0\n",
       "2   -9.0\n",
       "3    NaN\n",
       "4    NaN\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.diff(-2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "将其视作类滑窗函数的原因是，它们的功能可以用窗口大小为`n+1`的`rolling`方法等价代替："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    NaN\n",
       "1    NaN\n",
       "2    1.0\n",
       "3    3.0\n",
       "4    6.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.rolling(3).apply(lambda x:list(x)[0]) # s.shift(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0     NaN\n",
       "1     NaN\n",
       "2     NaN\n",
       "3     9.0\n",
       "4    12.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    " s.rolling(4).apply(lambda x:list(x)[-1]-list(x)[0]) # s.diff(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0         NaN\n",
       "1    2.000000\n",
       "2    1.000000\n",
       "3    0.666667\n",
       "4    0.500000\n",
       "dtype: float64"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def my_pct(x):\n",
    "     L = list(x)\n",
    "     return L[-1]/L[0]-1\n",
    "s.rolling(2).apply(my_pct) # s.pct_change()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【练一练】\n",
    "\n",
    "`rolling`对象的默认窗口方向都是向前的，某些情况下用户需要向后的窗口，例如对1,2,3设定向后窗口为2的`sum`操作，结果为3,5,NaN，此时应该如何实现向后的滑窗操作？（提示：使用`shift`）\n",
    "思路：先做正向滑窗（如`s.rolling(2).sum()`得到NaN,3,5），再用`shift(-1)`将结果整体前移一位，即得向后窗口的结果3,5,NaN。\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "### 2. 扩张窗口\n",
    "扩张窗口又称累计窗口，可以理解为一个动态长度的窗口，其窗口的大小就是从序列开始处到具体操作的对应位置，其使用的聚合函数会作用于这些逐步扩张的窗口上。具体地说，设序列为a1, a2, a3, a4，则其每个位置对应的窗口即\\[a1\\]、\\[a1, a2\\]、\\[a1, a2, a3\\]、\\[a1, a2, a3, a4\\]。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    1.0\n",
       "1    1.0\n",
       "2    1.0\n",
       "3    1.0\n",
       "dtype: float64"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = pd.Series([1, 3, 6, 10])\n",
    "s.expanding().mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### 【练一练】\n",
    "\n",
    "`cummax, cumsum, cumprod`函数是典型的类扩张窗口函数，请使用`expanding`对象依次实现它们。\n",
    "\n",
    "\n",
    "#### 【END】\n",
    "\n",
    "## 五、练习\n",
    "### Ex1：口袋妖怪数据集\n",
    "现有一份口袋妖怪的数据集，下面进行一些背景说明：\n",
    "\n",
    "* `#`代表全国图鉴编号，不同行存在相同数字则表示为该妖怪的不同状态\n",
    "\n",
    "* 妖怪具有单属性和双属性两种，对于单属性的妖怪，`Type 2`为缺失值\n",
    "* `Total, HP, Attack, Defense, Sp. Atk, Sp. Def, Speed`分别代表种族值、体力、物攻、防御、特攻、特防、速度，其中种族值为后6项之和"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>#</th>\n",
       "      <th>Name</th>\n",
       "      <th>Type 1</th>\n",
       "      <th>Type 2</th>\n",
       "      <th>Total</th>\n",
       "      <th>HP</th>\n",
       "      <th>Attack</th>\n",
       "      <th>Defense</th>\n",
       "      <th>Sp. Atk</th>\n",
       "      <th>Sp. Def</th>\n",
       "      <th>Speed</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>Bulbasaur</td>\n",
       "      <td>Grass</td>\n",
       "      <td>Poison</td>\n",
       "      <td>318</td>\n",
       "      <td>45</td>\n",
       "      <td>49</td>\n",
       "      <td>49</td>\n",
       "      <td>65</td>\n",
       "      <td>65</td>\n",
       "      <td>45</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>Ivysaur</td>\n",
       "      <td>Grass</td>\n",
       "      <td>Poison</td>\n",
       "      <td>405</td>\n",
       "      <td>60</td>\n",
       "      <td>62</td>\n",
       "      <td>63</td>\n",
       "      <td>80</td>\n",
       "      <td>80</td>\n",
       "      <td>60</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>Venusaur</td>\n",
       "      <td>Grass</td>\n",
       "      <td>Poison</td>\n",
       "      <td>525</td>\n",
       "      <td>80</td>\n",
       "      <td>82</td>\n",
       "      <td>83</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>80</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   #       Name Type 1  Type 2  Total  HP  Attack  Defense  Sp. Atk  Sp. Def  \\\n",
       "0  1  Bulbasaur  Grass  Poison    318  45      49       49       65       65   \n",
       "1  2    Ivysaur  Grass  Poison    405  60      62       63       80       80   \n",
       "2  3   Venusaur  Grass  Poison    525  80      82       83      100      100   \n",
       "\n",
       "   Speed  \n",
       "0     45  \n",
       "1     60  \n",
       "2     80  "
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.read_csv('data/pokemon.csv')\n",
    "df.head(3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "1. 对`HP, Attack, Defense, Sp. Atk, Sp. Def, Speed`进行加总，验证是否为`Total`值。\n",
    "\n",
    "2. 对于`#`重复的妖怪只保留第一条记录，解决以下问题：\n",
    "\n",
    "* 求第一属性的种类数量和前三多数量对应的种类\n",
    "* 求第一属性和第二属性的组合种类\n",
    "* 求尚未出现过的属性组合\n",
    "\n",
    "3. 按照下述要求，构造`Series`：\n",
    "\n",
    "* 取出物攻，超过120的替换为`high`，不足50的替换为`low`，否则设为`mid`\n",
    "* 取出第一属性，分别用`replace`和`apply`替换所有字母为大写\n",
    "* 求每个妖怪六项能力的离差，即所有能力中偏离中位数最大的值，添加到`df`并从大到小排序\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0      318\n",
      "1      405\n",
      "2      525\n",
      "3      625\n",
      "4      309\n",
      "      ... \n",
      "795    600\n",
      "796    700\n",
      "797    600\n",
      "798    680\n",
      "799    600\n",
      "Length: 800, dtype: int64\n",
      "0      318\n",
      "1      405\n",
      "2      525\n",
      "3      625\n",
      "4      309\n",
      "      ... \n",
      "795    600\n",
      "796    700\n",
      "797    600\n",
      "798    680\n",
      "799    600\n",
      "Name: Total, Length: 800, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "#对HP, Attack, Defense, Sp. Atk, Sp. Def, Speed进行加总，验证是否为Total值。\r\n",
    "print(df[['HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed']].sum(1))\r\n",
    "print(df['Total'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Water       105\n",
       "Normal       93\n",
       "Grass        66\n",
       "Bug          63\n",
       "Psychic      47\n",
       "Fire         47\n",
       "Rock         41\n",
       "Electric     36\n",
       "Ground       30\n",
       "Poison       28\n",
       "Dark         28\n",
       "Fighting     25\n",
       "Dragon       24\n",
       "Ghost        23\n",
       "Ice          23\n",
       "Steel        22\n",
       "Fairy        17\n",
       "Flying        3\n",
       "Name: Type 1, dtype: int64"
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#求第一属性的种类数量和前三多数量对应的种类\r\n",
    "dp2a = df.drop_duplicates('#')\r\n",
    "dp2a['Type 1'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>#</th>\n",
       "      <th>Name</th>\n",
       "      <th>Type 1</th>\n",
       "      <th>Type 2</th>\n",
       "      <th>Total</th>\n",
       "      <th>HP</th>\n",
       "      <th>Attack</th>\n",
       "      <th>Defense</th>\n",
       "      <th>Sp. Atk</th>\n",
       "      <th>Sp. Def</th>\n",
       "      <th>Speed</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>Bulbasaur</td>\n",
       "      <td>Grass</td>\n",
       "      <td>Poison</td>\n",
       "      <td>318</td>\n",
       "      <td>45</td>\n",
       "      <td>49</td>\n",
       "      <td>49</td>\n",
       "      <td>65</td>\n",
       "      <td>65</td>\n",
       "      <td>45</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>Charmander</td>\n",
       "      <td>Fire</td>\n",
       "      <td>NaN</td>\n",
       "      <td>309</td>\n",
       "      <td>39</td>\n",
       "      <td>52</td>\n",
       "      <td>43</td>\n",
       "      <td>60</td>\n",
       "      <td>50</td>\n",
       "      <td>65</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>6</td>\n",
       "      <td>Charizard</td>\n",
       "      <td>Fire</td>\n",
       "      <td>Flying</td>\n",
       "      <td>534</td>\n",
       "      <td>78</td>\n",
       "      <td>84</td>\n",
       "      <td>78</td>\n",
       "      <td>109</td>\n",
       "      <td>85</td>\n",
       "      <td>100</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>7</td>\n",
       "      <td>Squirtle</td>\n",
       "      <td>Water</td>\n",
       "      <td>NaN</td>\n",
       "      <td>314</td>\n",
       "      <td>44</td>\n",
       "      <td>48</td>\n",
       "      <td>65</td>\n",
       "      <td>50</td>\n",
       "      <td>64</td>\n",
       "      <td>43</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>10</td>\n",
       "      <td>Caterpie</td>\n",
       "      <td>Bug</td>\n",
       "      <td>NaN</td>\n",
       "      <td>195</td>\n",
       "      <td>45</td>\n",
       "      <td>30</td>\n",
       "      <td>35</td>\n",
       "      <td>20</td>\n",
       "      <td>20</td>\n",
       "      <td>45</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>773</th>\n",
       "      <td>703</td>\n",
       "      <td>Carbink</td>\n",
       "      <td>Rock</td>\n",
       "      <td>Fairy</td>\n",
       "      <td>500</td>\n",
       "      <td>50</td>\n",
       "      <td>50</td>\n",
       "      <td>150</td>\n",
       "      <td>50</td>\n",
       "      <td>150</td>\n",
       "      <td>50</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>778</th>\n",
       "      <td>708</td>\n",
       "      <td>Phantump</td>\n",
       "      <td>Ghost</td>\n",
       "      <td>Grass</td>\n",
       "      <td>309</td>\n",
       "      <td>43</td>\n",
       "      <td>70</td>\n",
       "      <td>48</td>\n",
       "      <td>50</td>\n",
       "      <td>60</td>\n",
       "      <td>38</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>790</th>\n",
       "      <td>714</td>\n",
       "      <td>Noibat</td>\n",
       "      <td>Flying</td>\n",
       "      <td>Dragon</td>\n",
       "      <td>245</td>\n",
       "      <td>40</td>\n",
       "      <td>30</td>\n",
       "      <td>35</td>\n",
       "      <td>45</td>\n",
       "      <td>40</td>\n",
       "      <td>55</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>797</th>\n",
       "      <td>720</td>\n",
       "      <td>HoopaHoopa Confined</td>\n",
       "      <td>Psychic</td>\n",
       "      <td>Ghost</td>\n",
       "      <td>600</td>\n",
       "      <td>80</td>\n",
       "      <td>110</td>\n",
       "      <td>60</td>\n",
       "      <td>150</td>\n",
       "      <td>130</td>\n",
       "      <td>70</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>799</th>\n",
       "      <td>721</td>\n",
       "      <td>Volcanion</td>\n",
       "      <td>Fire</td>\n",
       "      <td>Water</td>\n",
       "      <td>600</td>\n",
       "      <td>80</td>\n",
       "      <td>110</td>\n",
       "      <td>120</td>\n",
       "      <td>130</td>\n",
       "      <td>90</td>\n",
       "      <td>70</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>143 rows × 11 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       #                 Name   Type 1  Type 2  Total  HP  Attack  Defense  \\\n",
       "0      1            Bulbasaur    Grass  Poison    318  45      49       49   \n",
       "4      4           Charmander     Fire     NaN    309  39      52       43   \n",
       "6      6            Charizard     Fire  Flying    534  78      84       78   \n",
       "9      7             Squirtle    Water     NaN    314  44      48       65   \n",
       "13    10             Caterpie      Bug     NaN    195  45      30       35   \n",
       "..   ...                  ...      ...     ...    ...  ..     ...      ...   \n",
       "773  703              Carbink     Rock   Fairy    500  50      50      150   \n",
       "778  708             Phantump    Ghost   Grass    309  43      70       48   \n",
       "790  714               Noibat   Flying  Dragon    245  40      30       35   \n",
       "797  720  HoopaHoopa Confined  Psychic   Ghost    600  80     110       60   \n",
       "799  721            Volcanion     Fire   Water    600  80     110      120   \n",
       "\n",
       "     Sp. Atk  Sp. Def  Speed  \n",
       "0         65       65     45  \n",
       "4         60       50     65  \n",
       "6        109       85    100  \n",
       "9         50       64     43  \n",
       "13        20       20     45  \n",
       "..       ...      ...    ...  \n",
       "773       50      150     50  \n",
       "778       50       60     38  \n",
       "790       45       40     55  \n",
       "797      150      130     70  \n",
       "799      130       90     70  \n",
       "\n",
       "[143 rows x 11 columns]"
      ]
     },
     "execution_count": 86,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dp2b = dp2a.drop_duplicates(['Type 1', 'Type 2'])\r\n",
    "dp2b\r\n",
    "#组合种类数可用dp2b.shape[0]来显示，直接写shape属性不会作为输出显示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0       low\n",
       "1       mid\n",
       "2       mid\n",
       "3       mid\n",
       "4       mid\n",
       "       ... \n",
       "795     mid\n",
       "796    high\n",
       "797     mid\n",
       "798    high\n",
       "799     mid\n",
       "Name: Attack, Length: 800, dtype: object"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#取出物攻，超过120的替换为high，不足50的替换为low，否则设为mid\r\n",
    "#mask：true\r\n",
    "#where：false\r\n",
    "\r\n",
    "df['Attack'].mask(df['Attack']>120, 'high').mask(df['Attack']<50, 'low').mask((50<=df['Attack'])&(df['Attack']<=120), 'mid')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0        GRASS\n",
       "1        GRASS\n",
       "2        GRASS\n",
       "3        GRASS\n",
       "4         FIRE\n",
       "        ...   \n",
       "795       ROCK\n",
       "796       ROCK\n",
       "797    PSYCHIC\n",
       "798    PSYCHIC\n",
       "799       FIRE\n",
       "Name: Type 1, Length: 800, dtype: object"
      ]
     },
     "execution_count": 92,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#取出第一属性，分别用replace和apply替换所有字母为大写\r\n",
    "\r\n",
    "\r\n",
    "df['Type 1'].apply(lambda x:str.upper(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>#</th>\n",
       "      <th>Name</th>\n",
       "      <th>Type 1</th>\n",
       "      <th>Type 2</th>\n",
       "      <th>Total</th>\n",
       "      <th>HP</th>\n",
       "      <th>Attack</th>\n",
       "      <th>Defense</th>\n",
       "      <th>Sp. Atk</th>\n",
       "      <th>Sp. Def</th>\n",
       "      <th>Speed</th>\n",
       "      <th>Deviation</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>230</th>\n",
       "      <td>213</td>\n",
       "      <td>Shuckle</td>\n",
       "      <td>Bug</td>\n",
       "      <td>Rock</td>\n",
       "      <td>505</td>\n",
       "      <td>20</td>\n",
       "      <td>10</td>\n",
       "      <td>230</td>\n",
       "      <td>10</td>\n",
       "      <td>230</td>\n",
       "      <td>5</td>\n",
       "      <td>215.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>121</th>\n",
       "      <td>113</td>\n",
       "      <td>Chansey</td>\n",
       "      <td>Normal</td>\n",
       "      <td>NaN</td>\n",
       "      <td>450</td>\n",
       "      <td>250</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>35</td>\n",
       "      <td>105</td>\n",
       "      <td>50</td>\n",
       "      <td>207.5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>261</th>\n",
       "      <td>242</td>\n",
       "      <td>Blissey</td>\n",
       "      <td>Normal</td>\n",
       "      <td>NaN</td>\n",
       "      <td>540</td>\n",
       "      <td>255</td>\n",
       "      <td>10</td>\n",
       "      <td>10</td>\n",
       "      <td>75</td>\n",
       "      <td>135</td>\n",
       "      <td>55</td>\n",
       "      <td>190.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>333</th>\n",
       "      <td>306</td>\n",
       "      <td>AggronMega Aggron</td>\n",
       "      <td>Steel</td>\n",
       "      <td>NaN</td>\n",
       "      <td>630</td>\n",
       "      <td>70</td>\n",
       "      <td>140</td>\n",
       "      <td>230</td>\n",
       "      <td>60</td>\n",
       "      <td>80</td>\n",
       "      <td>50</td>\n",
       "      <td>155.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>224</th>\n",
       "      <td>208</td>\n",
       "      <td>SteelixMega Steelix</td>\n",
       "      <td>Steel</td>\n",
       "      <td>Ground</td>\n",
       "      <td>610</td>\n",
       "      <td>75</td>\n",
       "      <td>125</td>\n",
       "      <td>230</td>\n",
       "      <td>55</td>\n",
       "      <td>95</td>\n",
       "      <td>30</td>\n",
       "      <td>145.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>143</th>\n",
       "      <td>132</td>\n",
       "      <td>Ditto</td>\n",
       "      <td>Normal</td>\n",
       "      <td>NaN</td>\n",
       "      <td>288</td>\n",
       "      <td>48</td>\n",
       "      <td>48</td>\n",
       "      <td>48</td>\n",
       "      <td>48</td>\n",
       "      <td>48</td>\n",
       "      <td>48</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>165</th>\n",
       "      <td>151</td>\n",
       "      <td>Mew</td>\n",
       "      <td>Psychic</td>\n",
       "      <td>NaN</td>\n",
       "      <td>600</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>255</th>\n",
       "      <td>236</td>\n",
       "      <td>Tyrogue</td>\n",
       "      <td>Fighting</td>\n",
       "      <td>NaN</td>\n",
       "      <td>210</td>\n",
       "      <td>35</td>\n",
       "      <td>35</td>\n",
       "      <td>35</td>\n",
       "      <td>35</td>\n",
       "      <td>35</td>\n",
       "      <td>35</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>206</th>\n",
       "      <td>191</td>\n",
       "      <td>Sunkern</td>\n",
       "      <td>Grass</td>\n",
       "      <td>NaN</td>\n",
       "      <td>180</td>\n",
       "      <td>30</td>\n",
       "      <td>30</td>\n",
       "      <td>30</td>\n",
       "      <td>30</td>\n",
       "      <td>30</td>\n",
       "      <td>30</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>271</th>\n",
       "      <td>251</td>\n",
       "      <td>Celebi</td>\n",
       "      <td>Psychic</td>\n",
       "      <td>Grass</td>\n",
       "      <td>600</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>100</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>800 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       #                 Name    Type 1  Type 2  Total   HP  Attack  Defense  \\\n",
       "230  213              Shuckle       Bug    Rock    505   20      10      230   \n",
       "121  113              Chansey    Normal     NaN    450  250       5        5   \n",
       "261  242              Blissey    Normal     NaN    540  255      10       10   \n",
       "333  306    AggronMega Aggron     Steel     NaN    630   70     140      230   \n",
       "224  208  SteelixMega Steelix     Steel  Ground    610   75     125      230   \n",
       "..   ...                  ...       ...     ...    ...  ...     ...      ...   \n",
       "143  132                Ditto    Normal     NaN    288   48      48       48   \n",
       "165  151                  Mew   Psychic     NaN    600  100     100      100   \n",
       "255  236              Tyrogue  Fighting     NaN    210   35      35       35   \n",
       "206  191              Sunkern     Grass     NaN    180   30      30       30   \n",
       "271  251               Celebi   Psychic   Grass    600  100     100      100   \n",
       "\n",
       "     Sp. Atk  Sp. Def  Speed  Deviation  \n",
       "230       10      230      5      215.0  \n",
       "121       35      105     50      207.5  \n",
       "261       75      135     55      190.0  \n",
       "333       60       80     50      155.0  \n",
       "224       55       95     30      145.0  \n",
       "..       ...      ...    ...        ...  \n",
       "143       48       48     48        0.0  \n",
       "165      100      100    100        0.0  \n",
       "255       35       35     35        0.0  \n",
       "206       30       30     30        0.0  \n",
       "271      100      100    100        0.0  \n",
       "\n",
       "[800 rows x 12 columns]"
      ]
     },
     "execution_count": 99,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#求每个妖怪六项能力的离差，即所有能力中偏离中位数最大的值，添加到df并从大到小排序\r\n",
    "#求离差：\r\n",
    "df['Deviation'] = df[['HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed']].apply(lambda x:np.max((x-x.median()).abs()), 1)\r\n",
    "#从大到小\r\n",
    "df.sort_values('Deviation', ascending=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### Ex2：指数加权窗口\n",
    "1. 作为扩张窗口的`ewm`窗口\n",
    "\n",
    "在扩张窗口中，用户可以使用各类函数进行历史的累计指标统计，但这些内置的统计函数往往把窗口中的所有元素赋予了同样的权重。事实上，可以给出不同的权重来赋给窗口中的元素，指数加权窗口就是这样一种特殊的扩张窗口。\n",
    "\n",
    "其中，最重要的参数是`alpha`，它决定了默认情况下的窗口权重为$w_i=(1−\\alpha)^i,i\\in\\{0,1,...,t\\}$，其中$i=t$表示当前元素，$i=0$表示序列的第一个元素。\n",
    "\n",
    "从权重公式可以看出，离开当前值越远则权重越小，若记原序列为$x$，更新后的当前元素为$y_t$，此时通过加权公式归一化后可知：\n",
    "\n",
    "$$ \\begin{split}y_t &=\\frac{\\sum_{i=0}^{t} w_i x_{t-i}}{\\sum_{i=0}^{t} w_i} \\\\\n",
    "&=\\frac{x_t + (1 - \\alpha)x_{t-1} + (1 - \\alpha)^2 x_{t-2} + ...\n",
    "+ (1 - \\alpha)^{t} x_{0}}{1 + (1 - \\alpha) + (1 - \\alpha)^2 + ...\n",
    "+ (1 - \\alpha)^{t-1}}\\\\\\end{split} $$\n",
    "\n",
    "对于`Series`而言，可以用`ewm`对象如下计算指数平滑后的序列："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0   -1\n",
       "1   -1\n",
       "2   -2\n",
       "3   -2\n",
       "4   -2\n",
       "dtype: int64"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.random.seed(0)\n",
    "s = pd.Series(np.random.randint(-1,2,30).cumsum())\n",
    "s.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0   -1.000000\n",
       "1   -1.000000\n",
       "2   -1.409836\n",
       "3   -1.609756\n",
       "4   -1.725845\n",
       "dtype: float64"
      ]
     },
     "execution_count": 102,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.ewm(alpha=0.2).mean().head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "请用`expanding`窗口实现。\n",
    "\n",
    "2. 作为滑动窗口的`ewm`窗口\n",
    "\n",
    "从第1问中可以看到，`ewm`作为一种扩张窗口的特例，只能从序列的第一个元素开始加权。现在希望给定一个限制窗口`n`，只对包含自身最近的`n`个窗口进行滑动加权平滑。请根据滑窗函数，给出新的`wi`与`yt`的更新公式，并通过`rolling`窗口实现这一功能。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\ndef ewm_func(x, alpha=0.2):\\n    win = (1-alpha)**np.arange(x.shape[0])[::-1]\\n    res = (win*x).sum()/win.sum()\\n    return res\\ns.expanding().apply(ewm_func)\\n'"
      ]
     },
     "execution_count": 117,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\r\n",
    "def expanding_fuc(x):\r\n",
    "    x.shape[0] = nb #总数\r\n",
    "    y = np.arange(nb,0,-1) #倒序\r\n",
    "    wix = (x*0.8**y).sum() #分子\r\n",
    "    wi = (0.8**y).sum() #分母\r\n",
    "    return wix/wi\r\n",
    "s.expanding().apply(ewm_func)\r\n",
    "'''\r\n",
    "#然而运行出来是什么乱七八糟的东西\r\n",
    "#抄答案\r\n",
    "\r\n",
    "def ewm_func(x, alpha=0.2):\r\n",
    "    win = (1-alpha)**np.arange(x.shape[0])[::-1]\r\n",
    "    res = (win*x).sum()/win.sum()\r\n",
    "    return res\r\n",
    "s.expanding().apply(ewm_func)\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "s.rolling(window=4).apply(ewm_func).head()  \n",
    "着实不知道window=4是什么意思  \n",
    "上标下标和为t就是了  \n",
    "![avatar](https://misaka19998.gitee.io/picture/teamlearning/pandas/IMG20201219200204.jpg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 2.0.0b0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
