{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.7.4 64-bit ('base': conda)"
  },
  "interpreter": {
   "hash": "2a7e95a32014fc1ccf24626d45a98c6e7b4373277259c22f47a91d487fc3e8a5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "source": [
    "### Pandas\n",
    "## [官方文档](https://pandas.pydata.org/docs/)  \n",
    "基于**numpy**实现的数据应用:采用高效的数据类型 + 用于数据分析的操作函数  \n",
    "\n",
     "1. Series  一维数据类型\n",
    "2. Dataframe  多维数据类型  \n",
    "> 围绕两个数据类型,Pandas可通过操作索引实现数据的基本操作/运算操作/特征类操作/关联类操作\n",
    "\n",
    "|  Numpy   |  Pandas  |\n",
    "|  ----  |  ----  |\n",
     "| 基础数据类型ndarray  |  扩展数据类型series/dataframe |\n",
    "| 关注数据结构表达\\[维度\\]  |  关注数据的应用表达\\[提取/运算\\] |\n",
    "| 维度:数据间关系 | 数据与**索引**间关系\n",
    "| 处理同质型的数值类数组数据 | 处理表格型或异质型数据\n",
    "共同点:矢量化的运算"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "source": [
    "### Series 数据类型 \n",
    "# 由一组数据与之相关的数据索引组成\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "# python列表创建Series类型\n",
    "a = pd.Series(range(4),index=['a','b','c','d']) # index= 索引可省略,不写则使用自动索引\n",
    "print(a,'\\n')   # 数据类型基于numpy\n",
    "\n",
    "# 字典创建Series类型\n",
    "dic = {'a':0,'b':2,'c':1}\n",
    "states = ['c','b','a','d']      # 按照自定义索引顺序构造Series\n",
    "b = pd.Series(dic,index=states)\n",
    "print(b,pd.isnull(b),'\\n')      # 检查缺失数据\n",
    "\n",
    "# 从ndarray类型创建[较常用]\n",
    "c = pd.Series(np.arange(5),np.arange(9,4,-1))\n",
    "print(c,'\\n')\n",
    "\n",
    "# 从csv文件当中创建,如果是excel可以用pd.read_excel\n",
    "d = pd.read_csv('npa.csv')      # pd.to_csv() 可以将dataframe存入csv\n",
    "d"
   ],
   "cell_type": "code",
   "metadata": {},
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "### Series 数据类型的基本操作,包括[index,value]两个属性\n",
    "# Series 是基于索引的操作,可自动对齐索引\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "a = pd.Series(range(4),index=['a','b','c','d'])\n",
    "print(a.index,a.values)     # a.index为Index类型,a.values属于numpy的array类型\n",
    "print(\n",
    "    a['a':'c'],     # Series的切片是包含右边的\n",
    "    a[1],\n",
    "    a.loc['b'],\n",
    "    a.iloc[1],      # 几种索引之间的区别\n",
    "    '\\n')     # 自动索引和用户定义的索引[不可混用]\n",
    "\n",
    "# 可以采用ndarray的操作\n",
    "print(np.exp(a),'\\n')    # 对Series类型的运算返回的仍是Series类型,索引不变\n",
    "\n",
    "# 可以采用dict的操作\n",
    "print('c' in a )\n",
    "print( 2 in a )\n",
    "print(a.get('f',100),'\\n')   # 在字典中选取'f'值,没有则返回100\n",
    "\n",
    "# Series 的对齐操作\n",
    "b = pd.Series(range(3),index=['c','d','e'])\n",
     "print(a+b,'\\n')  # 自动对齐不同索引的数据,并相加[pd的优势]\n",
    "\n",
    "# Series 的对象和索引都有name属性\n",
    "b.name = 'Series对象值'\n",
    "b.index.name = '索引列'\n",
    "print(b)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### DataFrame 数据类型[表格型数据],包括[index,columns,value]三个属性\n",
    "# 一个共享相同索引的Series的字典,一个series代表一列。[二维带'标签'的数组]\n",
     "# 既有行索引index,也有列索引columns\n",
    "# 函数操作中一般默认对Series对象进行遍历操作(axis=0)\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "# 二维ndarray类型创建\n",
    "a = pd.DataFrame(np.arange(10).reshape(2,5))\n",
    "print(a,'\\n')     # 生成自动行索引(0,1)和自动列索引(0,4)\n",
    "\n",
    "## dict类型创建\n",
    "dictb = {\n",
    "    'one':pd.Series([1,2,3],['A','B','C']),         # 第一列\n",
    "    'two':pd.Series([9,8,7,6],['A','B','C','D'])    # 第二列\n",
    "}   \n",
    "b = pd.DataFrame(dictb)     # 自动对齐'A','B','C'索引\n",
    "print(b)\n",
    "\n",
    "# 在已有的DataFrame中创建\n",
    "c = pd.DataFrame(dictb,index=['A','B','C'],columns=['two']) \n",
    "print(c,'\\n')\n",
    "# type object 'object' has no attribute 'dtype'\n",
    "\n",
    "# 也可用包含字典的嵌套字典来创建\n",
    "dictd = {\n",
    "    'one':[1,2,3,4],         # 第一列\n",
    "    'two':[9,8,7,6]          # 第二列\n",
    "}\n",
    "d = pd.DataFrame(dictd,index = ['a','b','c','d'])   # 由index给出行索引,需要完全对齐\n",
    "print(d,'\\n')\n",
    "\n",
    "## 列表list类型也可创建\n",
    "liste = [(x,x+1) for x in range(1,5)]\n",
    "e = pd.DataFrame(liste)     # (x,x+1)作为每一行\n",
    "print(e)        "
   ]
  },
  {
   "source": [
    "有以下几种数据载入,存储的类型:\n",
    "\n",
    "1. 读取文本文件/其他格式文件  \n",
    "2. 从数据库载入数据(暂时不讲)  \n",
    "3. 与网络资源进行交互(Web API)"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "! type npa.csv  # 查看文件原始内容"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 读取文本文件/其他格式文件\n",
    "# 将表格型数据读取为DataFrame对象,有read_csv,read_table,read_excel等\n",
    "import pandas as pd\n",
    "data = pd.read_csv('npa.csv',header=None)  # 逗号是默认分隔符,不包含表头,也可以用names指定表头\n",
     "#另一个read_table中('\\t')是默认分隔符,可以指定分隔符,甚至用正则表达式作为分隔符\n",
    "\n",
     "# 使用DataFrame的to_csv方法导出数据,默认逗号分隔,此处用sep参数指定'|'作为分隔符\n",
    "data.to_csv('nap.csv',sep='|')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 读取网页json传给dataframe\n",
    "import requests\n",
    "import pandas as pd\n",
    "\n",
    "url = 'https://api.github.com/repos/pandas-dev/pandas/issues'\n",
    "resp = requests.get(url)\n",
    "data = resp.json()\n",
    "issues = pd.DataFrame(data)\n",
    "issues.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 表格应用DataFrame的属性\n",
    "import pandas as pd\n",
    "\n",
    "dl = {\n",
    "    '环比':[101.5,101.2,101.3,102.0,100.1],\n",
    "    '同比':[120.7,127.3,119.4,140.9,101.4],\n",
    "    '定基':[121.4,127.8,120.0,145.5,101.6]\n",
    "}\n",
    "\n",
    "d = pd.DataFrame(dl,index=['北京','上海','广州','深圳','沈阳'])\n",
    "print(\n",
    "    d,'\\n',\n",
     "    d.head(2),'\\n',      # 查看前两行的数据(后几行可以用tail())\n",
    "    d.index,'\\n',        # index作为行\n",
    "    d.columns,'\\n',      # columns作为列,列顺序是无序的\n",
    "    d.values,'\\n'        # 数据部分,返回ndarray类型\n",
    "    )\n",
    "\n",
    "# 选择数据\n",
    "print(\n",
    "    d['同比']['北京'],'同比,北京位置\\n',  # 获取某个单元格,先列后行\n",
    "    d.iloc[1,0],'行1列0位置\\n\\n',            # 按照'自动生成索引'获取某个单元格,先行后列\n",
    "    d['同比'],'\\n\\n',         # 获取某一列,列选择最为常见\n",
     "    # d.ix['北京'],'\\n',      # 该方法已被遗弃,被loc和iloc取代\n",
    "    d.loc['北京'],'\\n\\n',     # 按照'索引'获取某一行\n",
    "    d.loc[['北京','广州'],:'同比'],'\\n\\n',     # 获取某个单元块,还可以切片索引\n",
     "    d[d-d.mean()>0]   # bool索引'筛选'某些符合条件的单元格,不满足的用NaN填充\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Pandas数据类型操作\n",
    "\n",
    "# 重新索引pd.DataFrame.reindex(index,columns,fill_value,copy)\n",
    "import pandas as pd\n",
    "\n",
    "al = {\n",
    "    '环比':[101.5,101.2,101.3,102.0,100.1],\n",
    "    '同比':[120.7,127.3,119.4,140.9,101.4],\n",
    "    '定基':[121.4,127.8,120.0,145.5,101.6]\n",
    "}\n",
    "\n",
     "a = pd.DataFrame(al,index=['北京','上海','广州','深圳','沈阳'])\n",
    "print(\n",
    "    a,'\\n',\n",
    "    a.reindex(index=['上海','深圳','北京','广州','沈阳']),'\\n',     # 行重排\n",
    "    a.reindex(columns=['同比','定基','环比']),'\\n\\n'               # 列重排\n",
    "    )\n",
    "\n",
    "# 由于行index或列columns都是Index类型,是不可修改的对象[类似列表],一些操作如下\n",
    "newc = a.columns.insert(3,'新增列')   \n",
    "newi = a.index.insert(5,'新增行')\n",
    "b = a.reindex(index=newi,columns=newc,fill_value='100')   # 向前新增方式新增行和列\n",
    "\n",
    "ni = b.index.delete(2)       # 删除行\n",
    "nc = b.columns.delete(2)     # 删除列\n",
    "c = a.reindex(index=ni,columns=nc)  # 通过索引实现数据的操作!!![与ndarray的根本区别]\n",
    "print(\n",
    "    b,'\\n',\n",
    "    c,'\\n\\n',\n",
    ")\n",
    "\n",
    "# 删除指定索引对象 .drop()\n",
    "print(\n",
    "    a,'\\n',\n",
    "    a.drop(['北京']),'\\n',  # 删除指定行[drop默认操作axis=0元素,也就是行]\n",
    "    a.drop(['同比'],axis=1),'\\n\\n' # 删除指定列[axis=1]\n",
    ")\n",
    "\n",
    "# 类似matplotlib.pyplot\n",
    "a.plot(kind='line')     # DataFrame.plot似乎无法改变图表字体?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "help(pd.DataFrame.plot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Pandas数据类型运算,直接调用numpy通用函数\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "a = pd.DataFrame(np.arange(12).reshape(3,4),index=list('abc'),columns=list('defg'))\n",
    "b = pd.DataFrame(np.arange(20).reshape(4,5),index=list('abcd'),columns=list('defgh'))\n",
    "c = pd.Series(np.arange(4),index=list('abcd'))   # c = pd.DataFrame(np.arange(4)) 会报错\n",
    "print(\n",
    "    a,'\\n',\n",
    "    b,'\\n',\n",
    "    c,'\\n\\n'\n",
    ")\n",
    "\n",
    "# 通用函数运算\n",
    "f = lambda x: x.max()-x.min()\n",
    "print(\n",
    "    a.apply(f),'\\n',    # 函数f在a中的每一列调用一次\n",
    ")\n",
    "\n",
    "# 算术运算\n",
    "# 行列索引运算,补齐后运算\n",
    "print(\n",
    "    a+b,'\\n',                          # 自动补齐,缺项补NaN\n",
    "    b.add(a,fill_value=100),'\\n\\n'     # 自动补齐,fill_value替代后参与运算,可选参数\n",
    ")\n",
    "\n",
    "# 广播运算[低维数据作用在高维数据上]\n",
    "print('广播运算\\n',\n",
    "    a-10,'\\n',              # 每个元素都-10\n",
    "    a-c,'\\n',               # 默认在行[axis=1]上参与运算\n",
    "    a.sub(c,axis=1),'\\n',   # 结果同上\n",
     "    a.sub(c,axis=0),'\\n\\n'  # 在列上运算(axis='index'或axis=0)\n",
    ")\n",
    "\n",
    "# 比较运算\n",
    "# 只能比较相同索引,不进行补齐.\n",
    "# 同维度数据要求尺寸一致\n",
    "# 不同维度数据为广播运算\n",
    "print(\n",
    "    # a > b         # 同维度不同尺寸,将报错\n",
     "    c > 0,'\\n',     # 跨维度比较,生成bool型dtype对象\n",
    "    a > c\n",
    ")"
   ]
  },
  {
   "source": [
    "### 数据清洗与准备\n",
    "> 处理缺失值  \n",
    ">> \n",
    "\n",
    "> 数据转换  \n",
    ">>   \n",
    "\n",
    "> 字符串操作  \n",
    ">> \n",
    "\n",
    "### 数据规整\n",
    "在很多应用中，数据可能分布在多个文件或数据库中，抑或以某种不易于分析的格式进行排列。\n",
    "> 分层索引  \n",
    ">> \n",
    "\n",
    "> 联合与合并  \n",
    ">>   \n",
    "\n",
    "> 重塑和透视  \n",
    ">> \n",
    "\n"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据清洗\n",
    "\n",
    "# 数据缺失值处理: pandas对象的所有描述性统计信息默认情况下是排除缺失值的。\n",
     "import pandas as pd \n",
     "import numpy as np\n",
    "a = {\n",
    "    'one':pd.Series([1,2,3],['A','B','C']),         # 第一列\n",
    "    'two':pd.Series([9,8,np.nan,6],['A','B','C','D'])    # 第二列\n",
    "}   \n",
    "a_nan = pd.DataFrame(a)\n",
    "print(\n",
    "    a_nan.isnull(),'\\n\\n',                      # 缺失值单元格为True\n",
    "    a_nan.dropna(),'\\n\\n',                      # 将缺失的行删去\n",
    "    a_nan.fillna({'one':6,'two':66}),'\\n\\n',    # 为不同列设定补充值\n",
     "    a_nan.fillna(method='ffill')                # 用前一行的有效值向前填充缺失值\n",
    ")\n",
    "\n",
    "# pandas 进行数据异常值处理\n",
    "# describe(),箱型图boxplot()\n",
    "# 3σ方法\n",
    "a_nan[abs(a_nan-a_nan.mean())>3*a_nan.std()]    # 筛选出超出3σ范围的数据\n",
    "print(\n",
     "    a_nan.dropna(),'\\n\\n',                      # 将含缺失值的行删去\n",
    ")\n",
    "\n",
    "# 修改数据内容\n",
    "print(\n",
    "    a_nan.replace([1,9],np.nan),'\\n\\n'                 \n",
    ")\n",
    "\n",
    "# 修改轴名称\n",
    "print(\n",
    "    a_nan.rename(index=str.lower,columns=str.title),'\\n\\n' ,            \n",
    "    a_nan.rename(index={'A':'NO.1'},columns={'one':'1st'}),'\\n\\n'          \n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 删除重复行/列\n",
    "import pandas as pd \n",
    "\n",
    "a_nan = pd.DataFrame(\n",
    "    {\n",
    "    'one':['A','B']*4,           # 第一列\n",
    "    'two':[1,2,3,4]*2    # 第二列\n",
    "    }   \n",
    ")\n",
    "print(\n",
    "    a_nan.duplicated(),'\\n\\n',  # 返回的是每一行是否存在重复\n",
    "    a_nan.drop_duplicates()   # 返回不存在重复的行\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Series的map方法:输入一个函数或含有映射关系的字典型对象,返回具有与调用者相同索引数据\n",
    "# 仍然不够理解.\n",
    "import pandas as pd\n",
    "from pandas import Series, DataFrame\n",
    " \n",
    "data = DataFrame({'food':['bacon','pulled pork','bacon','Pastrami',\n",
    "            'corned beef','Bacon','pastrami','honey ham','nova lox'],\n",
    "                  'ounces':[4,3,12,6,7.5,8,3,5,6]})\n",
    "meat_to_animal = {\n",
    "    'bacon':'pig',\n",
    "    'pulled pork':'pig',\n",
    "    'pastrami':'cow',\n",
    "    'corned beef':'cow',\n",
    "    'honey ham':'pig',\n",
    "    'nova lox':'salmon'    }  \n",
    " \n",
    "data['animal'] = data['food'].map(str.lower).map(meat_to_animal) \n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 字符串操作 向量化字符串函数 .str\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "import re\n",
    "\n",
    "data={'DAVE':'dave@google.com',\n",
    "        'dsd':'steve@gmail.com',\n",
    "        'dsddsdaw':'rob@gmail.com',\n",
    "        'wes':np.nan}\n",
    "data = pd.Series(data)\n",
    "\n",
    "pattern=r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\\.([A-Z]{2,4})'\n",
    "\n",
    "print(\n",
    "    data.str.contains('gmail') ,'\\n\\n'  ,   # 检查每个数据是否包含某字符串\n",
    "    data.str.findall(pattern,flags=re.IGNORECASE)  ,'\\n\\n', # pd数组的正则表达式应用\n",
    "    data.str[:5]    # 向量化切片\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据规整 : 数据联合、连接以及重排列\n",
    "## 分层索引 , 一个数据有两个索引\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "data = pd.Series(np.arange(9),index=[list('aaabbccdd'),[1,2,3,1,3,1,2,2,3]])\n",
    "print(\n",
    "    data , '\\n\\n',  \n",
    "    data.unstack() , '\\n\\n',    # 将有两组索引的Series转为DataFrame ,stack则是反操作\n",
    ")\n",
    "# DataFrame分层索引\n",
    "frame = pd.DataFrame(np.arange(12).reshape((4, 3)),\n",
    "                     index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],\n",
    "                     columns=[['Ohio', 'Ohio', 'Colorado'],\n",
    "                               ['Green', 'Red', 'Green']])\n",
    "frame.index.names=['word','number']         # 分层各层级的名称（字符串或python对象）\n",
    "frame.columns.names=['state','color']\n",
    "\n",
    "print(\n",
    "    frame , '\\n\\n',  \n",
    "    frame['Ohio'] , '\\n\\n',                # 部分列索引\n",
    "    frame.sort_index(level=1) , '\\n\\n',    # 指定索引进行排序\n",
    ")\n",
    "\n",
    "# 按层级汇总统计\n",
    "print(\n",
    "    frame.sum(level='word') , '\\n\\n',         # 指定索引相加,即1和2索引分别相加\n",
    "    frame.sum(level='color',axis=1), '\\n\\n',  # 指定列相加\n",
    ")\n",
    "\n",
    "# 使用数据中的列作为索引\n",
    "Frame = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1),\n",
    "                      'c': ['one', 'one', 'one', 'two', 'two',\n",
    "                            'two', 'two'],\n",
    "                      'd': [0, 1, 2, 0, 1, 2, 3]})\n",
    "\n",
    "print(\n",
    "    Frame                       , '\\n\\n',         \n",
    "    Frame.set_index(['c','d'])  , '\\n\\n',  \n",
    ")   # set_index 函数生成使用一个或多个列作为索引的 dataframe "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 联合,合并数据集\n",
    "# pandas.merge根据一个或多个键将行进行连接。实现的是数据库的连接操作\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "# 一对多合并\n",
    "df1=pd.DataFrame({'key':['b','b','a','c','a','a','b'],'data1':range(7)})\n",
    "df2=pd.DataFrame({'key':['a','b','d'],'data2':range(3)})\n",
    "print(\n",
    "    df1  , '\\n\\n',         \n",
    "    df2  , '\\n\\n',  \n",
    "    pd.merge(df1,df2,on='key')  , '\\n\\n',   # 内连接,结果中的键是两张表的交集\n",
    "    pd.merge(df1,df2,how='outer')  , '\\n\\n',# 外连接,取并集. 其他有'right'和'left'\n",
    ")   # 连接的键信息没有指定，merge会自动将重叠列名作为连接的键\n",
    "\n",
    "# 多对多合并\n",
    "df1=pd.DataFrame({'key':['b','b','a','c','a','b'],'data1':range(6)})\n",
    "df2=pd.DataFrame({'key':['a','b','a','b','d'],'data2':range(5)})\n",
    "print(\n",
    "    pd.merge(df1,df2,on='key')  , '\\n\\n',   \n",
    ") # 多对多连接是行的笛卡尔积，就是A={a,b}, B={0,1,2}，则A×B={(a, 0), (a, 1), (a, 2), (b, 0), (b, 1), (b, 2)}\n",
    "\n",
    "# 合并时如何处理重叠的列名\n",
    "left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],\n",
    "                     'key2': ['one', 'two', 'one'],\n",
    "                     'lval': [1, 2, 3]})\n",
    "right = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'],\n",
    "                      'key2': ['one', 'one', 'one', 'two'],\n",
    "                      'rval': [4, 5, 6, 7]})\n",
    "\n",
    "print(\n",
    "    left  , '\\n\\n',         \n",
    "    right  , '\\n\\n',  \n",
    "    pd.merge(left,right,on='key1',suffixes=('_left','_right'))  # suffixes指定重叠后缀\n",
    ") \n",
    "\n",
    "# 合并多个索引相同或相似但没有重叠列的 dataframe 对象\n",
    "left2 = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]],\n",
    "                     index=['a', 'c', 'e'],\n",
    "                     columns=['Ohio', 'Nevada'])\n",
    "right2 = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [13, 14]],\n",
    "                      index=['b', 'c', 'd', 'e'],\n",
    "                      columns=['Missouri', 'Alabama'])\n",
    "another = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [16., 17.]],\n",
    "                       index=['a', 'c', 'e', 'f'],\n",
    "                       columns=['New York', 'Oregon'])\n",
    "print(\n",
    "    left2  , '\\n\\n',         \n",
    "    right2  , '\\n\\n',  \n",
    "    left2.join([right2,another],how='outer')         # join的用法,各数组不重叠\n",
    ") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# pandas.concat使对象在轴向上进行黏合或“堆叠”\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "# 创建两个index与columns有重合的数据框\n",
    "df1 = pd.DataFrame(data=np.ones((5,6))*1,columns=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\"],index=[0,1,2,3,4])\n",
    "df2 = pd.DataFrame(data=np.ones((5,6))*2,columns=[\"e\",\"f\",\"g\",\"h\",\"j\",\"k\"],index=[2,3,4,5,6])\n",
    "\n",
    "print(\n",
    "df1  , '\\n\\n',         \n",
    "df2  , '\\n\\n',  \n",
    "pd.concat(objs=[df1,df2],axis=0,join=\"outer\"),'\\n\\n', # 竖直方向的拼接 拼接方式是outer\n",
    "pd.concat(objs=[df1,df2],axis=1,join=\"inner\")         # 水平方向的拼接 拼接方式是inner\n",
    ") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# combine_first允许将重叠的数据拼接在一起，填充对象中的缺失值\n",
    "import numpy as np\n",
    "df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}, index=[0,1])\n",
    "df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n",
    "df1.combine_first(df2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 重塑和透视\n",
    "import pandas as pd \n",
    "# 宽 透视为 长\n",
    "df=pd.DataFrame({'key':['foo','bar','baz'],'A':[1,2,3],'B':[4,5,6],'C':[7,8,9]})\n",
    "dfm = pd.melt(df,'key')  # 使用melt时要指名哪列是分组坐标。这里key列作为分组坐标\n",
    "print(\n",
    "    df  , '\\n\\n',         \n",
    "    dfm , '\\n\\n', \n",
    "    dfm.pivot('key','variable','value')     # 使用pivote 将数据重塑到原先的布局\n",
    ") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据分组 Group()\n",
    "import pandas as pd \n",
    "# pd.DataFrame.groupby    # 将数据按照索引的特征进行分类\n",
    "pd.DataFrame.apply\n",
    "\n",
     "### 数据合并 join,concat,merge"
   ]
  },
  {
   "source": [
    "### 数据变换:规范化/连续属性离散化/特征二值化\n",
    "## 规范化  \n",
    "(*也可利用preprocessing函数进行规范化)  \n",
    "> 最小最大规范化  \n",
    ">> 当未来数值超过边界时需要重新定义\n",
    "\n",
    "> z-score规范化  \n",
    ">> 处理后所有数据均值为0,标准差为1  \n",
    "\n",
    "> 小数定标规范化  \n",
    ">> x'=x./10.^j\n",
    "\n",
    "## 连续属性离散化\n",
    "> 分箱法\n",
    ">> 等宽法,等频法\n",
    "\n",
    "> 聚类\n",
    "\n",
    "## 特征二值化\n",
    "(数据的特征可分为两类,eg:推荐,不推荐)\n",
    "> Binarizer()或LabelEncoder()函数"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 离散化和分箱 cut & qcut\n",
    "import pandas as pd\n",
    "ages = [20,22,25,26,24,43,25,88,59,82,23,29]\n",
    "bins = [18,25,35,60,100]\n",
    "groupName = ['youth','youngadult','middleaged','senior']\n",
    "cats = pd.cut(ages,bins,labels = groupName)    # 生成各数据所对应箱体的数组\n",
    "print(\n",
    "    pd.value_counts(cats)       # 计算各箱体数据个数\n",
    ")\n",
    "\n",
    "cats2 = pd.qcut(ages,4)         # qcut按样本数目分为4份\n",
    "print(\n",
    "    pd.value_counts(cats2)      # 计算各箱体数据个数\n",
    ")     "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 计算指标/虚拟变量 : 把列中的值拿出来当成列名,衍生一个k列的值为1和0的矩阵或DataFrame\n",
    "# 虚拟变量即哑变量，引入哑变量的目的是:'将不能够定量处理的变量量化'\n",
    "# 如季节对某些产品（如冷饮）销售的影响等等\n",
    "\n",
    "import pandas as pd\n",
    "data1 = {'color':['red','blue','green'],'class':['A','B','C']}\n",
    "df_data = pd.DataFrame(data1)\n",
    "print(df_data,'\\n')\n",
    "d_data = pd.get_dummies(df_data)\n",
    "print(d_data)\n",
    "# 有什么用???"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 波士顿房价数据\n",
    "from sklearn import datasets\n",
    "from sklearn.preprocessing import Binarizer\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "boston = datasets.load_boston()\n",
    "df = pd.DataFrame(boston.data[:,4:7])\n",
    "df.columns = boston.feature_names[4:7]\n",
     "tg = boston.target[:10].reshape(-1,1)   # 取前10个房价中位数并转为(10,1)的列向量\n",
    "\n",
    "print(\n",
    "# 最小-最大规范化\n",
    "(df-df.min())/(df.max()-df.min()),'最小-最大规范化\\n',\n",
    "# z-score规范化\n",
    "(df-df.mean())/df.std(),'z-score规范化\\n',\n",
    "# 小数定标标准化\n",
    "df/10**np.ceil(np.log10(df.abs().max())),'小数定标标准化\\n',\n",
    "\n",
    "# 特征二值化\n",
    "Binarizer(threshold=20.0).fit_transform(tg),'特征二值化\\n',\n",
    "\n",
    "# 等宽分箱法\n",
    "pd.cut(df.AGE[:20],5,labels = range(5)),'等宽分箱法\\n',\n",
    "# 等频分箱法\n",
    "pd.qcut(df.AGE[:20],5,labels = range(5)),'等频分箱法\\n',\n",
    ")\n"
   ]
  },
  {
   "source": [
    "### 数据归约data reduction\n",
    "(*目标:尽可能接近数据的完整性,规约后分析结果不变)\n",
    "> 属性规约 **(*缓解多维灾难)**\n",
    ">> 向前选择  \n",
    ">> 向后删除  \n",
    ">> 决策树  \n",
    ">> **PCA(主成分分析)** \n",
    "\n",
    "> 数值规约  **(*从数据集中选择部分数据)**\n",
    ">> 有参方法:只需要存放参数,不需要实际数据\n",
    ">>>回归法/对数线性模型  \n",
    "\n",
    ">> 无参法:需要实际数据\n",
    ">>>**直方图**/聚类  \n",
    ">>>**抽样**:\n",
    ">>>>随机抽样:不放回(replace=False)与放回  \n",
    ">>>>分层抽样:数据划分为互不相交的几个部分,对每部分进行随机抽样  \n",
    ">>>>聚类抽样"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据规约 属性规约 PCA\n",
    "from sklearn.decomposition import PCA \n",
    "from sklearn import preprocessing\n",
    "\n",
    "X = preprocessing.scale(boston.data)    # 数据规范化\n",
     "pca = PCA(n_components=5)    #保留5个特征成分,尽可能少\n",
    "pca.fit(X)\n",
    "sum(pca.explained_variance_ratio_) #前五个特征成分解释了完整数据中80.73%的变化,尽可能高"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据规约 数值规约 直方图\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt \n",
    "\n",
    "data = np.random.randint(1,10,50)\n",
    "bins = np.linspace(data.min(),data.max(),3,endpoint = True) # 将数据分为3点2段\n",
    "plt.hist(data, bins = bins , rwidth=0.95)\n",
    "\n",
    "# 数据规约 数值规约 抽样"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 转为pandas数据后可进行随机抽样\n",
    "pd.Series(data).sample(frac = 0.2)    # 随机抽样20%的数据,默认为不放回"
   ]
  },
  {
   "source": [
    "## Pandas 数据特征分析\n",
    "数据的运算过程(摘要):有损地提取数据特征的过程 \n",
    "\n",
    "1. 基本统计(含排序)\n",
    "2. 分布/累计统计 \n",
    "3. 数据特征:相关性,周期性等\n",
    "4. 数据挖掘(形成知识) "
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据排序\n",
    "# sort_index() 根据索引的排序,默认零轴上做升序.\"零轴指的是列\"\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "a = pd.DataFrame(np.arange(20).reshape(4,5),index=['c','a','d','b'])\n",
    "print(\n",
    "    a,'\\n\\n',   \n",
    "    a.sort_index(),'\\n\\n',                         # 将index进行升序排序为 a b c d\n",
    "    a.sort_index(axis=1,ascending=False),'\\n\\n'    # 将columns进行降序排序为 4 3 2 1 0\n",
    ")\n",
    "\n",
    "# sort_values()  根据数据进行排序,默认零轴上做升序\n",
    "# NaN的数值排序自动放在末尾,不管升降\n",
    "print(\n",
    "    a.sort_values(by=2,ascending=False),'\\n\\n',        # 将index为2内的数值进行降序排序\n",
    "    a.sort_values(by='a',axis=1,ascending=False),'\\n\\n'# 将columns为'a'内的数值进行降序排序\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 唯一值,计数,包含关系\n",
    "import pandas as pd \n",
    "obj = pd.Series(list('aldskjfiaashalsfkdja'))\n",
    "print(\n",
    "    obj.unique() , '\\n',                # 唯一值组成的数组\n",
     "    obj.value_counts()  , '\\n\\n',       # 各值计数构成的Series\n",
    "    obj.isin(['a','b','c'])             # 逐个检查元素是否包含\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据基本统计分析(与numpy类似)\n",
    "# .sum()/.count()/.mean()/.\n",
    "# .argmin()/\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "\n",
    "b = pd.Series(np.random.randint(1,50,10))\n",
    "print(\n",
    "    b ,'\\n\\n',\n",
    "    '基本统计\\n',                   \n",
    "    b.median() ,'中位数\\n\\n',     # 中位数\n",
    "    b.mean() ,'平均值\\n\\n',       # 平均值\n",
     "    b.std()  ,'标准差\\n\\n',         # 标准差(std并非方差var)\n",
    "    b.idxmax() ,'最大值索引值\\n\\n',     # 最大值索引值\n",
    ")\n",
    "\n",
    "# 基本统计参数 .describe()\n",
    "a = pd.DataFrame(np.arange(20).reshape(4,5),index=['c','a','d','b'])\n",
    "print(\n",
    "    a ,'\\n\\n',\n",
    "    '基本统计\\n',                   \n",
    "    a.describe() ,'\\n\\n',       # 零轴上各统计数据值\n",
    "    type(a.describe()) ,'\\n',   # 为DataFrame数据类型\n",
    "    a.describe().loc['mean'],a.describe()[1] ,'\\n\\n'    # 调用方式\n",
    ")\n",
    "\n",
    "# 累计统计计算 .cumsum()/cumprod()/cummax()/cummin()\n",
    "print(\n",
    "    '累计统计计算\\n',                  \n",
    "    a.cumsum() ,'\\n\\n',             # 零轴上前N个元素累加和\n",
    ")\n",
    "\n",
    "#滚动计算(窗口计算)  .rolling(w).sum()/.rolling(w).mean()/.rolling(w).var()/.rolling(w).std()\n",
    "print(\n",
    "    '滚动计算\\n',                  \n",
    "    a.rolling(3).sum() ,'\\n\\n',     # 零轴上该位置与之前相邻元素(往前数)的累加\n",
    ")\n",
    "b"
   ]
  },
  {
   "source": [
    "## 数据的相关性分析(线性相关性)  \n",
    ".cov()协方差矩阵 .corr()相关系数矩阵  \n",
    "对于两个事物X和Y,判断其相关性:正相关/负相关/不相关 \n",
    ">协方差的公式.(>0正相关,<0负相关)  \n",
    "Pearson相关系数r.(r∈\\[-1,1\\]) r=0.8-1.0为极强相关,r=0.6-0.8为强相关,r=0.4-0.6为中等程度相关"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 散点图观察相关性\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "hprice = pd.Series([3.04,22.93,12.75,22.6,12.33],index=['2008','2009','2010','2011','2012'])\n",
    "m2 = pd.Series([8.18,18.38,9.13,7.82,6.69],index=['2008','2009','2010','2011','2012'])\n",
    "\n",
    "plt.scatter(hprice.index,hprice)\n",
    "plt.scatter(m2.index,m2)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 数据的相关性  相关系数\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "hprice = pd.Series([3.04,22.93,12.75,22.6,12.33],index=['2008','2009','2010','2011','2012'])\n",
    "m2 = pd.Series([8.18,18.38,9.13,7.82,6.69],index=['2008','2009','2010','2011','2012'])\n",
    "\n",
    "time = ['2008','2009','2010','2011','2012']\n",
    "plt.plot(time,hprice)\n",
    "plt.plot(time,m2)\n",
    "plt.show()\n",
    "\n",
    "hprice.corr(m2),hprice.cov(m2)     \n",
    "# 相关系数为0.52,说明房价增幅与M2的增幅为中等程度相关;协方差为20.6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 聚类分析\n",
     "# K-均值算法; 使用sklearn实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 案例:寻找学霸\n",
    "import numpy as np\n",
     "from sklearn.cluster import KMeans\n",
    "\n",
    "lis1 = [88,74,96,85]\n",
    "lis2 = [92,99,95,94]\n",
    "lis3 = [91,87,99,95]\n",
    "lis4 = [78,99,97,81]\n",
    "lis5 = [88,78,98,84]\n",
    "lis6 = [100,95,100,92]\n",
    "\n",
    "data = np.array([lis1,lis2,lis3,lis4,lis5,lis6])\n",
     "kmeans = KMeans(n_clusters=2).fit(data)\n",
    "pred = kmeans.predict(data)\n",
    "print(pred)\n",
    "\n",
    "# whiten = whiten(data)\n",
    "# centroids,_ = kmeans(whiten,2)      # 聚类成2个类别,并只取第一个属性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}