{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-14T07:39:07.833952500Z",
     "start_time": "2023-09-14T07:39:07.201600800Z"
    }
   },
   "outputs": [],
   "source": [
    "from pyhive import hive\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "封装两个方法：`hive_read_hql` 方法可以快捷地通过 hive 连接执行 Hive SQL 语句，并把结果转换成 pandas 的 `DataFrame`；`hive_exec_hql` 方法可以用来执行不需要返回结果的语句。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-13T11:21:18.683851600Z",
     "start_time": "2023-09-13T11:21:18.677609300Z"
    }
   },
   "outputs": [],
   "source": [
    "def hive_read_hql(sql_code, connection):\n",
    "    \"\"\"Run a Hive query and return its result set as a pandas DataFrame.\n",
    "\n",
    "    The cursor is closed in a finally block so that a failing statement\n",
    "    (e.g. a compile-time SemanticException) does not leak a server-side\n",
    "    cursor on the HiveServer2.\n",
    "    \"\"\"\n",
    "    cur = connection.cursor()\n",
    "    try:\n",
    "        cur.execute(sql_code)\n",
    "        headers = [col[0] for col in cur.description]\n",
    "        df = pd.DataFrame(cur.fetchall(), columns=headers)\n",
    "    finally:\n",
    "        cur.close()\n",
    "    return df\n",
    "\n",
    "def hive_exec_hql(sql_code, connection):\n",
    "    \"\"\"Execute a Hive statement that produces no result set.\n",
    "\n",
    "    The cursor is always closed, even when execution raises.\n",
    "    \"\"\"\n",
    "    cur = connection.cursor()\n",
    "    try:\n",
    "        cur.execute(sql_code)\n",
    "    finally:\n",
    "        cur.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "连接 hive 并测试语句执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-13T11:21:20.641037700Z",
     "start_time": "2023-09-13T11:21:18.683851600Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "  database_name\n0       default",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>database_name</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>default</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Replace the host with your own Hive server's IP address\n",
    "# (port 30000 is this deployment's HiveServer2 thrift port -- adjust if yours differs)\n",
    "conn = hive.connect(host='10.251.255.162', port=30000)\n",
    "\n",
    "# Smoke-test the connection with a trivial query\n",
    "hql = 'show databases'\n",
    "df = hive_read_hql(hql, conn)\n",
    "\n",
    "df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "开启动态分区、开启允许所有分区都是动态的（非严格模式）、开启本地模式、设置允许的动态分区的最大数量为 1000、设置允许的每个 mapper 或 reducer 创建的动态分区的最大数量为 1000 和设置本地模式运行 mapreduce。操作完成后即可进行动态分区的使用。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-13T11:21:20.840023300Z",
     "start_time": "2023-09-13T11:21:20.593740500Z"
    }
   },
   "outputs": [],
   "source": [
    "# Session settings required before using dynamic partitioning:\n",
    "#   - enable dynamic partitioning (disabled by default)\n",
    "#   - nonstrict mode: allow every partition column to be dynamic\n",
    "#     (strict mode would require at least one static partition)\n",
    "#   - try to run eligible operations in local mode\n",
    "#   - raise the caps on total / per-node dynamic partition counts\n",
    "#   - run mapreduce jobs in local mode\n",
    "session_settings = [\n",
    "    'set hive.exec.dynamic.partition=true',\n",
    "    'set hive.exec.dynamic.partition.mode=nonstrict',\n",
    "    'set hive.exec.mode.local.auto=true',\n",
    "    'set hive.exec.max.dynamic.partitions=1000',\n",
    "    'set hive.exec.max.dynamic.partitions.pernode=1000',\n",
    "    'set mapreduce.framework.name=local',\n",
    "]\n",
    "\n",
    "for setting in session_settings:\n",
    "    hive_exec_hql(setting, conn)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "创建 life 表，导入原始数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-13T11:21:22.416983900Z",
     "start_time": "2023-09-13T11:21:20.735211200Z"
    }
   },
   "outputs": [
    {
     "ename": "OperationalError",
     "evalue": "TExecuteStatementResp(status=TStatus(statusCode=3, infoMessages=[\"*org.apache.hive.service.cli.HiveSQLException:Error while compiling statement: FAILED: SemanticException Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv:17:16\", 'org.apache.hive.service.cli.operation.Operation:toSQLException:Operation.java:335', 'org.apache.hive.service.cli.operation.SQLOperation:prepare:SQLOperation.java:199', 'org.apache.hive.service.cli.operation.SQLOperation:runInternal:SQLOperation.java:260', 'org.apache.hive.service.cli.operation.Operation:run:Operation.java:247', 'org.apache.hive.service.cli.session.HiveSessionImpl:executeStatementInternal:HiveSessionImpl.java:541', 'org.apache.hive.service.cli.session.HiveSessionImpl:executeStatement:HiveSessionImpl.java:516', 'org.apache.hive.service.cli.CLIService:executeStatement:CLIService.java:282', 'org.apache.hive.service.cli.thrift.ThriftCLIService:ExecuteStatement:ThriftCLIService.java:563', 'org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement:getResult:TCLIService.java:1557', 'org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement:getResult:TCLIService.java:1542', 'org.apache.thrift.ProcessFunction:process:ProcessFunction.java:39', 'org.apache.thrift.TBaseProcessor:process:TBaseProcessor.java:39', 'org.apache.hive.service.auth.TSetIpAddressProcessor:process:TSetIpAddressProcessor.java:56', 'org.apache.thrift.server.TThreadPoolServer$WorkerProcess:run:TThreadPoolServer.java:286', 'java.util.concurrent.ThreadPoolExecutor:runWorker:ThreadPoolExecutor.java:1149', 'java.util.concurrent.ThreadPoolExecutor$Worker:run:ThreadPoolExecutor.java:624', 'java.lang.Thread:run:Thread.java:750', \"*org.apache.hadoop.hive.ql.parse.SemanticException:Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv:25:9\", 
'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:applyConstraintsAndGetFiles:LoadSemanticAnalyzer.java:178', 'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:analyzeLoad:LoadSemanticAnalyzer.java:343', 'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:analyzeInternal:LoadSemanticAnalyzer.java:262', 'org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer:analyze:BaseSemanticAnalyzer.java:285', 'org.apache.hadoop.hive.ql.Driver:compile:Driver.java:659', 'org.apache.hadoop.hive.ql.Driver:compileInternal:Driver.java:1826', 'org.apache.hadoop.hive.ql.Driver:compileAndRespond:Driver.java:1773', 'org.apache.hadoop.hive.ql.Driver:compileAndRespond:Driver.java:1768', 'org.apache.hadoop.hive.ql.reexec.ReExecDriver:compileAndRespond:ReExecDriver.java:126', 'org.apache.hive.service.cli.operation.SQLOperation:prepare:SQLOperation.java:197'], sqlState='42000', errorCode=40000, errorMessage=\"Error while compiling statement: FAILED: SemanticException Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv\"), operationHandle=None)",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mOperationalError\u001B[0m                          Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[5], line 32\u001B[0m\n\u001B[0;32m     30\u001B[0m hive_exec_hql(create_table_life_hql, conn)\n\u001B[0;32m     31\u001B[0m \u001B[38;5;66;03m# 从 csv 文件导入数据\u001B[39;00m\n\u001B[1;32m---> 32\u001B[0m \u001B[43mhive_exec_hql\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mload data local inpath \u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43m/opt/hive/exp/life.csv\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43m into table life\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mconn\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[2], line 11\u001B[0m, in \u001B[0;36mhive_exec_hql\u001B[1;34m(sql_code, connection)\u001B[0m\n\u001B[0;32m      9\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mhive_exec_hql\u001B[39m(sql_code, connection):\n\u001B[0;32m     10\u001B[0m     cur \u001B[38;5;241m=\u001B[39m connection\u001B[38;5;241m.\u001B[39mcursor()\n\u001B[1;32m---> 11\u001B[0m     \u001B[43mcur\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexecute\u001B[49m\u001B[43m(\u001B[49m\u001B[43msql_code\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     12\u001B[0m     cur\u001B[38;5;241m.\u001B[39mclose()\n",
      "File \u001B[1;32mD:\\anaconda3\\envs\\django\\lib\\site-packages\\pyhive\\hive.py:481\u001B[0m, in \u001B[0;36mCursor.execute\u001B[1;34m(self, operation, parameters, **kwargs)\u001B[0m\n\u001B[0;32m    479\u001B[0m _logger\u001B[38;5;241m.\u001B[39mdebug(req)\n\u001B[0;32m    480\u001B[0m response \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_connection\u001B[38;5;241m.\u001B[39mclient\u001B[38;5;241m.\u001B[39mExecuteStatement(req)\n\u001B[1;32m--> 481\u001B[0m \u001B[43m_check_status\u001B[49m\u001B[43m(\u001B[49m\u001B[43mresponse\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    482\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_operationHandle \u001B[38;5;241m=\u001B[39m response\u001B[38;5;241m.\u001B[39moperationHandle\n",
      "File \u001B[1;32mD:\\anaconda3\\envs\\django\\lib\\site-packages\\pyhive\\hive.py:611\u001B[0m, in \u001B[0;36m_check_status\u001B[1;34m(response)\u001B[0m\n\u001B[0;32m    609\u001B[0m _logger\u001B[38;5;241m.\u001B[39mdebug(response)\n\u001B[0;32m    610\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m response\u001B[38;5;241m.\u001B[39mstatus\u001B[38;5;241m.\u001B[39mstatusCode \u001B[38;5;241m!=\u001B[39m ttypes\u001B[38;5;241m.\u001B[39mTStatusCode\u001B[38;5;241m.\u001B[39mSUCCESS_STATUS:\n\u001B[1;32m--> 611\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m OperationalError(response)\n",
      "\u001B[1;31mOperationalError\u001B[0m: TExecuteStatementResp(status=TStatus(statusCode=3, infoMessages=[\"*org.apache.hive.service.cli.HiveSQLException:Error while compiling statement: FAILED: SemanticException Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv:17:16\", 'org.apache.hive.service.cli.operation.Operation:toSQLException:Operation.java:335', 'org.apache.hive.service.cli.operation.SQLOperation:prepare:SQLOperation.java:199', 'org.apache.hive.service.cli.operation.SQLOperation:runInternal:SQLOperation.java:260', 'org.apache.hive.service.cli.operation.Operation:run:Operation.java:247', 'org.apache.hive.service.cli.session.HiveSessionImpl:executeStatementInternal:HiveSessionImpl.java:541', 'org.apache.hive.service.cli.session.HiveSessionImpl:executeStatement:HiveSessionImpl.java:516', 'org.apache.hive.service.cli.CLIService:executeStatement:CLIService.java:282', 'org.apache.hive.service.cli.thrift.ThriftCLIService:ExecuteStatement:ThriftCLIService.java:563', 'org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement:getResult:TCLIService.java:1557', 'org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement:getResult:TCLIService.java:1542', 'org.apache.thrift.ProcessFunction:process:ProcessFunction.java:39', 'org.apache.thrift.TBaseProcessor:process:TBaseProcessor.java:39', 'org.apache.hive.service.auth.TSetIpAddressProcessor:process:TSetIpAddressProcessor.java:56', 'org.apache.thrift.server.TThreadPoolServer$WorkerProcess:run:TThreadPoolServer.java:286', 'java.util.concurrent.ThreadPoolExecutor:runWorker:ThreadPoolExecutor.java:1149', 'java.util.concurrent.ThreadPoolExecutor$Worker:run:ThreadPoolExecutor.java:624', 'java.lang.Thread:run:Thread.java:750', \"*org.apache.hadoop.hive.ql.parse.SemanticException:Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv:25:9\", 
'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:applyConstraintsAndGetFiles:LoadSemanticAnalyzer.java:178', 'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:analyzeLoad:LoadSemanticAnalyzer.java:343', 'org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer:analyzeInternal:LoadSemanticAnalyzer.java:262', 'org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer:analyze:BaseSemanticAnalyzer.java:285', 'org.apache.hadoop.hive.ql.Driver:compile:Driver.java:659', 'org.apache.hadoop.hive.ql.Driver:compileInternal:Driver.java:1826', 'org.apache.hadoop.hive.ql.Driver:compileAndRespond:Driver.java:1773', 'org.apache.hadoop.hive.ql.Driver:compileAndRespond:Driver.java:1768', 'org.apache.hadoop.hive.ql.reexec.ReExecDriver:compileAndRespond:ReExecDriver.java:126', 'org.apache.hive.service.cli.operation.SQLOperation:prepare:SQLOperation.java:197'], sqlState='42000', errorCode=40000, errorMessage=\"Error while compiling statement: FAILED: SemanticException Line 1:23 Invalid path ''/opt/hive/exp/life.csv'': No files matching path file:/opt/hive/exp/life.csv\"), operationHandle=None)"
     ]
    }
   ],
   "source": [
    "# DDL for the raw (unpartitioned) life-expectancy table; column order\n",
    "# must match the comma-delimited csv fields\n",
    "create_table_life_hql = '''\n",
    "create table life\n",
    "(\n",
    "Country string,\n",
    "`Year` int,\n",
    "Status string,\n",
    "Life_expectancy float,\n",
    "Adult_Mortality int,\n",
    "infant_deaths int,\n",
    "Alcohol int,\n",
    "percentage_expenditure float,\n",
    "HepatitisB int,\n",
    "Measles int,\n",
    "BMI float,\n",
    "under_five_death int,\n",
    "Polio int,\n",
    "Total_expenditure float,\n",
    "Diphtheria int,\n",
    "HIV_AIDS float,\n",
    "GDP float,\n",
    "Population float,\n",
    "thinness_under_19 float,\n",
    "thinness_over_19 float,\n",
    "Income_composition_of_resources float,\n",
    "Schooling float\n",
    ")\n",
    "row format delimited fields terminated by ','\n",
    "'''\n",
    "\n",
    "hive_exec_hql(create_table_life_hql, conn)\n",
    "# Load data from the csv file. 'local inpath' resolves on the\n",
    "# HiveServer2 host's filesystem -- the recorded run failed because\n",
    "# /opt/hive/exp/life.csv was missing there; place the file first.\n",
    "hive_exec_hql(\"load data local inpath '/opt/hive/exp/life.csv' into table life\", conn)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "查看 life 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-13T11:21:22.416983900Z"
    }
   },
   "outputs": [],
   "source": [
    "hql = 'select * from life'\n",
    "df = hive_read_hql(hql, conn)\n",
    "\n",
    "# Display only the first rows -- rendering the whole table would bloat\n",
    "# the notebook output; `df` still holds the full result\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "创建分区表和分区数据（动态分区是按照最后 n 列进行的，n 取决于有几个分区，所以应该将分区数据放在最后 n 列）。这里是用年份来分区，你也可以用国家来分区。当然也可以多个列进行分区，或者和之前的静态分区相关联，都是可以的，大家可以自行尝试。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-13T11:21:22.432622Z",
     "start_time": "2023-09-13T11:21:22.416983900Z"
    }
   },
   "outputs": [],
   "source": [
    "# Same schema as `life`, plus a `period` partition column\n",
    "create_table_part3_hql = '''\n",
    "create table part3\n",
    "(\n",
    "Country string,\n",
    "`Year` int,\n",
    "`Status` string,\n",
    "Life_expectancy float,\n",
    "Adult_Mortality int,\n",
    "infant_deaths int,\n",
    "Alcohol int,\n",
    "percentage_expenditure float,\n",
    "HepatitisB int,\n",
    "Measles int,\n",
    "BMI float,\n",
    "under_five_death int,\n",
    "Polio int,\n",
    "Total_expenditure float,\n",
    "Diphtheria int,\n",
    "HIV_AIDS float,\n",
    "GDP float,\n",
    "Population float,\n",
    "thinness_under_19 float,\n",
    "thinness_over_19 float,\n",
    "Income_composition_of_resources float,\n",
    "Schooling float\n",
    ")\n",
    "partitioned by (period int)\n",
    "row format delimited fields terminated by ','\n",
    "'''\n",
    "\n",
    "hive_exec_hql(create_table_part3_hql, conn)\n",
    "\n",
    "# Dynamic-partition insert: the last selected column (Year) feeds the\n",
    "# `period` partition key, so Hive creates one partition per distinct year\n",
    "hive_exec_hql('insert overwrite table default.part3 partition(period) select *, Year from life', conn)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "进行数据分析，下面这个例子分析了发达国家和发展中国家人均寿命分别随着时间变化的趋势。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-13T11:21:22.432622Z"
    }
   },
   "outputs": [],
   "source": [
    "# Average life expectancy per year, split by development status\n",
    "# (reads the partitioned table built in the previous cell)\n",
    "query_hql = 'select avg(Life_expectancy) as avg_life, Year as year, Status as status from part3 group by Year, Status'\n",
    "df = hive_read_hql(query_hql, conn)\n",
    "\n",
    "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-13T11:21:22.432622Z"
    }
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Sort chronologically so each line runs left-to-right in time order\n",
    "df = df.sort_values(\"year\")\n",
    "groups = df.groupby(df[\"status\"])\n",
    "developing_df = groups.get_group(\"Developing\")\n",
    "developed_df = groups.get_group(\"Developed\")\n",
    "\n",
    "# Explicit figure/axes interface; both series share one axes object\n",
    "fig, ax = plt.subplots(figsize=(6, 4))\n",
    "\n",
    "developing_df.plot(x='year', kind='line', y='avg_life', label='Developing', ax=ax)\n",
    "developed_df.plot(x='year', kind='line', y='avg_life', label='Developed', ax=ax)\n",
    "# Title and axis labels so the figure stands alone when skimmed\n",
    "ax.set(title='Average life expectancy over time',\n",
    "       xlabel='Year', ylabel='Average life expectancy')\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
