#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
# @Time    : 2019/3/5 9:48
# @Author  : Leonis
# @Site    :
# @File    : report.py
# @Software: PyCharm
# @Describe: [1] 根据用户提供 odps 数据表(包含 必要字段 order_id ), 下载匹配订单的征信报告.
             [2] 根据用户提供 odps 数据表(包含 任意可选字段 ), 下载数据表相关数据表.
"""

import datetime as dt
import gzip
import os
import sys
import time

import logging
import pandas as pd
import json

from odps import ODPS
from odps.tunnel import TableTunnel
import oss2

from .encrypt_algorithm import EncryptAlgorithm
from login.models import UserPrivilege

# ODPS credentials. The original banner said these are exported as shell
# environment variables and read via os.environ, but they were only
# hard-coded. Prefer the environment, falling back to the legacy literals
# so existing deployments keep working.
# NOTE(review): secrets should not live in source control — rotate these keys.
odps_account = {
	'access_id': os.environ.get('ODPS_ACCESS_ID', 'LTAI0hursl37O2pt'),
	'secret_access_key': os.environ.get('ODPS_SECRET_ACCESS_KEY', 'DQnzAepQIaAKZUWYMYSTZjEGOyP8qR'),
	'project': os.environ.get('ODPS_PROJECT', 'shilupan_strategy'),
	'endpoint': os.environ.get('ODPS_ENDPOINT', 'http://service.cn.maxcompute.aliyun.com/api'),
	# 'tunnel_endpoint': '**your-tunnel-endpoint**'
}

# OSS credentials, same environment-first policy as above.
oss_account = {
	'access_key_id': os.environ.get('OSS_ACCESS_KEY_ID', 'LTAIOCOHEESz6EeS'),
	'access_key_secret': os.environ.get('OSS_ACCESS_KEY_SECRET', 'reoe9lF1eClyvFRWXc3WojDe49ZOfX'),
	'bucket_name': os.environ.get('OSS_BUCKET_NAME', 'shilupan-panbao'),
	'endpoint': os.environ.get('OSS_ENDPOINT', 'oss-cn-shanghai-internal.aliyuncs.com'),
}

# Module-level logger shared by all classes below.
logger = logging.getLogger('download_record')


class OdpsTableOperation(object):
	"""
	Common operations on an Aliyun ODPS table: existence checks, metadata,
	create/delete, and download/upload via tunnel, dataframe or SQL.
	"""

	# Shared ODPS client, built once at import time from the module-level
	# credentials. Subclasses may shadow this (DownloadOssReport redefines
	# it as a property).
	odps_object = ODPS(**odps_account)

	def __init__(self, project=None, table=None):
		"""
		:param project: ODPS project name, e.g. 'shilupan_strategy'.
		:param table  : ODPS table name.
		"""
		self.project = project
		self.table = table
		# Fully qualified table name, e.g. 'shilupan_strategy.my_table'.
		self.project_table = '%s.%s' % (project, table)

	def is_project_exist(self):
		"""Return True if the ODPS project exists."""
		return self.odps_object.exist_project(self.project)

	def is_table_exist(self):
		"""Return True if the table exists in the project."""
		return self.odps_object.exist_table(self.table, project=self.project)

	def get_table_meta(self):
		"""
		Return the table's meta information serialized as a JSON string.
		Note: the raw table_schema object is not JSON-serializable, so only
		the column names are kept (joined with commas).
		"""
		odps_table = self.odps_object.get_table(self.table, project=self.project)

		table_name = odps_table.name
		table_owner = odps_table.owner
		table_comment = odps_table.comment
		table_create_time = odps_table.creation_time.strftime('%Y-%m-%d %H:%M:%S')
		table_life_cycle = str(odps_table.lifecycle)
		# Logical size in MB; downloaded size on disk is reported as 4x-10x.
		odps_size = round(odps_table.size / 1024 / 1024, 3)
		disk_size_min = round(odps_size * 4, 2)
		disk_size_max = round(odps_size * 10, 2)
		table_size = '%.3f MB [磁盘下载: %.3f - %.3f MB]' % (odps_size, disk_size_min, disk_size_max)
		table_columns = ','.join(item.name for item in odps_table.schema.columns)

		with odps_table.open_reader() as reader:
			# Total number of records in the table.
			table_records = reader.count

		odps_table_metas = {
			'TableName': table_name,
			'TableOwner': table_owner,
			'CreateTime': table_create_time,
			'OdpsSize': str(odps_size),
			'TableSize': table_size,
			'TableComment': table_comment,
			'LifeCycle': table_life_cycle,
			'TableColumn': table_columns,
			'RecordNumber': str(table_records)
		}

		return json.dumps(odps_table_metas)

	def write_table(self, partition=None, records=None):
		"""
		Write records into the ODPS table.
		:param partition: partition spec, e.g. 'pt=2018-01-01' (None when the
		                  table is not partitioned).
		:param records  : records to write, e.g. [record1, record2, ...].
		"""
		odps_table_obj = self.odps_object.get_table(self.table, project=self.project)

		try:
			# Fix: use the writer as a context manager so it is always closed
			# (the previous version never closed the writer it opened).
			with odps_table_obj.open_writer(partition=partition) as fw:
				fw.write(records)
		except Exception:
			# Fix: narrowed from bare BaseException, and the captured error
			# details are now actually included in the log message.
			info = sys.exc_info()
			if partition:
				logger.error('向odps数据表: %s.%s 分区为 %s, 写入数据失败, 原因: %s: %s.'
				             % (self.project, self.table, partition, info[0], info[1]))
			else:
				logger.error('向odps数据表: %s.%s 写入数据失败, 原因: %s: %s.'
				             % (self.project, self.table, info[0], info[1]))

	def create_table(self):
		"""Create the ODPS table (not implemented yet)."""
		pass

	def delete_table(self):
		"""Drop the ODPS table; no error when it does not exist."""
		self.odps_object.delete_table(self.table, project=self.project, if_exists=True)

	def download_with_dataframe(self):
		"""Return the whole table as an ODPS DataFrame."""
		odps_table_obj = self.odps_object.get_table(self.table, project=self.project)
		return odps_table_obj.to_df()

	def download_with_tunnel(self, partition=None, start_no=0, limit=5000, columns=None):
		"""
		Download records through the table tunnel.
		:param partition: partition spec, e.g. 'pt=test'.
		:param start_no : index of the first record to download.
		:param limit    : max record count; an int, a digit string, or
		                  None/'all' (anything else) for the whole table.
		:param columns  : tuple of column names, or None for all columns.
		:return: an open record reader.
		"""
		tunnel = TableTunnel(self.odps_object)
		download_session = tunnel.create_download_session(self.table, partition_spec=partition)

		# Normalize the limit: ints and digit-strings cap the download,
		# everything else (None, 'all', ...) means the full record count.
		if isinstance(limit, int):
			records_limit = limit
		elif isinstance(limit, str) and limit.isdigit():
			records_limit = int(limit)
		else:
			records_limit = download_session.count

		reader = download_session.open_record_reader(start=start_no, count=records_limit, columns=columns)
		return reader

	def upload_with_tunnel(self, partition=None, records=None):
		"""
		Upload records through the table tunnel.
		:param partition: partition spec, e.g. 'pt=test'.
		:param records  : iterable of value lists, one list per record.
		"""
		tunnel = TableTunnel(self.odps_object)
		table = self.odps_object.get_table(self.table, project=self.project)

		if partition is None:
			upload_session = tunnel.create_upload_session(table.name)
		else:
			upload_session = tunnel.create_upload_session(table.name, partition_spec=partition)

		# Everything is written into block 0, which is then committed.
		default_line_no = 0
		with upload_session.open_record_writer(default_line_no) as writer:
			for record_list in records:
				record = table.new_record(record_list)
				writer.write(record)

		upload_session.commit([0])

	def download_with_sql(self, odps_sql=None, tunnel=True):
		"""
		Run an ODPS SQL query and return a reader over its result.
		:param odps_sql: SQL statement to execute.
		:param tunnel  : whether to read the result through the tunnel.
		:return: an open result reader.
		:raises: re-raises the underlying error after logging it. (The
		         previous version fell through to `return reader` with
		         `reader` unbound, masking the real error as a NameError.)
		"""
		try:
			reader = self.odps_object.run_sql(sql=odps_sql, project=self.project).open_reader(tunnel=tunnel)
		except Exception:
			info = sys.exc_info()
			logger.error('从 odps 数据表: %s.%s 下载数据失败, 具体原因: %s : %s.' % (self.project, self.table,
			                                                           info[0], info[1]))
			raise
		return reader


class DownloadOssReport(OdpsTableOperation):
	"""
	Download users' credit-report files from Aliyun OSS, driven by an ODPS
	table that supplies the matching order ids.
	"""

	# ODPS credentials. NOTE(review): duplicated from the module-level
	# odps_account; environment-first with the legacy literals as fallback.
	odps_account = {
		'access_id': os.environ.get('ODPS_ACCESS_ID', 'LTAI0hursl37O2pt'),
		'secret_access_key': os.environ.get('ODPS_SECRET_ACCESS_KEY', 'DQnzAepQIaAKZUWYMYSTZjEGOyP8qR'),
		'project': os.environ.get('ODPS_PROJECT', 'shilupan_strategy'),
		'endpoint': os.environ.get('ODPS_ENDPOINT', 'http://service.cn.maxcompute.aliyun.com/api'),
		# 'tunnel_endpoint': '**your-tunnel-endpoint**'
	}

	# OSS credentials, same environment-first policy.
	oss_account = {
		'access_key_id': os.environ.get('OSS_ACCESS_KEY_ID', 'LTAIOCOHEESz6EeS'),
		'access_key_secret': os.environ.get('OSS_ACCESS_KEY_SECRET', 'reoe9lF1eClyvFRWXc3WojDe49ZOfX'),
		'bucket_name': os.environ.get('OSS_BUCKET_NAME', 'shilupan-panbao'),
		'endpoint': os.environ.get('OSS_ENDPOINT', 'oss-cn-shanghai-internal.aliyuncs.com'),
	}

	def __init__(self, username=None, project='shilupan_strategy', table=None, limit=None,
	             is_encrypt=True, cache_path=None, partition=None):
		"""
		:param username  : requesting user's name (used to look up host uid/gid).
		:param project   : ODPS project, default 'shilupan_strategy'.
		:param table     : ODPS table holding the order ids.
		:param limit     : download count limit; None means all.
		:param is_encrypt: whether to encrypt sensitive fields (mobile, idNum).
		:param cache_path: absolute directory where reports are cached.
		:param partition : pt partition of panbao.slp_order; default: yesterday.
		"""
		super().__init__(project, table)

		self.username = username
		# Fully qualified source table, e.g. 'shilupan_strategy.temp'.
		self.project_table = '%s.%s' % (project, table)
		self.is_encrypt = is_encrypt
		self.limit = limit
		self.cache_path = cache_path

		# Default partition: yesterday, formatted as YYYYMMDD.
		if partition is None:
			yesterday = dt.date.today() - dt.timedelta(days=1)
			self.partition = yesterday.strftime('%Y%m%d')
		else:
			self.partition = partition

	@property
	def odps_sql(self):
		"""
		Build the SQL that joins the user's table with panbao.slp_order on
		order_id (partition self.partition) to fetch each order's report url.
		NOTE(review): self.limit / self.partition are concatenated into the
		SQL unescaped, and the branches are inconsistent about DISTINCT —
		confirm with the callers before normalizing either.
		"""
		if self.limit is None:
			# No limit: every distinct order that has a report url.
			odps_sql = """SELECT DISTINCT PB.order_id, PB.other FROM """ + self.project_table + """ DT """ + \
			           """LEFT OUTER JOIN panbao.slp_order PB ON DT.order_id = PB.order_id AND PB.pt = """ + \
			           self.partition + """ WHERE PB.order_id IS NOT NULL AND PB.other IS NOT NULL;"""
		elif isinstance(self.limit, int):
			# Integer limit, e.g. self.limit = 5000.
			odps_sql = """SELECT DISTINCT PB.order_id, PB.other FROM """ + self.project_table + """ DT """ + \
			           """LEFT OUTER JOIN panbao.slp_order PB ON DT.order_id = PB.order_id AND PB.pt = """ + \
			           self.partition + """ WHERE PB.order_id IS NOT NULL LIMIT """ + str(self.limit) + """ ;"""
		elif isinstance(self.limit, str):
			if self.limit.isdigit():
				# Digit-string limit, e.g. self.limit = '5000'.
				odps_sql = """SELECT PB.order_id, PB.other FROM """ + self.project_table + """ DT """ + \
				           """LEFT OUTER JOIN panbao.slp_order PB ON DT.order_id = PB.order_id AND PB.pt = """ + \
				           self.partition + """ WHERE PB.order_id IS NOT NULL LIMIT """ + self.limit + """ ;"""
			else:
				# Non-numeric string (e.g. 'all'): no limit.
				odps_sql = """SELECT PB.order_id, PB.other FROM """ + self.project_table + """ DT """ + \
				           """LEFT OUTER JOIN panbao.slp_order PB ON DT.order_id = PB.order_id AND PB.pt = """ + \
				           self.partition + """ WHERE PB.order_id IS NOT NULL;"""
		else:
			raise TypeError('类型错误: 输入下载条数限制参数 limit 为 非整数类型, 打印 limit 值:', self.limit)

		return odps_sql

	@property
	def query_order_sql(self):
		"""
		SQL counting total vs distinct order ids: the expected report count.
		NOTE(review): the two aliases are quoted inconsistently (one bare,
		one single-quoted) — verify this parses in MaxCompute before relying on it.
		"""
		order_num = """SELECT COUNT(order_id) AS 总计下载, COUNT(DISTINCT order_id) AS '实际下载' FROM """ \
		            + self.project_table + """;"""
		return order_num

	@property
	def odps_object(self):
		"""Return a fresh ODPS client built from the class-level credentials
		(shadows the shared client attribute on the base class)."""
		access_id = self.odps_account.get('access_id')
		secret_access_key = self.odps_account.get('secret_access_key')
		project = self.odps_account.get('project')
		endpoint = self.odps_account.get('endpoint')

		odps_object = ODPS(access_id=access_id,
		                   secret_access_key=secret_access_key,
		                   project=project,
		                   endpoint=endpoint)
		return odps_object

	@property
	def oss_object(self):
		"""Return an authenticated OSS bucket handle (300s connect timeout)."""
		access_key_id = self.oss_account.get('access_key_id')
		access_key_secret = self.oss_account.get('access_key_secret')
		endpoint = self.oss_account.get('endpoint')
		bucket_name = self.oss_account.get('bucket_name')

		auth = oss2.Auth(access_key_id, access_key_secret)
		bucket = oss2.Bucket(auth, endpoint, bucket_name, connect_timeout=300.0)
		return bucket

	def get_report_keys(self):
		"""
		Run odps_sql and return the order/key pairs to download.
		:return: list like [{'order_id': 12, 'key': 'personInfo/origin/...txt.gz'}, ...];
		         empty when the query fails (the failure is logged).
		"""
		result = list()

		try:
			# Execute the query and materialize the records.
			reader = self.odps_object.execute_sql(self.odps_sql).open_reader()
			records = reader.read()
		except Exception:
			info = sys.exc_info()
			logger.error('执行 odps_sql 查询订单报告 key 失败, 具体原因: %s, %s' % (info[0], info[1]))
		else:
			for record in records:
				temp = dict(record)
				other = temp.get('other')
				# The report url looks like 'https://...aliyuncs.com/<key>';
				# keep only the OSS key part. Fix: guard the split so a
				# malformed url no longer raises IndexError.
				if other and 'aliyuncs.com/' in other:
					temp['key'] = other.split('aliyuncs.com/', 1)[1]
				result.append(temp)
		# Fix: return normally instead of from inside `finally`, which
		# silently swallowed even KeyboardInterrupt/SystemExit.
		return result

	def encrypt_report(self, key=None, report_file=None, encrypt_fields=('mobile', 'idNum'), encoding='utf-8'):
		"""
		Fetch a gzipped credit report from OSS, encrypt the sensitive values
		by searching/replacing them in the raw report string (this tolerates
		differing report layouts), and gzip-write the result to report_file.
		:param key           : OSS object key of the report.
		:param report_file   : absolute path of the output gzip file.
		:param encrypt_fields: sensitive fields (mobile: phone, idNum: id card).
		:param encoding      : encoding used when writing the string as bytes.
		"""
		encrypt_flag = False
		encrypt_report_str = None
		mobile = None
		id_num = None

		# The object name's last path segment doubles as the order id in logs.
		order_id = key.split('/')[-1]
		remote_stream = self.oss_object.get_object(key)

		with gzip.open(remote_stream, 'rt', encoding='utf-8') as fr:
			report_str = fr.read()

		try:
			report_dict = json.loads(report_str)
		except json.JSONDecodeError as e:
			logger.error('%s 文件 json.loads 解析错误, 错误原因: %s ' % (key, e))
		except Exception:
			info = sys.exc_info()
			logger.error('[订单id: %s] 执行失败, 程序执行信息: %s: %s.' % (order_id, info[0], info[1]))
		else:
			# [1] Locate the user's mobile number and id-card number.
			if set(encrypt_fields).issubset(set(report_dict.keys())):
				# "xiaomuyu" layout: fields at the top level.
				encrypt_flag = True
				mobile = report_dict.get('mobile')
				id_num = report_dict.get('idNum')
			elif 'data' in report_dict:
				# "niwodai" (institution) layout: fields nested under 'data'.
				data = report_dict.get('data')
				if isinstance(data, dict) and set(encrypt_fields).issubset(set(data.keys())):
					encrypt_flag = True
					mobile = data.get('mobile')
					id_num = data.get('idNum')

			# [2] Encrypt by replacing the raw values in the report string
			# (plain str.replace: ~10w reports in ~14s per the original note).
			if encrypt_flag:
				encrypt_report_str = report_str
				if mobile:
					# str() guards against numeric JSON values (replace needs str).
					mobile = str(mobile)
					encrypt_report_str = encrypt_report_str.replace(mobile, EncryptAlgorithm.with_md5(mobile))
				if id_num:
					id_num = str(id_num)
					encrypt_report_str = encrypt_report_str.replace(id_num, EncryptAlgorithm.with_md5(id_num))
			else:
				# Missing mobile/idNum fields: keep the report as-is.
				encrypt_report_str = report_str
		finally:
			# Fix: only write when content was produced. The previous version
			# always wrote here and crashed with AttributeError (None.encode)
			# whenever the JSON parse above had failed.
			if encrypt_report_str is not None:
				with gzip.open(report_file, 'w+') as fw:
					fw.write(encrypt_report_str.encode(encoding))

	def download_report(self, key='', order_id=''):
		"""
		Download one credit report identified by its OSS key.
		:param key     : OSS object key.
		:param order_id: order id, used to name the cached file.
		"""
		# Cache file path: <cache_path>/<order_id>.txt.gz
		file_name = order_id + '.txt.gz'
		report_file = os.path.join(self.cache_path, file_name)

		try:
			if self.is_encrypt:
				self.encrypt_report(key=key, report_file=report_file)
			else:
				# No encryption requested: stream straight to disk.
				self.oss_object.get_object_to_file(key, report_file)
		except oss2.exceptions.NoSuchKey as e:
			logger.error('[订单id:{0}]: 从bucket{1}中找不到key, status={2},request_id={3}'.format(order_id,
			                                                                               self.oss_object.bucket_name,
			                                                                               e.status, e.request_id))
		except (oss2.exceptions.ClientError, oss2.exceptions.RequestError, oss2.exceptions.ServerError) as e:
			logger.error('[订单id: {0}] 下载失败 status={1}, request_id={2}'.format(order_id, e.status, e.request_id))
		except Exception:
			info = sys.exc_info()
			logger.error('[订单id: %s] 执行失败, 程序执行信息: %s: %s.' % (order_id, info[0], info[1]))
		else:
			logger.info('[订单id: %s ] 下载成功, 下载key: %s .' % (order_id, key))
		finally:
			# Fix: remove only an existing zero-byte file. The old getsize
			# call raised FileNotFoundError in `finally` whenever the
			# download failed before the file was created.
			if os.path.exists(report_file) and not os.path.getsize(report_file):
				os.remove(report_file)

	def batch_download(self):
		"""Batch-download the credit reports and return summary statistics."""

		# Ensure the cache directory exists, then hand its ownership to the
		# requesting user's configured uid/gid (default: root).
		if not os.path.exists(self.cache_path):
			os.makedirs(self.cache_path)
		user = UserPrivilege.objects.filter(username=self.username).values('host_uid', 'host_gid')
		if user:
			# Privileges configured for this user on the host.
			host_uid = int((user[0]).get('host_uid'))
			host_gid = int((user[0]).get('host_gid'))
		else:
			# No privilege record: default to root.
			host_uid = 0
			host_gid = 0

		os.chown(self.cache_path, host_uid, host_gid)

		# Download timer.
		start = time.time()

		# Download statistics.
		pre_download_reports = 0
		act_download_reports = 0
		download_success_rate = '0.0%'

		if self.is_table_exist():
			# ex: [{'order_id': 12, 'key': 'personInfo/origin/1389....txt.gz'}, ...]
			records_list = self.get_report_keys()

			if records_list:
				# Single-process download (parallelize here if throughput matters).
				for cnt, record in enumerate(records_list, start=1):
					print('Download report [no: %4d, time: %s]' % (cnt, dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
					self.download_report(key=record.get('key'), order_id=record.get('order_id'))

				# Expected vs actual (files left on disk) download counts.
				pre_download_reports = len(records_list)
				act_download_reports = len(os.listdir(self.cache_path))
				download_success_rate = '%.2f%%' % (round(act_download_reports / pre_download_reports * 100, 2))
				response = 'Normal Download.'
			else:
				response = 'According to %s , the order result of query sql is empty list .' % self.project_table
		else:
			response = "NoSuchTable: Can't Find table: %s ." % self.project_table

		# Elapsed time in seconds (2-decimal precision).
		download_time = round(time.time() - start, 2)

		summary = {
			'Expect': pre_download_reports,
			'Actual': act_download_reports,
			'Rate': download_success_rate,
			'Consume': download_time,
			'Message': response
		}

		return summary


# 任务执行计划
# stat_summary = (self.apply_account, 'report', self.project_table, self.table, download_success_rate)

# @staticmethod
# def use_processes(self):
#     """
#     使用 CPU 核数
#     :return:
#     """
#     # 设置开启进程池数量
#     if os.cpu_count() >= 4:
#         process = int(os.cpu_count() * 0.75)
#     else:
#         process = 2
#     return process
#
# def tar_files(self):
#     """
#     压缩文件目录
#     """
#     today = dt.date.today().strftime('%Y-%m-%d')
#     file_dir = os.path.join(self.cache_path, today)
#
#     # 查找要压缩文件
#     cd_res = subprocess.run("cd " + file_dir, shell=True)
#     if cd_res.returncode == 0:
#         # 待压缩文件目录
#         tar_file = """find . -name "*.gz" > """ + today + """tar_file.txt"""
#         tar_instance = subprocess.run(tar_file, shell=True)
#         if tar_instance.returncode == 0:
#             compress_file = """tar -cvz -T """ + today + """tar_file.txt -f """ + file_dir + '.tar.gz'
#             # 压缩文件
#             compress_dir_res = subprocess.run(compress_file, shell=True)
#             if compress_dir_res.returncode == 0:
#                 print('文件压缩成功, 存放位置: %s...' % self.cache_path)
#     else:
#         print(' 文件压缩失败, 请手动处理...')


class DownloadOdpsTable(OdpsTableOperation):
	"""Download an ODPS table to a local file. Tip: reload the table before downloading."""

	def __init__(self,
	             username=None,
	             project='shilupan_strategy',
	             table=None,
	             partition=None,
	             limit=5000,
	             columns=None,
	             cache_file=None,
	             delimiter=','):
		"""
		:param username  : requesting user's name (used to look up host uid/gid).
		:param project   : ODPS project, default 'shilupan_strategy'.
		:param table     : ODPS table name.
		:param partition : partition value (pt), or None.
		:param limit     : download count limit, default 5000 (None means all).
		:param columns   : tuple of columns to fetch; None means all columns.
		:param cache_file: absolute path of the output file.
		:param delimiter : field separator for text output.
		"""
		super().__init__(project, table)

		self.username = username
		# Fully qualified table name, e.g. 'shilupan_strategy.dual'.
		self.project_table = '%s.%s' % (project, table)
		self.partition = partition
		self.limit = limit
		self.columns = columns
		self.delimiter = delimiter
		self.cache_file = cache_file

	@property
	def generate_sql(self):
		"""Build the ODPS SQL statement used by the 'sql' download mode."""
		# Fix: columns=None is documented as "all columns" but previously
		# crashed with TypeError when joined; map it to '*'.
		if self.columns is None:
			columns = '*'
		else:
			columns = ', '.join(self.columns)

		if self.partition is None:
			if self.limit is None:
				odps_sql = "SELECT " + columns + " FROM " + self.project_table + " ;"
			else:
				odps_sql = "SELECT " + columns + " FROM " + self.project_table + " LIMIT " + str(self.limit) + " ;"
		else:
			if self.limit is None:
				odps_sql = "SELECT " + columns + " FROM " + self.project_table + " WHERE pt= \'" + self.partition + "\' ;"
			else:
				odps_sql = "SELECT " + columns + " FROM " + self.project_table + " WHERE pt= \'" + self.partition + \
				           "\' LIMIT " + str(self.limit) + " ;"

		return odps_sql

	def generate_union_table_sql(self, project=None, table=None):
		"""
		Build the SQL joining this instance's table with another on order_id.
		:param project: project of the table to join.
		:param table  : name of the table to join.
		:return: the SQL statement (not executed here).
		"""
		union_project_table = '%s.%s' % (project, table)
		# NOTE(review): '==' is unusual in SQL; confirm MaxCompute accepts it,
		# otherwise this should be a single '='.
		sql = """SELECT * FROM """ + self.project_table + """ DT LEFT JOIN """ + union_project_table + \
		      """ UT ON DT.order_id == UT.order_id; """
		return sql

	@staticmethod
	def _pick_column(candidates, key_fields):
		"""
		Choose the most plausible column from `candidates` ({name: name_length}).
		A single candidate wins outright; otherwise keep columns whose
		'_'-separated name parts intersect `key_fields` (falling back to all
		candidates when nothing matches) and return the shortest-named one.
		Returns None when there is no candidate at all.
		"""
		if not candidates:
			return None
		if len(candidates) == 1:
			return next(iter(candidates))
		filtered = {col: length for col, length in candidates.items()
		            if key_fields.intersection(col.lower().split('_'))}
		if not filtered:
			filtered = candidates
		return min(filtered, key=filtered.get)

	@classmethod
	def auto_check_cols(cls, dataframe):
		"""
		Heuristically detect the mobile-number and id-card columns of a dataframe.
		:param dataframe: pandas DataFrame to inspect.
		:return: (mobile_col, idcard_col); each may be None when undetected.
		Fixes over the previous version: no dict mutation while iterating
		(RuntimeError), the id-card branch no longer filters the mobile dict
		by mistake, name length is compared against lengths rather than the
		lexicographic min of names, integer cell values no longer crash
		len(), and undetected columns yield None instead of UnboundLocalError.
		"""
		mobile_info = dict()
		idcard_info = dict()

		# [1] Scan the first rows: 11 digits looks like a mobile number,
		# 15-18 digits looks like an id-card number.
		for column in dataframe.columns:
			for index in range(min(3, len(dataframe))):
				value = dataframe.loc[index, column]
				if isinstance(value, (str, int)):
					digits = str(value)
					if digits.isdigit():
						if len(digits) == 11:
							mobile_info[column] = len(column)
						if 15 <= len(digits) <= 18:
							idcard_info[column] = len(column)

		# [2] Decide using the column names.
		mobile_col = cls._pick_column(mobile_info, {'mobile', 'phone'})
		idcard_col = cls._pick_column(idcard_info, {'card', 'num', 'idnum', 'idcard'})
		return mobile_col, idcard_col

	def download_odps_table(self, download_mode='tunnel', is_encrypt=False, encrypt_fields=('mobile', 'idNum'),
	                        file_encoding='utf-8', compression='infer'):
		"""
		Download the ODPS table and persist it via write_info_file.
		:param download_mode : 'tunnel', 'sql' or 'dataframe' (whole table).
		:param is_encrypt    : whether to MD5-encrypt sensitive columns.
		:param encrypt_fields: which sensitive fields to encrypt.
		:param file_encoding : output file encoding, default utf-8.
		:param compression   : pandas compression option ('zip', 'bz2', ...).
		:return: a human-readable status message.
		"""
		if self.is_table_exist():
			if download_mode == 'tunnel':
				# Mode 1: tunnel download.
				reader = self.download_with_tunnel(partition=self.partition, limit=self.limit, columns=self.columns)
				dataframe = pd.DataFrame(dict(record) for record in reader)
			elif download_mode == 'sql':
				# Mode 2: SQL download.
				reader = self.download_with_sql(odps_sql=self.generate_sql)
				dataframe = pd.DataFrame(record for record in reader)
			else:
				# Mode 3: dataframe download (whole table).
				dataframe = self.download_with_dataframe().to_pandas()

			if is_encrypt:
				# Auto-detect the mobile / id-card columns, then MD5 them.
				mobile, id_card = DownloadOdpsTable.auto_check_cols(dataframe)
				# Fix: skip a field when its column was not detected instead
				# of indexing the dataframe with None.
				if 'mobile' in encrypt_fields and mobile is not None:
					dataframe[mobile] = EncryptAlgorithm.multi_encrypt(tuple(dataframe[mobile]))
				if 'idNum' in encrypt_fields and id_card is not None:
					dataframe[id_card] = EncryptAlgorithm.multi_encrypt(tuple(dataframe[id_card]))

			# Persist the dataframe to the configured cache file.
			message = self.write_info_file(dataframe=dataframe, file=self.cache_file, encoding=file_encoding,
			                               delimiter=self.delimiter, compression=compression)
		else:
			message = "NoSuchTable: Can't find table: %s in Odps-Project %s." % (self.table, self.project)

		return message

	def write_info_file(self, dataframe=None, file=None, encoding='utf-8', delimiter=',', compression='infer'):
		"""
		Write the dataframe to `file`, choosing the format from its extension.
		:param dataframe   : pandas DataFrame to persist.
		:param file        : absolute output path, including the extension.
		:param encoding    : file encoding, default utf-8.
		:param delimiter   : field separator; the literal 'tab' maps to '\\t'.
		:param compression : pandas compression option ('zip', 'bz2', ...).
		:return: a human-readable status message.
		"""
		if delimiter.lower() == 'tab':
			# Support tab-separated output.
			delimiter = '\t'

		if isinstance(file, str):
			file_path, file_name = os.path.split(file)

			# Create the target directory on demand.
			if not os.path.exists(file_path):
				os.makedirs(file_path)

			if os.path.isabs(file_path):
				# Dispatch on the file extension.
				file_extension = file_name.split('.')[-1]
				if file_extension in ('csv', 'txt'):
					# CSV / plain text.
					dataframe.to_csv(path_or_buf=file,
					                 header=True,
					                 index=False,
					                 sep=delimiter,
					                 encoding=encoding,
					                 compression=compression,
					                 mode='w')
					message = 'Odps_table: %s had write into Text file: %s.' % (self.project_table, file)
				elif file_extension in ('xlsx', 'xls'):
					# Excel. Fix: write to the requested path — the previous
					# version wrote to a hard-coded 'output.xlsx' and the
					# caller's `file` was never produced.
					with pd.ExcelWriter(file) as writer:
						dataframe.to_excel(writer,
						                   sheet_name=file_name,
						                   index=False,
						                   engine='openpyxl',
						                   encoding=encoding)
						message = 'Odps_table: %s had write into Excel file: %s .' % (self.project_table, file)
				elif file_extension == 'json':
					# JSON lines.
					dataframe.to_json(path_or_buf=file,
					                  orient='records',
					                  lines=True,
					                  compression=compression,
					                  encoding=encoding)
					message = 'Odps_table: %s had write into Json file: %s .' % (self.project_table, file)
				else:
					message = 'FileTypeError: File extension - %s not in [Csv, Txt, Xls, Xlsx]' % file_extension
			else:
				message = 'FilePathError: File save path is not absolute path: %s' % file
		else:
			message = "TypeError: Param file's type is not string type."

		# [2] Transfer ownership to the requesting user's configured uid/gid
		# (default: root). Fix: only chown when the file actually exists —
		# previously every error path above crashed here.
		if isinstance(file, str) and os.path.exists(file):
			user = UserPrivilege.objects.filter(username=self.username).values('host_uid', 'host_gid')
			if user:
				host_uid = int((user[0]).get('host_uid'))
				host_gid = int((user[0]).get('host_gid'))
			else:
				host_uid = 0
				host_gid = 0
			os.chown(file, host_uid, host_gid)

		return message
