import folium
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import os
from scipy.interpolate import make_interp_spline
from scipy.interpolate import lagrange
import matplotlib.pyplot as plt

# 数据库连接
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
"""

# 读取Excel文件
pd.set_option("display.unicode.east_asian_width", True)
df = pd.read_excel("../data4/input 补充知识/input.xlsx")
print(df.head())
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data4/input 补充知识/input.xlsx')  # 读取Excel文件
print(df.head())  # 显示前5条数据
"""

"""
# 创建一个my5g1连接器，用户名为root，密码为dream010923**#地址为127.0.0.1，数据库名款为dataviewlearn，编码为utf-8
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn?charset =utf8")
# print(engine)
"""

"""
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
# 使用readsglguery查看dataviewlearn中的数据表数目
formlist = pd.read_sql_query('show tables', con=engine)
print('dataviewlearn数据库数据表清单为:\n', formlist)
"""
"""
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
##使用read 5gltable读取订单详情表
detail1 = pd.read_sql_table('meal_order_detail1', con=engine)
print('使用read sql_table读取订单详情表的长度为:', len(detail1))
"""

"""
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
## 使用read_sql读取订单详情表
detail2 = pd.read_sql('select * from meal_order_detail2', con=engine)

print('使用read sql函数+sql语句读取的订单详情表长度为:', len(detail2))

detail3 = pd.read_sql('meal_order_detail3', con=engine)

print('使用readsql函数+表格名称读取的订单详情表长度为:', len(detail3))
"""

"""
##使用resd_table读取订单信息表
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
print('使用read_table读取的订单信息表的长度为:', len(order))
"""

"""
#使用read_csv读取订单信息表
order1 = pd.read_csv('../data4/data任务程序/meal_order_info.csv', encoding='gbk')
print('使用read csv读取的订单信息表的长度为:', len(order1))
"""

"""
# 使用read_table读取菜品订单信息表
order2 = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=';', encoding='gbk')
print('分隔符为;时订单信息表为:\n', order2)
"""

"""
# 使用read_csv读职菜品订单信息表header=None
order3 = pd.read_csv('../data4/data任务程序/meal_order_info.csv', sep=',', header=None, encoding='gbk')
print('订单信息表为:', '\n', order3)
"""

"""
# 设置数据显示的最大列数和宽度
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.read_csv('../data4/input 补充知识/1月.csv', encoding='gbk')  # 导入.csv文件，并指定编码格式
print(df1.head())  # 输出前5条数据
"""

"""
print('订单信息表写入文本文件前目录内文件列表为:\n', os.listdir('../data4'))
"""

"""
# 将ordor以csv格式存储
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
order.to_csv('../data4/data任务程序/orderInfo.csv', sep=';', index=False)  # sep分隔符变为;index不把行索引写人文件
print('订单信息表写入文本文件后目录内文件列表为:\n', os.listdir('../data4/data任务程序'))
"""

"""
user = pd.read_excel('../data4/data任务程序/users.xlsx')  # 读取user.xlsx文件
print('客户信息表长度为:', len(user))
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data4/input 补充知识/1月.xlsx', sheet_name='莫寒')
print(df.head())  # 输出前5条数据
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.read_excel('../data4/input 补充知识/1月.xlsx', index_col=0)  # 设置“买家会员名”为行索引
print(df1.head())  # 输出前5条数据
"""

"""
df2 = pd.read_excel('../data4/input 补充知识/1月.xlsx', header=1)  # 设置第1行为列索引
"""

"""
# 如果将数字作为列索引，可以设置header参数为None，关键代码如下:
df3 = pd.read_excel('../data4/input 补充知识/1月.xlsx', header=None)  # 列索引为数字
"""

"""
# 下面导入第一列数据(索引为0)
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.read_excel('../data4/input 补充知识/1月.xlsx', usecols=[0])  # 导入第一列数据
print(df1.head())
"""

"""
# df1 = pd.read_excel('../data4/input 补充知识/1月.xlsx', usecols=[0, 3])
# 也可以指定列名称，关键代码如下:
df1 = pd.read_excel('../data4/input 补充知识/1月.xlsx', usecols=['买家会员名', '宝贝标题'])
"""
"""
df1 = pd.read_csv('../data4/input 补充知识/1月.txt', sep='\t', encoding='gbk')
print(df1.head())
"""

"""
# 访问html的table标签
df = pd.DataFrame()
url_list = ['http://www.espn.com/nba/salaries/_/seasontype/4']
for i in range(2, 13):
    url = 'http://www.espn.com/nba/salaries/_/page/%s/seasontype/4' % i
    url_list.append(url)
# 遍历网页中的table标签读取网页表格数据
for url in url_list:
    df = df.append(pd.read_html(url), ignore_index=True)
# 列表解析:遍历dataframe对象的第3列，以子字符串$开头
df = df[[x.startswith('$') for x in df[3]]]
print(df)
# df.to_csv('NBA.csv', header=['RK', 'NAME', 'TEAM', 'SALARY'], index=False)  # 导出.csv文件
"""
# dataFrame 操作
"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
index = [0, 1, 2]
columns = ['语文', '数学', '英语']  # 创建DataFrame数据
df = pd.DataFrame(input=input, index=index, columns=columns)
print(df)
# 遍历DataFrame数据的每一列
for col in df.columns:
    series = df[col]
    print(series)
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, columns=columns)
print(df)
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df = pd.DataFrame({'语文': [110, 105, 99], '数学': [105, 88, 115],
                   '英语': [109, 120, 130], '班级': '高一7班'}, index=[0, 1, 2, ])
print(df)
"""

"""

engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
detail=pd.read_sql_table('meal_order-detail1',con=engine)
print('订单详情表的索引为:',detail.index)
print('订单详情表的所有值为:\n',detail.values)
print('订单详情表的列名为:\n',detail.colunns)
print('订单详情表的数据类型为:\n',detail.dtypes)
## 查看DataFrame的元素个数
print('订单详情表的元素个数为:',detail.size)##查看DataFrame的维度数
print('订单详情表的维度为:',detail.ndim)## 查看DataFrame的形状
print('订单详情表的形状为:',detail.shape)
print('订单详情表转置前形状为:',detail.shape)
print('订单详情表转置后形状为为:',detail.T.shape)
##使用访问字典方式取出orderInfo中的某一列
order_id = detail['order_id']
print('订单详情表中的order_id的形状为:\n',order_id. shape)
dishes_name = detail.dishes_name
print('订单详情表中的dishes_name的形状为:',dishes_name.shape)
dishes_name5 = detail['dishes_name'][:5]
print('订单详情表中的dishes_name前5个元素为:\n',dishes_name5)
orderDish = detail[['order_id','dishes_name']][:5]
print('订单详情表中的order_id和dishes_name前5个元素为:\n',orderDish)
order5=detail[:][1:6]
print('订单详情表的1-5行元素为:\n',order5)
#默认访问前5行数据
print('订单详情表中前五行数据为\n',detail.head())#欲的访问后5行数据
print('订单详情表中后五个元素为:\n',detail.tail())
dishes_name1 = detail.loc[:,'dishes_name']
print('使用loc提取dishes_nane列的size为:',dishes_name1.size)

#dishes name2 = detsil.ilocl:,3J #3表示第4列,logicprn name
dishes_name2 = detail.iloc[:,5] #5表示第6列,dishes name
print('使用iloc提取第6列的size为:',dishes_name2.size)

orderDish1 = detail.loc[:, ['order id','dishes_name']]
print('使用loc提取order_id和dishes_name列的size为:',orderDish1.size)

orderDish2= detail.iloc[:,[1,3]]
print('使用iloc提取第2和第4列的size为:',orderDish2.size)

print('列名为order_id和dishes_name的行名为3的数据为:\n',
detail.loc[3, ['order_id','dishes_name']])

print('列名为order_id和dishes_name行名为2,3,4,5,6的数据为:\n',
detail.loc[2:6, ['order_id', 'dishes_name']])

print('列位置为1和3行位置为3的数据为:\n',detail.iloc[3,[1,3]])

print('列位置为1和3行位置为2,3,4,5,6的数据为:\n',detail.iloc[2:7,[1,3]])

## loc内部传人表达式
print('detail中order_id为458的dishes_name为:\n',
    detail.loc[detail['order_id']=='458',
['order_id', 'dishes_name']])

print('detail中order_id为458的第1,5列数据为:\n',
detail.iloc[detail['order_id']=='458', [1,5]])

print('列名为dishes_name行名为2,3,4,5,6的数据为:\n',detail.loc[2:6,'dishes_name'])

print('列位置为5,行位置为2至6的数据为:\n',detail.iloc[2:6,5])

print('列位置为5行名为2至6的数据为:\n',detail.ix[2:6,5])


#更新修改DataFrame中的数据
#将arder_1d为458的，变换为45800
detail.loc[detail["order_id"]=='458','order_id']='45800'
print('更改后detail中oxdex_id为458的order_id为:\n',detail.loc[detail['order_id ]=='458','ordex_id ])
print('更改后detail中oxderid为45800的order_id为:\n',detail.loc[detail['order_id']=='45800','order_id'])

#DataFrame新增一列非定值
detail['paynent'] = detail['counts']*detail['anounts']

print('detail新增列payment的前五行为:\n',detail['payment'].head())

"""

"""
# 抽取一行数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df.loc['明日'])
"""

"""
# 抽取多行数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df.loc['明日'])
print(df.loc[['明日', '高袁圆']])
print(df.iloc[[0, 2]])

print(df.loc['明日':'二月二'])  # 从“明日”到“二月二”
print(df.loc[:'七月流火':])  # 第1行到“七月流火”
print(df.iloc[0:4])  # 第1行到第4行
print(df.iloc[1::])  # 第2行到最后1行
"""

"""
# 抽取指定列数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df[['语文', '数学']])
print(df.loc[:, ['语文', '数学']])  # 抽取“语文”和“数学”
print(df.iloc[:, [0, 1]])  # 抽取第1列和第2列
print(df.loc[:, '语文':])  # 抽取从“语文”开始到最后一列
print(df.iloc[:, :2])  # 连续抽取从1列开始到第3列，但不包括第3列
"""

"""
# 抽取指定行列数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df.loc['七月流火', '英语'])  # “英语”成绩
print(df.loc[['七月流火'], ['英语']])  # “七月流火”的“英语”成绩
print(df.loc[['七月流火'], ['数学', '英语']])  # “七月流火”的“数学”和“英语”成绩
print(df.iloc[[1], [2]])  # 第2行第3列
print(df.iloc[1:, [2]])  # 第2行到最后一行的第3列
print(df.iloc[1:, [0, 2]])  # 第2行到最后一行的第1列和第3列
print(df.iloc[:, 2])  # 所有行，第3列
"""

"""
# 按指定条件抽取数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df.loc[(df['语文'] > 105) & (df['数学'] > 88)])
"""

"""
# 按列增加数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
df['物理'] = [88, 79, 60, 50]
print(df)
"""

"""
# 按列增加数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
wl = [88, 79, 60, 50]
df.insert(1, '物理', wl)
print(df)
"""

"""
# 增加多行数据
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
df_insert = pd.DataFrame({'语文': [100, 123, 138], '数学': [99, 142, 60], '英语': [98, 139, 99]},
                         index=['钱多多', '章年', '无名'])
df1 = df.append(df_insert)
print(df1)
"""

"""
# 将“数学”修改为“数学(上)
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
df.columns = ['语文', '数学(上)', '英语']
print(df)
"""

"""
#使用rename方法修改列标题。
pd.set_option('display.unicode.east_asian_width', True)
input=[[110,105,99],[105,88,115],[109,120,130],[112,115,140]]
name =['明日','七月流火','高袁圆','二月二']
columns =['语文','数学','英语']
df=pd.DataFrame(input=input,index=name,columns=columns)
df.rename(columns={'语文':'语文(上)','数学':'数学(上)','英语':'英语(上)'},inplace = True)
print(df)
"""

"""
# 修改行标题使用DataFrame对象中的index属性，直接赋值即可。
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
df.index = list('1234')
print(df)
# 使用DataFrame对象中的rename方法也可以修改行标题。例如，将行标题统一关键代码如下:
df.rename({'明日': 1, '七月流火': 2, '高袁圆': 3, '二月二': 4}, axis=0, inplace=True)
print(df)
"""
"""
#(1)修改整行数据
pd.set_option('display.unicode.east_asian_width', True)
input=[[110,105,99],[105,88,115],[109,120,130],[112,115,140]]
name =['明日','七月流火','高袁圆','二月二']
columns =['语文','数学','英语']
df=pd.DataFrame(input=input,index=name,columns=columns)
df.loc['明日']=[120,115,109]
print(df)
df.loc['明日']=df.loc['明日']+10
print(df)
#(2)修改整列数据
df.loc[:,'语文']=[115,108,112,118]
print(df)

"""

"""
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
# (3)修改某一处数据
df.loc['明日', '语文'] = 115
# (4)使用iloc属性修改数据
df.iloc[0, 0] = 115  # 修改某一处数据
df.iloc[:, 0] = [115, 108, 112, 118]  # 修改整列数据
df.iloc[0, :] = [120, 115, 109]  # 修改整行数据
print(df)
"""

"""
# 删除指定的学生成绩数据
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df)
# df.drop(['数学'],axis=1,inplace=True) #删除某列
# print(df)
df.drop(columns='数学', inplace=True)  # 删除columns为“数学”的列
print(df)
# df.drop(labels='数学',axis=1,inplace=True) #删除列标签为“数学”的列
# print(df)
df.drop(['明日', '二月二'], inplace=True)  # 删除某一行
print(df)
# df.drop(index='明日',inplace=True) #删除index为“明日”的行
# print(df)
# df.drop(labels='明日',axis=0,inplace=True) #删除行标签为“明日”的行
# print(df)
"""

"""
# 删除满足特定条件的行，首先找到满足该条件的行索引，然后再使用drop方法将其删除。
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115, 140]]
name = ['明日', '七月流火', '高袁圆', '二月二']
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=name, columns=columns)
print(df)
df.drop(index=df[df['数学'].isin([88])].index[0], inplace=True)  # 删除“数学”包含分数88的行
df.drop(index=df[df['语文'] < 110].index[0], inplace=True)  # 删除“语文”小于分数110的行
print(df)
"""
"""
# 使用np.mean函数计算平均价格
engine = create_engine("mysql+pymysql://root:dream010923**@127.0.0.1:3306/dataviewlearn")
detail = pd.read_sql_table('meal_order_detail1', con=engine)
print("订单详情表中amount的平均值:", np.mean(detail['amounts']))
"""

"""
# 计算“语文”“数学”“英语”各科成绩的最高分
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
index = [1, 2, 3, 4]
columns = ['语文', '数学 ', '英语']
df = pd.DataFrame(input=input, index=index, columns=columns)
new = df.max()
print(new)
# 增加一行数据(“语文”“数学”“英语”的最大值，忽略索引)
df = df.append(new, ignore_index=True)
print(df)
"""

"""
# 计算“语文”“数学”“英语”各科成绩的最低分
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
index = [1, 2, 3, 4]
columns = ['语文', '数学 ', '英语']
df = pd.DataFrame(input=input, index=index, columns=columns)
new = df.min()
# 增加一行数据(语文、数学和英语的最小值，忽略索引)
df = df.append(new, ignore_index=True)
print(df)
"""

"""
# 按列求均值
input = [[110, 120, 110], [130, 130, 130], [130, 120, 130]]
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, columns=columns)
print(df.median())
"""
"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
index = [1, 2, 3, 4]
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=index, columns=columns)
print(df.mode())  # 三科成绩的众数
print(df.mode(axis=1))  # 每一行的众数
print(df['数学'].mode())  # “数学”的众数
"""
"""
# 求方差
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 113, 102, 105, 108], [118, 98, 119, 85, 118]]
index = ['小黑', '小白']
columns = ['物理1', '物理2', '物理3', '物理4', '物理5']
df = pd.DataFrame(input=input, index=index, columns=columns)
print(df.var(axis=1))
"""

"""
# 标准差
pd.set_option('display.unicode.east_asian_width', True)
input = [[110, 105, 99], [105, 88, 115], [109, 120, 130], [112, 115]]
index = [1, 2, 3, 4]
columns = ['语文', '数学', '英语']
df = pd.DataFrame(input=input, index=index, columns=columns)
print(df.std())
"""
"""
# 求分位数
# 创建DataFrame数据(数学成绩)
input = [120, 89, 98, 78, 65, 102, 112, 56, 79, 45]
columns = ['数学']
df = pd.DataFrame(input=input, columns=columns)  # 计算35%的分位数
x = df['数学'].quantile(0.35)  # 输出淘汰学生
print(df[df['数学'] <= x])
"""
"""
# 本信息，主要从数据的维度、形状、元素的个数3个方面进行。
detail = pd.read_sql_table("meal_order_detail1", con=engine)
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
user = pd.read_excel('../data4/data任务程序/users.xlsx')
print('订单详情表的维度为:', detail.ndim)
print('订单信息表的维度为:', order.ndim)
print('客户信息表的维度为:', user.ndim)
print('订单详情表的形状为:', detail.shape)
print('订单信息表的形状为:', order.shape)
print('客户信息表的形状为:', user.shape)
print('订单详情表的元素个数为:', detail.size)
print('订单信息表的元素个数为:', order.size)
print('客户信息表的元素个数为:', user.size)


# 定义一个画数去除全为空值的列和标准差为0的列
def dropNullStd(input):
    beforelen = input.shape[1]  # 调用shape()方法获取行数与列数，shape[0]获取行数:shape[1]获取列部
    colisNull = input.describe().loc['count'] == 0  # 数值型特征的非空值数目为0的列
    for i in range(len(colisNull)):
        if colisNull[i]:
            input.drop(colisNull.index[i], axis=1, inplace=True)
    stdisZero = input.describe().loc['std'] == 0
    for i in range(len(stdisZero)):
        if stdisZero[i]:
            input.drop(stdisZero.index[i], axis=1, inplace=True)
    afterlen = input.shape[1]
    print('去除的列的数目为:', beforelen - afterlen)
    print('去除后数据的形状为:', input.shape)
dropNullStd(detail)
"""
# 时间格式处理
"""
# 时间格式处理
df = pd.DataFrame({'A': [1, 2], 'B': [pd.Timestamp('2019'), pd.Timestamp('2020')],
                   'C': [pd.Timedelta('1 days'), pd.Timedelta('2 days')]})
print(df.quantile(0.5, numeric_only=False))
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
print('进行转换前订单信息表lock_time的类型为:', order['lock_time'].dtypes)
order['lock_time'] = pd.to_datetime(order['lock_time'])
print('进行转换后订单信息表lock_time的类型为:', order['lock_time'].dtypes)
print('最小时间为:', pd.Timestamp.min)
print('最大时间为:', pd.Timestamp.max)

"""

"""
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
dateIndex = pd.DatetimeIndex(order['lock_time'])
# print(转换为DatetimeIndex后数据的类型为:ln’type(dateIndex)) print(’转换为DatetimeIndex后数据的类型为:\n，dateIndex)
periodIndex = pd.PeriodIndex(order['lock_time'], freq='s')
print('转换为PeriodIndex后数据的类型为:\n', type(periodIndex))
"""
"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df = pd.DataFrame({'原日期': ['14-Feb-20', '02/14/2020', '2020.02.14', '2020/02/14', '20200214']})
df['转换后的日期'] = pd.to_datetime(df['原日期'])
print(df)
"""
"""
df = pd.DataFrame({'year': [2018, 2019, 2020],
                   'month': [1, 3, 2],
                   'day': [4, 5, 14],
                   'hour': [13, 8, 2],
                   'minute': [23, 12, 14],
                   'second': [2, 4, 0]})
df['组合后的日期'] = pd.to_datetime(df)
print(df)
"""

"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.east_asian_width', True)
df = pd.DataFrame({'原日期': ['2019.1.05', '2019.2.15', '2019.3.25', '2019.6.25', '2019.9.15', ' 2019. 12.31']})
df['日期'] = pd.to_datetime(df['原日期'])
print(df)
df['年'], df['月'], df['日'] = df['日期'].dt.year, df['日期'].dt.month, df['日期'].dt.day
df['星期几'] = df['日期'].dt.day_name()
df['季度'] = df['日期'].dt.quarter
df['是否年底'] = df['日期'].dt.is_year_end
print(df)
"""
"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data4/input 补充知识/mingribooks.xls')
df1 = df[['订单付款时间', '买家会员名', '联系手机', '买家实际支付金额']]
df1 = df1.sort_values(by=['订单付款时间'])
df1 = df1.set_index('订单付款时间')  # 将日期设置为索引#获取某个区间数据
print(df1['2018-05-11':'2018-06-10'])
"""
"""
# 解决数据输出时列名不对齐的问题
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data4/input 补充知识/TB2018.xls')
df1 = df[['订单付款时间', '买家会员名', '联系手机', '买家实际支付金额']]
df1 = df1.set_index('订单付款时间')  # 将date设置为index

print('----按月统计数据-----')
# “HS”是每个月第一天为开始日期，M是每个月最后一天
print(df1.resample('M').sum().to_period('M'))
print('---按季统计数据--')
# 'QS'是每个季度第一天为开始日期,'Q'是每个季度最后一天
print(df1.resample('QS').sum())
print('--按年统计数据-')
# AS是每年第一天为开始日期，“A”是每年最后一天
print(df1.resample('AS').sum())
print('----按年统计并显示数据--')
# “AS”是每年第一天为开始日期，“A”是每年最后一天
print(df1.resample('as').sum().to_period('A'))
print('----按季度统计并显示数据-')
print(df1.resample('Q').sum().to_period('Q'))
print('--按月统计并显示数据-')
print(df1.resample('M').sum().to_period('M'))
df2 = df1.resample('M').sum().to_period('M')
print('----按星期统计并显示数据-')
print(df1.resample('w').sum().to_period('w').head())
"""

"""
#时间序列操作
# 根据代码4-41转换，order['lock_time']数据类型已经为Timestamp
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
print('进行转换前订单信息表lock_time的类型为:', order['lock_time'].dtypes)
order['lock_time'] = pd.to_datetime(order['lock_time'])
print('进行转换后订单信息表lock_time的类型为:', order['lock_time'].dtypes)
dateIndex = pd.DatetimeIndex(order['lock_time'])
periodIndex = pd.PeriodIndex(order['lock_time'], freq='s')
year1 = [i.year for i in order['lock_time']]
print('lock_time中的年份数据前5个为:', year1[:5])
month1 = [i.month for i in order['lock_time']]
print('lock_time中的月份数据前5个为:', month1[:5])
day1 = [i.day for i in order['lock_time']]
print('lock_time中的日期数据前5个为:', day1[:5])

weekday1 = [i.day_name() for i in order['lock_time']]
print('lock_time中的星期名称数据前5个为:', weekday1[:5])


# print('dateIndex中的星期名称数据前5个为:', dateIndex.day_name()[:5])



# 将lock_time数据向后平移一天
# 将lock time数据向后平移2天6小时30分钟
time1 = order['lock_time'] + pd.Timedelta(days=2, hours=6, minutes=30)
print('lock_time在加上一天前前5行数据为:\n', order['lock_time'][:5])
print('lock_time在加上一天前前5行数据为:\n', time1[:5])

timeDelta = order['lock_time'] - pd.to_datetime('2017-1-1')
print('lock_time减去2017年1月1日0点0时0分后的数据:\n', timeDelta[:5])
print('lock_time减去2017年1月1日0点0时0分后的数据类型为:', timeDelta.dtypes)

order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
order['use_start time'] = pd.to_datetime(order['use_start_time'])
order['lock_time'] = pd.to_datetime(order['lock_time'])
print('进行转换后订单信息表use_start_time和lock_time的类型为:\n',
      order[['use_start_time', 'lock_time']].dtypes)

order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
year = [i.year for i in order['lock_time']]  # 提取年份信息
month = [i.month for i in order['lock_time']]  # 提取月份信息
day = [i.day for i in order['lock_time']]  # 提取日期信息
week = [i.week for i in order['lock_time']]  # 提取周信息
weekday = [i.weekday() for i in order['lock_time']]  # 提取星期信息
weekname = [i.day_name() for i in order['lock_time']]  # 提取星期名独信息
print('订单详情表中的前5条数据的年份信息为:', year[:5])
print('订单详情表中的前5条数据的月份信息为:', month[:5])
print('订单详情表中的前5条数据的日期信息为:', day[:5])
print('订单详情表中的前5条数据的周信息为:', week[:5])
print('订单详情表中的前5条数据的星期信息为:', weekday[:5])
print('订单详情表中的前5条数据的星期名称信息为:', weekname[:5])

timemin = order['lock_time'].min()
timemax = order['lock_time'].max()
print('订单最早的时间为:', timemin)
print('订单最晚的时间为:', timemax)

print('订单持续的时间为:', timemax - timemin)
order = pd.read_table('../data4/data任务程序/meal_order_info.csv', sep=',', encoding='gbk')
order['lock_time'] = pd.to_datetime(order['lock_time'])
order['use_start_time'] = pd.to_datetime(order['use_start_time'])
checkTime = order['lock_time'] - order['use_start_time']
print('平均点餐时间为:', checkTime.mean())
print('最小点餐时间为:', checkTime.min())
print('最大点餐时间为:', checkTime.max())
"""

"""
# 分组聚合进行组内计算
detail = pd.read_sql_table('meal_order_detail1', con=engine)
detailGroup = detail[['order_id', 'counts', 'amounts']].groupby(by='order_id')
print('分组后的订单详情表为:\n', detailGroup)

print('订单详情表分组后前5组每组的均值为:\n', detailGroup.mean().head())
print('订单详情表分组后前10组每组的均值为:\n', detailGroup.mean().head(10))

print('订单详情表分组后前5组每组的标准差为:\n', detailGroup.std().head())

print('订单详情表分组后前5组每组的大小为:\n', detailGroup.size().head())

print('订单详情表的菜品销量与售价的和与均值为:\n', detail[['counts', 'amounts']].agg([np.sum, np.mean]))

print('订单详情表的菜品销量总和与售价的均值为:\n', detail.agg({'counts': np.sum, 'amounts': np.mean}))

print('菜品订单详情表的菜品销量总和与售价的总和与均值为:\n', detail.agg({'counts': np.sum, 'amounts': [np.mean, np.sum]}))


# 自定义画我求两倍的和
def DoubleSum(input):
    s = input.sum() * 2
    return s


print('菜品订单详情表的菜品销量两倍总和为:\n', detail.agg({'counts': DoubleSum}, axis=0))


# 自定义画数求两倍的和
def DoubleSum1(input):
    s = np.sum(input) * 2
    return s
print('订单详情表的菜品销量两倍总和为:\n', detail.agg({'counts': DoubleSum1}, axis=0).head())
print('订单详情表的菜品销量与售价的和的两倍为:\n', detail[['counts', 'amounts']].agg(DoubleSum1))
print('订单详情表分组后前3组每组的均值为:\n', detailGroup.agg(np.mean).head(3))
print('订单详情表分组后前3组每组的标准差为:\n', detailGroup.agg(np.std).head(3))
print('订单详情分组前3组每组菜品总数和售价均值为:\n', detailGroup.agg({'counts': np.sum, 'amounts': np.mean}).head(3))
print('订单详情表的菜品销量与售价的均值为:\n', detail[['counts', 'amounts']].apply(np.mean))
print('订单详情表分组后前3组每组的均值为:\n', detailGroup.apply(np.mean).head(3))
print('订单详情表分组后前3组每组的标准差为:\n', detailGroup.apply(np.std).head(3))
print('订单详情表的菜品销量与售价的两倍为:\n', detail[['counts', 'amounts']].transform(lambda x: x * 2).head(4))
# print('订单详情表分组后实现组内离差标准化后前五行为:\n', detailGroup.transform(lambda x: (x.mean() - x.min()) / (x.max() - x.min())).head())
"""
"""
detail = pd.read_sql_table('meal_order_detail1', con=engine)
detail['place_order_time'] = pd.to_datetime(detail['place_order_time'])
detail['date'] = [i.date() for i in detail['place_order_time']]
detailGroup = detail[['date', 'counts', 'amounts']].groupby(by='date')
print('订单详情表前5组每组的数目为:\n', detailGroup.size().head())
"""
"""
detail = pd.read_sql_table('meal_order_detail1', con=engine)
detail['place_order_time'] = pd.to_datetime(detail['place_order_time'])
detail['date'] = [i.date() for i in detail['place_order_time']]
detailGroup = detail[['date', 'counts', 'amounts']].groupby(by='date')
dayMean = detailGroup.agg({'amounts': np.mean})
print('订单详情表前五组每日菜品均价为:\n', dayMean.head())
dayMedian = detailGroup.agg({'amounts': np.median})
print('订单详情表前五组每日菜品售价中位数为:\n', dayMedian.head())
daySaleSum = detailGroup.apply(np.sum)['counts']
print('订单详情表前五组单日菜品售出数目为:\n', daySaleSum.head())
"""
"""
# 透视表
detail = pd.read_sql_table("meal_order_detail1", con=engine)
detailPivot = pd.pivot_table(detail[['order_id', 'counts', 'amounts']], index='order_id')

print('以order_id作为分组键创建的订单透视表为:\n', detailPivot.head())

detailPivot1 = pd.pivot_table(
    detail[['order_id', 'counts', 'amounts']],
    index='order_id',
    aggfunc=np.sum
)
print('以order_id作为分组键创建的订单销量与售价总和透视表为:\n', detailPivot1.head())

detailPivot2 = pd.pivot_table(
    detail[['order_id', 'dishes_name', 'counts', 'amounts']],
    index=['order_id', 'dishes_name'],
    aggfunc=np.sum)

print('以order_id和dishes_name作为分组键创建的订单销量与售价总和透视表为:\n',
      detailPivot2.head())

detailPivot3 = pd.pivot_table(
    detail[['order_id', 'dishes_name', 'counts', 'amounts']],
    index='order_id',
    columns='dishes_name',
    aggfunc=np.sum)
print('以order_id和dishes_name作为行列分组键创建的透视表前5行4列为:\n',
      detailPivot3)

detailPivot4 = pd.pivot_table(
    detail[['order_id', 'dishes_name', 'counts', 'amounts']],
    index='order_id',
    values='counts',
    aggfunc=np.sum)
print('以order_id作为行分组键counts作为值创建的透视表前5行为:\n',
      detailPivot4.head())
detailPivot5 = pd.pivot_table(
    detail[['order_id', 'dishes_name', 'counts', 'amounts']],
    index='order_id',
    columns='dishes_name',
    aggfunc=np.sum,
    fill_value=0
)
print('空值填0后以order_id和dishes_name为行列分组键创建透视表前5行4列为:\n',
      detailPivot5.iloc[:5, :4])

detailPivot6 = pd.pivot_table(
    detail[['order_id', 'dishes_name', 'counts', 'amounts']],
    index='order_id',
    columns='dishes_name',
    aggfunc=np.sum,
    fill_value=0,
    margins=True
)
print('添加margins后以order_id和dishes_name为分组键的透视表前5行后4列为:\n',
      detailPivot6)  # detailPivot6.iloc!:5."4]

print('添加margins后以order_id和dishes_name为分组键的透视表前5行后4列为:\n',
      detailPivot6)  # detailPivot6.iloc!:5,:4]

detailCross = pd.crosstab(
    index=detail['order_id'],
    columns=detail['dishes_name'],
    values=detail['counts'],
    aggfunc=np.sum
)
print('以order_id和dishes_name为分组键counts为值的透视表前5行5列为:\n',
      detailCross.iloc[:5, :5])

detail['place_order_time'] = pd.to_datetime(detail['place_order_time'])
detail['date'] = [i.date() for i in detail['place_order_time']]
pivotDetail = pd.pivot_table(
    detail[['date', 'dishes_name', 'counts', 'amounts']],
    index='date',
    aggfunc=np.sum,
    margins=True
)
print('订单详情表单日菜品成交总额与总数透视表为:\n', pivotDetail)

crossDetail = pd.crosstab(
    index=detail['date'],
    columns=detail['dishes_name'],
    values=detail['amounts'],
    aggfunc=np.sum,
    margins=True
)
print('订单详情表单日单个菜品成交总额交叉表后5行5列为:\n', crossDetail.iloc[-5:, -5:])
"""

"""
detaill = pd.read_sql('meal_order_detail1', engine)
df1 = detaill.iloc[:, :10]
df2 = detaill.iloc[:, 10:]
print('外连接合并过后的大小为%s.', pd.concat([df1, df2], axis=1, join='outer').shape)
print('内连接合并过后的大小为%s.', pd.concat([df1, df2], axis=1, join='inner').shape)

df3 = detaill.iloc[:1500, :]
df4 = detaill.iloc[1500:, :]
print('外连接纵向合并后的数据框大小为:', pd.concat([df3, df4], axis=0, join='outer').shape)
print('内连接纵向合并后的数据框大小为：', pd.concat([df3, df4], axis=0, join='inner').shape)

dfs = [df1, df2, df3]
result = pd.concat(dfs)
print(result)
result = pd.concat(dfs, keys=['1月', '2月', '3月'])
print(result)
result = pd.concat([df1, df4], axis=1)
print(result)
result = pd.concat([df1, df4], axis=1, join='inner')
# result=pd.concat([df1,df4],axis = 1,join_axes([df4.index]))


# 纵向堆叠
# print('堆叠前df3的大小为%s,df4的大小为%3.'%(df3.shape,df4.shape))
print('append纵向堆叠后的数据大小为：', df3.append(df4).shape)

# 主键合并
order = pd.read_csv('../data5/input 任务程序/meal_order_info.csv', sep=',', encoding='gbk')
order['info_id'] = order['info_id'].astype('str')
order_detaill = pd.merge(detaill, order, left_on='order_id', right_on='info_id')

print('订单详情表的大小为：', detaill.shape)
print('order订单详情表的原始形状为：', order.shape)
# print('订单详情表和地清单详情表主键合并后的形状为：'.order_detaill.shape)


# merge方法，常规合并
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.DataFrame(
    {'编号': ['mr001', 'mr002', 'mr003'], '语文': [110, 105, 109], '数学': [109, 88, 120], '英语': [99, 115, 130]})
df2 = pd.DataFrame({'编号': ['mr001', 'mr002', 'mr003'], '体育': [34.5, 39.7, 38]})
df_merge = pd.merge(df1, df2, on='编号')
print(df_merge)
df_merge = pd.merge(df1, df2, right_index=True, left_index=True)
print(df_merge)
df_merge = pd.merge(df1, df2, on='编号', how='left')  # 去重
print(df_merge)

pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.DataFrame({'编号': ['mr001', 'mr002', 'mr003'], '学生姓名': ['明日同学', '高圆圆', '钱多多']})
df2 = pd.DataFrame(
    {'编号': ['mr001', 'mr002', 'mr003'], '语文': [110, 105, 109], '数学': [109, 88, 120], '英语': [99, 115, 130],
     '时间': ['1月', '2月', '3月']})
df_merge = pd.merge(df1, df2, on='编号')
print(df_merge)

# 主键合并
order = pd.read_csv('../data5/input 任务程序/meal_order_info.csv', sep=',', encoding='gbk')
order['info_id'] = order['info_id'].astype('str')

order.rename({'info_id': 'order_id'}, axis=1, inplace=True)
detaill['order_id'] = detaill['order_id'].apply(int)

order_detaill = detaill.join(order, on='order_id', rsuffix='1')
print('订单详情表的和订单详情表join合并后的的形状为：', order_detaill.shape)

dict1 = {'ID': [1, 2, 3, 4, 5, 6, 7, 8, 9],
         'System': ['win10', 'win10', np.nan, 'win10', np.nan, np.nan, 'win7', 'win7', 'win8'],
         'cpu': ['i7', 'i5', np.nan, 'i7', np.nan, np.nan, 'i5', 'i5', 'i3']}
dict2 = {'ID': [1, 2, 3, 4, 5, 6, 7, 8, 9],
         'System': [np.nan, np.nan, 'win7', np.nan, 'win8', 'win7', np.nan, np.nan, np.nan],
         'cpu': [np.nan, np.nan, 'i3', np.nan, 'i7', 'i5', np.nan, np.nan, np.nan]}
df5 = pd.DataFrame(dict1)
df6 = pd.DataFrame(dict2)
print('经过重叠合并后的数据为：', df5.combine_first(df6))

detaill1 = pd.read_sql('meal_order_detail1', engine)
detaill2 = pd.read_sql('meal_order_detail2', engine)
detaill3 = pd.read_sql('meal_order_detail3', engine)
detail = detaill1.append(detaill2)
detail = detail.append(detaill3)
print('三张订单合并后的形状为：', detail.shape)

order = pd.read_csv('../data5/input 任务程序/meal_order_info.csv', sep=',', encoding='gb18030')
user = pd.read_excel('../data5/input 任务程序/users_info.xlsx')
order['info_id'] = order['info_id'].astype('str')
order['emp_id'] = order['emp_id'].astype('str')
input = pd.merge(detail, order, left_on=['order_id', 'emp_id'], right_on=['info_id', 'emp_id'])
print(input.shape)

# 清洗数据


import pandas as pd

detail = pd.read_csv('../data5/input 任务程序/detail.csv', index_col=0, encoding='gbk')


# 去重
def delRep(list1):
    list2 = []
    for i in list1:
        if i not in list2:
            list2.append(i)
    return list2


dishes = list(detail['dishes_name'])
print("去重前菜品总数：", len(dishes))
dish = delRep(dishes)
print('去重后的菜品总数为：', len(dish))

print("去重前菜品总数：", len(dishes))
dish_set = set(dishes)
print('去重后的菜品总数为：', len(dish_set))

import numpy as np
import pandas as pd
from sqlalchemy import create_engine

detaill = pd.read_sql('meal_order_detail1', engine)
print("去重前订单详情表为：", detaill.shape)
shapeDet = detail.drop_duplicates(subset=['order_id', 'emp_id']).shape
print('去重后的菜品总数为：', shapeDet)

import pandas as pd

df = pd.DataFrame(pd.read_excel("../data5/input 补充知识/1月.xlsx"))
print(df.duplicated)
print(df.drop_duplicates)
print(df.drop_duplicates(['买家会员名']))
print(df.drop_duplicates(['买家会员名', '买家支付宝账号'], inplace=False))

corrDet = detaill[['counts', 'amounts']].corr(method='kendall')
print('销量和售价的Kendall相似度为：', corrDet)


def FeatureEquals(df):
    dfEquals = pd.DataFrame([], columns=df.columns, index=df.columns)
    for i in df.columns:
        for j in df.columns:
            dfEquals.loc[i, j] = df.loc[:, i].equals(df.loc[:, j])
    return dfEquals


detEquals = FeatureEquals(detaill)
print('detail的特征相等的矩阵的前五行和前五列：', detEquals.iloc[:5, :5])

lenDet = detEquals.shape[0]
dupCol = []
for k in range(lenDet):
    for l in range(k + 1, lenDet):
        if detEquals.iloc[k, 1] & (detEquals.columns[1] not in dupCol):
            dupCol.append(detEquals.columns[1])
print('需要删除的列为：', dupCol)
detaill.drop(dupCol, axis=1, inplace=True)
print('删除多余列后的detail的特征数目为：', detaill.shape[1])

df = pd.read_excel("../data5/input 补充知识/TB2018.xls")
print(df)
print(df.info())

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel("../data5/input 补充知识/TB2018.xls")
print(df.isnull)
print(df.notnull)

print('detail每个特征缺失的数目为：', detaill.isnull().sum())
print('detail每个特征非缺失的数目为：', detaill.notnull().sum())

# 删除法
print('去除缺失的列前detail的形状为：', detaill.shape)

print('去除缺失的列后detail的形状为：', detaill.dropna(axis=1, how='any').shape)

# 替换法
detaill = detaill.fillna(-99)
print('detail每个特征缺失的数目为：', detaill.isnull().sum())

import numpy as np
from scipy.interpolate import make_interp_spline

x = np.array([1, 2, 3, 4, 5, 8, 9, 10])
y1 = np.array([2, 8, 18, 32, 50, 128, 162, 200])
y2 = np.array([3, 5, 7, 9, 11, 17, 19, 21])

SplineInsValue1 = make_interp_spline(x, y1)(np.array([6, 7]))
SplineInsValue2 = make_interp_spline(x, y2)(np.array([6, 7]))
print('当x为6,7时，使用样条插值y1：', SplineInsValue1)
print('当x为6,7时，使用样条插值y2：', SplineInsValue2)

from scipy.interpolate import lagrange

large1 = lagrange(x, y1)
large2 = lagrange(x, y2)
print('当x为6,7时，使用拉格朗日插值y1：', large1([6, 7]))
print('当x为6,7时，使用拉格朗日插值y2：', large2([6, 7]))


def outRange(Ser1):
    bpoolInd = (Ser1.mean() - 3 % Ser1.std() > Ser1) | (Ser1.mean() + 3 * Ser1.var() < Ser1)
    index = np.arange(Ser1.shape[0])[bpoolInd]
    outstrange = Ser1.iloc[index]
    return outstrange


detaill = pd.read_sql('meal_order_detail1', engine)
outlier = outRange(detaill['counts'])
print('使用拉一代准则判定异常值个数为：', outlier.shape[0])
print('异常值最大值为：', outlier.max())
print('异常值最小值为：', outlier.min())

# 箱线图分析
import matplotlib.pyplot as plt

plt.figure(figsize=(8, 6))
p = plt.boxplot(detaill['counts'].values, notch=True)
outliter1 = p['fliers'][0].get_ydata()
plt.savefig('菜品异常处理数据识别.png')
plt.show()
print('异常值个数为：', len(outliter1))
print('异常值最大值为：', max(outliter1))
print('异常值最小值为：', min(outliter1))

naRate = (detaill.isnull().sum() / detaill.shape[0] * 100).astype('str') + '%'
print('detail每个特征缺失率为：', naRate)
detaill.dropna(axis=1, how='all', inplace=True)
print('缺失值的数目为：', detaill.isnull().sum())


def outrange(ser1):
    ol = ser1.quantile(0.25)
    ou = ser1.quantile(0.75)
    ior = ou - ol
    ser1.loc[ser1 > (ou + 1.5 * ior)] = ou
    ser1.loc[ser1 > (ou - 1.5 * ior)] = ol
    return ser1


detaill['counts'] = outrange(detaill['counts'])
detaill['amounts'] = outrange(detaill['amounts'])

print('销售量最小值', detaill['counts'].min())
print('销售量最大值', detaill['counts'].max())
print('售价最大值', detaill['amounts'].min())
print('售价最小值', detaill['amounts'].max())
"""
pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.DataFrame({
    '编号': ['mr001', 'mr002', 'mr003'],
    '语文': [110, 105, 109],
    '数学': [110, 105, 109],
    '英语': [110, 105, 109],
})
df2 = pd.DataFrame({
    '编号': ['mr001', 'mr002', 'mr003'],
    '体育': [34.5, 39.7, 38]
})
df_merge = pd.merge(df1, df2, on='编号')
# df_merge=pd.merge(df1,df2,right_index=True,left_index=True)
print(df_merge)

pd.set_option('display.unicode.east_asian_width', True)

detail = pd.read_csv('../data5/data 任务程序/detail.csv', index_col=0, encoding='gbk')


def minmaxscale(data):
    data = (data - data.min()) / (data.max() - data.min())
    return data


data1 = minmaxscale(detail['counts'])
data2 = minmaxscale(detail['amounts'])
data3 = pd.concat([data1, data2], axis=1)
print('离差标准化之前销量和售价数据为：\n', detail[['counts', 'amounts']].head())
print('离差标准化之后销量和售价数据为：\n', data3.head())


def standardscaler(data):
    data = (data - data.mean()) / data.std()
    return data


data4 = standardscaler(detail['counts'])
data5 = standardscaler(detail['amounts'])
data6 = pd.concat([data4, data5], axis=1)
print('标准差标准化之前销量和售价数据为：\n', detail[['counts', 'amounts']].head())
print('标准差标准化之后销量和售价数据为：\n', data6.head())


def decimalscaler(data):
    data = data / 10 ** np.ceil(np.log10(data.abs().max()))
    return data


data7 = decimalscaler(detail['counts'])
data8 = decimalscaler(detail['amounts'])
data9 = pd.concat([data7, data8], axis=1)
print('小数定标标准化之前的销量和售价数据：\n', detail[['counts', 'amounts']].head())
print('小数定标标准化之后的销量和售价数据：\n', data9.head())


def standardscaler(data):
    data = (data - data.mean()) / data.std()
    return data


data4 = standardscaler(detail['counts'])
data5 = standardscaler(detail['amounts'])
data6 = pd.concat([data4, data5], axis=1)
print('标准差标准化之后销量和售价数据：\n', data6.head(10))

detail = pd.read_csv('../data5/data 任务程序/detail.csv', encoding='gbk')
data = detail.loc[0:5, 'dishes_name']
print('哑变量处理前的数据为：\n', data)
print('哑变量处理后的数据为：\n', pd.get_dummies(data))

price = pd.cut(detail['amounts'], 5)
print('离散化后5条记录售价分布为：\n', price.value_counts())


def sameratecut(data, k):
    w = data.quantile(np.arange(0, 1 + 1.0 / k, 1.0 / k))
    data = pd.cut(data, w)
    return data


result = sameratecut(detail['amounts'], 5).value_counts()
print('菜品数据等频发离散化后各个类别数目分布状况为：\n', result)


def kmeancut(data, k):
    from sklearn.cluster import KMeans
    kmodel = KMeans(n_clusters=k)
    kmodel.fit(data.values.reshape((len(data), 1)))
    c = pd.DataFrame(kmodel.cluster_centers_).sort_values(0)
    w = c.rolling(2).mean().iloc[1:]
    w = [0] + list(w[0]) + [data.max()]
    data = pd.cut(data, w)
    return data


result = kmeancut(detail['amounts'], 5).value_counts()
print('菜品售价聚类离散化后各个类别数目分布状况为：\n', result)

data = detail.loc[:, 'dishes_name']
print('哑变量处理前的数据为：\n', data.iloc[:5])
print('哑变量处理后的数据为：\n', pd.get_dummies(data).iloc[:5, :5])


def sameratecut(data, k):
    w = data.quantile(np.arange(0, 1 + 1.0 / k, 1.0 / k))
    data = pd.cut(data, w)
    return data


result = sameratecut(detail['amounts'], 5).value_counts()
print('菜品数据等频发离散化后各个类别数目分布状况为：\n', result)

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/mrbooks.xls', usecols=['买家会员名', '收货地址'])
series = df['收货地址'].str.split('', expand=True)
df['省'] = series[0]
df['市'] = series[1]
df['区'] = series[2]
print(df.head())

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/mrbooks.xls', usecols=['买家会员名', '宝贝标题'])
df = df.join(df['宝贝标题'].str.split(',', expand=True))
print(df.head())

pd.set_option('display.unicode.east_asian_width', True)
df = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]})
print(df)
df[['b1', 'b2']] = df['b'].apply(pd.Series)
df = df.join(df['b'].apply(pd.Series))
print(df)

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/grade.xls')
df = df.set_index(['班级', '序号'])
df = df.stack()
print(df)

df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语2')
df = df.set_index(['班级', '序号', 'Unnamed: 2'])
print(df.unstack())

df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语3')
print(df.pivot(index='序号', columns='班级', values='得分'))

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语3')
print(df.pivot(index='序号', columns='班级', values='得分'))

df = pd.read_excel('../data5/input 补充知识/mrbooks.xls')
df1 = df.groupby(['宝贝标题'])['宝贝总数量'].sum().head()
mydict = df1.to_dict()
for i, j in mydict.items():
    print(i, '\t', j)

df = pd.read_excel('../data5/input 补充知识/mrbooks.xls')
df1 = df[['买家会员名']].head()
list1 = df1['买家会员名'].values.tolist()
for s in list1:
    print(s)

df = pd.read_excel('../data5/input 补充知识/fl4.xls')
df1 = df[['label1', 'label2']].head()
tuples = [tuple(x) for x in df1.values]
for t in tuples:
    print(t)
"""

pd.set_option('display.unicode.east_asian_width', True)
df1 = pd.DataFrame({
    '编号': ['mr001', 'mr002', 'mr003'],
    '语文': [110, 105, 109],
    '数学': [110, 105, 109],
    '英语': [110, 105, 109],
})
df2 = pd.DataFrame({
    '编号': ['mr001', 'mr002', 'mr003'],
    '体育': [34.5, 39.7, 38]
})
df_merge = pd.merge(df1, df2, on='编号')
# df_merge=pd.merge(df1,df2,right_index=True,left_index=True)
print(df_merge)

pd.set_option('display.unicode.east_asian_width', True)

detail = pd.read_csv('../data5/input 任务程序/detail.csv', index_col=0, encoding='gbk')


def minmaxscale(input):
    input = (input - input.min()) / (input.max() - input.min())
    return input


data1 = minmaxscale(detail['counts'])
data2 = minmaxscale(detail['amounts'])
data3 = pd.concat([data1, data2], axis=1)
print('离差标准化之前销量和售价数据为：\n', detail[['counts', 'amounts']].head())
print('离差标准化之后销量和售价数据为：\n', data3.head())


def standardscaler(input):
    input = (input - input.mean()) / input.std()
    return input


data4 = standardscaler(detail['counts'])
data5 = standardscaler(detail['amounts'])
data6 = pd.concat([data4, data5], axis=1)
print('标准差标准化之前销量和售价数据为：\n', detail[['counts', 'amounts']].head())
print('标准差标准化之后销量和售价数据为：\n', data6.head())


def decimalscaler(input):
    input = input / 10 ** np.ceil(np.log10(input.abs().max()))
    return input


data7 = decimalscaler(detail['counts'])
data8 = decimalscaler(detail['amounts'])
data9 = pd.concat([data7, data8], axis=1)
print('小数定标标准化之前的销量和售价数据：\n', detail[['counts', 'amounts']].head())
print('小数定标标准化之后的销量和售价数据：\n', data9.head())


def standardscaler(input):
    input = (input - input.mean()) / input.std()
    return input


data4 = standardscaler(detail['counts'])
data5 = standardscaler(detail['amounts'])
data6 = pd.concat([data4, data5], axis=1)
print('标准差标准化之后销量和售价数据：\n', data6.head(10))

detail = pd.read_csv('../data5/input 任务程序/detail.csv', encoding='gbk')
input = detail.loc[0:5, 'dishes_name']
print('哑变量处理前的数据为：\n', input)
print('哑变量处理后的数据为：\n', pd.get_dummies(input))

price = pd.cut(detail['amounts'], 5)
print('离散化后5条记录售价分布为：\n', price.value_counts())


def sameratecut(input, k):
    w = input.quantile(np.arange(0, 1 + 1.0 / k, 1.0 / k))
    input = pd.cut(input, w)
    return input


result = sameratecut(detail['amounts'], 5).value_counts()
print('菜品数据等频发离散化后各个类别数目分布状况为：\n', result)


def kmeancut(input, k):
    from sklearn.cluster import KMeans
    kmodel = KMeans(n_clusters=k)
    kmodel.fit(input.values.reshape((len(input), 1)))
    c = pd.DataFrame(kmodel.cluster_centers_).sort_values(0)
    w = c.rolling(2).mean().iloc[1:]
    w = [0] + list(w[0]) + [input.max()]
    input = pd.cut(input, w)
    return input


result = kmeancut(detail['amounts'], 5).value_counts()
print('菜品售价聚类离散化后各个类别数目分布状况为：\n', result)

input = detail.loc[:, 'dishes_name']
print('哑变量处理前的数据为：\n', input.iloc[:5])
print('哑变量处理后的数据为：\n', pd.get_dummies(input).iloc[:5, :5])


def sameratecut(input, k):
    w = input.quantile(np.arange(0, 1 + 1.0 / k, 1.0 / k))
    input = pd.cut(input, w)
    return input


result = sameratecut(detail['amounts'], 5).value_counts()
print('菜品数据等频发离散化后各个类别数目分布状况为：\n', result)

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/mrbooks.xls', usecols=['买家会员名', '收货地址'])
series = df['收货地址'].str.split('', expand=True)
df['省'] = series[0]
df['市'] = series[1]
df['区'] = series[2]
print(df.head())

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/mrbooks.xls', usecols=['买家会员名', '宝贝标题'])
df = df.join(df['宝贝标题'].str.split(',', expand=True))
print(df.head())

pd.set_option('display.unicode.east_asian_width', True)
df = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]})
print(df)
df[['b1', 'b2']] = df['b'].apply(pd.Series)
df = df.join(df['b'].apply(pd.Series))
print(df)

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/grade.xls')
df = df.set_index(['班级', '序号'])
df = df.stack()
print(df)

df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语2')
df = df.set_index(['班级', '序号', 'Unnamed: 2'])
print(df.unstack())

df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语3')
print(df.pivot(index='序号', columns='班级', values='得分'))

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('../data5/input 补充知识/grade.xls', sheet_name='英语3')
print(df.pivot(index='序号', columns='班级', values='得分'))

df = pd.read_excel('../data5/input 补充知识/mrbooks.xls')
df1 = df.groupby(['宝贝标题'])['宝贝总数量'].sum().head()
mydict = df1.to_dict()
for i, j in mydict.items():
    print(i, '\t', j)

df = pd.read_excel('../data5/input 补充知识/mrbooks.xls')
df1 = df[['买家会员名']].head()
list1 = df1['买家会员名'].values.tolist()
for s in list1:
    print(s)

df = pd.read_excel('../data5/input 补充知识/fl4.xls')
df1 = df[['label1', 'label2']].head()
tuples = [tuple(x) for x in df1.values]
for t in tuples:
    print(t)
"""
