import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import LSTM, Dense
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules
from sklearn.model_selection import train_test_split

matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so Chinese axis labels render


### Data cleaning ###
# Raw string: the original ".\m..." literal relies on "\m" not being an
# escape sequence, which raises SyntaxWarning on modern Python.
df = pd.read_excel(r".\meal_order_detail.xlsx")
fm = df.head(30)  # quick peek at the first rows (kept for interactive inspection)

# Strip stray whitespace from the column names
df.columns = [c.strip() for c in df.columns.values]

# Drop duplicate rows in place.  (The original also called df.duplicated()
# twice and discarded the results — dead statements, removed.)
df.drop_duplicates(inplace=True)

# Parse the order timestamp into a proper datetime column
df['place_order_time'] = pd.to_datetime(df['place_order_time'])

# Average spend per order
# Total spent per order: one row per order_id with its summed amounts.
sum_by_customer = (
    df.groupby('order_id', as_index=False)['amounts']
      .sum()
      .rename(columns={'amounts': 'total_spent'})
)
#print(sum_by_customer)

# Overall average = total revenue / number of distinct orders
total_consumption = sum(df['amounts'])
average_consumption = total_consumption / len(sum_by_customer)
#print("平均消费额为：", average_consumption)


### Data analysis ###
# 1. Sales-trend analysis: time-series views to spot peaks and troughs.

order_day = df['place_order_time'].dt.day
df_sales = df.groupby(order_day)['counts'].sum()             # dishes sold per day
df_dishes_sales = df.groupby('dishes_name')['counts'].sum()  # total sold per dish
df_sales_amounts = df.groupby(order_day)['amounts'].sum()    # revenue per day

# Dishes ordered per day
plt.bar(df_sales.index, df_sales.values)
plt.xlabel('日期')
plt.ylabel('销售总量')
#plt.show()

# Revenue per day
plt.bar(df_sales_amounts.index, df_sales_amounts.values, facecolor="green")
plt.xlabel('日期')
plt.ylabel('销售总金额')
#plt.show()

# Bucket every order into a 30-minute slot of the day
df['time'] = df['place_order_time'].dt.floor('30T').dt.time
t_c = df.groupby('time').agg({'order_id': 'count'})

# Orders per time-of-day slot
plt.figure(figsize=(12, 4))
slot_positions = range(t_c.shape[0])
plt.plot(slot_positions, t_c['order_id'])
plt.xticks(slot_positions, t_c.index, rotation=90)
plt.title('时段VS销量')
#plt.show()


#3. Dish popularity (most-ordered dishes)
# Count how many times each dish was ordered.  The original grouped by
# place_order_time despite its own comment saying "group by name", which
# produced a per-timestamp total instead of a per-dish one.
df_dishes_amounts = df.groupby('dishes_name')['counts'].sum()
# Sort descending so the most popular dishes come first
df_dishes_amounts_sorted = df_dishes_amounts.sort_values(ascending=False)
# Print the top 10 if needed:
#print(df_dishes_amounts_sorted.head(10))

#4.组合分析：利用关联规则挖掘算法（如Apriori算法）分析菜品组合模式。
# 根据下单时间，转换为午餐/晚餐信息
# 午餐为0，晚餐为1
def get_meal_type(order_time):
    hour = order_time.hour
    return 0 if hour < 16 else 1

# place_order_time was already parsed in the cleaning step; the original
# re-ran pd.to_datetime here — a redundant second conversion, removed.
df['meal_type'] = df['place_order_time'].apply(get_meal_type)

# One transaction per order: the order's dish ids plus its meal-type flag
# (0 = lunch, 1 = dinner) taken from the order's first row.
transactions = df.groupby('order_id').apply(lambda x: x['dishes_id'].tolist() + [x['meal_type'].iloc[0]])

# Raw string avoids the invalid "\o" escape warning of the original literal.
transactions.to_excel(r'.\output_meal_order_detail.xlsx', index=False)
transactions = transactions.to_list()

# One-hot encode the transactions for apriori
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df_trans = pd.DataFrame(te_ary, columns=te.columns_)

# Frequent itemsets occurring in at least 1% of the orders
frequent_itemsets = apriori(df_trans, min_support=0.01, use_colnames=True)

# Association rules with confidence >= 0.5
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)

# Export the rules if needed:
#rules.to_excel(r'.\output_meal_order_detail.xlsx', index=False)

#2. Forecasting future sales
# Calendar day-of-month as a feature
df['day'] = df['place_order_time'].dt.day
# Target: total revenue of the day each row belongs to
df['day_amounts'] = df.groupby(df['day'])['amounts'].transform('sum')

# Drop the raw per-row amounts column (day_amounts now carries the target)
df = df.drop(columns=['amounts'])

# Chronological split: first 70% train, last 30% test.  The original left
# shuffle at its default (True), which randomly mixes past and future rows —
# contrary to its own comment and wrong for a time series.
train_df, test_df = train_test_split(df, test_size=0.3, shuffle=False)

# 创建训练和测试数据集
def create_dataset(data, seq_length):
    """Build sliding-window samples for a sequence model.

    Parameters
    ----------
    data : 1-D sequence (list, ndarray, or pandas Series)
        Series to window.
    seq_length : int
        Number of consecutive values per input window.

    Returns
    -------
    (X, y) : tuple of np.ndarray
        X has shape (num_samples, seq_length); y[i] is the value that
        immediately follows window X[i].
    """
    # Convert up front: on a pandas Series with a non-default index
    # (e.g. train_test_split output), scalar indexing data[i + seq_length]
    # is a *label* lookup and raises KeyError / returns the wrong value.
    values = np.asarray(data)
    X, y = [], []
    # NOTE: the "- 1" skips the final available window; kept to preserve
    # the original sample count for downstream consumers.
    for i in range(len(values) - seq_length - 1):
        X.append(values[i:i + seq_length])
        y.append(values[i + seq_length])
    return np.array(X), np.array(y)


# Window length; also fixes the NameError in the original, which referenced
# an undefined module-level `seq_length` when building the model.
SEQ_LENGTH = 30

train_X, train_y = create_dataset(train_df['day_amounts'], seq_length=SEQ_LENGTH)
test_X, test_y = create_dataset(test_df['day_amounts'], seq_length=SEQ_LENGTH)

# Keep the raw training statistics so predictions can later be mapped back
# to the original scale.  The original de-normalised with the stats of the
# *already normalised* targets (~0 and ~1), i.e. effectively a no-op.
x_mean, x_std = np.mean(train_X), np.std(train_X)
y_mean, y_std = np.mean(train_y), np.std(train_y)

# Normalise everything with the *training* statistics; the original used
# per-split statistics, putting train and test on inconsistent scales.
train_X = (train_X - x_mean) / x_std
train_y = (train_y - y_mean) / y_std
test_X = (test_X - x_mean) / x_std
test_y = (test_y - y_mean) / y_std

# Keras LSTM expects 3-D input: (samples, timesteps, features).
train_X = train_X.reshape((train_X.shape[0], SEQ_LENGTH, 1))
test_X = test_X.reshape((test_X.shape[0], SEQ_LENGTH, 1))

# LSTM regression model
model = Sequential()
model.add(LSTM(50, input_shape=(SEQ_LENGTH, 1)))  # 50 LSTM units
model.add(Dense(1))  # single continuous output (sales amount)
model.compile(loss='mean_squared_error', optimizer='adam')

# Train
model.fit(train_X, train_y, epochs=100, batch_size=32, verbose=1)

# Predict and map back to the original scale
predicted_sales = model.predict(test_X).flatten()
predicted_sales = predicted_sales * y_std + y_mean

# Plot actual vs predicted.  Windowing drops the first SEQ_LENGTH targets
# (plus one trailing sample), so align the actual series with the number
# of predictions — the original plotted full-length test_df columns against
# the shorter prediction array, which raises a length-mismatch error.
actual = test_df['day_amounts'].to_numpy()[SEQ_LENGTH:SEQ_LENGTH + len(predicted_sales)]
plt.plot(range(len(actual)), actual, label='Actual')
plt.plot(range(len(predicted_sales)), predicted_sales, label='Predicted')
plt.legend()
plt.show()