import pandas as pd
import pandas as pd
import time
import numpy as np
from common_func_defs import *
from part3_defs import *
from datetime import datetime, timedelta, date
import os


####################################################### 私有函数定义 ########################################################
'''—————————————————————————————方法：表全量更新—————————————————————————————'''
def allupdate_insert(df: pd.DataFrame, class_name):
    """Fully refresh a table: delete every existing row, then bulk-insert *df*.

    :param df: data to insert; columns must match the ORM model's fields.
               Mutated in place (NaN -> None).
    :param class_name: SQLAlchemy ORM model class of the target table.
    """
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)

    session = get_session()
    try:
        # One dict per row, keyed by column name, ready for the ORM constructor.
        records = df.to_dict(orient='records')

        # Wipe the existing table content; the delete and the inserts below
        # are committed together in one transaction.
        session.query(class_name).delete()

        objects = [class_name(**record) for record in records]
        session.add_all(objects)
        print('开始commit')
        start_time = time.time()  # measure commit duration
        session.commit()
        end_time = time.time()
        print('此次commit ：cost %f second' % (end_time - start_time))
        print('commit结束')
    finally:
        # Release the connection even if the commit raised.
        session.close()

'''—————————————————————————————方法：表直接插入更新—————————————————————————————'''
def add_insert(df: pd.DataFrame, class_name):
    """Append-only insert: bulk-insert *df* without touching existing rows.

    :param df: data to insert; columns must match the ORM model's fields.
               Mutated in place (NaN -> None).
    :param class_name: SQLAlchemy ORM model class of the target table.
    """
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)
    session = get_session()
    try:
        records = df.to_dict(orient='records')
        objects = [class_name(**record) for record in records]
        session.add_all(objects)
        print('开始commit')
        start_time = time.time()  # measure commit duration
        session.commit()
        end_time = time.time()
        print('此次commit ：cost %f second' % (end_time - start_time))
        print('commit结束')
    finally:
        # Release the connection even if the commit raised.
        session.close()


####################################################### 加工方法定义 ########################################################
'''—————————————————————————————方法：根据上传内容全量更新—————————————————————————————'''
def upload_allupdate_insert(upload_df: pd.DataFrame, table_zh_name, class_name):
    """Translate an uploaded table's Chinese headers and fully refresh the target table.

    :param upload_df: uploaded table with Chinese column headers.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table.
    """
    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)

    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)

    # Stamp both audit columns with the same instant.
    now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    df["upload_time"] = now_str
    df["create_time"] = now_str

    session = get_session()
    try:
        records = df.to_dict(orient='records')

        # Wipe the existing table content; delete + inserts commit together.
        session.query(class_name).delete()

        objects = [class_name(**record) for record in records]
        session.add_all(objects)
        print('开始commit')
        start_time = time.time()  # measure commit duration
        session.commit()
        end_time = time.time()
        print('此次commit ：cost %f second' % (end_time - start_time))
        print('commit结束')
    finally:
        # Release the connection even if the commit raised.
        session.close()


'''—————————————————————————————方法：根据上传内容按stattime刷新数据—————————————————————————————'''
def upload_nostattime_insert(upload_df: pd.DataFrame, table_zh_name, class_name, disable_null_chn_list):
    """Validate an uploaded table and refresh the target table per stat_time.

    Existing rows whose stat_time matches an uploaded date are deleted and
    replaced by the uploaded rows (delete + insert in one transaction).

    :param upload_df: uploaded table with Chinese column headers.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table; must
        expose a ``stat_time`` column.
    :param disable_null_chn_list: Chinese names of columns that must be present,
        non-empty and numeric.
    :raises ValueError: when required columns are missing, empty or non-numeric.
    """
    if not set(disable_null_chn_list).issubset(set(upload_df.columns.tolist())):
        raise ValueError('上传失败：缺少后续用于生成报表的数值列')

    def _has_no_empty_values(df, columns):
        # "Empty" means None/NaN, '' or a whitespace-only string.
        for column in columns:
            if df[column].isnull().any() or (df[column].astype(str).str.strip() == '').any():
                return False
        return True

    def _is_all_numeric(df, columns):
        for column in columns:
            # Values that cannot be coerced to a number become NaN.
            if pd.to_numeric(df[column], errors='coerce').isnull().any():
                return False
        return True

    # Validate the mandatory numeric columns.
    if len(disable_null_chn_list) != 0:
        if not _has_no_empty_values(upload_df, disable_null_chn_list):
            raise ValueError('上传失败：' + '或'.join(disable_null_chn_list) + '列存在空值')
        if not _is_all_numeric(upload_df, disable_null_chn_list):
            raise ValueError('上传失败：' + '或'.join(disable_null_chn_list) + '列存在非数值数据')

    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)

    # stat_time is mandatory and must parse as a date.
    if 'stat_time' not in df.columns:
        raise ValueError('上传失败：请检查数据表中字段是否符合模板要求（缺少数据统计时间列）')
    df['stat_time'] = pd.to_datetime(df['stat_time'])

    # Drop rows without a stat_time; they cannot be refreshed by date.
    df = df.dropna(subset=['stat_time'])

    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)

    session = get_session()
    try:
        # Delete existing rows for the uploaded dates, then insert the new
        # ones; both happen in one transaction, giving a per-date refresh.
        df['stat_time'] = df['stat_time'].astype(str)
        stat_time_ls = list(df["stat_time"].drop_duplicates())
        session.query(class_name).filter(class_name.stat_time.in_(stat_time_ls)).delete()

        # Insert in chronological order.
        df = df.sort_values(by='stat_time')

        # Stamp both audit columns with the same instant.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["upload_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        session.commit()
    finally:
        # Release the connection even if validation of the commit failed.
        session.close()




'''—————————————————————————————方法：根据上传内容按指定字段刷新数据—————————————————————————————'''
def upload_nospecifiedfield_insert(upload_df: pd.DataFrame, table_zh_name, class_name):
    """Refresh rows keyed by (creativity_promotion_channel, creativity_id,
    creativity_image_url): delete matching rows, then insert the upload.

    :param upload_df: uploaded table with Chinese column headers.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table.
    """
    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)
    # Key columns are captured before the NaN replacement below (copy semantics).
    dfquery = df[['creativity_promotion_channel', 'creativity_id', 'creativity_image_url']]
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)
    session = get_session()
    try:
        # Delete any existing row sharing the key triple, row by row.
        def delete_row(row):
            session.query(class_name).filter(
                class_name.creativity_promotion_channel == row['creativity_promotion_channel'],
                class_name.creativity_id == row['creativity_id'],
                class_name.creativity_image_url == row['creativity_image_url'],
            ).delete()
        dfquery.apply(delete_row, axis=1)

        # Stamp both audit columns with the same instant.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["upload_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        session.commit()
    finally:
        # Release the connection even if the commit raised.
        session.close()
def upload_nospecifiedfield_insert1(upload_df: pd.DataFrame, table_zh_name, class_name):
    """Refresh rows keyed by stat_time from a transposed upload (the '列名'
    column of the sheet holds the field names; data runs horizontally).

    :param upload_df: uploaded table; must contain a '列名' column.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table; must
        expose a ``stat_time`` column.
    """
    # Pivot the sheet so field names become columns.
    upload_df = upload_df.set_index('列名').transpose()
    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)
    dfquery = df[['stat_time']]
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)
    session = get_session()
    try:
        # Delete any existing row with the same stat_time, committing each
        # delete immediately (matches the original behavior).
        def delete_row(row):
            session.query(class_name).filter(
                class_name.stat_time == row['stat_time']
            ).delete()
            session.commit()
        dfquery.apply(delete_row, axis=1)

        # Stamp both audit columns with the same instant.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["crawl_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        session.commit()
    finally:
        # Release the connection even if the commit raised.
        session.close()



def upload_nospecifiedfield_insert2(upload_df: pd.DataFrame, table_zh_name, class_name):
    """Refresh rows keyed by (big_promo_year, big_promo_name) from a transposed
    upload (the '列名' column of the sheet holds the field names).

    :param upload_df: uploaded table; must contain a '列名' column.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table; must
        expose ``big_promo_year`` and ``big_promo_name`` columns.
    """
    # Pivot the sheet so field names become columns.
    upload_df = upload_df.set_index('列名').transpose()
    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)
    dfquery = df[['big_promo_year', 'big_promo_name']]
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)
    session = get_session()
    try:
        # Delete any existing row with the same key pair, committing each
        # delete immediately (matches the original behavior).
        def delete_row(row):
            session.query(class_name).filter(
                class_name.big_promo_year == row['big_promo_year'],
                class_name.big_promo_name == row['big_promo_name']
            ).delete()
            session.commit()
        dfquery.apply(delete_row, axis=1)

        # Stamp both audit columns with the same instant.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["crawl_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        session.commit()
    finally:
        # Release the connection even if the commit raised.
        session.close()


def upload_nospecifiedfield_insert3(upload_df: pd.DataFrame, table_zh_name, class_name, disable_null_chn_list):
    """Refresh rows keyed by (stat_time, living_times_id, living_good_id).

    A row is accepted only when the *disable_null_chn_list* columns are either
    all empty or all numeric; other rows are silently dropped before storage.

    :param upload_df: uploaded table with Chinese column headers.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the target table.
    :param disable_null_chn_list: Chinese names of the columns checked above.
    """
    def _rows_all_empty_or_all_numeric(df, columns):
        # Keep a row iff every listed column is null, or every listed column
        # can be coerced to a number. (The original per-column loop built a
        # mask that was then discarded; only this combined mask was used.)
        all_empty_mask = df[columns].isnull().all(axis=1)
        all_numeric_mask = df[columns].apply(
            lambda col: pd.to_numeric(col, errors='coerce')).notnull().all(axis=1)
        return df[all_empty_mask | all_numeric_mask]

    # Only well-formed rows (all empty or all numeric) are refreshed.
    upload_df = _rows_all_empty_or_all_numeric(upload_df, disable_null_chn_list)
    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)
    # Key columns are captured before the NaN replacement below (copy semantics).
    dfquery = df[['stat_time', 'living_times_id', 'living_good_id']]
    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)
    session = get_session()
    try:
        # Delete any existing row sharing the key triple, committing each
        # delete immediately (matches the original behavior).
        def delete_row(row):
            session.query(class_name).filter(
                class_name.stat_time == row['stat_time'],
                class_name.living_times_id == row['living_times_id'],
                class_name.living_good_id == row['living_good_id']
            ).delete()
            session.commit()
        dfquery.apply(delete_row, axis=1)

        # Stamp both audit columns with the same instant.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["upload_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        session.commit()
    finally:
        # Release the connection even if the commit raised.
        session.close()

'''—————————————————————————————方法：根据上传内容生成商品指标表good_indicator—————————————————————————————'''
def generate_69info():
    """Rebuild the good_indicator table from the product tables in the database.

    Joins good_product_combination with the price / code / giveaway tables,
    aggregates per 69-code (sixty_nine_code) to derive the series name, pack
    count, total pieces and the various price totals, then fully refreshes
    good_indicator via allupdate_insert.
    """
    engine = create_engine(DB_CONNECT)
    df_tp = pd.read_sql_query('SELECT * FROM good_product_combination', engine)
    df_tp.drop(['id', 'upload_time', 'create_time'], axis=1, inplace=True)
    #['product_combination', 'sixty_nine_code', 'product_code','product_bar_code', 'product_num']

    ### attach per-product prices ###
    # Join the price table onto the product-code table by barcode.
    shangpin_jiage = pd.read_sql_query('SELECT * FROM good_product_price', engine)
    shangpin_jiage.drop(['id', 'upload_time', 'create_time', 'product_code', 'product_name'], axis=1, inplace=True)

    df_type = pd.read_sql_query('SELECT * FROM good_product_code', engine)
    df_type.drop(['id', 'upload_time', 'create_time', 'product_code'], axis=1, inplace=True)

    shangpin_jiage = pd.merge(df_type, shangpin_jiage, on='product_bar_code', how='left')
    shangpin_jiage.fillna(0, inplace=True)  # products present in the code table but missing from the price table default to 0
    shangpin_jiage['product_series'].replace(0, '', inplace=True)

    df_tp = pd.merge(df_tp, shangpin_jiage, on='product_bar_code', how='left')  # ideally a complete code table should be uploaded
    del shangpin_jiage, df_type

    ### giveaway handling ###

    zengpin_jiage = pd.read_sql_query('SELECT * FROM good_giveaway_item_price', engine)
    zengpin_jiage.drop(['id', 'upload_time', 'create_time'], axis=1, inplace=True)

    # Flag rows that matched a giveaway item.
    zengpin_jiage['giveaway'] = True

    df_tp = pd.merge(df_tp, zengpin_jiage, left_on='product_bar_code', right_on='giveaway_item_bar_code', how='left')
    del zengpin_jiage

    # Total giveaway cost per 69-code: quantity * per-pack giveaway price,
    # summed over the combination, then merged back onto every row.
    df_tp['good_giveaway_item_price'] = df_tp['product_num'] * df_tp['giveaway_item_price_per_pack']
    df_giveaway_item_price = df_tp.groupby('sixty_nine_code').agg(
        good_giveaway_item_price=('good_giveaway_item_price', 'sum')).reset_index()
    df_tp = df_tp.drop("good_giveaway_item_price", axis=1)
    df_tp = pd.merge(df_tp, df_giveaway_item_price, on='sixty_nine_code', how='left')
    del df_giveaway_item_price

    df_tp['giveaway'].fillna(False, inplace=True)
    # Keep only the giveaway rows to build the combination description.
    df_giveaway_item_combination = df_tp[df_tp['giveaway'] == True].copy()

    # Describe each giveaway as "<quantity>*<product_code>", joined with '+'
    # across all giveaways of the same 69-code.
    df_giveaway_item_combination['good_giveaway_item_combination'] = df_giveaway_item_combination['product_num'].astype(str) + '*' + \
                                                                df_giveaway_item_combination['product_code']
    df_giveaway_item_combination = df_giveaway_item_combination[['sixty_nine_code', 'good_giveaway_item_combination']]
    df_giveaway_item_combination = df_giveaway_item_combination.groupby('sixty_nine_code')['good_giveaway_item_combination'].agg(lambda x: '+'.join(x))
    df_tp = pd.merge(df_tp, df_giveaway_item_combination, on='sixty_nine_code', how='left')
    del df_giveaway_item_combination

    ### derive the series and the aggregated metrics ###

    # good_series: the series of the product with the largest quantity in each
    # 69-code (rows with an empty series are excluded first).
    df_ttp = df_tp.loc[df_tp['product_series'] != '']
    # NOTE(review): groupby().transform('idxmax') maps every row of a group to
    # the index of that group's max product_num — confirm the installed pandas
    # version supports 'idxmax' inside transform.
    max_quantity_idx = df_ttp.groupby('sixty_nine_code')['product_num'].transform('idxmax')
    # Fetch the series value at that index for each row.
    df_ttp.loc[:, 'good_series'] = df_ttp.loc[max_quantity_idx, 'product_series'].values
    df_ttp = df_ttp[['sixty_nine_code', 'good_series']].drop_duplicates()

    df_tp = df_tp.merge(df_ttp, how='left', on='sixty_nine_code')
    del max_quantity_idx, df_ttp

    # good_pack_count: total number of packs per 69-code.
    df_pack_count = df_tp.groupby('sixty_nine_code')['product_num'].sum().reset_index()
    df_pack_count.columns = ['sixty_nine_code', 'good_pack_count']
    df_tp = pd.merge(df_tp, df_pack_count, on='sixty_nine_code', how='left')
    del df_pack_count

    # good_initial_price: billing price total (quantity * unit initial price).
    df_tp['good_initial_price'] = df_tp['product_num'] * df_tp['product_initial_price']
    df_initial_price = df_tp.groupby('sixty_nine_code').agg(
        good_initial_price=('good_initial_price', 'sum')).reset_index()
    df_tp = df_tp.drop("good_initial_price", axis=1)
    df_tp = pd.merge(df_tp, df_initial_price, on='sixty_nine_code', how='left')
    del df_initial_price

    # good_retail_price: retail price total.
    df_tp['good_retail_price'] = df_tp['product_num'] * df_tp['product_retail_price']
    df_retail_price = df_tp.groupby('sixty_nine_code').agg(good_retail_price=('good_retail_price', 'sum')).reset_index()
    df_tp = df_tp.drop("good_retail_price", axis=1)
    df_tp = pd.merge(df_tp, df_retail_price, on='sixty_nine_code', how='left')
    del df_retail_price

    # good_total_pieces: total pieces (quantity * pieces per pack).
    df_tp['good_total_pieces'] = df_tp['product_num'] * df_tp['product_pack_pieces']
    df_total_pieces = df_tp.groupby('sixty_nine_code').agg(good_total_pieces=('good_total_pieces', 'sum')).reset_index()
    df_tp = df_tp.drop("good_total_pieces", axis=1)
    df_tp = pd.merge(df_tp, df_total_pieces, on='sixty_nine_code', how='left')
    del df_total_pieces

    # good_big_promotion_price: big-promotion price total.
    df_tp['good_big_promotion_price'] = df_tp['product_num'] * df_tp['product_big_promotion_price']
    df_big_promotion_price = df_tp.groupby('sixty_nine_code').agg(
        good_big_promotion_price=('good_big_promotion_price', 'sum')).reset_index()
    df_tp = df_tp.drop("good_big_promotion_price", axis=1)
    df_tp = pd.merge(df_tp, df_big_promotion_price, on='sixty_nine_code', how='left')
    del df_big_promotion_price

    # good_small_promotion_price: daily-promotion price total.
    df_tp['good_small_promotion_price'] = df_tp['product_num'] * df_tp['product_small_promotion_price']
    df_small_promotion_price = df_tp.groupby('sixty_nine_code').agg(
        good_small_promotion_price=('good_small_promotion_price', 'sum')).reset_index()
    df_tp = df_tp.drop("good_small_promotion_price", axis=1)
    df_tp = pd.merge(df_tp, df_small_promotion_price, on='sixty_nine_code', how='left')
    del df_small_promotion_price

    # Collapse to one row per 69-code and write the indicator table.
    df_69_info = df_tp.loc[:, ['sixty_nine_code', 'product_combination', 'good_series',
                               'good_initial_price', 'good_retail_price',
                               'good_total_pieces', 'good_pack_count',
                               'good_big_promotion_price', 'good_small_promotion_price',
                               'good_giveaway_item_price', 'good_giveaway_item_combination'
                               ]].drop_duplicates()
    del df_tp
    df_69_info.rename(columns={'product_combination': 'good_product_combination'}, inplace=True)
    df_69_info['good_giveaway_item_price_ratio'] = df_69_info['good_giveaway_item_price'] / df_69_info['good_initial_price']
    df_69_info['good_giveaway_item_combination'].replace(np.nan, '-', inplace=True)  # combinations without giveaways get '-'
    df_69_info['create_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    df_69_info.dropna(inplace=True)
    allupdate_insert(df_69_info,good_indicator)


'''—————————————————————————————方法：处理上传的产品明细和价格体系表入库，生成商品指标表—————————————————————————————'''
def good_product_related_upload_process(upload_df_product_details,upload_df_price_system_jietingsheet,upload_df_price_system_paiyangzhuangsheet):
    """Split the uploaded product-detail / price-system workbooks into their DB
    tables, then regenerate the good_indicator table.

    :param upload_df_product_details: dict of sheets ('单品', '套餐') from the
        product-detail workbook (pd.read_excel(..., sheet_name=None)).
    :param upload_df_price_system_jietingsheet: '洁婷' sheet of the price-system workbook.
    :param upload_df_price_system_paiyangzhuangsheet: '派样装' sheet of the same workbook.
    :return: an error dict when regenerating the indicator table fails, else None.
    """
    # Product code table <- '单品' (single item) sheet.
    table_zh_name='产品代码表'
    good_product_code_df=upload_df_product_details['单品'][['款式编码','商家编码','商品名称']]
    good_product_code_df['商家编码'] = good_product_code_df['商家编码'].apply(lambda x: str(x).replace(' ', ''))
    upload_allupdate_insert(good_product_code_df, table_zh_name,good_product_code)

    # Product combination table <- '套餐' (bundle) sheet.
    table_zh_name='产品组合表'
    good_product_combination_df=upload_df_product_details['套餐'][['套餐名称','套餐编码','商品名称','商家编码','数量']]
    good_product_combination_df['商家编码'] = good_product_combination_df['商家编码'].apply(lambda x: str(x).replace(' ', ''))
    upload_allupdate_insert(good_product_combination_df, table_zh_name,good_product_combination)

    # Product price table <- '洁婷' sheet: the sheet carries four positional
    # '价格' columns which are renamed, in order, to the canonical price names.
    table_zh_name='产品价格表'
    price_rename_mapping = {
        0: '开单价',
        1: '日销价',
        2: '小促价',
        3: '大促价'
    }
    price_columns = [col for col in upload_df_price_system_jietingsheet.columns if '价格' in col]
    for i, col in enumerate(price_columns):
        upload_df_price_system_jietingsheet.rename(columns={col: price_rename_mapping[i]}, inplace=True)
    upload_df_price_system_jietingsheet['系列'] = upload_df_price_system_jietingsheet['系列'].str.replace('\n', '', regex=False)
    # The series name only appears on the first row of each merged-cell block.
    upload_df_price_system_jietingsheet['系列'].fillna(method='ffill', inplace=True)
    # Missing prices default to 0 in every column ending with '价'.
    price_like_cols = upload_df_price_system_jietingsheet.filter(regex='价$').columns
    upload_df_price_system_jietingsheet[price_like_cols] = upload_df_price_system_jietingsheet[price_like_cols].fillna(0)
    good_product_price_df=upload_df_price_system_jietingsheet[['系列', '代码', '条码', '名称', '包规', '零售价', '开单价', '日销价', '小促价', '大促价']]
    good_product_price_df['条码'] = good_product_price_df['条码'].apply(lambda x: str(x).replace(' ', ''))
    # Barcodes should be unique on upload; if duplicated, keep the last record.
    good_product_price_df = good_product_price_df.drop_duplicates(subset='条码', keep='last')
    upload_allupdate_insert(good_product_price_df, table_zh_name,good_product_price)

    # Giveaway price table <- '派样装' (sample pack) sheet.
    table_zh_name='赠品价格表'
    good_giveaway_item_price_df=upload_df_price_system_paiyangzhuangsheet[['款式编码','商品条码','派送装费用（包/元）','派送装名称']]
    good_giveaway_item_price_df['商品条码'] = good_giveaway_item_price_df['商品条码'].apply(lambda x: str(x).replace(' ', ''))
    upload_allupdate_insert(good_giveaway_item_price_df, table_zh_name,good_giveaway_item_price)

    # Regenerate the derived indicator table; report (don't raise) on failure.
    try:
        generate_69info()
    except Exception as e:
        print(e)  # surface the root cause for debugging instead of discarding it
        return {'code': 400, 'msg': '上传后生成商品指标表失败'}



'''—————————————————————————————方法：处理上传的抖音旗舰店直播间日表生成达播报表入库—————————————————————————————'''
def upload_nostattime_insert_and_process(upload_df: pd.DataFrame, table_zh_name, class_name, disable_null_chn_list):
    """Validate and store the DY flagship-store living-room daily detail table
    (refresh per stat_time), then build and store the derived livestreamer
    daily report (dy_flagship_store_living_room_day_stat) for the same dates.

    :param upload_df: uploaded table with Chinese column headers.
    :param table_zh_name: Chinese table name used to look up the header mapping.
    :param class_name: SQLAlchemy ORM model class of the detail table; must
        expose a ``stat_time`` column.
    :param disable_null_chn_list: Chinese names of columns that must be present,
        non-empty and numeric.
    :raises ValueError: when required columns are missing, empty or non-numeric.
    """
    if not set(disable_null_chn_list).issubset(set(upload_df.columns.tolist())):
        raise ValueError('上传失败：缺少后续用于生成报表的数值列')

    def _has_no_empty_values(df, columns):
        # "Empty" means None/NaN, '' or a whitespace-only string.
        for column in columns:
            if df[column].isnull().any() or (df[column].astype(str).str.strip() == '').any():
                return False
        return True

    def _is_all_numeric(df, columns):
        for column in columns:
            # Values that cannot be coerced to a number become NaN.
            if pd.to_numeric(df[column], errors='coerce').isnull().any():
                return False
        return True

    # Validate the mandatory numeric columns.
    if len(disable_null_chn_list) != 0:
        if not _has_no_empty_values(upload_df, disable_null_chn_list):
            raise ValueError('上传失败：' + '或'.join(disable_null_chn_list) + '列存在空值')
        if not _is_all_numeric(upload_df, disable_null_chn_list):
            raise ValueError('上传失败：' + '或'.join(disable_null_chn_list) + '列存在非数值数据')

    # Map Chinese column names to the English ORM field names.
    df = upload_field_corr(upload_df, table_zh_name)

    # stat_time is mandatory and must parse as a date.
    if 'stat_time' not in df.columns:
        raise ValueError('上传失败：请检查数据表中字段是否符合模板要求（缺少数据统计时间列）')
    df['stat_time'] = pd.to_datetime(df['stat_time'])

    # Drop rows without a stat_time; they cannot be refreshed by date.
    df = df.dropna(subset=['stat_time'])

    # Convert NaN to None so the database stores NULL instead of the string 'nan'.
    df.replace(np.nan, None, inplace=True)

    session = get_session()
    try:
        # Refresh detail rows: delete existing rows for the uploaded dates.
        df['stat_time'] = df['stat_time'].astype(str)
        stat_time_ls = list(df["stat_time"].drop_duplicates())
        session.query(class_name).filter(class_name.stat_time.in_(stat_time_ls)).delete()
        session.commit()
        print('已删除')

        # Insert in chronological order, stamping both audit columns once.
        df = df.sort_values(by='stat_time')
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        df["upload_time"] = now_str
        df["create_time"] = now_str

        records = df.to_dict(orient='records')
        session.add_all([class_name(**record) for record in records])
        print('开始上传明细表')
        session.commit()
        print('上传明细表成功')

        # Build the derived livestreamer report from the uploaded detail rows.
        df['sixty_nine_code'] = df['sixty_nine_code'].astype(str)
        outputdf = dy_flagship_store_living_room_day_stat_calcu(df, True)
        print('已生成对应的达播报表')

        # Refresh the report table for the same dates.
        outputdf['stat_time'] = outputdf['stat_time'].astype(str)
        stat_time_ls = list(outputdf["stat_time"].drop_duplicates())
        # BUGFIX: filter on the report table's own stat_time column; the old
        # code filtered on class_name.stat_time (the detail table's column),
        # so stale report rows for the uploaded dates were never removed.
        session.query(dy_flagship_store_living_room_day_stat).filter(
            dy_flagship_store_living_room_day_stat.stat_time.in_(stat_time_ls)).delete()
        session.commit()

        outputdf = outputdf.sort_values(by='stat_time')
        outputdf.replace(np.nan, None, inplace=True)
        outputdf["create_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        out_records = outputdf.to_dict(orient='records')
        session.add_all([dy_flagship_store_living_room_day_stat(**record) for record in out_records])
        session.commit()
    finally:
        # Release the connection even if any step above raised.
        session.close()





'''—————————————————————————————方法：展示上传模块中对应数据库表内容—————————————————————————————'''
def display_uploaded(class_name,table_zh_name:str):
    """Return the whole target table with Chinese column headers for display.

    :param class_name: SQLAlchemy ORM model class of the table to read.
    :param table_zh_name: Chinese table name used for the header mapping.
    :return: DataFrame without the create_time audit column.
    """
    session = get_session()
    try:
        query = session.query(class_name)
        df = pd.read_sql(query.statement, session.bind)
    finally:
        # Previously the session was never closed, leaking a connection per call.
        session.close()
    del df['create_time']
    # Map English ORM field names back to Chinese display names.
    df=uploaded_field_corr_entozh(df,table_zh_name)
    return df

def display_uploaded_ondate(class_name,table_zh_name:str,begin_date:str,end_date:str):
    """Return the target table's rows with stat_time in [begin_date, end_date].

    :param class_name: SQLAlchemy ORM model class; must expose ``stat_time``.
    :param table_zh_name: Chinese table name used for the header mapping.
    :param begin_date: inclusive lower bound, 'YYYY-MM-DD'.
    :param end_date: inclusive upper bound, 'YYYY-MM-DD'.
    :return: DataFrame without the create_time audit column.
    """
    session = get_session()
    try:
        query = session.query(class_name).filter(and_(class_name.stat_time >= begin_date,
                                                       class_name.stat_time <= end_date))
        df = pd.read_sql(query.statement, session.bind)
    finally:
        # Previously the session was never closed, leaking a connection per call.
        session.close()
    del df['create_time']
    # Map English ORM field names back to Chinese display names.
    df=uploaded_field_corr_entozh(df,table_zh_name)
    return df

def display_uploaded_oncondition(class_name,table_zh_name:str,big_promo_name:str,big_promo_year:int):
    """Return the target table's rows for one big-promotion (name, year) pair.

    :param class_name: SQLAlchemy ORM model class; must expose
        ``big_promo_name`` and ``big_promo_year``.
    :param table_zh_name: Chinese table name used for the header mapping.
    :param big_promo_name: promotion name, e.g. '双十一'.
    :param big_promo_year: promotion year, e.g. 2023.
    :return: DataFrame without the create_time audit column.
    """
    session = get_session()
    try:
        query = session.query(class_name).filter(and_(class_name.big_promo_name == big_promo_name,
                                                       class_name.big_promo_year == big_promo_year))
        df = pd.read_sql(query.statement, session.bind)
    finally:
        # Previously the session was never closed, leaking a connection per call.
        session.close()
    del df['create_time']
    # Map English ORM field names back to Chinese display names.
    df=uploaded_field_corr_entozh(df,table_zh_name)
    return df

def display_uploaded_screen(class_name,table_zh_name:str,screen_name,screen_value):
    """Return the target table filtered on one Chinese display column.

    The whole table is read first because the filter column (*screen_name*) is
    a Chinese display name that only exists after the header re-mapping.

    :param class_name: SQLAlchemy ORM model class of the table to read.
    :param table_zh_name: Chinese table name used for the header mapping.
    :param screen_name: Chinese column name to filter on.
    :param screen_value: value the column must equal.
    :return: filtered DataFrame without the create_time audit column.
    """
    session = get_session()
    try:
        query = session.query(class_name)
        df = pd.read_sql(query.statement, session.bind)
    finally:
        # Previously the session was never closed, leaking a connection per call.
        session.close()
    del df['create_time']
    # Map English ORM field names back to Chinese display names.
    df=uploaded_field_corr_entozh(df,table_zh_name)
    df=df[df[screen_name]==screen_value]
    return df



####################################################### 前端调用加工方法 ########################################################
'''—————————————————————————————功能：数据上传-输入→天猫旗舰店概况日表输入表—————————————————————————————'''
# Front-end input table: Tmall flagship store overview daily input table
upload_df=pd.read_excel('测试/天猫旗舰店概况日表输入表.xlsx') # stand-in; supplied by the front end
# Store in the DB (refresh rows per stat_time)
resinfo=upload_nostattime_insert(upload_df, table_zh_name='天猫旗舰店概况日表输入表' ,class_name=tmall_flagship_store_overview_day_input,disable_null_chn_list=['直播佣金比例','直播坑位费','刷单金额'])
# If resinfo is not None, return it to the user
# Read the stored data back for display
res_df=display_uploaded_ondate(tmall_flagship_store_overview_day_input,'天猫旗舰店概况日表输入表','2023-10-01','2024-01-01')


'''—————————————————————————————功能：数据上传-上传→抖音旗舰店直播间日表—————————————————————————————''' # revised 0314
# Front-end input table: DY flagship store living-room daily upload table
upload_df=pd.read_excel('测试/抖音旗舰店直播间日表上传表.xlsx') # stand-in; supplied by the front end
# Store in the DB and generate the derived livestreamer report
upload_nostattime_insert_and_process(upload_df, table_zh_name='抖音旗舰店直播间日表' ,class_name=dy_flagship_store_living_room_day,disable_null_chn_list=['直播时长(分钟)',
'最高在线人数',
'平均在线人数',
'直播间成交订单数',
'直播间观看次数',
'直播间成交件数',
'直播间退款金额',
'直播服务费',
'直播佣金',
])
# Read the stored data back for display
res_df = display_uploaded_ondate(dy_flagship_store_living_room_day,'抖音旗舰店直播间日表','2023-10-01','2024-01-01')



'''—————————————————————————————功能：数据上传-上传→天猫旗舰店竞店列表表—————————————————————————————'''
# Front-end input table: Tmall flagship competing-store list upload table
upload_df=pd.read_excel('测试/天猫旗舰店竞店列表表上传表.xlsx') # stand-in; supplied by the front end
# Store in the DB (full refresh)
upload_allupdate_insert(upload_df=upload_df,table_zh_name='天猫旗舰店竞店列表表', class_name=tmall_flag_competing_store_list)
# Read the stored data back for display
res_df = display_uploaded(tmall_flag_competing_store_list,'天猫旗舰店竞店列表表')


'''—————————————————————————————功能：数据上传-上传→竞品列表表—————————————————————————————'''
# Front-end input table: competing-brand list upload table
upload_df=pd.read_excel('测试/竞品列表表上传表.xlsx') # stand-in; supplied by the front end
# Store in the DB (full refresh)
upload_allupdate_insert(upload_df=upload_df,table_zh_name='竞品列表表', class_name=competing_brand_list)
# Read the stored data back for display
res_df = display_uploaded(competing_brand_list,'竞品列表表')


'''—————————————————————————————功能：数据上传-上传→产品明细价格体系表—————————————————————————————'''
# Front-end input workbooks (excel): product-detail upload, price-system upload
upload_df_product_details=pd.read_excel('测试/产品明细上传表.xlsx', sheet_name=None)
upload_df_price_system_jietingsheet=pd.read_excel('测试/价格体系上传表.xlsx', sheet_name='洁婷',header=2)
upload_df_price_system_paiyangzhuangsheet=pd.read_excel('测试/价格体系上传表.xlsx', sheet_name='派样装',header=6) # stand-ins; supplied by the front end
# Store in the DB (split across several tables, then regenerate good_indicator)
good_product_related_upload_process(upload_df_product_details,upload_df_price_system_jietingsheet,upload_df_price_system_paiyangzhuangsheet)
# Read the stored data back for display (these tables are derived from the upload)
res_df = display_uploaded(good_product_combination,'产品组合表')
res_df = display_uploaded(good_product_code,'产品代码表')
res_df = display_uploaded(good_product_price,'产品价格表')
res_df = display_uploaded(good_giveaway_item_price,'赠品价格表')
res_df = display_uploaded(good_indicator,'商品指标表')


'''—————————————————————————————功能：数据上传-输入→天猫旗舰店达播商品日表输入表—————————————————————————————''' # revised 0314
## After the user selects a date range, the system returns df1
begin_date='2024-01-01' # user-selected; front-end controlled
end_date='2024-01-31' # user-selected; front-end controlled
df1=display_uploaded_ondate(tmall_flagship_store_livestreamer_living_room_good_day_input,'天猫旗舰店达播商品日表输入表',begin_date,end_date)
## The user may only edit the last four columns: cost notes, service fee (incl.
## tax), living mechanism, commission rate (living_good_cost_notes,
## living_good_incl_tax_service_cost, living_good_mechanism, living_good_commission_rate)
## Assume '测试/天猫旗舰店达播商品日表输入表.xlsx' is df1 after the user's edits
upload_df=pd.read_excel('测试/天猫旗舰店达播商品日表输入表.xlsx')
upload_df.loc[len(upload_df)] = [''] * len(upload_df.columns)
upload_df=upload_df.iloc[-1:]
# Store in the DB (refresh per stat_time/living_times_id/living_good_id)
upload_nospecifiedfield_insert3(upload_df, table_zh_name='天猫旗舰店达播商品日表输入表' ,class_name=tmall_flagship_store_livestreamer_living_room_good_day_input,disable_null_chn_list=['服务费（含税）','直播佣金比例'])



'''—————————————————————————————功能：数据上传-输入→创意推广类型输入表—————————————————————————————'''
## After the user selects a promotion channel, the system returns df1
creativity_promotion_channel='万相台' # user-selected; front-end controlled
df1=display_uploaded_screen(creativity_promotion_creativity_kind_input,'创意推广类型输入表','创意推广渠道',creativity_promotion_channel)
## The user may only edit the last column, creativity_kind
## Assume '测试/创意推广类型输入表.xlsx' is df1 after the user's edits
upload_df=pd.read_excel('测试/创意推广类型输入表.xlsx')
# Store in the DB (refresh per channel/creativity_id/image_url)
upload_nospecifiedfield_insert(upload_df, table_zh_name='创意推广类型输入表' ,class_name=creativity_promotion_creativity_kind_input)


'''—————————————————————————————功能：数据上传-上传→创意类型表—————————————————————————————'''
# User-uploaded table: creativity-kind upload table
upload_df=pd.read_excel('测试/创意类型表上传表.xlsx') # user-uploaded; front-end controlled
# Store in the DB (full refresh)
upload_allupdate_insert(upload_df=upload_df,table_zh_name='创意类型表', class_name=creativity_promotion_creativity_kind)
# Read the stored data back for display
res_df = display_uploaded(creativity_promotion_creativity_kind,'创意类型表')


'''—————————————————————————————功能：数据上传-上传→大促区间表—————————————————————————————'''
# User-uploaded table: big-promotion duration upload table
upload_df=pd.read_excel('测试/大促区间表上传表.xlsx') # user-uploaded; front-end controlled
# Store in the DB (full refresh)
upload_allupdate_insert(upload_df=upload_df,table_zh_name='大促区间表', class_name=big_promotion_duration)
# Read the stored data back for display
res_df = display_uploaded(big_promotion_duration,'大促区间表')


'''—————————————————————————————功能：数据上传-上传→人群推广表—————————————————————————————''' # revised 0314
## A dimension drop-down is added on upload (values: big promotion, month)

## If the user selects the big-promotion dimension:
# User-uploaded table: population purchase/exposure big-promotion upload table
upload_df=pd.read_excel('测试/人群购买人群曝光人群大促表上传表.xlsx') # user-uploaded; front-end controlled
# Store in the DB (refresh per promo year/name)
upload_nospecifiedfield_insert2(upload_df=upload_df,table_zh_name='人群购买人群曝光人群大促表', class_name=pop_purpop_exppop_bigpromo)
# Read the stored data back for display
res_df = display_uploaded_oncondition(pop_purpop_exppop_bigpromo,'人群购买人群曝光人群大促表','双十一',2023)

## If the user selects the month dimension:
# User-uploaded table: population purchase/exposure monthly upload table
upload_df=pd.read_excel('测试/人群购买人群曝光人群月表上传表.xlsx') # user-uploaded; front-end controlled
# Store in the DB (refresh per stat_time)
upload_nospecifiedfield_insert1(upload_df=upload_df,table_zh_name='人群购买人群曝光人群月表', class_name=pop_purpop_exppop_month)
# Read the stored data back for display
res_df = display_uploaded_ondate(pop_purpop_exppop_month,'人群购买人群曝光人群月表','2021-01','2024-05')






# ##xsx ceshi
##向tmall_flagship_store_livestreamer_living_room_good_day_input插入数据
# session = get_session()
# query = session.query(tmall_flagship_store_livestreamer_living_room_good_day)
# df = pd.read_sql(query.statement, session.bind)
# df=df[['stat_time','living_name','living_streamer_name','living_start_time','living_times_id','living_good_id','living_good_name']]
# df["upload_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df["create_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df['living_good_cost_notes']=np.nan
# df['living_good_incl_tax_service_cost']=np.nan
# df['living_good_mechanism']=np.nan
# df['living_good_commission_rate']=np.nan
# allupdate_insert(df,tmall_flagship_store_livestreamer_living_room_good_day_input)
#
##向tmall_flagship_store_livestreamer_living_room_good_day_input插入万相台数据
# session = get_session()
# query = session.query(wxt_cost_top_20_creativity_promotion_day_temp)
# df = pd.read_sql(query.statement, session.bind)[['creativity_id']].drop_duplicates(subset='creativity_id', keep='first') #一个creativity_id的数据只有一个
# query = session.query(wxt_creativity_promotion_creativity_img)
# df2= pd.read_sql(query.statement, session.bind)[['creativity_id','creativity_image_url','creativity_image']].drop_duplicates(subset=['creativity_id','creativity_image_url'], keep='first') #一个creativity_id的图片可能有多个
# df=pd.merge(df,df2,on='creativity_id',how='left')
# df["upload_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df["create_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df['creativity_promotion_channel']='万相台'
# df['creativity_kind']=np.nan
# allupdate_insert(df,creativity_promotion_creativity_kind_input)
#
# #向tmall_flagship_store_livestreamer_living_room_good_day_input插入品销宝数据
# session = get_session()
# query = session.query(pxb_cost_top_20_creativity_promotion_day)
# df = pd.read_sql(query.statement, session.bind)[['creativity_id']].drop_duplicates(subset='creativity_id', keep='first')
# query = session.query(pxb_creativity_promotion_creativity_img)
# df2= pd.read_sql(query.statement, session.bind)[['creativity_id','creativity_image_url','creativity_image']].drop_duplicates(subset=['creativity_id','creativity_image_url'], keep='first')
# df=pd.merge(df,df2,on='creativity_id',how='left')
# df["upload_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df["create_time"] = (datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
# df['creativity_promotion_channel']='品销宝'
# df['creativity_kind']=np.nan
# add_insert(df,creativity_promotion_creativity_kind_input)