import pandas as pd
import numpy as np
import re
from dateutil.relativedelta import relativedelta
from common_func_defs import *
from datetime import datetime, timedelta, date
from timefunc import *

####################################################### 私有函数定义 ########################################################

'''—————————————————————————————方法：输入日期字符串得到所属周序号—————————————————————————————'''
def get_week(date_str, begin=5):
    """Return the (iso_year, iso_week) pair for the week containing ``date_str``.

    A "week" here starts on weekday ``begin`` (1=Mon ... 7=Sun; default 5=Fri,
    because the reporting cycle runs from last Friday through this Thursday).
    ``date_str`` is 'YYYY-MM-DD'; any trailing time component is ignored.
    """
    # Keep only the date portion, then parse it.
    day = datetime.strptime(date_str[:10], '%Y-%m-%d')
    # Roll back to the most recent week-start day.
    # Note: .weekday() is 0-based Monday, while `begin` is 1-based, hence begin-1.
    shift = (day.weekday() - (begin - 1)) % 7
    week_start = day - timedelta(days=shift)
    # isocalendar() labels the rolled-back date with its Monday-based ISO year/week.
    iso_year, iso_week, _ = week_start.isocalendar()
    return iso_year, iso_week


'''—————————————————————————————方法：得到对应周的起始终止日期—————————————————————————————'''
def get_week_range(year_week, begin=5):
    """Return the 'YYYY-MM-DD~YYYY-MM-DD' span of a week.

    ``year_week`` is any string whose first two numbers are the ISO year and
    week (e.g. "(2024, 1)", the stringified output of get_week).  The week is
    taken to start on weekday ``begin`` (1=Mon ... 7=Sun; default 5=Fri).
    """
    # Pull the first two integers (year, week) out of the string.
    yr, wk = (int(tok) for tok in re.findall(r'\d+', year_week)[:2])

    # ISO week 1 always contains January 4th; shift Jan 4 to that week's
    # begin-day, then step forward (wk - 1) whole weeks.
    anchor = datetime(yr, 1, 4)
    week_one_start = anchor + timedelta(days=(begin - 1) - anchor.weekday())
    start = week_one_start + timedelta(weeks=wk - 1)
    end = start + timedelta(days=6)

    return f"{start:%Y-%m-%d}~{end:%Y-%m-%d}"


'''—————————————————————————————方法：从数据库取数—————————————————————————————'''
def get_data_from_mysql(session, begin_date: str, end_date: str, table_class, varlist: list = None):
    """Read rows of ``table_class`` with begin_date <= stat_time <= end_date.

    Args:
        session: an open SQLAlchemy session (caller owns its lifecycle).
        begin_date: inclusive lower bound on stat_time, 'YYYY-MM-DD'.
        end_date: inclusive upper bound on stat_time, 'YYYY-MM-DD'.
        table_class: the ORM model class to query.
        varlist: optional list of column names to keep; all columns when falsy.

    Returns:
        pandas.DataFrame of the matching rows.
    """
    query = session.query(table_class).filter(and_(table_class.stat_time >= begin_date,
                                                   table_class.stat_time <= end_date))
    df = pd.read_sql(query.statement, session.bind)
    # Fix: the mutable default argument `varlist=[]` is replaced with None;
    # the redundant if/else (both branches returned df) is collapsed.
    return df[varlist] if varlist else df



####################################################### 加工方法定义 ########################################################

'''—————————————————————————————方法：生成自播日、周、月、年统计表—————————————————————————————————————'''
def get_own_living_room_data(begin_date: str, end_date: str, unit: str):
    """Build the own-broadcast (自播) living-room statistics table.

    Reads the daily table for [begin_date, end_date] and either returns it
    as-is (unit == '日') or aggregates it into complete weeks / months / years.

    Args:
        begin_date: inclusive start date, 'YYYY-MM-DD'.
        end_date: inclusive end date, 'YYYY-MM-DD'.
        unit: statistics granularity, one of '日' / '周' / '月' / '年'.

    Returns:
        A DataFrame with Chinese field names, or None when there is no
        (complete) data in the range or ``unit`` is invalid.
    """
    # Pull the raw daily rows from MySQL.
    session = get_session()
    query = session.query(tmall_flagship_store_own_living_room_situ_day).filter(
        and_(tmall_flagship_store_own_living_room_situ_day.stat_time >= begin_date,
             tmall_flagship_store_own_living_room_situ_day.stat_time <= end_date))
    df = pd.read_sql(query.statement, session.bind)
    # Fix: close the session immediately — the old code leaked it whenever the
    # early `df.empty` return below was taken.
    session.close()

    # Drop bookkeeping columns if present.
    df = df.drop(columns=['crawl_time', 'create_time', 'id'], errors='ignore')

    # No data in range: nothing to report.
    if df.empty:
        return None
    df = df.fillna(np.nan)  # normalize None -> NaN so arithmetic doesn't raise

    df_grouped = df.copy()
    # Back out the store's total GMV so the share can be re-derived after aggregation.
    df_grouped['store_total_gmv'] = df_grouped['living_trans_amount'] / df_grouped['living_trans_amount_share_of_total']
    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # share == 0 -> div-by-zero inf -> 0

    def _keep_complete_periods(frame, adjust):
        """Clip `frame` to complete periods: first against the requested range,
        then against the range actually present in the DB (data may be partial)."""
        frame['stat_time_for_filter'] = pd.to_datetime(frame['stat_time'])
        lo, hi = adjust(begin_date, end_date)
        frame = frame[(frame['stat_time_for_filter'] >= lo) & (frame['stat_time_for_filter'] <= hi)]
        if frame.empty:
            return frame
        lo, hi = adjust(frame['stat_time_for_filter'].min().strftime('%Y-%m-%d'),
                        frame['stat_time_for_filter'].max().strftime('%Y-%m-%d'))
        return frame[(frame['stat_time_for_filter'] >= lo) & (frame['stat_time_for_filter'] <= hi)]

    if unit == '日':
        # Daily output needs no aggregation — clean up, sort and translate.
        df.replace([np.inf, -np.inf], 0, inplace=True)
        df = df.sort_values(by='stat_time', ascending=True)
        df = uploaded_field_corr_entozh_res(df, '天猫旗舰店自播直播间日、周、月、年统计表')
        return df

    elif unit == '周':
        df_grouped = _keep_complete_periods(df_grouped, week_adjust)
        if df_grouped.empty:
            return None
        # Week number, then its human-readable date range used as the group key.
        df_grouped['week_index'] = df_grouped['stat_time'].apply(str).apply(get_week)
        df_grouped['week_range'] = df_grouped['week_index'].apply(str).apply(get_week_range)
        groupby_list = ['week_range']

    elif unit == '月':
        df_grouped = _keep_complete_periods(df_grouped, month_adjust)
        if df_grouped.empty:
            return None
        df_grouped['month_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y-%m')
        groupby_list = ['month_index']

    elif unit == '年':
        df_grouped = _keep_complete_periods(df_grouped, find_largest_complete_years)
        if df_grouped.empty:
            return None
        df_grouped['year_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y')
        groupby_list = ['year_index']

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None

    # Sum the additive metrics; 'size' counts rows per group. (String aggregation
    # names replace the deprecated np.sum/np.size callables passed to .agg.)
    df_grouped = df_grouped.groupby(groupby_list, as_index=False).agg(
        {'stat_time': 'size', 'living_trans_amount': 'sum',
         'store_total_gmv': 'sum',
         'living_new_store_customer_count': 'sum', 'living_new_follower_count': 'sum',
         'living_new_member_count': 'sum', 'living_member_trans_amount': 'sum',
         'living_trans_user_count': 'sum', 'living_trans_member_count': 'sum',
         'living_trans_good_count': 'sum', 'living_member_trans_good_count': 'sum',
         'living_trans_order_count': 'sum', 'living_member_trans_order_count': 'sum',
         'living_all_good_click_user_count': 'sum', 'living_all_good_click_count': 'sum',
         'living_all_good_add_to_cart_user_count': 'sum',
         'living_all_good_add_to_cart_count': 'sum'}).reset_index(drop=True)
    df_grouped = df_grouped.rename(columns={'stat_time': 'row_count'})  # rows folded into each group
    # Whichever period key exists becomes the new stat_time.
    df_grouped = df_grouped.rename(columns={'week_range': 'stat_time', 'month_index': 'stat_time',
                                            'year_index': 'stat_time'})

    # Ratios cannot be summed — recompute the share from the aggregated totals.
    df_grouped['living_trans_amount_share_of_total'] = df_grouped['living_trans_amount'] / df_grouped['store_total_gmv']

    # The helper total is no longer needed.
    df_grouped = df_grouped.drop('store_total_gmv', axis=1, errors='ignore')

    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # div-by-zero -> 0
    df_grouped = df_grouped.sort_values(by='stat_time', ascending=True)
    df_grouped = uploaded_field_corr_entozh_res(df_grouped, '天猫旗舰店自播直播间日、周、月、年统计表')
    return df_grouped


'''—————————————————————————————方法：生成达播日、周、月、年统计表—————————————————————————————————————'''
def get_livestreamer_living_room_data(begin_date: str, end_date: str, unit: str):
    """Build the livestreamer (达播) living-room statistics table.

    Reads the daily table for [begin_date, end_date] and either returns it
    as-is (unit == '日') or aggregates it into complete weeks / months / years.

    Args:
        begin_date: inclusive start date, 'YYYY-MM-DD'.
        end_date: inclusive end date, 'YYYY-MM-DD'.
        unit: statistics granularity, one of '日' / '周' / '月' / '年'.

    Returns:
        A DataFrame with Chinese field names, or None when there is no
        (complete) data in the range or ``unit`` is invalid.
    """
    # Pull the raw daily rows from MySQL.
    session = get_session()
    query = session.query(tmall_flagship_store_livestreamer_living_room_situ_day).filter(
        and_(tmall_flagship_store_livestreamer_living_room_situ_day.stat_time >= begin_date,
             tmall_flagship_store_livestreamer_living_room_situ_day.stat_time <= end_date))
    df = pd.read_sql(query.statement, session.bind)
    # Fix: close the session immediately — the old code leaked it whenever the
    # early `df.empty` return below was taken.
    session.close()

    # Drop bookkeeping columns if present.
    df = df.drop(columns=['crawl_time', 'create_time', 'id'], errors='ignore')

    if df.empty:
        return None
    df = df.fillna(np.nan)  # normalize None -> NaN so arithmetic doesn't raise

    df_grouped = df.copy()
    # Back out the store's total GMV so the share can be re-derived after aggregation.
    df_grouped['store_total_gmv'] = df_grouped['living_trans_amount'] / df_grouped['living_trans_amount_share_of_total']
    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # share == 0 -> div-by-zero inf -> 0

    def _keep_complete_periods(frame, adjust):
        """Clip `frame` to complete periods: first against the requested range,
        then against the range actually present in the DB (data may be partial)."""
        frame['stat_time_for_filter'] = pd.to_datetime(frame['stat_time'])
        lo, hi = adjust(begin_date, end_date)
        frame = frame[(frame['stat_time_for_filter'] >= lo) & (frame['stat_time_for_filter'] <= hi)]
        if frame.empty:
            return frame
        lo, hi = adjust(frame['stat_time_for_filter'].min().strftime('%Y-%m-%d'),
                        frame['stat_time_for_filter'].max().strftime('%Y-%m-%d'))
        return frame[(frame['stat_time_for_filter'] >= lo) & (frame['stat_time_for_filter'] <= hi)]

    if unit == '日':
        # Daily output needs no aggregation — clean up, sort and translate.
        df.replace([np.inf, -np.inf], 0, inplace=True)
        df = df.sort_values(by='stat_time', ascending=True)
        df = uploaded_field_corr_entozh_res(df, '天猫旗舰店达播直播间日、周、月、年统计表')
        return df

    elif unit == '周':
        df_grouped = _keep_complete_periods(df_grouped, week_adjust)
        if df_grouped.empty:
            return None
        # Week number, then its human-readable date range used as the group key.
        df_grouped['week_index'] = df_grouped['stat_time'].apply(str).apply(get_week)
        df_grouped['week_range'] = df_grouped['week_index'].apply(str).apply(get_week_range)
        groupby_list = ['week_range']

    elif unit == '月':
        df_grouped = _keep_complete_periods(df_grouped, month_adjust)
        if df_grouped.empty:
            return None
        df_grouped['month_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y-%m')
        groupby_list = ['month_index']

    elif unit == '年':
        df_grouped = _keep_complete_periods(df_grouped, find_largest_complete_years)
        if df_grouped.empty:
            return None
        df_grouped['year_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y')
        groupby_list = ['year_index']

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None

    # Sum the additive metrics; 'size' counts rows per group. (String aggregation
    # names replace the deprecated np.sum/np.size callables passed to .agg.)
    df_grouped = df_grouped.groupby(groupby_list, as_index=False).agg(
        {'stat_time': 'size',
         'store_total_gmv': 'sum',
         'living_with_trans_rooms_count': 'sum',
         'living_trans_amount': 'sum',
         'living_new_store_customer_count': 'sum',
         'living_trans_user_count': 'sum',
         'living_trans_good_count': 'sum',
         'living_trans_order_count': 'sum',
         'living_all_good_click_user_count': 'sum',
         'living_all_good_click_count': 'sum',
         'living_all_good_add_to_cart_user_count': 'sum',
         'living_all_good_add_to_cart_count': 'sum'}).reset_index(drop=True)
    df_grouped = df_grouped.rename(columns={'stat_time': 'row_count'})  # rows folded into each group
    # Whichever period key exists becomes the new stat_time.
    df_grouped = df_grouped.rename(columns={'week_range': 'stat_time', 'month_index': 'stat_time',
                                            'year_index': 'stat_time'})

    # Ratios cannot be summed — recompute the share from the aggregated totals.
    df_grouped['living_trans_amount_share_of_total'] = df_grouped['living_trans_amount'] / df_grouped['store_total_gmv']

    # The helper total is no longer needed.
    df_grouped = df_grouped.drop('store_total_gmv', axis=1, errors='ignore')

    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # div-by-zero -> 0
    df_grouped = df_grouped.sort_values(by='stat_time', ascending=True)
    df_grouped = uploaded_field_corr_entozh_res(df_grouped, '天猫旗舰店达播直播间日、周、月、年统计表')
    return df_grouped


'''—————————————————————————————方法：生成达播商品日表数据———————————————————————————————————————————'''
def get_livestreamer_goods_data(begin_date: str, end_date: str):
    """Build the daily livestreamer (达播) goods statistics table.

    Joins the crawled per-good livestream table with the manually entered cost
    inputs and the hourly order table (for each good's average actual selling
    price), then derives commission, total cost, cost ratio and ROI.

    Args:
        begin_date: inclusive start date, 'YYYY-MM-DD'.
        end_date: inclusive end date, 'YYYY-MM-DD'.

    Returns:
        A DataFrame with Chinese field names, or None when no goods data
        exists in the range.
    """
    session = get_session()
    df_goods = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                                   table_class=tmall_flagship_store_livestreamer_living_room_good_day,
                                   varlist=['stat_time',
                                            'living_name', 'living_streamer_name', 'living_start_time',
                                            'living_times_id',
                                            'living_good_id', 'living_good_name', 'living_good_main_image_url',
                                            'living_good_main_image',
                                            'living_good_click_user_count', 'living_good_click_count',
                                            'living_good_add_to_cart_user_count',
                                            'living_good_add_to_cart_count', 'living_good_trans_amount',
                                            'living_good_trans_user_count', 'living_good_trans_count',
                                            'living_good_trans_order_count'])
    if df_goods.empty:
        session.close()  # fix: the old code leaked the session on this early return
        return None
    df_goods = df_goods.fillna(np.nan)

    # Distinct good ids, used to narrow the (large) hourly order query below.
    good_id_list = df_goods['living_good_id'].unique().tolist()

    # Manually maintained cost / mechanism inputs for the same goods.
    df_input = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                                   table_class=tmall_flagship_store_livestreamer_living_room_good_day_input,
                                   varlist=['stat_time', 'living_name', 'living_streamer_name', 'living_start_time',
                                            'living_times_id', 'living_good_id', 'living_good_cost_notes',
                                            'living_good_incl_tax_service_cost',
                                            'living_good_mechanism', 'living_good_commission_rate'])
    df_input = df_input.fillna(np.nan)

    # Orders are keyed by creation timestamp, so widen the bounds to whole days;
    # only the columns needed for price averaging are selected.
    order_hour_begin = begin_date + ' 00:00:00'
    order_hour_end = end_date + ' 23:59:59'
    query_order_hour = session.query(tmall_flagship_store_order_hour.order_creation_time,
                                     tmall_flagship_store_order_hour.good_id,
                                     tmall_flagship_store_order_hour.sixty_nine_code,
                                     tmall_flagship_store_order_hour.good_initial_price).filter(
        and_(tmall_flagship_store_order_hour.order_creation_time >= order_hour_begin,
             tmall_flagship_store_order_hour.order_creation_time <= order_hour_end,
             tmall_flagship_store_order_hour.good_id.in_(good_id_list)))
    df_order_hour = pd.read_sql(query_order_hour.statement, session.bind)

    session.close()

    df_goods['stat_date'] = pd.to_datetime(df_goods['stat_time'])  # join key against the order table

    # Average actual price per (day, good, 69-code) from the hourly orders.
    # ('mean' replaces the deprecated np.mean callable passed to .agg;
    # reset_index(drop=True) avoids a stray 'index' column.)
    df_order_hour['order_creation_time'] = pd.to_datetime(df_order_hour['order_creation_time'])
    df_order_hour['stat_date'] = pd.to_datetime(df_order_hour['order_creation_time'].dt.date)
    average_good_actual_price = df_order_hour.groupby(['stat_date', 'good_id', 'sixty_nine_code'],
                                                      as_index=False).agg({'good_initial_price': 'mean'}).reset_index(drop=True)
    average_good_actual_price = average_good_actual_price.rename(columns={'good_initial_price': 'mean_good_actual_price'})

    # Attach the average price, then the manual inputs.
    df = pd.merge(df_goods, average_good_actual_price, left_on=['stat_date', 'living_good_id'],
                  right_on=['stat_date', 'good_id'], how='left')
    df = pd.merge(df, df_input, on=['living_good_id', 'living_times_id', 'living_start_time',
                                    'living_streamer_name', 'living_name', 'stat_time'], how='left')

    # Derived metrics.
    df['good_living_price'] = df['living_good_trans_amount'] / df['living_good_trans_count']
    df['commission_amount'] = df['living_good_trans_amount'] * df['living_good_commission_rate']
    df['total_cost'] = df['living_good_incl_tax_service_cost'] + df['commission_amount']
    df['cost_ratio'] = df['total_cost'] / (df['mean_good_actual_price'] * df['living_good_trans_order_count'])
    df['roi'] = (df['mean_good_actual_price'] * df['living_good_trans_order_count']) / df['total_cost']

    # Keep only the reported columns.
    df = df[
        ['stat_time', 'living_streamer_name', 'living_name', 'living_times_id', 'living_start_time', 'living_good_id',
         'living_good_name', 'living_good_main_image', 'living_good_click_user_count', 'living_good_click_count',
         'living_good_add_to_cart_user_count', 'living_good_add_to_cart_count', 'living_good_trans_amount',
         'living_good_trans_user_count', 'living_good_trans_count', 'living_good_trans_order_count', 'sixty_nine_code',
         'living_good_cost_notes', 'living_good_incl_tax_service_cost', 'living_good_mechanism',
         'living_good_commission_rate', 'good_living_price', 'commission_amount', 'total_cost', 'cost_ratio', 'roi']
    ]

    df.replace([np.inf, -np.inf], 0, inplace=True)  # div-by-zero -> 0
    df = df.sort_values(by='stat_time', ascending=True)
    df = uploaded_field_corr_entozh_res(df, '天猫旗舰店达播商品日统计表')
    return df


'''—————————————————————————————方法：生成万相台日花费top20创意推广数据———————————————————————————————————————————'''
def get_wxt_top20_data(begin_date: str, end_date: str):
    """Build the daily Wanxiangtai (万相台) top-20-by-cost creative-promotion table.

    Joins the crawled metrics with creative images and the manually entered
    creative-kind inputs (万相台 channel only), then keeps the reported columns.

    Args:
        begin_date: inclusive start date, 'YYYY-MM-DD'.
        end_date: inclusive end date, 'YYYY-MM-DD'.

    Returns:
        A DataFrame with Chinese field names, or None when no data exists.
    """
    session = get_session()
    df = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                             table_class=wxt_cost_top_20_creativity_promotion_day,
                             varlist=['stat_time', 'creativity_id', 'creativity_name', 'entity_id', 'entity_kind',
                                      'entity_name', 'sixty_nine_code', 'good_series', 'good_product_combination',
                                      'good_actual_price', 'impression_count', 'click_count', 'cost',
                                      'click_through_rate', 'click_conversion_rate', 'total_trans_amount', 'roi',
                                      'cost_per_click', 'impression_cpm', 'total_presale_trans_amount',
                                      'total_presale_trans_order_count', 'direct_presale_trans_amount',
                                      'direct_presale_trans_order_count', 'indirect_presale_trans_amount',
                                      'indirect_presale_trans_order_count', 'direct_trans_amount',
                                      'indirect_trans_amount', 'total_trans_count', 'direct_trans_order_count',
                                      'indirect_trans_order_count', 'input_output_ratio', 'total_trans_cost',
                                      'total_good_add_to_cart_count', 'direct_good_add_to_cart_count',
                                      'indirect_good_add_to_cart_count', 'add_to_cart_rate', 'good_favorite_count',
                                      'store_favorite_count', 'store_favorite_cost',
                                      'total_favorite_and_add_to_cart_count', 'total_favorite_and_add_to_cart_cost',
                                      'good_favorite_and_add_to_cart_count', 'good_favorite_and_add_to_cart_cost',
                                      'total_favorite_count', 'good_favorite_cost', 'good_favorite_rate',
                                      'add_to_cart_cost', 'order_count', 'order_amount', 'direct_good_favorite_count',
                                      'indirect_good_favorite_count', 'get_coupon_count',
                                      'shopping_credit_recharge_count', 'shopping_credit_recharge_amount',
                                      'wangwang_consult_count', 'guide_visit_count', 'guide_visitor_count',
                                      'guide_visit_potential_customer_count', 'guide_visit_potential_customer_share',
                                      'new_member_rate', 'new_member_count', 'guide_visit_rate', 'deep_visit_count',
                                      'average_page_view_count', 'trans_user_new_customer_count',
                                      'trans_user_new_customer_share', 'member_first_purchase_member_count',
                                      'member_trans_amount', 'member_trans_order_count', 'trans_user_count',
                                      'trans_order_count_per_user', 'trans_amount_per_user'])
    if df.empty:
        session.close()  # fix: the old code leaked the session on this early return
        return None
    df = df.fillna(np.nan)

    # Creative images for the same ids/dates.
    query_img = session.query(wxt_creativity_promotion_creativity_img)
    df_img = pd.read_sql(query_img.statement, session.bind)
    df_img = df_img[['creativity_id', 'creativity_image_url', 'creativity_image', 'stat_time']]
    # Drop duplicate (creativity_id, date) image rows — mainly the 00000000 id —
    # otherwise the merge below would multiply records.
    df_img = df_img.drop_duplicates(subset=['creativity_id', 'stat_time'], keep='first')

    # Manually entered creative kinds, restricted to this channel.
    query_input = session.query(creativity_promotion_creativity_kind_input)
    df_input = pd.read_sql(query_input.statement, session.bind)
    df_input = df_input[['creativity_promotion_channel', 'creativity_id', 'creativity_image', 'creativity_image_url', 'creativity_kind']]
    df_input = df_input[df_input['creativity_promotion_channel'] == '万相台']
    session.close()

    # Constant descriptor columns.
    df = df.assign(creativity_delivery_platform='天猫')
    df = df.assign(promotion_channel='万相台')

    # Attach images, then creative kinds.
    df = pd.merge(df, df_img, on=['creativity_id', 'stat_time'], how='left')
    df = pd.merge(df, df_input, on=['creativity_id', 'creativity_image'], how='left')

    # Keep only the reported columns.
    df = df[
        ['stat_time', 'creativity_delivery_platform', 'promotion_channel', 'creativity_id', 'creativity_name', 'creativity_image',
         'creativity_kind', 'entity_id', 'entity_kind', 'entity_name', 'sixty_nine_code', 'good_series',
         'good_product_combination', 'good_actual_price', 'click_through_rate', 'click_conversion_rate', 'cost',
         'total_trans_amount', 'roi']
    ]

    df = df.sort_values(by='stat_time', ascending=True)
    df = uploaded_field_corr_entozh_res(df, '天猫旗舰店万相台花费前20创意推广日统计表')
    return df


'''—————————————————————————————方法：生成品销宝日花费top20创意推广数据———————————————————————————————————————————'''
def get_pxb_top20_data(begin_date: str, end_date: str):
    """Build the daily Pinxiaobao (品销宝) top-20-by-cost creative-promotion table.

    Joins the crawled metrics with creative images and the manually entered
    creative-kind inputs (品销宝 channel only), then keeps the reported columns.

    Args:
        begin_date: inclusive start date, 'YYYY-MM-DD'.
        end_date: inclusive end date, 'YYYY-MM-DD'.

    Returns:
        A DataFrame with Chinese field names, or None when no data exists.
    """
    session = get_session()
    df = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                             table_class=pxb_cost_top_20_creativity_promotion_day,
                             varlist=['stat_time', 'plan_name', 'unit_name', 'creativity_name', 'creativity_id',
                                      'impression_count', 'cost', 'impression_cpm', 'cost_per_click', 'click_count',
                                      'click_through_rate', 'good_favorite_count', 'good_add_to_cart_count',
                                      'trans_order_count', 'trans_amount', 'return_rate', 'conversion_rate'])
    if df.empty:
        session.close()  # fix: the old code leaked the session on this early return
        return None
    df = df.fillna(np.nan)

    # Creative images for the same ids/dates.
    query_img = session.query(pxb_creativity_promotion_creativity_img)
    df_img = pd.read_sql(query_img.statement, session.bind)
    df_img = df_img[['creativity_id', 'creativity_image_url', 'creativity_image', 'stat_time']]

    # Manually entered creative kinds, restricted to this channel.
    query_input = session.query(creativity_promotion_creativity_kind_input)
    df_input = pd.read_sql(query_input.statement, session.bind)
    df_input = df_input[['creativity_promotion_channel', 'creativity_id', 'creativity_image', 'creativity_image_url', 'creativity_kind']]
    df_input = df_input[df_input['creativity_promotion_channel'] == '品销宝']
    session.close()

    # Constant descriptor columns.
    df = df.assign(creativity_delivery_platform='天猫')
    df = df.assign(promotion_channel='品销宝')

    # Attach images, then creative kinds.
    df = pd.merge(df, df_img, on=['creativity_id', 'stat_time'], how='left')
    df = pd.merge(df, df_input, on=['creativity_id', 'creativity_image'], how='left')

    # Keep only the reported columns.
    df = df[
        ['stat_time', 'creativity_delivery_platform', 'promotion_channel', 'plan_name', 'unit_name', 'creativity_name',
         'creativity_id', 'creativity_image', 'creativity_kind', 'click_through_rate', 'conversion_rate', 'cost',
         'trans_amount', 'return_rate']
    ]

    df = df.sort_values(by='stat_time', ascending=True)
    df = uploaded_field_corr_entozh_res(df, '天猫旗舰店品销宝花费前20创意推广日统计表')
    return df




####################################################### 调用方法示意 ########################################################
if __name__ == '__main__':
    # Example invocations; unit may only be one of '日' / '周' / '月' / '年'.
    # Fix: guarded by __main__ so importing this module no longer fires a live
    # DB query as a side effect.
    # resdf = get_own_living_room_data('2023-01-10', '2024-02-15', '周')
    # resdf = get_livestreamer_living_room_data('2023-10-10', '2024-02-15', '日')
    # resdf = get_livestreamer_goods_data('2024-04-14', '2024-04-14')
    resdf = get_wxt_top20_data('2024-06-12', '2024-06-12')
    # resdf = get_pxb_top20_data('2023-10-01', '2024-10-31')