import re
import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
from sqlalchemy import and_
from dateutil.relativedelta import relativedelta
from app.utils.common_func_defs import *
from app.models.data_base_models import *
from app.services import get_base_session as get_session

####################################################### 私有函数定义 ########################################################
'''—————————————————————————————方法：输入日期字符串得到所属周序号—————————————————————————————'''


def get_week(date_str, begin=5):
    """Return the (ISO year, ISO week) of the week containing *date_str*.

    Weeks are treated as starting on weekday *begin* (1 = Monday ... 7 =
    Sunday; default 5 = Friday, because the reporting cycle runs from last
    Friday through this Thursday).  The date is shifted back to the most
    recent week-start day and that day's ISO calendar year/week is returned.

    :param date_str: 'YYYY-MM-DD' (a longer datetime string is truncated)
    :param begin: weekday the week starts on, 1-7
    :return: (iso_year, iso_week) tuple of ints
    """
    # Accept full datetime strings by keeping only the date part.
    day = datetime.strptime(date_str[:10], '%Y-%m-%d')
    # Days elapsed since the configured start-of-week (0..6);
    # .weekday() counts Monday as 0, while `begin` counts Monday as 1.
    shift = (day.weekday() - (begin - 1)) % 7
    start_of_week = day - timedelta(days=shift)
    # .isocalendar() of the week-start day fixes both the ISO year and week.
    iso_year, iso_week, _ = start_of_week.isocalendar()
    return iso_year, iso_week


'''—————————————————————————————方法：得到对应周的起始终止日期—————————————————————————————'''


def get_week_range(year_week, begin=5):
    """Return 'YYYY-MM-DD~YYYY-MM-DD' covering the week encoded in *year_week*.

    *year_week* is any string containing two integers — the ISO year followed
    by the week number, e.g. "(2024, 1)" as produced by str(get_week(...)).
    Weeks start on weekday *begin* (default Friday), matching get_week(), so
    this function is its inverse.

    :param year_week: string containing the ISO year and week number
    :param begin: weekday the week starts on, 1-7
    :return: 'start~end' date-range string (end = start + 6 days)
    """
    nums = re.findall(r'\d+', year_week)
    year, week = int(nums[0]), int(nums[1])

    # January 4th always falls inside ISO week 1; shift to that week's
    # start day, then jump forward (week - 1) whole weeks.
    anchor = datetime(year, 1, 4)
    week_start = anchor + timedelta(days=(begin - 1) - anchor.weekday(),
                                    weeks=week - 1)
    week_end = week_start + timedelta(days=6)
    return week_start.strftime('%Y-%m-%d') + '~' + week_end.strftime('%Y-%m-%d')


'''—————————————————————————————方法：从数据库取数—————————————————————————————'''


def get_data_from_mysql(session, begin_date: str, end_date: str, table_class, varlist: list = None):
    """Read rows of *table_class* with stat_time in [begin_date, end_date].

    :param session: SQLAlchemy session used for the query
    :param begin_date: inclusive lower bound for stat_time, 'YYYY-MM-DD'
    :param end_date: inclusive upper bound for stat_time, 'YYYY-MM-DD'
    :param table_class: ORM model class to query
    :param varlist: optional list of column names to keep; None/empty keeps all
    :return: pandas DataFrame with the selected rows (and columns)
    """
    # Fix: the default used to be a mutable `[]` (shared across calls);
    # None is the safe sentinel and behaves identically for callers.
    query = session.query(table_class).filter(and_(table_class.stat_time >= begin_date,
                                                   table_class.stat_time <= end_date))
    df = pd.read_sql(query.statement, session.bind)
    if varlist:
        df = df[varlist]
    return df


####################################################### 加工方法定义 ########################################################
'''—————————————————————————————方法：生成天猫四种推广日周月年表（推广报表加工）—————————————————————————————'''


def get_tmall_promotion_data(begin_date: str, end_date: str, unit: str, promotion_channel: str = ''):
    """Build the Tmall flagship-store four-channel promotion report.

    Loads daily rows from the four promotion tables (ZTC "直通车", YLMF
    "引力魔方", WXT "万相台", PXB "品销宝") between *begin_date* and
    *end_date*, merges them into one frame, then either returns the daily
    detail (unit == '日') or aggregates per week / month / year.  Column
    headers are translated to Chinese before returning.

    :param begin_date: inclusive start date, 'YYYY-MM-DD'
    :param end_date: inclusive end date, 'YYYY-MM-DD'
    :param unit: granularity, one of '日' / '周' / '月' / '年'
    :param promotion_channel: optional single-channel filter (one of the four
        channel names above); empty string keeps every channel
    :return: DataFrame with Chinese headers, or None when no rows match or
        *unit* is invalid
    """
    # Load source data, one query per promotion channel.
    session = get_session()

    query_ztc = session.query(tmall_flagship_store_ztc_promotion_day).filter(
        and_(tmall_flagship_store_ztc_promotion_day.stat_time >= begin_date,
             tmall_flagship_store_ztc_promotion_day.stat_time <= end_date))
    df_ztc = pd.read_sql(query_ztc.statement, session.bind)
    df_ztc = df_ztc.fillna(np.nan)  # convert None to NaN so arithmetic does not raise
    df_ztc['promotion_channel'] = '直通车'
    df_ztc = df_ztc.rename(columns={'input_output_ratio': 'roi'})

    query_ylmf = session.query(tmall_flagship_store_ylmf_promotion_day).filter(
        and_(tmall_flagship_store_ylmf_promotion_day.stat_time >= begin_date,
             tmall_flagship_store_ylmf_promotion_day.stat_time <= end_date))
    df_ylmf = pd.read_sql(query_ylmf.statement, session.bind)
    df_ylmf = df_ylmf.fillna(np.nan)  # convert None to NaN so arithmetic does not raise
    df_ylmf['promotion_channel'] = '引力魔方'
    df_ylmf = df_ylmf.rename(columns={'input_output_ratio': 'roi'})

    query_wxt = session.query(tmall_flagship_store_wxt_promotion_day).filter(
        and_(tmall_flagship_store_wxt_promotion_day.stat_time >= begin_date,
             tmall_flagship_store_wxt_promotion_day.stat_time <= end_date))
    df_wxt = pd.read_sql(query_wxt.statement, session.bind)
    df_wxt = df_wxt.fillna(np.nan)  # convert None to NaN so arithmetic does not raise
    df_wxt['promotion_channel'] = '万相台'
    df_wxt = df_wxt.rename(columns={'input_output_ratio': 'roi'})

    query_pxb = session.query(tmall_flagship_store_pxb_promotion_day).filter(
        and_(tmall_flagship_store_pxb_promotion_day.stat_time >= begin_date,
             tmall_flagship_store_pxb_promotion_day.stat_time <= end_date))
    df_pxb = pd.read_sql(query_pxb.statement, session.bind)
    df_pxb = df_pxb.fillna(np.nan)  # convert None to NaN so arithmetic does not raise
    df_pxb['promotion_channel'] = '品销宝'
    # PXB uses different source column names; normalise them to the shared schema.
    df_pxb = df_pxb.rename(columns={'conversion_rate': 'click_conversion_rate', 'return_rate': 'roi'})

    session.close()

    # Merge the data
    df = pd.concat([df_pxb, df_ztc, df_ylmf, df_wxt], axis=0).reset_index(drop=True)  # concatenate the four tables

    if df.empty:
        return None

    df = df[['stat_time', 'promotion_channel', 'impression_count', 'click_count', 'click_through_rate', 'cost',
             'cost_per_click', 'total_trans_amount', 'total_trans_order_count', 'click_conversion_rate',
             'total_good_add_to_cart_count', 'good_favorite_count', 'store_favorite_count', 'roi']]  # keep reporting columns

    # Optionally restrict the report to a single channel.
    if promotion_channel:
        df = df[df['promotion_channel'] == promotion_channel]

    df_grouped = df

    # Produce the output at the requested granularity.
    if unit == '日':
        # Numeric formatting (rounding).
        df.replace([np.inf, -np.inf], 0, inplace=True)  # clear inf produced by division by zero
        df['click_count'] = df['click_count'].round(0)
        df['click_through_rate'] = df['click_through_rate'].round(2)
        df['cost'] = df['cost'].round(2)
        df['cost_per_click'] = df['cost_per_click'].round(2)
        df['total_trans_amount'] = df['total_trans_amount'].round(2)
        df['total_trans_order_count'] = df['total_trans_order_count'].round(0)
        df['click_conversion_rate'] = df['click_conversion_rate'].round(2)
        df['total_good_add_to_cart_count'] = df['total_good_add_to_cart_count'].round(0)
        df['good_favorite_count'] = df['good_favorite_count'].round(0)
        df['store_favorite_count'] = df['store_favorite_count'].round(0)
        df['roi'] = df['roi'].round(2)

        # Translate column headers to Chinese.
        df = uploaded_field_corr_entozh_res(df, '天猫旗舰店四种推广日、周、月、年统计表')
        df = df.fillna('nan')
        return df

    elif unit == '周':
        # Tag each row with its week index and the week's date range.
        df_grouped['week_index'] = df_grouped['stat_time'].apply(str).apply(get_week)
        df_grouped['week_range'] = df_grouped['week_index'].apply(str).apply(get_week_range)
        groupby_list = ['week_range', 'promotion_channel']

    elif unit == '月':
        # Tag each row with its 'YYYY-MM' month index.
        df_grouped['month_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y-%m')
        groupby_list = ['month_index', 'promotion_channel']

    elif unit == '年':
        # Tag each row with its 'YYYY' year index.
        df_grouped['year_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y')
        groupby_list = ['year_index', 'promotion_channel']

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None

    # Sum the additive metrics per (period, channel); np.size on stat_time
    # counts how many daily rows were folded into each group.
    df_grouped = df_grouped.groupby(groupby_list, as_index=False).agg(
        {'stat_time': np.size, 'impression_count': np.sum, 'click_count': np.sum, 'cost': np.sum,
         'total_trans_amount': np.sum, 'total_trans_order_count': np.sum, 'total_good_add_to_cart_count': np.sum,
         'good_favorite_count': np.sum, 'store_favorite_count': np.sum}).reset_index()
    df_grouped = df_grouped.rename(columns={'stat_time': 'row_count'})  # rename: row_count = rows merged per group
    df_grouped = df_grouped.rename(columns={'week_range': 'stat_time', 'month_index': 'stat_time',
                                            'year_index': 'stat_time'})  # only one of these exists; it becomes stat_time

    # Ratio metrics must be recomputed from the summed components
    # (averaging the daily ratios would be wrong).
    df_grouped['click_through_rate'] = df_grouped['click_count'] / df_grouped['impression_count']  # CTR
    df_grouped['cost_per_click'] = df_grouped['cost'] / df_grouped['click_count']  # average cost per click
    df_grouped['click_conversion_rate'] = df_grouped['total_trans_order_count'] / df_grouped['click_count']  # CVR
    df_grouped['roi'] = df_grouped['total_trans_amount'] / df_grouped['cost']  # return on investment

    # Numeric formatting (rounding).
    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # clear inf produced by division by zero
    df_grouped['click_count'] = df_grouped['click_count'].round(0)
    df_grouped['click_through_rate'] = df_grouped['click_through_rate'].round(2)
    df_grouped['cost'] = df_grouped['cost'].round(2)
    df_grouped['cost_per_click'] = df_grouped['cost_per_click'].round(2)
    df_grouped['total_trans_amount'] = df_grouped['total_trans_amount'].round(2)
    df_grouped['total_trans_order_count'] = df_grouped['total_trans_order_count'].round(0)
    df_grouped['click_conversion_rate'] = df_grouped['click_conversion_rate'].round(2)
    df_grouped['total_good_add_to_cart_count'] = df_grouped['total_good_add_to_cart_count'].round(0)
    df_grouped['good_favorite_count'] = df_grouped['good_favorite_count'].round(0)
    df_grouped['store_favorite_count'] = df_grouped['store_favorite_count'].round(0)
    df_grouped['roi'] = df_grouped['roi'].round(2)

    # Drop the surplus 'index' column added by reset_index() after the groupby,
    # then translate column headers to Chinese.
    del df_grouped['index']
    df_grouped = uploaded_field_corr_entozh_res(df_grouped, '天猫旗舰店四种推广日、周、月、年统计表')
    df_grouped = df_grouped.fillna('nan')
    return df_grouped


def get_tmall_promotion_data_notconvert(begin_date: str, end_date: str, unit: str):
    """Build the four-channel promotion report without output conversion.

    Same data pipeline as get_tmall_promotion_data, but without NaN filling,
    rounding or Chinese column renaming — used as the raw English-named feed
    for get_tmall_promotion_sum_data.

    :param begin_date: inclusive start date, 'YYYY-MM-DD'
    :param end_date: inclusive end date, 'YYYY-MM-DD'
    :param unit: granularity, one of '日' / '周' / '月' / '年'
    :return: DataFrame with English column names, or None when *unit* is invalid
    """
    # Load source data, one query per promotion channel.
    session = get_session()

    def _load_channel(table_class, channel_name, rename_map):
        # Fetch one channel's daily rows in [begin_date, end_date], tag them
        # with the channel name and normalise columns to the shared schema.
        query = session.query(table_class).filter(
            and_(table_class.stat_time >= begin_date,
                 table_class.stat_time <= end_date))
        frame = pd.read_sql(query.statement, session.bind)
        frame['promotion_channel'] = channel_name
        return frame.rename(columns=rename_map)

    df_ztc = _load_channel(tmall_flagship_store_ztc_promotion_day, '直通车',
                           {'input_output_ratio': 'roi'})
    df_ylmf = _load_channel(tmall_flagship_store_ylmf_promotion_day, '引力魔方',
                            {'input_output_ratio': 'roi'})
    # Bug fix: the rename used to be applied to df_ylmf a second time instead
    # of df_wxt, so WXT rows never got a 'roi' column and came out as NaN
    # after the column selection below.
    df_wxt = _load_channel(tmall_flagship_store_wxt_promotion_day, '万相台',
                           {'input_output_ratio': 'roi'})
    df_pxb = _load_channel(tmall_flagship_store_pxb_promotion_day, '品销宝',
                           {'conversion_rate': 'click_conversion_rate', 'return_rate': 'roi'})

    session.close()

    # Concatenate the four channel tables and keep the reporting columns.
    df = pd.concat([df_pxb, df_ztc, df_ylmf, df_wxt], axis=0).reset_index(drop=True)
    df = df[['stat_time', 'promotion_channel', 'impression_count', 'click_count', 'click_through_rate', 'cost',
             'cost_per_click', 'total_trans_amount', 'total_trans_order_count', 'click_conversion_rate',
             'total_good_add_to_cart_count', 'good_favorite_count', 'store_favorite_count', 'roi']]

    df_grouped = df

    # Produce the output at the requested granularity.
    if unit == '日':
        # Daily detail needs no aggregation (column names stay in English).
        return df

    elif unit == '周':
        # Tag each row with its week index and the week's date range.
        df_grouped['week_index'] = df_grouped['stat_time'].apply(str).apply(get_week)
        df_grouped['week_range'] = df_grouped['week_index'].apply(str).apply(get_week_range)
        groupby_list = ['week_range', 'promotion_channel']

    elif unit == '月':
        # Tag each row with its 'YYYY-MM' month index.
        df_grouped['month_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y-%m')
        groupby_list = ['month_index', 'promotion_channel']

    elif unit == '年':
        # Tag each row with its 'YYYY' year index.
        df_grouped['year_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y')
        groupby_list = ['year_index', 'promotion_channel']

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None

    # Sum the additive metrics per (period, channel); np.size on stat_time
    # counts how many daily rows were folded into each group.
    df_grouped = df_grouped.groupby(groupby_list, as_index=False).agg(
        {'stat_time': np.size, 'impression_count': np.sum, 'click_count': np.sum, 'cost': np.sum,
         'total_trans_amount': np.sum, 'total_trans_order_count': np.sum, 'total_good_add_to_cart_count': np.sum,
         'good_favorite_count': np.sum, 'store_favorite_count': np.sum}).reset_index()
    df_grouped = df_grouped.rename(columns={'stat_time': 'row_count'})  # rename: row_count = rows merged per group
    df_grouped = df_grouped.rename(columns={'week_range': 'stat_time', 'month_index': 'stat_time',
                                            'year_index': 'stat_time'})  # only one of these exists; it becomes stat_time

    # Ratio metrics must be recomputed from the summed components.
    df_grouped['click_through_rate'] = df_grouped['click_count'] / df_grouped['impression_count']  # CTR
    df_grouped['cost_per_click'] = df_grouped['cost'] / df_grouped['click_count']  # average cost per click
    df_grouped['click_conversion_rate'] = df_grouped['total_trans_order_count'] / df_grouped['click_count']  # CVR
    df_grouped['roi'] = df_grouped['total_trans_amount'] / df_grouped['cost']  # return on investment

    # Drop the surplus 'index' column added by reset_index() after the groupby.
    del df_grouped['index']
    return df_grouped


'''—————————————————————————————方法：生成天猫四种推广汇总日周月年表（推广报表加工）—————————————————————————————'''


def get_tmall_promotion_sum_data(begin_date: str, end_date: str, unit: str):
    """Build the cross-channel promotion summary (day/week/month/year).

    Collapses the per-channel output of get_tmall_promotion_data_notconvert
    into one row per period, recomputes the ratio metrics and translates the
    headers to Chinese.

    :param begin_date: inclusive start date, 'YYYY-MM-DD'
    :param end_date: inclusive end date, 'YYYY-MM-DD'
    :param unit: granularity, one of '日' / '周' / '月' / '年'
    :return: DataFrame with Chinese headers, or None when there is no data
        or *unit* is invalid
    """
    # Raw English-named data: one row per period per channel.
    df = get_tmall_promotion_data_notconvert(begin_date, end_date, unit)

    # Bug fix: the helper returns None for an invalid unit; guard before
    # touching .empty so we don't raise AttributeError on None.
    if df is None or df.empty:
        return None
    df = df.fillna(np.nan)  # convert None to NaN so arithmetic does not raise

    df_grouped = df
    # Collapse the four channels into one summary row per period.
    # Sum the additive metrics first.
    if unit == '日':
        df_grouped = df_grouped.groupby(['stat_time'], as_index=False).agg({'impression_count': np.sum,
                                                                            'cost': np.sum, 'click_count': np.sum,
                                                                            'total_trans_order_count': np.sum,
                                                                            'total_trans_amount': np.sum}).reset_index(
            drop=True)
    else:
        df_grouped = df_grouped.groupby(['stat_time'], as_index=False).agg(
            {'impression_count': np.sum,
             'cost': np.sum, 'click_count': np.sum, 'total_trans_order_count': np.sum,
             'row_count': np.mean, 'total_trans_amount': np.sum}).reset_index(
            drop=True)  # row_count should be identical across channels, so the mean preserves it

    # Ratio metrics recomputed from the summed components.
    df_grouped['cost_per_click'] = df_grouped['cost'] / df_grouped['click_count']
    # NOTE(review): this divides transaction amount by impressions, unlike the
    # order-count / clicks definition used elsewhere in this file — confirm
    # this formula is intentional for the summary report.
    df_grouped['click_conversion_rate'] = df_grouped['total_trans_amount'] / df_grouped['impression_count']
    df_grouped['user_value'] = df_grouped['total_trans_amount'] / df_grouped['click_count']
    df_grouped['roi'] = df_grouped['total_trans_amount'] / df_grouped['cost']

    # Output numeric formatting (rounding).
    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # clear inf produced by division by zero
    df_grouped['impression_count'] = df_grouped['impression_count'].round(0)
    df_grouped['click_count'] = df_grouped['click_count'].round(0)
    df_grouped['cost'] = df_grouped['cost'].round(2)
    df_grouped['total_trans_order_count'] = df_grouped['total_trans_order_count'].round(0)
    df_grouped['total_trans_amount'] = df_grouped['total_trans_amount'].round(2)
    df_grouped['cost_per_click'] = df_grouped['cost_per_click'].round(2)
    df_grouped['click_conversion_rate'] = df_grouped['click_conversion_rate'].round(2)
    df_grouped['user_value'] = df_grouped['user_value'].round(2)
    df_grouped['roi'] = df_grouped['roi'].round(2)

    # Translate column headers to Chinese.
    df_grouped = uploaded_field_corr_entozh_res(df_grouped, '天猫旗舰店四种推广汇总日、周、月、年表')
    df_grouped = df_grouped.fillna('nan')
    return df_grouped


'''—————————————————————————————方法：生成天猫旗舰店经营数据日周月年表（旗舰店日常报表加工）—————————————————————————————'''


def get_tmall_flagship_store_overview(begin_date: str, end_date: str, unit: str):
    """Build the Tmall flagship-store operations overview (day/week/month/year).

    Joins the daily overview table with the manually-entered input table
    (commission rate, slot cost, fake-order amount), derives composite GMV
    columns, then returns daily detail or a week/month/year aggregation with
    Chinese column headers.

    :param begin_date: inclusive start date, 'YYYY-MM-DD'
    :param end_date: inclusive end date, 'YYYY-MM-DD'
    :param unit: granularity, one of '日' / '周' / '月' / '年'
    :return: DataFrame with Chinese headers, or None when no rows match or
        *unit* is invalid
    """
    # Load the daily overview rows and match them against the manual-input table.
    session = get_session()
    df_overview_day = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                                          table_class=tmall_flagship_store_overview_day)

    if df_overview_day.empty:
        return None
    df_overview_day = df_overview_day.fillna(np.nan)  # convert None to NaN so arithmetic does not raise

    df_input = get_data_from_mysql(session=session, begin_date=begin_date, end_date=end_date,
                                   table_class=tmall_flagship_store_overview_day_input,
                                   varlist=['stat_time', 'living_commission_rate', 'living_slot_cost',
                                            'fake_order_amount'])
    df_input = df_input.fillna(np.nan)  # convert None to NaN so arithmetic does not raise

    # Left join keeps every overview day even when no manual input exists for it.
    df_overview_day = pd.merge(df_overview_day, df_input, on='stat_time', how='left')

    # Derived columns computed inside the joined table.
    df_overview_day['actual_gmv_received'] = df_overview_day['gmv'] - df_overview_day['refund_amount']
    df_overview_day['living_commission'] = df_overview_day['livestreamer_living_gmv'] * df_overview_day[
        'living_commission_rate']
    # daily_gmv = GMV excluding Taoke, livestreamer sessions and promotion.
    df_overview_day['daily_gmv'] = df_overview_day['gmv'] - df_overview_day['tk_gmv'] - df_overview_day[
        'livestreamer_living_gmv'] - df_overview_day['promotion_gmv']
    df_overview_day['daily_and_promotion_gmv'] = df_overview_day['daily_gmv'] + df_overview_day['promotion_gmv']

    df_grouped = df_overview_day

    session.close()

    # Produce the output at the requested granularity.
    if unit == '日':
        # Numeric formatting (integers vs. two-decimal values).
        df_overview_day.replace([np.inf, -np.inf], 0, inplace=True)  # clear inf produced by division by zero
        df_overview_day['new_member_count'] = df_overview_day['new_member_count'].round(0)
        df_overview_day['member_trans_amount'] = df_overview_day['member_trans_amount'].round(2)
        df_overview_day['visitor_count'] = df_overview_day['visitor_count'].round(0)
        df_overview_day['gmv'] = df_overview_day['gmv'].round(2)
        df_overview_day['refund_amount'] = df_overview_day['refund_amount'].round(2)
        df_overview_day['user_value'] = df_overview_day['user_value'].round(2)
        df_overview_day['payment_conversion_rate'] = df_overview_day['payment_conversion_rate'].round(2)
        df_overview_day['payment_buyer_count'] = df_overview_day['payment_buyer_count'].round(0)
        df_overview_day['avg_order_price'] = df_overview_day['avg_order_price'].round(2)
        df_overview_day['good_favorite_user_count'] = df_overview_day['good_favorite_user_count'].round(0)
        df_overview_day['good_add_to_cart_user_count'] = df_overview_day['good_add_to_cart_user_count'].round(0)
        df_overview_day['gmv_old_buyer'] = df_overview_day['gmv_old_buyer'].round(2)
        df_overview_day['gmv_new_buyer'] = df_overview_day['gmv_new_buyer'].round(2)
        df_overview_day['payment_old_buyer_count'] = df_overview_day['payment_old_buyer_count'].round(0)
        df_overview_day['paying_new_buyer_count'] = df_overview_day['paying_new_buyer_count'].round(0)
        df_overview_day['tk_gmv'] = df_overview_day['tk_gmv'].round(2)
        df_overview_day['livestreamer_living_gmv'] = df_overview_day['livestreamer_living_gmv'].round(2)
        df_overview_day['promotion_gmv'] = df_overview_day['promotion_gmv'].round(2)
        df_overview_day['own_living_gmv'] = df_overview_day['own_living_gmv'].round(2)
        df_overview_day['tk_promotion_cost'] = df_overview_day['tk_promotion_cost'].round(2)
        df_overview_day['promotion_cost'] = df_overview_day['promotion_cost'].round(2)
        df_overview_day['living_commission_rate'] = df_overview_day['living_commission_rate'].round(2)
        df_overview_day['living_slot_cost'] = df_overview_day['living_slot_cost'].round(2)
        df_overview_day['fake_order_amount'] = df_overview_day['fake_order_amount'].round(2)
        df_overview_day['actual_gmv_received'] = df_overview_day['actual_gmv_received'].round(2)
        df_overview_day['living_commission'] = df_overview_day['living_commission'].round(2)
        df_overview_day['daily_gmv'] = df_overview_day['daily_gmv'].round(2)
        df_overview_day['daily_and_promotion_gmv'] = df_overview_day['daily_and_promotion_gmv'].round(2)

        # Turn the missing-value placeholders (0) back into NaN.
        # NOTE(review): this also wipes genuine zero values in these four
        # columns — confirm 0 can only mean "no data" here.
        df_overview_day['tk_gmv'].replace(0, np.nan, inplace=True)
        df_overview_day['promotion_gmv'].replace(0, np.nan, inplace=True)
        df_overview_day['own_living_gmv'].replace(0, np.nan, inplace=True)
        df_overview_day['livestreamer_living_gmv'].replace(0, np.nan, inplace=True)

        # Translate column headers to Chinese.
        df_overview_day = uploaded_field_corr_entozh_res(df_overview_day, '天猫旗舰店概况日、周、月、年统计表')
        df_overview_day = df_overview_day.fillna('nan')
        return df_overview_day
    elif unit == '周':
        # Tag each row with its week index and the week's date range.
        df_grouped['week_index'] = df_grouped['stat_time'].apply(str).apply(get_week)
        df_grouped['week_range'] = df_grouped['week_index'].apply(str).apply(get_week_range)
        groupby_list = ['week_range']

    elif unit == '月':
        # Tag each row with its 'YYYY-MM' month index.
        df_grouped['month_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y-%m')
        groupby_list = ['month_index']

    elif unit == '年':
        # Tag each row with its 'YYYY' year index.
        df_grouped['year_index'] = pd.to_datetime(df_grouped['stat_time']).dt.strftime('%Y')
        groupby_list = ['year_index']

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None

    # Sum the additive metrics per period; np.size on stat_time counts how
    # many daily rows were folded into each group.
    df_grouped = df_grouped.groupby(groupby_list, as_index=False).agg({'stat_time': np.size, 'gmv': np.sum,
                                                                       'refund_amount': np.sum,
                                                                       'payment_buyer_count': np.sum,
                                                                       'good_favorite_user_count': np.sum,
                                                                       'good_add_to_cart_user_count': np.sum,
                                                                       'gmv_old_buyer': np.sum,
                                                                       'payment_old_buyer_count': np.sum,
                                                                       'tk_promotion_cost': np.sum,
                                                                       'new_member_count': np.sum,
                                                                       'member_trans_amount': np.sum,
                                                                       'livestreamer_living_gmv': np.sum,
                                                                       'own_living_gmv': np.sum,
                                                                       'promotion_cost': np.sum,
                                                                       'visitor_count': np.sum,
                                                                       'promotion_gmv': np.sum, 'tk_gmv': np.sum,
                                                                       'gmv_new_buyer': np.sum,
                                                                       'paying_new_buyer_count': np.sum,
                                                                       'living_slot_cost': np.sum,
                                                                       'fake_order_amount': np.sum,
                                                                       'actual_gmv_received': np.sum,
                                                                       'living_commission': np.sum, 'daily_gmv': np.sum,
                                                                       'daily_and_promotion_gmv': np.sum}).reset_index(
        drop=True)
    df_grouped = df_grouped.rename(columns={'stat_time': 'row_count'})  # rename: row_count = rows merged per group
    df_grouped = df_grouped.rename(columns={'week_range': 'stat_time', 'month_index': 'stat_time',
                                            'year_index': 'stat_time'})  # only one of these exists; it becomes stat_time

    # Ratio metrics must be recomputed from the summed components.
    df_grouped['payment_conversion_rate'] = df_grouped['payment_buyer_count'] / df_grouped['visitor_count']
    df_grouped['avg_order_price'] = df_grouped['gmv'] / df_grouped['payment_buyer_count']
    df_grouped['user_value'] = df_grouped['gmv'] / df_grouped['visitor_count']
    df_grouped['living_commission_rate'] = df_grouped['living_commission'] / df_grouped['livestreamer_living_gmv']

    # Numeric formatting (integers vs. two-decimal values).
    df_grouped.replace([np.inf, -np.inf], 0, inplace=True)  # clear inf produced by division by zero
    df_grouped['new_member_count'] = df_grouped['new_member_count'].round(0)
    df_grouped['member_trans_amount'] = df_grouped['member_trans_amount'].round(2)
    df_grouped['visitor_count'] = df_grouped['visitor_count'].round(0)
    df_grouped['gmv'] = df_grouped['gmv'].round(2)
    df_grouped['refund_amount'] = df_grouped['refund_amount'].round(2)
    df_grouped['user_value'] = df_grouped['user_value'].round(2)
    df_grouped['payment_conversion_rate'] = df_grouped['payment_conversion_rate'].round(2)
    df_grouped['payment_buyer_count'] = df_grouped['payment_buyer_count'].round(0)
    df_grouped['avg_order_price'] = df_grouped['avg_order_price'].round(2)
    df_grouped['good_favorite_user_count'] = df_grouped['good_favorite_user_count'].round(0)
    df_grouped['good_add_to_cart_user_count'] = df_grouped['good_add_to_cart_user_count'].round(0)
    df_grouped['gmv_old_buyer'] = df_grouped['gmv_old_buyer'].round(2)
    df_grouped['gmv_new_buyer'] = df_grouped['gmv_new_buyer'].round(2)
    df_grouped['payment_old_buyer_count'] = df_grouped['payment_old_buyer_count'].round(0)
    df_grouped['paying_new_buyer_count'] = df_grouped['paying_new_buyer_count'].round(0)
    df_grouped['tk_gmv'] = df_grouped['tk_gmv'].round(2)
    df_grouped['livestreamer_living_gmv'] = df_grouped['livestreamer_living_gmv'].round(2)
    df_grouped['promotion_gmv'] = df_grouped['promotion_gmv'].round(2)
    df_grouped['own_living_gmv'] = df_grouped['own_living_gmv'].round(2)
    df_grouped['tk_promotion_cost'] = df_grouped['tk_promotion_cost'].round(2)
    df_grouped['promotion_cost'] = df_grouped['promotion_cost'].round(2)
    df_grouped['living_commission_rate'] = df_grouped['living_commission_rate'].round(2)
    df_grouped['living_slot_cost'] = df_grouped['living_slot_cost'].round(2)
    df_grouped['fake_order_amount'] = df_grouped['fake_order_amount'].round(2)
    df_grouped['actual_gmv_received'] = df_grouped['actual_gmv_received'].round(2)
    df_grouped['living_commission'] = df_grouped['living_commission'].round(2)
    df_grouped['daily_gmv'] = df_grouped['daily_gmv'].round(2)
    df_grouped['daily_and_promotion_gmv'] = df_grouped['daily_and_promotion_gmv'].round(2)

    # Turn the missing-value placeholders (0) back into NaN.
    # NOTE(review): this also wipes genuine zero values in these four columns
    # — confirm 0 can only mean "no data" here.
    df_grouped['tk_gmv'].replace(0, np.nan, inplace=True)
    df_grouped['promotion_gmv'].replace(0, np.nan, inplace=True)
    df_grouped['own_living_gmv'].replace(0, np.nan, inplace=True)
    df_grouped['livestreamer_living_gmv'].replace(0, np.nan, inplace=True)

    # Translate column headers to Chinese.
    df_grouped = uploaded_field_corr_entozh_res(df_grouped, '天猫旗舰店概况日、周、月、年统计表')
    df_grouped = df_grouped.fillna('nan')
    return df_grouped


'''—————————————————————————————方法：生成天猫/抖音旗舰店市场竞店GMV日、周、月、年统计表（行业竞品数据）—————————————————————————————'''


def get_competing_gmv_data(begin_date: str, end_date: str, unit: str, platform: str):
    """Build the competing-store (Tmall) / competing-brand (Douyin) GMV statistics table.

    Args:
        begin_date: Start of the reporting window, 'YYYY-MM-DD'.
        end_date: End of the reporting window, 'YYYY-MM-DD'.
        unit: Aggregation unit — '日' (day), '周' (week), '月' (month) or '年' (year).
        platform: '天猫' (Tmall) or '抖音' (Douyin); selects the source tables and columns.

    Returns:
        A DataFrame whose columns have been mapped to Chinese headers via
        uploaded_field_corr_entozh_res, or None when the platform/unit is
        invalid or no rows fall inside the requested window.
    """
    # Pick the platform-specific ORM classes, column names and output table names.
    if platform == '抖音':
        table_class = dy_competing_brand_gmv_day
        competing_list_class = competing_brand_list
        competing_name = 'competing_brand_name'
        competing_gmv = 'competing_brand_gmv'
        competing_name_class_column = table_class.competing_brand_name
        competing_gmv_this_year = 'competing_brand_gmv_thisyear'
        competing_gmv_last_year = 'competing_brand_gmv_lastyear'
        table_zh_name_day_month = '抖音竞品GMV日、月统计表'
        table_zh_name_week = '抖音竞品GMV周统计表'
        table_zh_name_year = '抖音竞品GMV年统计表'
    elif platform == '天猫':
        table_class = tmall_flagship_competing_store_gmv_day
        competing_list_class = tmall_flag_competing_store_list
        competing_name = 'competing_store_name'
        competing_gmv = 'competing_store_gmv'
        competing_name_class_column = table_class.competing_store_name
        competing_gmv_this_year = 'competing_store_gmv_thisyear'
        competing_gmv_last_year = 'competing_store_gmv_lastyear'
        table_zh_name_day_month = '天猫旗舰店市场竞店GMV日、月统计表'
        table_zh_name_week = '天猫旗舰店市场竞店GMV周统计表'
        table_zh_name_year = '天猫旗舰店市场竞店GMV年统计表'
    else:
        print('请输入正确平台名！（天猫/抖音）')
        # Bug fix: the original fell through here and crashed with a NameError
        # on the unset table_class below; bail out instead.
        return None

    # Read this year's data.
    session = get_session()
    # One week before begin_date — the week branch needs the previous week's
    # rows to compute a week-over-week ratio.
    begin_date_one_week_ago = (datetime.strptime(begin_date, '%Y-%m-%d') - relativedelta(weeks=1)).strftime(
        '%Y-%m-%d')
    # Jan 1 of that year — needed for the cumulative GMV running sum.
    first_day_this_year = (datetime(datetime.strptime(begin_date_one_week_ago, '%Y-%m-%d').year, 1, 1)).strftime(
        '%Y-%m-%d')
    competing_store_name_list = pd.read_sql(session.query(competing_list_class).statement, session.bind)[
        competing_name].tolist()

    # Fetch this year's rows for the requested competitors.
    t = table_class
    query = session.query(t).filter(and_(t.stat_time >= first_day_this_year,
                                         t.stat_time <= end_date,
                                         competing_name_class_column.in_(competing_store_name_list)))
    df = pd.read_sql(query.statement, session.bind)[['stat_time', competing_name, competing_gmv]]
    df['stat_time'] = pd.to_datetime(df['stat_time'])  # robust conversion to datetime
    df = df.fillna(np.nan)  # turn None into NaN so arithmetic does not fail

    # Nothing inside the requested window: return None directly.
    if df[(df['stat_time'] >= begin_date) & (df['stat_time'] <= end_date)].empty:
        return None

    # Running cumulative GMV per competitor within each calendar year.
    df['cumulative_gmv'] = df.groupby([df['stat_time'].dt.year, competing_name])[
        competing_gmv].cumsum()
    # Shifted date used to join against last year's stat_time.
    df['stat_time_last_year'] = df['stat_time'].apply(lambda x: x - relativedelta(years=1))

    # Same date range, one year earlier.
    begin_date_last_year = (datetime.strptime(begin_date_one_week_ago, '%Y-%m-%d') - relativedelta(years=1)).strftime(
        '%Y-%m-%d')
    end_date_last_year = (datetime.strptime(end_date, '%Y-%m-%d') - relativedelta(years=1)).strftime('%Y-%m-%d')
    first_day_last_year = (datetime(datetime.strptime(begin_date_last_year, '%Y-%m-%d').year, 1, 1)).strftime(
        '%Y-%m-%d')

    # Fetch last year's rows for the requested competitors.
    t = table_class
    query = session.query(t).filter(and_(t.stat_time >= first_day_last_year,
                                         t.stat_time <= end_date_last_year,
                                         competing_name_class_column.in_(competing_store_name_list)))
    df_last_year = pd.read_sql(query.statement, session.bind)[
        ['stat_time', competing_name, competing_gmv]]
    df_last_year = df_last_year.fillna(np.nan)  # turn None into NaN

    df_last_year['stat_time'] = pd.to_datetime(df_last_year['stat_time'])  # robust conversion to datetime

    # No data last year: leave the cumulative GMV comparison empty.
    if df_last_year.empty:
        df_last_year['cumulative_gmv'] = np.nan
    else:
        df_last_year['cumulative_gmv'] = \
            df_last_year.groupby([df_last_year['stat_time'].dt.year, competing_name])[
                competing_gmv].cumsum()  # last year's running cumulative GMV

    # Base daily table: this year joined with the same period last year.
    df = pd.merge(df, df_last_year, left_on=['stat_time_last_year', competing_name],
                  right_on=['stat_time', competing_name], suffixes=('_thisyear', '_lastyear'), how='left')
    df = df.fillna(np.nan)  # turn None into NaN so arithmetic does not fail
    df['gmv_yoy_growth'] = (df[competing_gmv_this_year] / df[competing_gmv_last_year]) - 1
    df['cumulative_gmv_yoy_growth'] = (df['cumulative_gmv_thisyear'] / df['cumulative_gmv_lastyear']) - 1

    # Rename columns to match the data dictionary.
    df = df[['stat_time_thisyear', competing_name, competing_gmv_this_year,
             competing_gmv_last_year, 'gmv_yoy_growth', 'cumulative_gmv_thisyear', 'cumulative_gmv_lastyear',
             'cumulative_gmv_yoy_growth']]
    df = df.rename(columns={'stat_time_thisyear': 'stat_time',
                            competing_gmv_this_year: competing_gmv,
                            competing_gmv_last_year: 'last_year_same_period_gmv',
                            'cumulative_gmv_thisyear': 'cumulative_gmv',
                            'cumulative_gmv_lastyear': 'last_year_same_period_cumulative_gmv'})
    session.close()

    # Emit the table at the requested granularity.
    if unit == '日':
        # .copy() so the assignments below act on a real frame, not a view.
        df = df[(df['stat_time'] >= begin_date) & (df['stat_time'] <= end_date)].copy()
        df['rank'] = df.groupby('stat_time')[competing_gmv].rank(ascending=False)  # per-day rank

        df['stat_time'] = df['stat_time'].dt.date  # drop the time-of-day part
        df = df.sort_values(by=['stat_time', competing_name], ascending=[True, True])  # sort for output

        # Normalise number formats (integers and decimals).
        df.replace([np.inf, -np.inf], 0, inplace=True)  # inf produced by division by zero
        df[competing_gmv] = df[competing_gmv].round(2)
        df['last_year_same_period_gmv'] = df['last_year_same_period_gmv'].round(2)
        df['gmv_yoy_growth'] = df['gmv_yoy_growth'].round(2)
        df['cumulative_gmv'] = df['cumulative_gmv'].round(2)
        df['last_year_same_period_cumulative_gmv'] = df['last_year_same_period_cumulative_gmv'].round(2)
        df['cumulative_gmv_yoy_growth'] = df['cumulative_gmv_yoy_growth'].round(2)
        df['rank'] = df['rank'].astype(int)

        # Mark missing values as NaN (assignment form: column-level
        # replace(..., inplace=True) is deprecated in modern pandas).
        df['last_year_same_period_gmv'] = df['last_year_same_period_gmv'].replace(0, np.nan)
        df['gmv_yoy_growth'] = df['gmv_yoy_growth'].replace(0, np.nan)

        # Map to Chinese field names.
        df = uploaded_field_corr_entozh_res(df, table_zh_name_day_month)
        df = df.fillna('nan')
        return df

    elif unit == '周':
        df = df[(df['stat_time'] >= begin_date_one_week_ago) & (df['stat_time'] <= end_date)]
        df = df[['stat_time', competing_name, competing_gmv]].copy()
        df['week_index'] = df['stat_time'].apply(str).apply(get_week)
        df['week_range'] = df['week_index'].apply(str).apply(get_week_range)
        # String aliases instead of np.size/np.sum: same result, and numpy
        # callables in agg are deprecated by pandas.
        df = df.groupby([competing_name, 'week_range', 'week_index'], as_index=False).agg(
            {'stat_time': 'size', competing_gmv: 'sum'}).reset_index(drop=True)
        df = df.rename(columns={'stat_time': 'row_count'})  # number of source rows aggregated per group
        df = df.rename(columns={'week_range': 'stat_time'})  # new statistics-period label
        df = df.sort_values(by=[competing_name, 'week_index'], ascending=[True, True])  # order needed for shift below

        # Week-over-week change.
        df['last_week_same_period_gmv'] = df.groupby(competing_name)[competing_gmv].shift(1)
        df['gmv_yoy_growth'] = (df[competing_gmv] / df['last_week_same_period_gmv']) - 1

        # Drop week_index.
        df = df[[competing_name, 'stat_time', 'row_count', competing_gmv,
                 'last_week_same_period_gmv', 'gmv_yoy_growth']].reset_index(drop=True)

        df['rank'] = df.groupby('stat_time')[competing_gmv].rank(ascending=False)  # per-week rank

        df = df.sort_values(by=['stat_time', competing_name], ascending=[True, True])  # sort for output

        df.dropna(inplace=True)  # drop the extra leading week (only fetched to seed the WoW ratio)

        # Normalise number formats (integers and decimals).
        df.replace([np.inf, -np.inf], 0, inplace=True)  # inf produced by division by zero
        df[competing_gmv] = df[competing_gmv].round(2)
        df['last_week_same_period_gmv'] = df['last_week_same_period_gmv'].round(2)
        df['gmv_yoy_growth'] = df['gmv_yoy_growth'].round(2)
        df['rank'] = df['rank'].astype(int)

        # Map to Chinese field names.
        df = uploaded_field_corr_entozh_res(df, table_zh_name_week)
        df = df.fillna('nan')
        return df

    elif unit == '月':  # this table is persisted
        df = df[(df['stat_time'] >= begin_date) & (df['stat_time'] <= end_date)].copy()
        # Month label, e.g. '2024-03'.
        df['month_index'] = pd.to_datetime(df['stat_time']).dt.strftime('%Y-%m')

        # Aggregate per competitor and month.
        df_monthly = df.groupby([competing_name, 'month_index'], as_index=False).agg({
            'stat_time': 'size',
            competing_gmv: 'sum',
            'last_year_same_period_gmv': 'sum',
            'cumulative_gmv': 'max',
            'last_year_same_period_cumulative_gmv': 'max'
        }).reset_index(drop=True)
        df_monthly = df_monthly.rename(columns={'stat_time': 'row_count'})  # number of source rows aggregated per group
        df_monthly = df_monthly.rename(columns={'month_index': 'stat_time'})  # new statistics-period label

        # Year-over-year growth.
        df_monthly['gmv_yoy_growth'] = df_monthly[competing_gmv] / df_monthly['last_year_same_period_gmv'] - 1
        df_monthly['cumulative_gmv_yoy_growth'] = df_monthly['cumulative_gmv'] / df_monthly[
            'last_year_same_period_cumulative_gmv'] - 1

        df_monthly['rank'] = df_monthly.groupby('stat_time')[competing_gmv].rank(ascending=False)  # per-month rank

        df_monthly = df_monthly.sort_values(by=['stat_time', competing_name], ascending=[True, True])  # sort for output

        # Normalise number formats (integers and decimals).
        df_monthly.replace([np.inf, -np.inf], 0, inplace=True)  # inf produced by division by zero
        df_monthly[competing_gmv] = df_monthly[competing_gmv].round(2)
        df_monthly['last_year_same_period_gmv'] = df_monthly['last_year_same_period_gmv'].round(2)
        df_monthly['gmv_yoy_growth'] = df_monthly['gmv_yoy_growth'].round(2)
        df_monthly['cumulative_gmv'] = df_monthly['cumulative_gmv'].round(2)
        df_monthly['last_year_same_period_cumulative_gmv'] = df_monthly['last_year_same_period_cumulative_gmv'].round(2)
        df_monthly['cumulative_gmv_yoy_growth'] = df_monthly['cumulative_gmv_yoy_growth'].round(2)
        df_monthly['rank'] = df_monthly['rank'].astype(int)

        # Mark missing values as NaN (assignment form, not deprecated inplace replace).
        df_monthly['last_year_same_period_gmv'] = df_monthly['last_year_same_period_gmv'].replace(0, np.nan)
        df_monthly['gmv_yoy_growth'] = df_monthly['gmv_yoy_growth'].replace(0, np.nan)

        # Map to Chinese field names.
        df_monthly = uploaded_field_corr_entozh_res(df_monthly, table_zh_name_day_month)
        df_monthly = df_monthly.fillna('nan')
        return df_monthly

    elif unit == '年':
        df = df[(df['stat_time'] >= begin_date) & (df['stat_time'] <= end_date)]
        # Year label, e.g. '2024'.
        df_year = df[['stat_time', competing_name, competing_gmv]].copy()
        df_year['year_index'] = pd.to_datetime(df_year['stat_time']).dt.strftime('%Y')
        df_year = df_year.groupby(['year_index', competing_name], as_index=False).agg(
            {'stat_time': 'size', competing_gmv: 'sum'}).reset_index(drop=True)
        df_year = df_year.rename(columns={'stat_time': 'row_count'})  # number of source rows aggregated per group
        df_year = df_year.rename(columns={'year_index': 'stat_time'})  # new statistics-period label

        df_year['rank'] = df_year.groupby('stat_time')[competing_gmv].rank(ascending=False)  # per-year rank

        df_year = df_year.sort_values(by=['stat_time', competing_name], ascending=[True, True])  # sort for output

        # Normalise number formats (integers and decimals).
        df_year[competing_gmv] = df_year[competing_gmv].round(2)
        df_year['rank'] = df_year['rank'].astype(int)

        # Map to Chinese field names.
        df_year = uploaded_field_corr_entozh_res(df_year, table_zh_name_year)
        df_year = df_year.fillna('nan')
        return df_year

    else:
        print('请输入正确的统计单位！（日/周/月/年）')
        return None
