import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import os
import re
from sqlalchemy import create_engine, Column, Integer, String, MetaData, ForeignKey, and_, or_, between

# 导入sql的引擎
from sqlalchemy import create_engine, Column, Integer, String, MetaData, ForeignKey, and_, or_
from sqlalchemy.orm import sessionmaker, declarative_base
from initdatabase import *  # 引入表类（每个类对应数据库中的一个表，类名等于表名，表名在我给的数据字典的第一行第一列）

# Global database connection settings.
# NOTE(review): credentials are hard-coded in source — consider moving them to
# environment variables or a secrets store before this leaves the test DB.
userName = 'root'
password = 'i6&3,@dn#@v(fa--y3!-!'
dbHost = '192.168.129.51'# 'localhost' #'39.100.113.99'
dbPort = 3306
dbName = 'jietingtest'
#DB_CONNECT = f'mysql://{userName}:{urlquote(password)}@{dbHost}:{dbPort}/{dbName}?charset=utf8mb4'
# DB_CONNECT = f'mysql://{userName}:{urlquote(password)}@localhost:{dbPort}/{dbName}?charset=utf8mb4'
# Build the MySQL connection URL; the password is URL-quoted because it
# contains reserved characters. NOTE(review): `urlquote` is not imported in
# this file — presumably re-exported by `from initdatabase import *`; confirm,
# otherwise this line raises NameError at import time.
DB_CONNECT = f'mysql://{userName}:{urlquote(password)}@{dbHost}:{dbPort}/{dbName}?charset=utf8mb4'
engine = create_engine(DB_CONNECT)
'''engine = create_engine('mysql://root:ZxChino958925@localhost:3306/jieting4')  # 设置上：max_overflow=0, pool_size=5'''
# Module-level session shared by the report helpers in this file.
Session = sessionmaker(engine)
session = Session()
print("链接数据库成功！")


# Compute the same calendar date one year earlier (for year-over-year stats).
def get_last_year_date(day_date):
    """Return the date string exactly one year before ``day_date``.

    Parameters
    ----------
    day_date : str
        A "YYYY-MM-DD" date string.

    Returns
    -------
    str
        The same month and day in the previous year. Feb 29 is clamped to
        Feb 28 when the previous year is not a leap year.

    Fixed: the old code forced *every* February date to day 28/29 based on a
    bare ``% 4`` leap test, so e.g. "2024-02-10" became "2023-02-28". Now only
    Feb 29 is adjusted, using the full Gregorian leap-year rule.
    """
    year_str, month_str, day_str = day_date.split("-")
    prev_year = int(year_str) - 1

    # Full Gregorian rule: divisible by 4, except centuries unless by 400.
    is_leap = (prev_year % 4 == 0 and prev_year % 100 != 0) or prev_year % 400 == 0
    if int(month_str) == 2 and int(day_str) == 29 and not is_leap:
        day_str = "28"

    return str(prev_year) + "-" + month_str + "-" + day_str


# Competing-store brand keyword list, shared module-wide (used as the default
# brand list by deploy_band_category below).
brand_ls = list()
# Load every competing store's brand abbreviation from the database.
brandlist = list(
    pd.read_sql_query('SELECT * FROM tmall_flag_competing_store_list', engine)['competing_store_abbreviation'])
# Drop the own brand "洁婷" so only competitor brand names remain.
for item in brandlist:
    if "洁婷" in item:
        pass
    else:
        brand_ls.append(item)
# Classify a search keyword as a brand keyword ("品牌词") or a category
# keyword ("品类词").
# Brand keyword: contains a brand name; category keyword: everything else.
def deploy_band_category(mode: str, keyword_element: str, brand_ls = brand_ls):
    """Classify ``keyword_element`` for use with ``DataFrame.apply``.

    Parameters
    ----------
    mode : str
        "本店" (own store: brand == "洁婷") or "竞店" (competing store:
        brand is any entry of ``brand_ls``).
    keyword_element : str
        The keyword text to classify.
    brand_ls : list
        Competitor brand names; defaults to the module-level list loaded
        from the database at import time.

    Returns
    -------
    str or None
        "品牌词" or "品类词"; in "竞店" mode a keyword that already contains
        the literal tag "品牌词" is returned unchanged. Any other ``mode``
        returns None (unchanged from the original behavior).

    Fixed: removed a leftover debug ``print(brand)`` that spammed stdout for
    every brand on every row.
    """
    if mode == "本店":
        # Own store: a keyword is a brand keyword iff it mentions "洁婷".
        return "品牌词" if "洁婷" in keyword_element else "品类词"
    elif mode == "竞店":
        # Competing store: any competitor brand name makes it a brand keyword.
        if any(brand in keyword_element for brand in brand_ls):
            return "品牌词"
        # Leftover guard from an earlier in-place tagging approach; kept so
        # behavior is identical for keywords that literally contain "品牌词".
        if "品牌词" in keyword_element:
            return keyword_element
        return "品类词"


# Normalise a user-supplied competing-store keyword into the exact store name
# stored in the database, so downstream filters match.
def adjust_store_name(competing_store_name: str):
    """Return the full competing-store name matching the given keyword.

    Scans the store-name list fetched from the database and keeps the LAST
    name containing the keyword. Returns the error string
    "您输入的竞店关键词有误" when the keyword is empty or matches nothing.
    """
    rival_names, _rival_ids = get_rival_name_ls(engine)

    # Guard: an empty keyword can never match a store.
    if competing_store_name == "":
        return "您输入的竞店关键词有误"

    matched_name = ""
    for candidate in rival_names:
        if competing_store_name in candidate:
            matched_name = candidate

    if matched_name == "":
        return "您输入的竞店关键词有误"
    return matched_name


# Build the previous year's "year-month" label for each row (YoY matching).
def deploy_lastyear_ym(data: pd.DataFrame, stat_ym_column: pd.Series):
    """Add a ``last_year_ym`` column: same month, previous year.

    Parameters
    ----------
    data : pd.DataFrame
        Frame to receive the new column (modified in place and returned).
    stat_ym_column : pd.Series
        Column of "YYYY-MM" / "YYYY-M" strings, one per row of ``data``.

    Returns
    -------
    pd.DataFrame
        ``data`` with ``last_year_ym`` strings like "2022-5" — the month is
        NOT zero-padded (``int()`` strips padding), matching how months are
        matched elsewhere in this file.

    Fixed: replaced per-row chained assignment (``data[col][i] = ...``),
    which raises SettingWithCopyWarning and is unreliable under pandas
    copy-on-write, with a single whole-column assignment.
    """
    last_year_labels = []
    for value in stat_ym_column:
        parts = str(value).split("-")
        # Year minus one for YoY; month kept, zero padding stripped.
        last_year_labels.append(str(int(parts[0]) - 1) + "-" + str(int(parts[1])))
    data["last_year_ym"] = last_year_labels
    data["last_year_ym"] = data["last_year_ym"].astype(str)
    return data


def get_rival_name_ls(engine):
    """Fetch competing-store names and ids from the database.

    Parameters
    ----------
    engine : SQLAlchemy engine or DBAPI connection accepted by
        ``pd.read_sql_query``.

    Returns
    -------
    (list, list)
        Store names excluding any that contain "洁婷" (the own brand), and
        store ids excluding empty strings.

    Fixed: the table was previously read twice with identical ``SELECT *``
    queries; it is now read once and both columns are taken from the same
    result frame.
    """
    stores = pd.read_sql_query('SELECT * FROM tmall_flag_competing_store_list', engine)
    rival_name_ls = [name for name in stores['competing_store_name'] if "洁婷" not in name]
    rival_id_ls = [store_id for store_id in stores['competing_store_id'] if store_id != ""]
    return rival_name_ls, rival_id_ls


# Validate that both date strings are non-blank and that start <= end
# (lexicographic comparison, which is correct for "YYYY-MM-DD" strings).
def judge_string_empty(start, end):
    """Raise ValueError when either date is blank / "nan" / "None", or when
    the start date sorts after the end date; otherwise return None."""
    invalid_tokens = ('', "nan", "None")
    if str(start).strip() in invalid_tokens:
        raise ValueError("输入错误：开始日期无效")
    if str(end).strip() in invalid_tokens:
        raise ValueError("输入错误：结束日期无效")
    if str(start) > str(end):
        raise ValueError("输入错误：开始日期大于结束日期")


# Snap the requested range to whole reporting weeks (Friday through Thursday).
def week_adjust(start_date, end_date):
    """Snap ``[start_date, end_date]`` inward to whole Fri..Thu weeks.

    Parameters
    ----------
    start_date, end_date : str
        "YYYY-MM-DD" strings.

    Returns
    -------
    (str, str)
        Adjusted start (a Friday) and end (a Thursday) as "YYYY-MM-DD".

    Raises
    ------
    Exception
        When the adjusted range no longer covers a full week.

    Fixed: removed the unreachable ``return "", ""`` after the raise, and
    collapsed the weekday branches into one modular-arithmetic expression:
    ``(4 - weekday) % 7`` is the distance forward to the nearest Friday and
    ``(weekday - 3) % 7`` the distance back to the nearest Thursday, which
    reproduces every branch of the original logic.
    """
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    # Roll start forward to the next Friday (itself if already a Friday).
    adjusted_start = start + timedelta(days=(4 - start.weekday()) % 7)
    # Roll end back to the previous Thursday (itself if already a Thursday).
    adjusted_end = end - timedelta(days=(end.weekday() - 3) % 7)
    if adjusted_end - adjusted_start < timedelta(days=6):
        raise Exception('未包含完整的周!')
    return adjusted_start.strftime("%Y-%m-%d"), adjusted_end.strftime("%Y-%m-%d")


def month_adjust(start_date, end_date):
    """Snap ``[start_date, end_date]`` inward to whole calendar months.

    ``start`` is moved forward to the first day of the next month unless it
    is already a month's first day; ``end`` is moved back to the last day of
    the previous month unless it is already a month's last day.

    Parameters
    ----------
    start_date, end_date : str
        "YYYY-MM-DD" strings.

    Returns
    -------
    (str, str)
        Adjusted month-start and month-end dates as "YYYY-MM-DD".

    Raises
    ------
    Exception
        When the adjusted range no longer contains a full month.

    Fixed: removed the unreachable ``return ('', '')`` after the raise and
    replaced the deeply nested ``replace()``/``timedelta`` month arithmetic
    with the readable "first of next month" idiom (day 1 plus 32 days lands
    in the next month for every month length).
    """
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")

    # Move start forward to the first day of the following month when needed.
    if start.day != 1:
        start = (start.replace(day=1) + timedelta(days=32)).replace(day=1)

    # Last calendar day of end's month: first of next month minus one day.
    first_of_next = (end.replace(day=1) + timedelta(days=32)).replace(day=1)
    last_of_month = first_of_next - timedelta(days=1)
    if end.day != last_of_month.day:
        # Not at month-end: step back to the last day of the previous month.
        end = end.replace(day=1) - timedelta(days=1)

    if start > end:
        raise Exception('未包含完整的月!')

    return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")


def year_adjust(start_date, end_date):
    """Snap ``[start_date, end_date]`` inward to whole calendar years.

    ``start`` becomes Jan 1 of its year if already at Jan 1, otherwise Jan 1
    of the next year; ``end`` becomes Dec 31 of its year if already at
    Dec 31, otherwise Dec 31 of the previous year.

    Parameters
    ----------
    start_date, end_date : str
        "YYYY-MM-DD" strings.

    Returns
    -------
    (str, str)
        Adjusted year-start and year-end dates as "YYYY-MM-DD".

    Raises
    ------
    Exception
        When the adjusted range no longer contains a full year.

    Fixed: removed the unreachable ``return ('', '')`` after the raise.
    """
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")

    # Determine the starting year boundary.
    if start.month != 1 or start.day != 1:
        start = datetime(start.year + 1, 1, 1)
    else:
        start = datetime(start.year, 1, 1)

    # Determine the ending year boundary.
    if end.month != 12 or end.day != 31:
        end = datetime(end.year - 1, 12, 31)
    else:
        end = datetime(end.year, 12, 31)

    if start > end:
        raise Exception('未包含完整的年!')

    return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")

# 封装调整日期的函数，形成一个可以直接调用的，判断输入日期是否正确的函数
'''def adjust_date(start_date, end_date):
    adjust_start_date, adjust_end_date = adjust_dates_with_full_week_check_corrected(start_date=start_date,
                                                                                     end_date=end_date)
    # 判断调整后的日期是否是空值，如果是空值，那就直接返回日期输入不正确的异常
    if adjust_start_date == "":
        raise ("输入日期有误，请重新调整输入日期")
    elif adjust_end_date == "":
        raise ("输入日期有误，请重新调整输入日期")
    else:
        print("日期输入无误，调整成功！")
        return adjust_start_date, adjust_end_date'''


# Assign the reporting-week label (Friday through Thursday) for each stat date.
def deploy_stat_week(data_frame: pd.DataFrame, stat_date: pd.Series):  # type:ignore
    """Add a ``stat_week`` column of "start~end" strings to ``data_frame``.

    The reporting week runs from Friday through the following Thursday, so
    each date is snapped back to its week's Friday (the start) and the end
    is six days later (the Thursday).

    Parameters
    ----------
    data_frame : pd.DataFrame
        Frame to receive the column (modified in place and returned).
    stat_date : pd.Series
        Column of "YYYY-MM-DD" strings, one per row of ``data_frame``.

    Returns
    -------
    pd.DataFrame
        ``data_frame`` with the ``stat_week`` column filled.

    Fixed: replaced per-row chained assignment (``df['stat_week'][i] = ...``,
    SettingWithCopyWarning-prone and unreliable under copy-on-write) with one
    whole-column assignment, and collapsed the four weekday branches into a
    single offset — ``(weekday - 4) % 7`` is the distance back to the week's
    Friday, which reproduces every branch of the original logic (Friday → 0,
    Sat/Sun → 1/2, Mon..Thu → 3..6). Also parses each date once instead of
    up to six times per row.
    """
    week_labels = []
    for value in stat_date:
        day = datetime.strptime(str(value), '%Y-%m-%d')
        week_start = day - timedelta(days=(day.weekday() - 4) % 7)
        week_end = week_start + timedelta(days=6)
        week_labels.append(week_start.strftime('%Y-%m-%d') + "~" + week_end.strftime('%Y-%m-%d'))
    data_frame["stat_week"] = week_labels
    return data_frame


'''# 店铺流量来源-计算周环比
# 匹配上周，对应列的值，算环比用
def get_pre_week_flow_data(flow_data:pd.DataFrame, stat_week_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(stat_week_column)):
        # 统计周的起止日期
        start_date = str(stat_week_column[i]).split("~")[0]
        end_date = str(stat_week_column[i]).split("~")[1]
        # 计算后上个统计周的起止日期
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
        last_week_range = previous_week_start + "~" + previous_week_end


        flow_data_temp = flow_data[stat_week_column == last_week_range]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]
        # 新增一列要匹配到本周的stat_week
        flow_data_temp["stat_week"] = stat_week_column[i]
        flow_data_temp.rename(columns = {pointed_column_name:"pre_week_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", "traffic_third_source", "stat_week"])

        try:
            flow_data["pre_week_" + pointed_column_name + "_x"] = np.where(flow_data["pre_week_" + pointed_column_name + "_x"].isnull(), 
                                                                        flow_data["pre_week_" + pointed_column_name + "_y"], flow_data["pre_week_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_week_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_week_" + pointed_column_name + "_x":"pre_week_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue

    return flow_data


# 店铺流量来源-计算月环比
# 匹配上月的对应值，算环比用
def get_pre_month_flow_data(flow_data:pd.DataFrame, time_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(time_column)):
        # 获取月份，并查找上月
        month = str(time_column[i])
        year = str(flow_data["year"][i])
        # 这个地方得考虑一月份，要去查上一年的情况了
        # 而且匹配月的时候，非得牵扯到年
        if (month == "1"):
            previous_month = "12"
            previous_year = str(int(year) - 1)
        else:
            previous_month = str(int(month) - 1)
            previous_year = year

        #print(month, previous_month)
        #print(year,previous_year)

        # 此处筛选出，上一个月的数据表
        flow_data_temp = flow_data[(flow_data["year"] == previous_year) & (time_column == previous_month)]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]
        # 新增一列要匹配到对应年月的，此处，是当前要匹配的目标时间
        flow_data_temp["year"] = year
        flow_data_temp["month"] = month
        flow_data_temp.rename(columns = {pointed_column_name:"pre_month_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", "traffic_third_source", "year", "month"])
        try:
            flow_data["pre_month_" + pointed_column_name + "_x"] = np.where(flow_data["pre_month_" + pointed_column_name + "_x"].isnull(), 
                                                                           flow_data["pre_month_" + pointed_column_name + "_y"], flow_data["pre_month_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_month_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_month_" + pointed_column_name + "_x":"pre_month_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue
    return flow_data


# 店铺流量来源-计算年环比
# 匹配上年的对应值，算环比用
def get_pre_year_flow_data(flow_data:pd.DataFrame, time_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(time_column)):
        # 获取年份，并查找上年
        year = str(time_column[i])
        previous_year = str(int(year) - 1)
        #print(year, previous_year)

        # 此处筛选出，上一周的数据表
        flow_data_temp = flow_data[(time_column == previous_year)]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]
        # 新增一列要匹配到本周的stat_week
        flow_data_temp["year"] = time_column[i]
        flow_data_temp.rename(columns = {pointed_column_name:"pre_year_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", "traffic_third_source", "year"])
        try:
            flow_data["pre_year_" + pointed_column_name + "_x"] = np.where(flow_data["pre_year_" + pointed_column_name + "_x"].isnull(), 
                                                                           flow_data["pre_year_" + pointed_column_name + "_y"], flow_data["pre_year_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_year_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_year_" + pointed_column_name + "_x":"pre_year_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue
    return flow_data'''





'''### 竞店-流量来源统计
# 店铺流量来源-计算周环比
# 匹配上周，对应列的值，算环比用
def get_pre_week_flow_data_other(flow_data:pd.DataFrame, stat_week_column:pd.DataFrame.columns, store_name_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(stat_week_column)):
        # 统计周的起止日期
        start_date = str(stat_week_column[i]).split("~")[0]
        end_date = str(stat_week_column[i]).split("~")[1]
        # 计算后上个统计周的起止日期
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
        last_week_range = previous_week_start + "~" + previous_week_end
        store_name = store_name_column[i]

        flow_data_temp = flow_data[(stat_week_column == last_week_range) & (store_name_column == store_name)]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]
        # 新增一列要匹配到本周的stat_week
        flow_data_temp["stat_week"] = stat_week_column[i]
        flow_data_temp["competing_store_name"] = store_name_column[i]
        flow_data_temp.rename(columns = {pointed_column_name:"pre_week_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", "traffic_third_source", 
                                                                        "competing_store_name", "stat_week"])

        try:
            flow_data["pre_week_" + pointed_column_name + "_x"] = np.where(flow_data["pre_week_" + pointed_column_name + "_x"].isnull(), 
                                                                        flow_data["pre_week_" + pointed_column_name + "_y"], flow_data["pre_week_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_week_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_week_" + pointed_column_name + "_x":"pre_week_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue

    return flow_data



# 店铺流量来源-计算月环比
# 匹配上月的对应值，算环比用
def get_pre_month_flow_data_other(flow_data:pd.DataFrame, month_column:pd.DataFrame.columns, store_name_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(month_column)):
        # 获取月份，并查找上月
        month = str(month_column[i])
        year = str(flow_data["year"][i])
        # 这个地方得考虑一月份，要去查上一年的情况了
        # 而且匹配月的时候，非得牵扯到年
        if (month == "1"):
            previous_month = "12"
            previous_year = str(int(year) - 1)
        else:
            previous_month = str(int(month) - 1)
            previous_year = year
        store_name = store_name_column[i]
        #print(month, previous_month)
        #print(year,previous_year)

        # 此处筛选出，上一个月的数据表
        flow_data_temp = flow_data[(flow_data["year"] == previous_year) & (month_column == previous_month) & (store_name_column == store_name)]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]
        # 新增一列要匹配到对应年月的，此处，是当前要匹配的目标时间
        flow_data_temp["year"] = year
        flow_data_temp["month"] = month
        flow_data_temp["competing_store_name"] = store_name_column[i]
        flow_data_temp.rename(columns = {pointed_column_name:"pre_month_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", 
                                                                        "traffic_third_source", "competing_store_name", "year", "month"])

        try:
            flow_data["pre_month_" + pointed_column_name + "_x"] = np.where(flow_data["pre_month_" + pointed_column_name + "_x"].isnull(), 
                                                                           flow_data["pre_month_" + pointed_column_name + "_y"], flow_data["pre_month_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_month_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_month_" + pointed_column_name + "_x":"pre_month_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue
    return flow_data'''

'''# 店铺流量来源-计算年环比
# 匹配上年的对应值，算环比用
def get_pre_year_flow_data_other(flow_data:pd.DataFrame, year_column:pd.DataFrame.columns, store_name_column:pd.DataFrame.columns, pointed_column_name:str):

    for i in range(0,len(year_column)):
        # 获取年份，并查找上年
        year = str(year_column[i])
        previous_year = str(int(year) - 1)
        #print(year, previous_year)
        store_name = store_name_column[i]

        # 此处筛选出，上一周的数据表
        flow_data_temp = flow_data[(year_column == previous_year) & (store_name_column == store_name)]
        flow_data_temp = flow_data_temp[["traffic_first_source", "traffic_second_source", "traffic_third_source", pointed_column_name]]

        # 新增一列要匹配到本周的stat_week
        flow_data_temp["year"] = year_column[i]
        flow_data_temp["competing_store_name"] = store_name_column[i]

        flow_data_temp.rename(columns = {pointed_column_name:"pre_year_" + pointed_column_name}, inplace = True)

        flow_data = pd.merge(flow_data, flow_data_temp, how="left", on=["traffic_first_source", "traffic_second_source", "traffic_third_source",
                                                                         "competing_store_name","year"])
        try:
            flow_data["pre_year_" + pointed_column_name + "_x"] = np.where(flow_data["pre_year_" + pointed_column_name + "_x"].isnull(), 
                                                                           flow_data["pre_year_" + pointed_column_name + "_y"], flow_data["pre_year_" + pointed_column_name + "_x"])
            flow_data.drop(labels=["pre_year_" + pointed_column_name + "_y"], axis=1, inplace=True)
            flow_data.rename(columns={"pre_year_" + pointed_column_name + "_x":"pre_year_" + pointed_column_name}, inplace=True)
        except:
            print("no")
            continue
    return flow_data'''


'''## 周
# 获取竞店-流量-数据
# 这个地方的start和end，需要选两周，而且统计时间不能搞错，必须是上周五到这周四，才能拿到数据
def gen_traffic_other_week_report(start_date, end_date, competing_store_name:str):
    # 判断输入的值是否有问题
    judge_string_empty(start=start_date, end=end_date)
    start_date, end_date = adjust_date(start_date=start_date, end_date=end_date)
    # 数据读取
    # 这个地方需要读取所有的历史数据了，或者多读几周，因为要算环比
    # week_range = start_date + "~" + end_date

    try:
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")

        start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
        end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
        month_tag = start_date.split("-")[1]
        year_tag = start_date.split("-")[0]
    except:
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d") - timedelta(7), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")

        start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d"), "%Y-%m-%d")
        end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")
        month_tag = start_date.split(".")[1]
        year_tag = start_date.split(".")[0]

    # query = session.query(tmall_flagship_store_traffic_source_all_day).filter(tmall_flagship_store_traffic_source_all_day.stat_time == '2023.12.24')
    query = session.query(tmall_flagship_competing_store_traffic_source_day).filter(
        tmall_flagship_competing_store_traffic_source_day.stat_time.between(previous_week_start, previous_week_end))
    flow_other_df = pd.read_sql(query.statement, con=engine.connect())

    # 把拿到的SQL里面的数据，预处理一下
    flow_other_df.drop(labels=["id"], inplace=True, axis=1)
    flow_other_df["stat_time"] = flow_other_df["stat_time"].astype(str)

    # 数据处理
    flow_other_df = deploy_stat_week(flow_other_df, flow_other_df["stat_time"])
    flow_other_df["year"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    flow_other_df["month"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    flow_other_df["year"] = flow_other_df["year"].astype(str)
    flow_other_df["month"] = flow_other_df["month"].astype(str)
    # 修正竞店名称
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    flow_other_df = flow_other_df[flow_other_df["competing_store_name"] == target_store]

    # 第一部分数据
    # 本店-周-数据报表 tmall_flagship_store_traffic_source_week_stat
    flow_other_sub1 = flow_other_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "competing_store_name",
         "stat_week"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum, "payment_user_count": np.sum}).reset_index()
    # 复制一份，用于匹配上周的数据，计算环比
    flow_other_last_week = flow_other_sub1.copy()
    flow_other_last_week.rename(columns={"visitor_count":"visitor_count_last_week",
                                         "trans_amount":"trans_amount_last_week",
                                         "payment_user_count":"payment_user_count_last_week",
                                         "stat_week":"last_week"},
                                inplace=True)
    flow_other_last_week = flow_other_last_week[["last_week", "competing_store_name",
                                                 "visitor_count_last_week", "trans_amount_last_week",
                                                 "traffic_first_source", "traffic_second_source", "traffic_third_source"]]
    # 给大表分配上周的列，用于匹配上周的数据
    flow_other_sub1 = deploy_lastweek_date(data=flow_other_sub1, stat_week_column=flow_other_sub1["stat_week"])
    # 匹配上周的数据
    flow_other_sub1 = pd.merge(left=flow_other_sub1, right=flow_other_last_week, how="left", on=["last_week",
                                                                                                 "traffic_first_source",
                                                                                                 "traffic_second_source",
                                                                                                 "traffic_third_source",
                                                                                                 "competing_store_name"])
    flow_other_sub1.drop(labels=["last_week"], axis=1, inplace=True)

    # 第二部分数据
    # 剔除汇总行记录
    # 这个地方转化出来的汇总行是用"-"符号表示的
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "-"]
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "汇总"]
    # 竞店的周汇总
    flow_other_sub2 = flow_other_sub2.groupby(["competing_store_name", "stat_week"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum}).reset_index()
    flow_other_sub2.rename(columns={"visitor_count": "visitor_count_all", "trans_amount": "trans_amount_all"},
                           inplace=True)

    # 合并两部分数据
    flow_other_all = pd.merge(flow_other_sub1, flow_other_sub2, how="left", on=["competing_store_name", "stat_week"])

    # 流量访客来源占比
    flow_other_all["source_visitor_percent"] = flow_other_all["visitor_count"] / flow_other_all["visitor_count_all"]
    # 流量支付金额占比
    flow_other_all["source_trans_percent"] = flow_other_all["trans_amount"] / flow_other_all["trans_amount_all"]
    # 支付转化率
    flow_other_all["payment_conversion_rate"] = flow_other_all["payment_user_count"] / flow_other_all["visitor_count"]

    # 计算环比
    flow_other_all["visitor_count_mom"] = (flow_other_all["visitor_count"] - flow_other_all[
        "visitor_count_last_week"]) / flow_other_all["visitor_count_last_week"]
    flow_other_all["payment_amount_mom"] = (flow_other_all["trans_amount"] - flow_other_all[
        "trans_amount_last_week"]) / flow_other_all["trans_amount_last_week"]
    # 剔除计算环比的列
    flow_other_all.drop(labels=["visitor_count_last_week", "trans_amount_last_week"], axis=1, inplace=True)

    # 重新分配索引
    flow_other_all.reset_index(inplace=True)
    # 剔除多出来的两列
    flow_other_all.drop(labels=["index", "visitor_count_all", "trans_amount_all"], axis=1, inplace=True)

    flow_other_all.sort_values(by=["traffic_first_source", "traffic_second_source", "traffic_third_source"],
                              ascending=False,
                              inplace=True)
    flow_other_all.rename(columns={"stat_week": "数据统计时间", "competing_store_name": "店铺名称",
                                          "traffic_first_source": "一级流量来源",
                                          "traffic_second_source": "二级流量来源",
                                          "traffic_third_source": "三级流量来源",
                                          "visitor_count": "访客人数", "trans_amount": "交易金额",
                                          "source_visitor_percent": "来源访客人数占比",
                                          "source_trans_percent": "该来源交易金额占比",
                                          "payment_conversion_rate": "支付转化率",
                                          "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                          "payment_user_count": "支付人数"
                                          },
                                 inplace=True)
    return flow_other_all'''

'''## 月
# 本店-月-流量来源统计-函数
# 这个地方的start和end，需要选至少两个月，而且统计时间不能搞错
def gen_traffic_other_month_report(start_date, end_date, competing_store_name:str):
    # 判断输入的值是否有问题
    judge_string_empty(start=start_date, end=end_date)

    # 数据读取
    # 这个地方需要读取所有的历史数据了，或者多读几周，因为要算环比
    # week_range = start_date + "~" + end_date

    try:
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(31), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")

        start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
        end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
        month_tag = start_date.split("-")[1]
        year_tag = start_date.split("-")[0]
    except:
        previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d") - timedelta(31), "%Y-%m-%d")
        previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")

        start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d"), "%Y-%m-%d")
        end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")
        month_tag = start_date.split(".")[1]
        year_tag = start_date.split(".")[0]
    #print(month_tag, year_tag)
    # 读取数据
    # query = session.query(tmall_flagship_store_traffic_source_all_day).filter(tmall_flagship_store_traffic_source_all_day.stat_time == '2023.12.24')
    query = session.query(tmall_flagship_competing_store_traffic_source_day).filter(
        tmall_flagship_competing_store_traffic_source_day.stat_time.between(previous_week_start, previous_week_end))
    flow_other_df = pd.read_sql(query.statement, con=engine.connect())

    # 把拿到的SQL里面的数据，预处理一下
    flow_other_df.drop(labels=["id"], inplace=True, axis=1)
    flow_other_df["stat_time"] = flow_other_df["stat_time"].astype(str)

    # 数据处理
    flow_other_df = deploy_stat_week(flow_other_df, flow_other_df["stat_time"])
    flow_other_df["year"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    flow_other_df["month"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    flow_other_df["year"] = flow_other_df["year"].astype(str)
    flow_other_df["month"] = flow_other_df["month"].astype(str)
    flow_other_df["stat_ym"] = flow_other_df["year"] + "-" + flow_other_df["month"]

    # 修正竞店名称
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    flow_other_df = flow_other_df[flow_other_df["competing_store_name"] == target_store]

    # 第一部分数据
    # 本店-周-数据报表 tmall_flagship_store_traffic_source_week_stat
    flow_other_sub1 = flow_other_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "competing_store_name", "stat_ym"
         ]).agg({"visitor_count": np.sum, "trans_amount": np.sum, "payment_user_count": np.sum}).reset_index()
    # 复制一个，用来计算环比的大表，并且更改列名
    flow_own_last_week = flow_other_sub1.copy()
    flow_own_last_week.rename(columns={"visitor_count": "visitor_count_last_month",
                                       "trans_amount": "trans_amount_last_month",
                                       "payment_user_count": "payment_user_count_last_month",
                                       "stat_ym": "last_ym"},
                              inplace=True)
    flow_own_last_week = flow_own_last_week[["last_ym", "competing_store_name",
                                             "traffic_first_source", "traffic_second_source", "traffic_third_source",
                                             "visitor_count_last_month", "trans_amount_last_month"]]
    # 给sub1分配上周的标签，然后匹配上周的数据
    flow_other_sub1 = deploy_lastmonth_date(data=flow_other_sub1,
                                          stat_month_column=flow_other_sub1["stat_ym"])
    # print(key_word_own_sub1)
    # print(key_word_last_week)
    # 根据last week和search word匹配上周的相关数据
    flow_other_sub1 = pd.merge(left=flow_other_sub1, right=flow_own_last_week,
                                 how="left", on=["last_ym", "competing_store_name",
                                                 "traffic_first_source",
                                                 "traffic_second_source",
                                                 "traffic_third_source"])
    # 匹配到数据之后，去掉上周的那一列
    flow_other_sub1.drop(labels=["last_ym"], inplace=True, axis=1)


    # 第二部分数据
    # 剔除汇总行记录
    # 这个地方转化出来的汇总行是用"-"符号表示的
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "-"]
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "汇总"]
    # 每周的第一渠道的汇总统计（用于计算环比）-列
    flow_other_sub2 = flow_other_sub2.groupby(["competing_store_name", "stat_ym"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum}).reset_index()
    flow_other_sub2.rename(columns={"visitor_count": "visitor_count_all", "trans_amount": "trans_amount_all"},
                           inplace=True)

    flow_other_all = pd.merge(flow_other_sub1, flow_other_sub2, how="left", on=["competing_store_name", "stat_ym"])

    # 流量访客来源占比
    flow_other_all["source_visitor_percent"] = flow_other_all["visitor_count"] / flow_other_all["visitor_count_all"]
    # 流量支付金额占比
    flow_other_all["source_trans_percent"] = flow_other_all["trans_amount"] / flow_other_all["trans_amount_all"]
    # 支付转化率
    flow_other_all["payment_conversion_rate"] = flow_other_all["payment_user_count"] / flow_other_all["visitor_count"]
    # 剔除多出来的两列
    flow_other_all.drop(labels=["visitor_count_all", "trans_amount_all"], axis=1, inplace=True)

    # 计算环比
    flow_other_all["visitor_count_mom"] = (flow_other_all["visitor_count"] - flow_other_all[
        "visitor_count_last_month"]) / flow_other_all["visitor_count_last_month"]
    flow_other_all["payment_amount_mom"] = (flow_other_all["trans_amount"] - flow_other_all[
        "trans_amount_last_month"]) / flow_other_all["trans_amount_last_month"]
    flow_other_all.drop(labels=["visitor_count_last_month", "trans_amount_last_month"], axis=1, inplace=True)

    flow_other_all.rename(columns={"stat_time": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                                  "traffic_first_source": "一级流量来源",
                                  "traffic_second_source": "二级流量来源",
                                  "traffic_third_source": "三级流量来源",
                                  "visitor_count": "访客人数", "trans_amount": "交易金额",
                                  "source_visitor_percent": "来源访客人数占比",
                                  "source_trans_percent": "该来源交易金额占比",
                                  "payment_conversion_rate": "支付转化率",
                                  "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                  "payment_user_count": "支付人数"
                                  },
                         inplace=True)

    return flow_other_all'''


### 竞店流量来源-查询函数改动
## 日
# 获取竞店-流量-日数据
def gen_traffic_other_day_report(start_date, end_date, competing_store_name: str):
    """Build the daily traffic-source report for one competing store.

    Reads the daily traffic-source rows for [start_date, end_date] from
    tmall_flagship_competing_store_traffic_source_day, aggregates them per
    (first/second/third traffic source, store, day), and derives share and
    conversion metrics.

    Args:
        start_date / end_date: date strings, "YYYY-MM-DD" or "YYYY.MM.DD".
        competing_store_name: brand keyword; resolved to the canonical
            store name via adjust_store_name().

    Returns:
        pd.DataFrame with Chinese display column names.
    """
    # Validate the inputs (raises on empty strings).
    judge_string_empty(start=start_date, end=end_date)

    # Normalize both accepted date formats to "YYYY-MM-DD".
    try:
        query_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
        query_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
    except ValueError:
        query_start = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d"), "%Y-%m-%d")
        query_end = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")

    # BUG FIX: the upper bound previously used the raw end_date, so a
    # "YYYY.MM.DD" input produced a mixed-format BETWEEN clause.
    query = session.query(tmall_flagship_competing_store_traffic_source_day).filter(
        tmall_flagship_competing_store_traffic_source_day.stat_time.between(query_start, query_end))
    flow_own_df = pd.read_sql(query.statement, con=engine.connect())

    # Light preprocessing of the rows fetched from SQL.
    flow_own_df.drop(labels=["id"], inplace=True, axis=1)
    flow_own_df["stat_time"] = flow_own_df["stat_time"].astype(str)

    # Filter down to the requested store first to keep the data small.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    flow_own_df = flow_own_df[flow_own_df["competing_store_name"] == target_store]

    # Part 1: per-source daily aggregates.
    flow_other_sub1 = flow_own_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "competing_store_name",
         "stat_time"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum, "payment_user_count": np.sum}).reset_index()

    # Part 2: per-store daily totals (denominator for the share columns).
    # Summary rows are marked either "-" or "汇总"; both must be excluded.
    # BUG FIX: the second filter previously re-filtered flow_other_sub1,
    # discarding the "-" filter, so "-" summary rows inflated the totals.
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "-"]
    flow_other_sub2 = flow_other_sub2[flow_other_sub2["traffic_third_source"] != "汇总"]
    flow_other_sub2 = flow_other_sub2.groupby(["competing_store_name", "stat_time"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum}).reset_index()
    flow_other_sub2.rename(columns={"visitor_count": "visitor_count_all", "trans_amount": "trans_amount_all"},
                           inplace=True)

    # Join the totals back onto the per-source rows.
    flow_other_all = pd.merge(flow_other_sub1, flow_other_sub2, how="left", on=["competing_store_name", "stat_time"])

    # Share of visitors coming from this source.
    flow_other_all["source_visitor_percent"] = flow_other_all["visitor_count"] / flow_other_all["visitor_count_all"]
    # Share of transaction amount coming from this source.
    flow_other_all["source_trans_percent"] = flow_other_all["trans_amount"] / flow_other_all["trans_amount_all"]
    # Payment conversion rate = paying users / visitors.
    flow_other_all["payment_conversion_rate"] = flow_other_all["payment_user_count"] / flow_other_all["visitor_count"]

    # Re-index, then drop the helper columns left over from the join.
    flow_other_all.reset_index(inplace=True)
    flow_other_all.drop(labels=["index", "visitor_count_all", "trans_amount_all"], axis=1, inplace=True)

    # Order the report newest-first and switch to Chinese display names.
    flow_other_all.sort_values(
        by=["stat_time", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
        ascending=False, inplace=True)
    flow_other_all.rename(columns={"stat_time": "数据统计时间", "traffic_first_source": "一级流量来源",
                                   "traffic_second_source": "二级流量来源", "traffic_third_source": "三级流量来源",
                                   "visitor_count": "访客人数", "trans_amount": "交易金额",
                                   "payment_user_count": "支付人数",
                                   "source_visitor_percent": "访客人数占比", "payment_conversion_rate": "支付转化率",
                                   "source_trans_percent": "交易金额占比",
                                   "competing_store_name": "店铺名称"
                                   }, inplace=True)
    return flow_other_all


## 周
# 这个地方的start和end，需要选两周，而且统计时间不能搞错，必须是上周五到这周四，才能拿到数据
def gen_traffic_other_week_report(start_date, end_date, competing_store_name: str):
    """Weekly traffic-source report for one competing store.

    The date range must cover whole statistic weeks (Friday through the
    following Thursday); week_adjust() snaps the inputs onto those bounds.
    Returns the pre-aggregated weekly rows with Chinese display column
    names.
    """
    # Sanity-check the inputs, then snap the range onto statistic weeks.
    judge_string_empty(start=start_date, end=end_date)
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)

    # Pull the pre-aggregated weekly rows for the range.
    weekly_query = session.query(tm_competing_store_traffic_source_week_stat).filter(
        tm_competing_store_traffic_source_week_stat.stat_time.between(start_date, end_date))
    report = pd.read_sql(weekly_query.statement, con=engine.connect())

    # Drop bookkeeping columns and make the stat week a plain string.
    report.drop(labels=["id", "create_time"], inplace=True, axis=1)
    report["stat_time"] = report["stat_time"].astype(str)

    # Keep only the store the caller asked about.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    report = report[report["competing_store_name"] == target_store]

    # Newest weeks / sources first.
    report.sort_values(
        by=["stat_time", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
        ascending=False, inplace=True)

    # Chinese display names for the UI.
    report.rename(columns={"stat_time": "数据统计时间", "competing_store_name": "店铺名称",
                           "traffic_first_source": "一级流量来源",
                           "traffic_second_source": "二级流量来源",
                           "traffic_third_source": "三级流量来源",
                           "visitor_count": "访客人数", "trans_amount": "交易金额",
                           "source_visitor_percent": "来源访客人数占比",
                           "source_trans_percent": "该来源交易金额占比",
                           "payment_conversion_rate": "支付转化率",
                           "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                           "payment_user_count": "支付人数"
                           },
                  inplace=True)

    return report


## 月
# 输入的competing_store_name是品牌的关键词，但是不能有错别字
# 查询日期传入的格式 "2024-1" "2024-2"
def gen_traffic_other_month_report(start_date, end_date, competing_store_name: str):
    """Monthly traffic-source report for one competing store.

    start_date / end_date are "YYYY-M" style month tags (e.g. "2024-1",
    "2024-2"). competing_store_name is a brand keyword (must be spelled
    correctly), resolved via adjust_store_name(). Returns the
    pre-aggregated monthly rows with Chinese display column names.
    """
    # Reject empty inputs up front.
    judge_string_empty(start=start_date, end=end_date)

    # Fetch the pre-aggregated monthly rows for the range.
    month_query = session.query(tm_competing_store_traffic_source_month_stat).filter(
        tm_competing_store_traffic_source_month_stat.stat_time.between(start_date, end_date))
    report = pd.read_sql(month_query.statement, con=engine.connect())

    # Strip bookkeeping columns; normalise the month tag to a string.
    report.drop(labels=["id", "create_time"], inplace=True, axis=1)
    report["stat_time"] = report["stat_time"].astype(str)

    # Keep only the requested store.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    report = report[report["competing_store_name"] == target_store]

    # Newest months / sources first.
    report.sort_values(by=["stat_time", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                       ascending=False, inplace=True)

    # Chinese display names for the UI.
    report.rename(columns={"stat_time": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                           "traffic_first_source": "一级流量来源",
                           "traffic_second_source": "二级流量来源",
                           "traffic_third_source": "三级流量来源",
                           "visitor_count": "访客人数", "trans_amount": "交易金额",
                           "source_visitor_percent": "来源访客人数占比",
                           "source_trans_percent": "该来源交易金额占比",
                           "payment_conversion_rate": "支付转化率",
                           "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                           "payment_user_count": "支付人数"
                           },
                  inplace=True)

    return report


### 竞店-年
## 竞店-年-流量来源统计-函数
# 这个地方的start和end，需要选至少两年
def gen_traffic_other_year_report(start_date, end_date, competing_store_name: str):
    """Yearly traffic-source report for one competing store.

    Aggregates the daily traffic-source rows per year, joins each year's
    previous-year figures to compute year-over-year change, and derives
    share and conversion metrics. The range should span at least two
    years so the earliest reported year still finds comparison data.

    Args:
        start_date / end_date: date strings, "YYYY-MM-DD" or "YYYY.MM.DD".
        competing_store_name: brand keyword resolved via adjust_store_name().

    Returns:
        pd.DataFrame with Chinese display column names.
    """
    # Validate the inputs and snap the range onto full years.
    judge_string_empty(start=start_date, end=end_date)
    start_date, end_date = year_adjust(start_date=start_date, end_date=end_date)

    # Normalize both accepted date formats; read one extra year backwards
    # so the first requested year has previous-year data for the change
    # columns.
    try:
        query_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(366), "%Y-%m-%d")
        query_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
    except ValueError:
        query_start = datetime.strftime(datetime.strptime(start_date, "%Y.%m.%d") - timedelta(366), "%Y-%m-%d")
        query_end = datetime.strftime(datetime.strptime(end_date, "%Y.%m.%d"), "%Y-%m-%d")

    query = session.query(tmall_flagship_competing_store_traffic_source_day).filter(
        tmall_flagship_competing_store_traffic_source_day.stat_time.between(query_start, query_end))
    flow_other_df = pd.read_sql(query.statement, con=engine.connect())

    # Light preprocessing of the rows fetched from SQL.
    flow_other_df.drop(labels=["id"], inplace=True, axis=1)
    flow_other_df["stat_time"] = flow_other_df["stat_time"].astype(str)

    # Tag each daily row with its statistic week, year and month.
    flow_other_df = deploy_stat_week(flow_other_df, flow_other_df["stat_time"])
    flow_other_df["year"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    flow_other_df["month"] = flow_other_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    flow_other_df["year"] = flow_other_df["year"].astype(str)
    flow_other_df["month"] = flow_other_df["month"].astype(str)

    # Filter down to the requested store first to keep the data small.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    flow_other_df = flow_other_df[flow_other_df["competing_store_name"] == target_store]

    # Part 1: per-source yearly aggregates.
    flow_other_sub1 = flow_other_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "competing_store_name", "year"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum, "payment_user_count": np.sum}).reset_index()

    # Previous-year copy used to join the comparison figures.
    flow_other_last_year = flow_other_sub1.copy()
    flow_other_last_year.rename(columns={"visitor_count": "visitor_count_last_year",
                                         "trans_amount": "trans_amount_last_year",
                                         "payment_user_count": "payment_user_count_last_year",
                                         "year": "last_year"},
                                inplace=True)
    flow_other_last_year = flow_other_last_year[["competing_store_name", "last_year",
                                                 "visitor_count_last_year", "trans_amount_last_year",
                                                 "traffic_first_source", "traffic_second_source",
                                                 "traffic_third_source"]]

    # Tag each row with its previous year, then join the comparison data.
    flow_other_sub1 = deploy_lastyear_date(data=flow_other_sub1, stat_year_column=flow_other_sub1["year"])
    flow_other_sub1 = pd.merge(left=flow_other_sub1, right=flow_other_last_year,
                               how="left", on=["traffic_first_source",
                                               "traffic_second_source",
                                               "traffic_third_source",
                                               "competing_store_name", "last_year"])

    # Part 2: per-store yearly totals (denominator for the share columns).
    # Summary rows are marked either "-" or "汇总"; both must be excluded.
    # BUG FIX: the second filter previously re-filtered flow_other_sub1,
    # discarding the "-" filter, so "-" summary rows inflated the totals.
    flow_other_sub2 = flow_other_sub1[flow_other_sub1["traffic_third_source"] != "-"]
    flow_other_sub2 = flow_other_sub2[flow_other_sub2["traffic_third_source"] != "汇总"]
    flow_other_sub2 = flow_other_sub2.groupby(["competing_store_name", "year"]).agg(
        {"visitor_count": np.sum, "trans_amount": np.sum}).reset_index()
    flow_other_sub2.rename(columns={"visitor_count": "visitor_count_all", "trans_amount": "trans_amount_all"},
                           inplace=True)

    # Join the totals back onto the per-source rows.
    flow_other_all = pd.merge(flow_other_sub1, flow_other_sub2, how="left", on=["competing_store_name", "year"])

    # Share of visitors coming from this source.
    flow_other_all["source_visitor_percent"] = flow_other_all["visitor_count"] / flow_other_all["visitor_count_all"]
    # Share of transaction amount coming from this source.
    flow_other_all["source_trans_percent"] = flow_other_all["trans_amount"] / flow_other_all["trans_amount_all"]
    # Payment conversion rate = paying users / visitors.
    flow_other_all["payment_conversion_rate"] = flow_other_all["payment_user_count"] / flow_other_all["visitor_count"]

    # Year-over-year change. NOTE(review): the columns keep the historical
    # *_mom names so downstream consumers stay compatible, even though the
    # comparison here is against the previous year, not the previous month.
    flow_other_all["visitor_count_mom"] = (flow_other_all["visitor_count"] - flow_other_all[
        "visitor_count_last_year"]) / flow_other_all["visitor_count_last_year"]
    flow_other_all["payment_amount_mom"] = (flow_other_all["trans_amount"] - flow_other_all[
        "trans_amount_last_year"]) / flow_other_all["trans_amount_last_year"]

    # Drop the helper columns used only for the computations above.
    flow_other_all.drop(labels=["visitor_count_all", "trans_amount_all", "last_year",
                                "visitor_count_last_year", "trans_amount_last_year"],
                        axis=1, inplace=True)

    # Order the report newest-first and switch to Chinese display names.
    flow_other_all.sort_values(by=["year", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                               ascending=False, inplace=True)
    flow_other_all.rename(columns={"year": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                                   "traffic_first_source": "一级流量来源", "traffic_second_source": "二级流量来源",
                                   "traffic_third_source": "三级流量来源",
                                   "visitor_count": "访客人数", "trans_amount": "交易金额",
                                   "source_visitor_percent": "来源访客人数占比",
                                   "source_trans_percent": "该来源交易金额占比",
                                   "payment_conversion_rate": "支付转化率",
                                   "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                   "payment_user_count": "支付人数"
                                   },
                          inplace=True)

    return flow_other_all




### 竞店搜索词
## 日
# 竞店日搜索词
# extra_rival_ls:输入品牌名即可，默认有十四个关注的品牌，如果想看更多的品牌，需要额外输入
def gen_search_other_day_report(start_date, end_date, competing_store_name: str):
    """Daily search-word report for one competing store.

    Pulls the per-day search-word rows in [start_date, end_date], keeps
    only the requested store, and returns them sorted with Chinese
    display column names.
    """
    # Reject empty date inputs up front.
    judge_string_empty(start=start_date, end=end_date)

    # Fetch the daily search-word statistics for the range.
    day_query = session.query(tm_competing_store_visitors_from_search_word_day_stat).filter(
        tm_competing_store_visitors_from_search_word_day_stat.stat_time.between(start_date, end_date))
    report = pd.read_sql(day_query.statement, con=engine.connect())

    # Strip bookkeeping columns; normalise the date to a string.
    report.drop(labels=["id", "create_time"], inplace=True, axis=1)
    report["stat_time"] = report["stat_time"].astype(str)

    # Resolve the brand keyword to the canonical store name and filter.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    report = report[report["competing_store_name"] == target_store]

    # Busiest search words first.
    report.sort_values(by=["visitors_from_search_word", "guide_order_buyer_count"], ascending=False,
                       inplace=True)

    # Chinese display names for the UI.
    report.rename(
        columns={"stat_time": "数据统计时间", "competing_store_name": "竞店名称", "search_word": "竞店搜索词",
                 "visitors_from_search_word": "带来的访客数", "guide_order_buyer_count": "引导下单买家数",
                 "shoutao_search_visitors": "手淘搜索访客数", "brand_keyword_visitor_ratio": "品牌关键词访客占比",
                 "visitors_from_search_word_mom": "带来的访客数-环比",
                 "guide_order_buyer_count_mom": "引导下单买家数-环比",
                 "brand_keyword_visitor_ratio_mom": "访客数占手淘搜索访客数-环比"},
        inplace=True)

    return report


## 周
# 周-竞店搜索词
# 这个地方的start和end，需要选两周，而且统计时间不能搞错，必须是上周五到这周四，才能拿到数据
def gen_search_other_week_report(start_date, end_date, competing_store_name: str):
    """Weekly search-word report for one competing store.

    The range must cover whole statistic weeks (Friday through the
    following Thursday); week_adjust() snaps the inputs onto those
    bounds. Returns the rows sorted with Chinese display column names.
    """
    # Sanity-check the inputs, then snap the range onto statistic weeks.
    judge_string_empty(start=start_date, end=end_date)
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)

    # Fetch the weekly search-word statistics for the range.
    week_query = session.query(tm_competing_store_visitors_from_search_word_week_stat).filter(
        tm_competing_store_visitors_from_search_word_week_stat.stat_time.between(start_date, end_date))
    report = pd.read_sql(week_query.statement, con=engine.connect())

    # Strip bookkeeping columns; normalise the stat week to a string.
    report.drop(labels=["id", "create_time"], inplace=True, axis=1)
    report["stat_time"] = report["stat_time"].astype(str)

    # Resolve the brand keyword to the canonical store name and filter.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    report = report[report["competing_store_name"] == target_store]

    # Busiest search words first.
    report.sort_values(by=["visitors_from_search_word", "guide_order_buyer_count_weekly"], ascending=False,
                       inplace=True)

    # Chinese display names for the UI.
    report.rename(
        columns={"stat_time": "数据统计时间", "competing_store_name": "竞店名称", "search_word": "竞店搜索词",
                 "visitors_from_search_word": "带来的访客数", "guide_order_buyer_count_weekly": "引导下单买家数",
                 "shoutao_search_visitors": "手淘搜索访客数", "brand_keyword_visitor_ratio": "品牌关键词访客占比",
                 "visitors_from_search_word_mom": "带来的访客数-环比",
                 "guide_order_buyer_count_mom": "引导下单买家数-环比",
                 "brand_keyword_visitor_ratio_mom": "访客数占手淘搜索访客数-环比"},
        inplace=True)

    return report


## 月
# 月-竞店搜索词
def gen_search_other_month_report(start_date, end_date, competing_store_name: str):
    """Monthly search-word report for one competing store.

    Pulls the per-month search-word rows in [start_date, end_date], keeps
    only the requested store, and returns them with Chinese display
    column names (including month-over-month and year-over-year change
    columns).
    """
    # Reject empty date inputs up front.
    judge_string_empty(start=start_date, end=end_date)

    # Fetch the monthly search-word statistics for the range.
    query = session.query(tm_competing_store_visitors_from_search_word_month_stat).filter(
        tm_competing_store_visitors_from_search_word_month_stat.stat_time.between(start_date, end_date))
    key_word_other_df = pd.read_sql(query.statement, con=engine.connect())

    # Strip bookkeeping columns; normalise the month tag to a string.
    key_word_other_df.drop(labels=["id", "create_time"], inplace=True, axis=1)
    key_word_other_df["stat_time"] = key_word_other_df["stat_time"].astype(str)

    # Resolve the brand keyword to the canonical store name and filter.
    target_store = adjust_store_name(competing_store_name=competing_store_name)
    key_word_other_df = key_word_other_df[key_word_other_df["competing_store_name"] == target_store]

    # Chinese display names for the UI.
    # BUG FIX: "guide_order_buyer_count_yoy" previously mapped to
    # "访客数占手淘搜索访客数-同比", duplicating the label already used for
    # "brand_keyword_visitor_ratio_yoy" and producing two identically
    # named columns; it now gets its own "引导下单买家数-同比" label.
    key_word_other_df.rename(
        columns={"stat_time": "数据统计时间", "competing_store_name": "竞店名称", "search_word": "竞店搜索词",
                 "visitors_from_search_word": "带来的访客数", "guide_order_buyer_count_monthly": "引导下单买家数",
                 "shoutao_search_visitors": "手淘搜索访客数", "brand_keyword_visitor_ratio": "品牌关键词访客占比",
                 "visitors_from_search_word_mom": "带来的访客数-环比",
                 "guide_order_buyer_count_mom": "引导下单买家数-环比",
                 "brand_keyword_visitor_ratio_mom": "访客数占手淘搜索访客数-环比",
                 "brand_keyword_visitor_ratio_yoy": "访客数占手淘搜索访客数-同比",
                 "visitors_from_search_word_yoy": "带来的访客数-同比",
                 "guide_order_buyer_count_yoy": "引导下单买家数-同比"},
        inplace=True)

    return key_word_other_df















###############################################################################################################
###############################################################################################################
###############################################################################################################
#############################################  新函数修改  ######################################################
###############################################################################################################
###############################################################################################################
###############################################################################################################

# 定义一个，给相关的数据列匹配上周对应的数据的函数
# 这里看看能不能，先给本周打个上周的标签，然后再用大表的周匹配一遍这个生成的上周的标签，从而获取到上周的数据
# 需要注意，这里需要拿到正确的上周的数据，保证输入的最早的那个周，能拿到上周的环比数据
def deploy_lastweek_date(data: pd.DataFrame, stat_week_column: pd.Series):
    """Append a ``last_week`` column to *data*.

    For every value of *stat_week_column* formatted as
    ``"YYYY-MM-DD~YYYY-MM-DD"``, the corresponding ``last_week`` cell is
    set to the same range shifted back by exactly seven days, so weekly
    rows can later be self-joined to fetch last week's figures.

    Rows whose week string cannot be parsed keep an empty string.

    :param data: frame to extend in place (also returned for chaining).
    :param stat_week_column: series of "start~end" week-range strings;
        assumed to share *data*'s default RangeIndex — TODO confirm.
    :return: *data* with the new string-typed ``last_week`` column.
    """
    data["last_week"] = ""
    one_week = timedelta(7)
    for i in range(len(stat_week_column)):
        try:
            # Split "YYYY-MM-DD~YYYY-MM-DD" into its two endpoints; raises
            # ValueError when the separator is missing or duplicated.
            start_date, end_date = str(stat_week_column[i]).split("~")
            previous_week_start = datetime.strftime(
                datetime.strptime(start_date, "%Y-%m-%d") - one_week, "%Y-%m-%d")
            previous_week_end = datetime.strftime(
                datetime.strptime(end_date, "%Y-%m-%d") - one_week, "%Y-%m-%d")
            # Assign through .loc to avoid chained-assignment, which may
            # silently write to a copy (SettingWithCopyWarning).
            data.loc[i, "last_week"] = previous_week_start + "~" + previous_week_end
        except (ValueError, KeyError):
            # Malformed week string: leave the default empty value.
            continue
    data["last_week"] = data["last_week"].astype(str)
    return data


def deploy_lastmonth_date(data: pd.DataFrame, stat_month_column: pd.Series):
    """Append a ``last_ym`` column holding the previous "YYYY-MM" month.

    For every ``"YYYY-MM"`` value of *stat_month_column* the matching
    ``last_ym`` cell receives the month before it, so monthly rows can
    later be self-joined to fetch last month's figures.

    Rows whose value cannot be parsed keep an empty string.

    :param data: frame to extend in place (also returned for chaining).
    :param stat_month_column: series of "YYYY-MM" strings; assumed to
        share *data*'s default RangeIndex — TODO confirm.
    :return: *data* with the new string-typed ``last_ym`` column.
    """
    data["last_ym"] = ""
    for i in range(len(stat_month_column)):
        try:
            parts = str(stat_month_column[i]).split("-")
            year_num = int(parts[0])
            month_num = int(parts[1])
            if month_num == 1:
                # January rolls back to December of the previous year.
                # BUG fix: the original wrote "month_date == 12" (a
                # comparison, not an assignment), so January was mapped to
                # "previous-year-01" instead of "previous-year-12".
                year_num = year_num - 1
                month_num = 12
            else:
                # Same year: simply step the month back by one.
                month_num = month_num - 1
            # Zero-pad the month so the result matches the zero-padded
            # "YYYY-MM" stat_ym format produced elsewhere in this module;
            # the original emitted "2024-2", which could never join.
            data.loc[i, "last_ym"] = f"{year_num}-{month_num:02d}"
        except (ValueError, IndexError, KeyError):
            # Malformed month string: leave the default empty value.
            continue
    data["last_ym"] = data["last_ym"].astype(str)
    return data


def deploy_lastyear_date(data: pd.DataFrame, stat_year_column: pd.Series):
    """Append a ``last_year`` column holding the previous year.

    For every year value of *stat_year_column* the matching ``last_year``
    cell receives that year minus one, so yearly rows can later be
    self-joined to fetch last year's figures.

    Rows whose value cannot be parsed keep an empty string.

    :param data: frame to extend in place (also returned for chaining).
    :param stat_year_column: series of year values (str or int); assumed
        to share *data*'s default RangeIndex — TODO confirm.
    :return: *data* with the new string-typed ``last_year`` column.
    """
    data["last_year"] = ""
    for i in range(len(stat_year_column)):
        try:
            # BUG fix: the original computed "int(i) - 1" — the loop index
            # minus one — instead of decrementing the year value itself.
            previous_year = int(stat_year_column[i]) - 1
            # Assign through .loc to avoid chained assignment on a copy.
            data.loc[i, "last_year"] = previous_year
        except (ValueError, TypeError, KeyError):
            # Non-numeric year: leave the default empty value.
            continue
    data["last_year"] = data["last_year"].astype(str)
    return data



### 天猫旗舰店会员数据统计
## 日
# 会员数据统计-日
def gen_member_day_report(start_date, end_date):
    """Daily member-statistics report for the Tmall flagship store.

    Reads raw rows of ``tmall_flagship_store_member_day`` between
    *start_date* and *end_date* (inclusive) and returns them sorted by
    date with the Chinese display column names used by the reports.
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # The daily table IS the base table, so no aggregation is needed:
    # just pull the rows for the requested range.
    query = session.query(tmall_flagship_store_member_day).filter(
        tmall_flagship_store_member_day.stat_time.between(start_date, end_date))
    member_df_daily = pd.read_sql(query.statement, con=engine.connect())

    # Clean-up: drop the surrogate key and normalise the date column to
    # string, then order chronologically.
    member_df_daily.drop(labels=["id"], inplace=True, axis=1)
    member_df_daily["stat_time"] = member_df_daily["stat_time"].astype(str)
    member_df_daily.sort_values(by="stat_time", ascending=True, inplace=True)

    # Map database column names onto the Chinese report headers.
    member_df_daily.rename(
        columns={"stat_time": "数据统计时间", "member_amount": "会员规模", "new_member_count": "新增会员人数",
                 "active_member_count": "活跃会员人数",
                 "member_trans_amount": "会员成交金额", "member_trans_user_count": "会员成交人数",
                 "member_avg_trans_price": "会员客单价",
                 "member_get_exclusive_coupon_count": "会员专享券领取数",
                 "member_use_exclusive_coupon_count": "会员专享券使用数",
                 "member_exclusive_coupon_guide_trans_amount": "会员专享券引导成交金额",
                 "new_member_get_exclusive_coupon_count": "新会员专享券领取数",
                 "new_member_use_exclusive_coupon_count": "新会员专享券使用数",
                 "new_member_exclusive_coupon_guide_trans_amount": "新会员专享券引导成交金额"
                 },
        inplace=True)

    return member_df_daily


## 周
# 会员数据统计-周
def gen_member_week_report(start_date, end_date):
    """Weekly member-statistics report for the Tmall flagship store.

    Daily rows of ``tmall_flagship_store_member_day`` are tagged with
    their statistical week, summed per week, and returned with the
    Chinese display column names.

    :return: weekly DataFrame, or ``None`` when the adjusted range is
        empty (``week_adjust`` returned empty strings).
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # The table has no data before 2023-08-15; clamp the query start.
    if start_date <= "2023-08-15":
        start_date = "2023-08-15"

    # Snap the raw dates onto whole statistical weeks.
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)
    if (start_date == "") or (end_date == ""):
        # No complete week fits the requested range.
        return None

    # Read one extra week of history so the first requested week still has
    # the context needed for week-over-week comparisons.
    previous_week_start = datetime.strftime(
        datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")

    # Pull the daily base-table rows for the extended range.
    query = session.query(tmall_flagship_store_member_day).filter(
        tmall_flagship_store_member_day.stat_time.between(previous_week_start, end_date))
    member_df = pd.read_sql(query.statement, con=engine.connect())

    # Drop the surrogate key and normalise the date column.
    member_df.drop(labels=["id"], inplace=True, axis=1)
    member_df["stat_time"] = member_df["stat_time"].astype(str)

    # Tag each daily row with its "start~end" statistical week, then sort.
    member_df = deploy_stat_week(member_df, member_df["stat_time"])
    member_df.sort_values(by="stat_time", ascending=True, inplace=True)

    # Weekly aggregation of the additive metrics.
    member_df_weekly = member_df.groupby(["stat_week"])[
        ['member_amount', 'new_member_count', 'active_member_count', 'member_trans_amount',
         'member_trans_user_count', 'member_get_exclusive_coupon_count',
         'member_use_exclusive_coupon_count', 'member_exclusive_coupon_guide_trans_amount',
         'new_member_get_exclusive_coupon_count', 'new_member_use_exclusive_coupon_count',
         'new_member_exclusive_coupon_guide_trans_amount']].sum().reset_index()
    # Average transaction price cannot be summed — recompute it per week.
    member_df_weekly['member_avg_trans_price'] = member_df_weekly["member_trans_amount"] / member_df_weekly[
        "member_trans_user_count"]

    # Keep only the weeks that lie fully inside the requested range
    # (this drops the extra history week read above).
    member_df_weekly["start_date"] = member_df_weekly["stat_week"].apply(lambda x: str(x).split("~")[0])
    member_df_weekly["end_date"] = member_df_weekly["stat_week"].apply(lambda x: str(x).split("~")[1])
    member_df_weekly = member_df_weekly[
        (member_df_weekly["start_date"] >= start_date) & (member_df_weekly["end_date"] <= end_date)]
    member_df_weekly.drop(labels=["start_date", "end_date"], inplace=True, axis=1)

    # Rename the time column, then map onto the Chinese report headers.
    member_df_weekly.rename(columns={"stat_week": "stat_time"}, inplace=True)
    member_df_weekly.rename(
        columns={"stat_time": "数据统计时间", "member_amount": "会员规模", "new_member_count": "新增会员人数",
                 "active_member_count": "活跃会员人数",
                 "member_trans_amount": "会员成交金额", "member_trans_user_count": "会员成交人数",
                 "member_avg_trans_price": "会员客单价",
                 "member_get_exclusive_coupon_count": "会员专享券领取数",
                 "member_use_exclusive_coupon_count": "会员专享券使用数",
                 "member_exclusive_coupon_guide_trans_amount": "会员专享券引导成交金额",
                 "new_member_get_exclusive_coupon_count": "新会员专享券领取数",
                 "new_member_use_exclusive_coupon_count": "新会员专享券使用数",
                 "new_member_exclusive_coupon_guide_trans_amount": "新会员专享券引导成交金额"
                 },
        inplace=True)

    return member_df_weekly


## 月
# 会员数据统计-月
def gen_member_month_report(start_date, end_date):
    """Monthly member-statistics report for the Tmall flagship store.

    Daily rows of ``tmall_flagship_store_member_day`` are grouped by
    calendar (year, month), summed, and returned with the Chinese
    display column names.

    :return: monthly DataFrame, or ``None`` when the adjusted range is
        empty (``month_adjust`` returned empty strings).
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # The table has no data before 2023-08-15; clamp the query start.
    if start_date <= "2023-08-15":
        start_date = "2023-08-15"

    # Snap the raw dates onto whole calendar months.
    start_date, end_date = month_adjust(start_date=start_date, end_date=end_date)
    if (start_date == "") or (end_date == ""):
        # No complete month fits the requested range.
        return None

    # Read roughly one extra month (31 days) of history so the first
    # requested month has month-over-month context.
    previous_month_start = datetime.strftime(
        datetime.strptime(start_date, "%Y-%m-%d") - timedelta(31), "%Y-%m-%d")
    # Year/month of the first requested month, used for filtering below.
    month_tag = start_date.split("-")[1]
    year_tag = start_date.split("-")[0]

    # Pull the daily base-table rows for the extended range.
    query = session.query(tmall_flagship_store_member_day).filter(
        tmall_flagship_store_member_day.stat_time.between(previous_month_start, end_date))
    member_df = pd.read_sql(query.statement, con=engine.connect())

    # Drop the surrogate key and normalise the date column.
    member_df.drop(labels=["id"], inplace=True, axis=1)
    member_df["stat_time"] = member_df["stat_time"].astype(str)

    member_df = deploy_stat_week(member_df, member_df["stat_time"])
    member_df.sort_values(by="stat_time", ascending=True, inplace=True)
    member_df["year"] = member_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    member_df["month"] = member_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))

    # Monthly aggregation of the additive metrics.
    member_df_monthly = member_df.groupby(["year", "month"])[
        ['member_amount', 'new_member_count', 'active_member_count', 'member_trans_amount',
         'member_trans_user_count', 'member_get_exclusive_coupon_count',
         'member_use_exclusive_coupon_count', 'member_exclusive_coupon_guide_trans_amount',
         'new_member_get_exclusive_coupon_count', 'new_member_use_exclusive_coupon_count',
         'new_member_exclusive_coupon_guide_trans_amount']].sum().reset_index()
    # Average transaction price cannot be summed — recompute it per month.
    member_df_monthly['member_avg_trans_price'] = member_df_monthly["member_trans_amount"] / member_df_monthly[
        "member_trans_user_count"]

    # Keep only months on or after the first requested month, i.e. drop the
    # extra history month read above.
    # BUG fix: the original second clause required month <= month_tag for
    # later years, which wrongly excluded in-range months (e.g. with a
    # range starting 2023-05, the months 2024-06..2024-12 were dropped).
    member_df_monthly = member_df_monthly[
        (member_df_monthly["year"] > int(year_tag)) |
        ((member_df_monthly["year"] == int(year_tag)) & (member_df_monthly["month"] >= int(month_tag)))]

    # Build the "YYYY-MM" time column (zero-padded month) and rename it.
    member_df_monthly["year"] = member_df_monthly["year"].astype(str)
    member_df_monthly["month"] = member_df_monthly["month"].apply(lambda x: ("0" + str(x)) if x < 10 else str(x))
    member_df_monthly["stat_ym"] = member_df_monthly["year"] + "-" + member_df_monthly["month"]
    member_df_monthly.rename(columns={"stat_ym": "stat_time"}, inplace=True)
    member_df_monthly.drop(labels=["year", "month"], inplace=True, axis=1)

    # Map onto the Chinese report headers.
    member_df_monthly.rename(
        columns={"stat_time": "数据统计时间", "member_amount": "会员规模", "new_member_count": "新增会员人数",
                 "active_member_count": "活跃会员人数",
                 "member_trans_amount": "会员成交金额", "member_trans_user_count": "会员成交人数",
                 "member_avg_trans_price": "会员客单价",
                 "member_get_exclusive_coupon_count": "会员专享券领取数",
                 "member_use_exclusive_coupon_count": "会员专享券使用数",
                 "member_exclusive_coupon_guide_trans_amount": "会员专享券引导成交金额",
                 "new_member_get_exclusive_coupon_count": "新会员专享券领取数",
                 "new_member_use_exclusive_coupon_count": "新会员专享券使用数",
                 "new_member_exclusive_coupon_guide_trans_amount": "新会员专享券引导成交金额"
                 },
        inplace=True)

    return member_df_monthly


## 年
# 会员数据统计-年
def gen_member_year_report(start_date, end_date):
    """Yearly member-statistics report for the Tmall flagship store.

    Daily rows of ``tmall_flagship_store_member_day`` are grouped by
    calendar year, summed, and returned with the Chinese display column
    names.

    :return: yearly DataFrame, or ``None`` when the adjusted range is
        empty (``year_adjust`` returned empty strings).
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # The table has no data before 2023-08-15; clamp the query start.
    if start_date <= "2023-08-15":
        start_date = "2023-08-15"

    # Snap the raw dates onto whole calendar years.
    start_date, end_date = year_adjust(start_date=start_date, end_date=end_date)
    if (start_date == "") or (end_date == ""):
        # No complete year fits the requested range.
        return None

    # Read roughly one extra year (366 days) of history for year-over-year
    # context.
    previous_year_start = datetime.strftime(
        datetime.strptime(start_date, "%Y-%m-%d") - timedelta(366), "%Y-%m-%d")
    # First requested year, used for filtering below.
    year_tag = start_date.split("-")[0]

    # Pull the daily base-table rows for the extended range.
    query = session.query(tmall_flagship_store_member_day).filter(
        tmall_flagship_store_member_day.stat_time.between(previous_year_start, end_date))
    member_df = pd.read_sql(query.statement, con=engine.connect())

    # Drop the surrogate key and normalise the date column.
    member_df.drop(labels=["id"], inplace=True, axis=1)
    member_df["stat_time"] = member_df["stat_time"].astype(str)

    member_df = deploy_stat_week(member_df, member_df["stat_time"])
    member_df.sort_values(by="stat_time", ascending=True, inplace=True)
    member_df["year"] = member_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))

    # Yearly aggregation of the additive metrics.
    member_df_yearly = member_df.groupby(["year"])[
        ['member_amount', 'new_member_count', 'active_member_count', 'member_trans_amount',
         'member_trans_user_count', 'member_get_exclusive_coupon_count',
         'member_use_exclusive_coupon_count', 'member_exclusive_coupon_guide_trans_amount',
         'new_member_get_exclusive_coupon_count', 'new_member_use_exclusive_coupon_count',
         'new_member_exclusive_coupon_guide_trans_amount']].sum().reset_index()
    # Average transaction price cannot be summed — recompute it per year.
    member_df_yearly['member_avg_trans_price'] = member_df_yearly["member_trans_amount"] / member_df_yearly[
        "member_trans_user_count"]

    # Keep only years from the first requested year onward (drops the extra
    # history year read above).
    member_df_yearly["year"] = member_df_yearly["year"].astype(str)
    member_df_yearly = member_df_yearly[member_df_yearly["year"] >= year_tag]

    # The year itself is the time column; rename it, then map onto the
    # Chinese report headers.
    member_df_yearly.rename(columns={"year": "stat_time"}, inplace=True)
    member_df_yearly.rename(
        columns={"stat_time": "数据统计时间", "member_amount": "会员规模", "new_member_count": "新增会员人数",
                 "active_member_count": "活跃会员人数",
                 "member_trans_amount": "会员成交金额", "member_trans_user_count": "会员成交人数",
                 "member_avg_trans_price": "会员客单价",
                 "member_get_exclusive_coupon_count": "会员专享券领取数",
                 "member_use_exclusive_coupon_count": "会员专享券使用数",
                 "member_exclusive_coupon_guide_trans_amount": "会员专享券引导成交金额",
                 "new_member_get_exclusive_coupon_count": "新会员专享券领取数",
                 "new_member_use_exclusive_coupon_count": "新会员专享券使用数",
                 "new_member_exclusive_coupon_guide_trans_amount": "新会员专享券引导成交金额"
                 },
        inplace=True)

    return member_df_yearly


### 本店搜索词
## 日
# 本店日搜索词
def gen_search_day_report(start_date, end_date):
    """Daily in-store search-word report.

    Reads ``tmall_flagship_store_visitors_from_search_word_day`` for the
    given date range and returns the rows with the Chinese display
    column names, minus the columns the daily report does not show.
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # Fetch the raw daily search-word rows for the requested range.
    query = session.query(tmall_flagship_store_visitors_from_search_word_day).filter(
        tmall_flagship_store_visitors_from_search_word_day.stat_time.between(start_date, end_date))
    search_df = pd.read_sql(query.statement, con=engine.connect())

    # Clean-up: drop the surrogate key, normalise the date column, and
    # remove the bookkeeping / unused metric columns.
    search_df.drop(labels=["id"], inplace=True, axis=1)
    search_df["stat_time"] = search_df["stat_time"].astype(str)
    search_df.drop(
        labels=["crawl_time", "create_time", "bounce_lose_rate", "view_count", "guide_payment_good_count"],
        inplace=True, axis=1)

    # Map database column names onto the Chinese report headers (entries
    # for already-dropped columns are harmless no-ops).
    search_df.rename(
        columns={"stat_time": "数据统计时间", "search_word": "本店搜索词", "visitors_count": "带来的访客数",
                 "view_count": "带来的浏览量", "bounce_lose_rate": "跳失率",
                 "guide_order_buyer_count": "引导下单买家数",
                 "guide_order_conversion_rate": "引导下单转化率", "guide_payment_amount": "引导支付金额",
                 "good_cart_buyer_count": "加购人数", "shoutao_search_visitors": "手淘搜索访客数"},
        inplace=True)

    return search_df


## 周
def gen_search_week_report(start_date, end_date):
    """Weekly in-store search-word report with week-over-week ratios.

    Pipeline: (1) weekly mobile-Taobao ("shoutao") search visitor totals
    from gen_traffic_week_report, joined with their own previous week;
    (2) daily search-word rows aggregated per (week, search word) after
    each word is collapsed into a brand/category bucket, also joined with
    the previous week; (3) both merged, WoW ratios computed, and the
    result filtered back to the requested range and renamed to Chinese
    report headers.  Returns ``None`` when the adjusted range is empty.
    """
    # Validate the input date strings.
    judge_string_empty(start=start_date, end=end_date)

    # The table has no data before 2023-12-10; clamp the query start.
    if start_date <= "2023-12-10":
        # Reset the start date to the earliest available day.
        start_date = "2023-12-10"
    else:
        pass

    # Snap the input dates onto whole statistical weeks.
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)

    # week_adjust signals "no complete week in range" with empty strings.
    if (start_date == "") or (end_date == ""):
        return None
    else:
        pass
    '''#print("sb::", start_date, end_date)
    # 由于数据库历史数据的问题，需要对具体的数据库日期进行进一步调整
    query = session.query(tmall_flagship_store_visitors_from_search_word_day).filter(
        tmall_flagship_store_visitors_from_search_word_day.stat_time.between(start_date, end_date))
    test = pd.read_sql(query.statement, con=engine.connect())
    # 选取数据库具体表中能取到的最小的日期，替换开始日期进行进一步调整
    start_date = str(test["stat_time"].min())
    # 取到了能取到的最早的时间之后，再对时间进行一次调整
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)'''
    # Debug print for manual checking.
    print("sb::", start_date, end_date)

    # Data loading: read one extra week of history so the first requested
    # week has the previous-week data needed for WoW comparisons.
    # week_range = start_date + "~" + end_date

    previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")
    previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")

    # NOTE(review): previous_week_end and the four values below are never
    # used in this function — candidates for removal.
    start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
    end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
    month_tag = start_date.split("-")[1]
    year_tag = start_date.split("-")[0]

    # print(previous_week_start, end_date)
    # Part 1: weekly shoutao search visitor counts.
    # Keep only the "platform traffic / shoutao search / summary" rows of
    # the weekly traffic report; they provide the denominator for the
    # visitor-ratio metric.
    tmall_flagship_store_traffic_source_week_stat = gen_traffic_week_report(start_date=previous_week_start,
                                                                            end_date=end_date)
    tmall_flagship_store_traffic_source_week_stat = tmall_flagship_store_traffic_source_week_stat[
        (tmall_flagship_store_traffic_source_week_stat["二级流量来源"] == "手淘搜索")
        & (tmall_flagship_store_traffic_source_week_stat["三级流量来源"] == "汇总")
        & (tmall_flagship_store_traffic_source_week_stat["一级流量来源"] == "平台流量")][["数据统计时间", "访客人数"]]
    tmall_flagship_store_traffic_source_week_stat.rename(
        columns={"访客人数": "shoutao_search_visitors", "数据统计时间": "stat_week"}, inplace=True)
    tmall_flagship_store_traffic_source_week_stat.reset_index(inplace=True)

    # The shoutao table also needs last week's value so the ratio can be
    # compared week-over-week: copy it with "last week" column names.
    traffic_last_week = tmall_flagship_store_traffic_source_week_stat.copy()
    traffic_last_week.rename(columns={"shoutao_search_visitors": "shoutao_search_visitors_last_week",
                                      "stat_week": "last_week"},
                             inplace=True)
    # Tag every week in the original table with its previous week's range.
    tmall_flagship_store_traffic_source_week_stat = deploy_lastweek_date(
        data=tmall_flagship_store_traffic_source_week_stat,
        stat_week_column=tmall_flagship_store_traffic_source_week_stat["stat_week"]
    )
    # Self-join on last_week to pull in the previous week's visitor count.
    tmall_flagship_store_traffic_source_week_stat = pd.merge(left=tmall_flagship_store_traffic_source_week_stat,
                                                             right=traffic_last_week,
                                                             how="left", on=["last_week"])
    # Shoutao table now carries both this week's and last week's visitors.
    tmall_flagship_store_traffic_source_week_stat = tmall_flagship_store_traffic_source_week_stat[["stat_week",
                                                                                                   "shoutao_search_visitors",
                                                                                                   "shoutao_search_visitors_last_week"]]
    # print(tmall_flagship_store_traffic_source_week_stat)


    # In-store search words: pull the daily base-table rows.
    query = session.query(tmall_flagship_store_visitors_from_search_word_day).filter(
        tmall_flagship_store_visitors_from_search_word_day.stat_time.between(previous_week_start, end_date))
    key_word_own_df = pd.read_sql(query.statement, con=engine.connect())
    # Pre-process the rows fetched from SQL: drop the surrogate key and
    # normalise the date column.
    key_word_own_df.drop(labels=["id"], inplace=True, axis=1)
    key_word_own_df["stat_time"] = key_word_own_df["stat_time"].astype(str)

    # Tag each daily row with its statistical week plus year/month labels.
    key_word_own_df = deploy_stat_week(key_word_own_df, key_word_own_df["stat_time"])
    key_word_own_df["year"] = key_word_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    key_word_own_df["month"] = key_word_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    key_word_own_df["year"] = key_word_own_df["year"].astype(str)
    key_word_own_df["month"] = key_word_own_df["month"].astype(str)

    # Collapse every raw search word into its brand-word / category-word
    # bucket so the weekly aggregation happens at that two-way level.
    key_word_own_df["search_word"] = key_word_own_df["search_word"].apply(
        lambda x: deploy_band_category(mode="本店", keyword_element=x))
    # print(key_word_own_df)
    # print(key_word_own_df["search_word"])


    # Part 2: weekly aggregation at (week, search word) level.
    key_word_own_sub1 = key_word_own_df.groupby(["stat_week", "search_word"]).agg(
        {"visitors_count": np.sum, "view_count": np.sum,
         "guide_order_buyer_count": np.sum,
         "good_cart_buyer_count": np.sum}).reset_index()
    # Copy used for the WoW join, with columns renamed to "last week".
    key_word_last_week = key_word_own_sub1.copy()
    key_word_last_week.rename(columns={"visitors_count": "visitors_count_last_week",
                                       "view_count": "view_count_last_week",
                                       "guide_order_buyer_count": "guide_order_buyer_count_last_week",
                                       "good_cart_buyer_count": "good_cart_buyer_count_last_week",
                                       "stat_week": "last_week"},
                              inplace=True)
    # Tag sub1 with each row's previous-week range, then join last week's
    # metrics onto it.
    key_word_own_sub1 = deploy_lastweek_date(data=key_word_own_sub1,
                                             stat_week_column=key_word_own_sub1["stat_week"])
    # print(key_word_own_sub1)
    # print(key_word_last_week)
    # Join on (last_week, search_word) to fetch last week's figures.
    key_word_own_sub1 = pd.merge(left=key_word_own_sub1, right=key_word_last_week,
                                 how="left", on=["last_week", "search_word"])
    # The helper column is no longer needed after the join.
    key_word_own_sub1.drop(labels=["last_week"], inplace=True, axis=1)

    '''# 第三部分数据
    # 本店-周层面访客数聚合
    # 主要是用来算手淘访客数占比的
    key_word_own_sub2 = key_word_own_df.groupby(["stat_week"]).agg({"visitors_count": np.sum, "view_count": np.sum,
                                                                    "guide_order_buyer_count": np.sum,
                                                                    "good_cart_buyer_count": np.sum}).reset_index()
    key_word_own_sub2.rename(columns={"visitors_count": "visitors_count_weekly",
                                      "view_count": "view_count_weekly",
                                      "guide_order_buyer_count": "guide_order_buyer_count_weekly",
                                      "good_cart_buyer_count": "good_cart_buyer_count_weekly"}, inplace=True)
    key_word_own_sub2 = key_word_own_sub2[["stat_week", "visitors_count_weekly"]]
    # 整一个匹配上周数据，从而计算环比的子表
    key_word_sub2_last_week = key_word_own_sub2.copy()
    key_word_sub2_last_week.rename(columns={"stat_week": "last_week",
                                            "visitors_count_weekly": "visitors_count_weekly_last_week"},
                                   inplace=True)
    # 给sub2的表标记上周时间范围
    key_word_own_sub2 = deploy_lastweek_date(data=key_word_own_sub2,
                                             stat_week_column=key_word_own_sub2["stat_week"])
    key_word_own_sub2 = pd.merge(left=key_word_own_sub2, right=key_word_sub2_last_week,
                                 how="left", on=["last_week"])
    # 匹配到上周的数据之后，再把last week这个标记去掉
    key_word_own_sub2.drop(labels=["last_week"], inplace=True, axis=1)'''

    # Join the shoutao visitor counts (this week and last week) per week.
    key_word_own_pro = pd.merge(key_word_own_sub1, tmall_flagship_store_traffic_source_week_stat, on=["stat_week"],
                                how="left")
    # key_word_own_pro = pd.merge(key_word_own_pro, key_word_own_sub2, on=["stat_week"], how="left")

    # Compute the week-over-week ratios.  A zero last-week value yields
    # inf/NaN rather than raising, per pandas division semantics.
    # Visitors brought by the search word, WoW.
    key_word_own_pro["visitors_count_mom"] = (key_word_own_pro["visitors_count"] - key_word_own_pro[
        "visitors_count_last_week"]) / key_word_own_pro["visitors_count_last_week"]
    # Page views, WoW.
    key_word_own_pro["view_count_mom"] = (key_word_own_pro["view_count"] - key_word_own_pro[
        "view_count_last_week"]) / key_word_own_pro["view_count_last_week"]
    # Guided order buyers, WoW.
    key_word_own_pro["guide_order_buyer_count_mom"] = (key_word_own_pro["guide_order_buyer_count"] - key_word_own_pro[
        "guide_order_buyer_count_last_week"]) / key_word_own_pro["guide_order_buyer_count_last_week"]
    try:
        # Cart adders, WoW.
        key_word_own_pro["good_cart_buyer_count_mom"] = (key_word_own_pro["good_cart_buyer_count"] - key_word_own_pro[
            "good_cart_buyer_count_last_week"]) / key_word_own_pro["good_cart_buyer_count_last_week"]
    except:
        # NOTE(review): bare except silently defaults this WoW metric to 0
        # on any failure — consider narrowing to KeyError.
        key_word_own_pro["good_cart_buyer_count_mom"] = 0
    # Share of shoutao search visitors — this week.
    key_word_own_pro["brand_keyword_visitor_ratio"] = key_word_own_pro["visitors_count"] / key_word_own_pro[
        "shoutao_search_visitors"]
    # Share of shoutao search visitors — last week.
    key_word_own_pro["brand_keyword_visitor_ratio_last_week"] = key_word_own_pro["visitors_count_last_week"] / \
                                                                key_word_own_pro[
                                                                    "shoutao_search_visitors_last_week"]
    # Share of shoutao search visitors — WoW.
    key_word_own_pro["brand_keyword_visitor_ratio_mom"] = (key_word_own_pro["brand_keyword_visitor_ratio"] -
                                                           key_word_own_pro["brand_keyword_visitor_ratio_last_week"]) / \
                                                          key_word_own_pro["brand_keyword_visitor_ratio_last_week"]
    # Drop the last-week helper columns now that the ratios are computed.
    key_word_own_pro.drop(labels=["visitors_count_last_week", "view_count_last_week",
                                  "guide_order_buyer_count_last_week", "good_cart_buyer_count_last_week",
                                  "brand_keyword_visitor_ratio_last_week",
                                  "shoutao_search_visitors_last_week"],
                          inplace=True, axis=1)

    # Filter back down to the requested range (drops the extra history
    # week read for the WoW computation).
    key_word_own_pro["start_date"] = key_word_own_pro["stat_week"].apply(lambda x: str(x).split("~")[0])
    key_word_own_pro["end_date"] = key_word_own_pro["stat_week"].apply(lambda x: str(x).split("~")[1])
    key_word_own_pro = key_word_own_pro[
        (key_word_own_pro["start_date"] >= start_date) & (key_word_own_pro["end_date"] <= end_date)]
    key_word_own_pro.drop(labels=["start_date", "end_date", "view_count"], inplace=True, axis=1)

    # Map onto the Chinese report headers.
    key_word_own_pro.rename(
        columns={"stat_week": "数据统计时间", "search_word": "本店搜索词", "visitors_count": "带来的访客数",
                 "view_count": "带来的浏览量", "bounce_lose_rate": "跳失率",
                 "guide_order_buyer_count": "引导下单买家数",
                 "guide_order_conversion_rate": "引导下单转化率", "guide_payment_amount": "引导支付金额",
                 "good_cart_buyer_count": "加购人数", "shoutao_search_visitors": "手淘搜索访客数",
                 "brand_keyword_visitor_ratio": "访客数占手淘搜索访客数",

                 "visitors_count_mom": "带来的访客数-环比", "view_count_mom": "带来的浏览量-环比",
                 "guide_order_buyer_count_mom": "引导下单买家数-环比",
                 "good_cart_buyer_count_mom": "加购人数-环比",
                 "brand_keyword_visitor_ratio_mom": "访客数占手淘搜索访客数-环比"},
        inplace=True)

    return key_word_own_pro


## 月
# 需要输入一个月的时间跨度，才能拿到一个月的数据
# start_date必须从每个月的一月开始
def gen_search_month_report(start_date, end_date):
    """Build the monthly own-store search-word report.

    Aggregates daily search-word rows into calendar months, attaches the
    Taobao-search ("手淘搜索") visitor totals, and computes month-over-month
    (``*_mom``) and year-over-year (``*_yoy``) change rates per metric.

    Args:
        start_date: "YYYY-MM-DD" string; expected to sit at the beginning of
            a month (``month_adjust`` snaps it).
        end_date: "YYYY-MM-DD" string.

    Returns:
        A DataFrame with Chinese report headers, or None when the adjusted
        date range collapses to nothing.
    """
    # Reject empty inputs up front.
    judge_string_empty(start=start_date, end=end_date)

    # No history exists before 2023-12-10; clamp earlier starts to it.
    if start_date <= "2023-12-10":
        start_date = "2023-12-10"

    # Snap the requested range to whole calendar months.
    start_date, end_date = month_adjust(start_date=start_date, end_date=end_date)

    # month_adjust signals "no usable range" with empty strings.
    if (start_date == "") or (end_date == ""):
        return None

    # Year/month tags of the first requested month; used at the end to
    # discard the extra look-back month loaded for the mom calculation.
    year_tag = start_date.split("-")[0]
    month_tag = start_date.split("-")[1]
    print("sb::", month_tag, year_tag)
    print("sb::", start_date, end_date)

    # Same calendar range shifted one year back, for the yoy comparison.
    start_date_last_year = get_last_year_date(start_date)
    end_date_last_year = get_last_year_date(end_date)
    print("sb::", start_date_last_year, end_date_last_year)

    # Load one extra month of history so the first requested month still
    # has a previous month available for its mom figures.
    previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(31), "%Y-%m-%d")

    print(previous_week_start, end_date)

    # --- Part 1: monthly Taobao-search visitor counts -------------------
    tmall_flagship_store_traffic_source_month_stat = gen_traffic_month_report(start_date=previous_week_start,
                                                                              end_date=end_date)
    tmall_flagship_store_traffic_source_month_stat = tmall_flagship_store_traffic_source_month_stat[
        (tmall_flagship_store_traffic_source_month_stat["二级流量来源"] == "手淘搜索")
        & (tmall_flagship_store_traffic_source_month_stat["三级流量来源"] == "汇总")
        & (tmall_flagship_store_traffic_source_month_stat["一级流量来源"] == "平台流量")][["数据统计时间", "访客人数"]]
    tmall_flagship_store_traffic_source_month_stat.rename(
        columns={"访客人数": "shoutao_search_visitors", "数据统计时间": "stat_ym"}, inplace=True)
    tmall_flagship_store_traffic_source_month_stat.reset_index(drop=True, inplace=True)

    # Renamed copy used to self-merge each month with its previous month.
    traffic_last_month = tmall_flagship_store_traffic_source_month_stat.copy()
    traffic_last_month.rename(columns={"shoutao_search_visitors": "shoutao_search_visitors_last_month",
                                       "stat_ym": "last_ym"},
                              inplace=True)
    # Tag every month with the previous month's key, then merge.
    tmall_flagship_store_traffic_source_month_stat = deploy_lastmonth_date(
        data=tmall_flagship_store_traffic_source_month_stat,
        stat_month_column=tmall_flagship_store_traffic_source_month_stat["stat_ym"]
    )
    tmall_flagship_store_traffic_source_month_stat = pd.merge(left=tmall_flagship_store_traffic_source_month_stat,
                                                              right=traffic_last_month,
                                                              how="left", on=["last_ym"])
    # Keep only the current- and previous-month Taobao-search visitors.
    tmall_flagship_store_traffic_source_month_stat = tmall_flagship_store_traffic_source_month_stat[["stat_ym",
                                                                                                     "shoutao_search_visitors",
                                                                                                     "shoutao_search_visitors_last_month"]]

    # --- Part 2: own-store search-word rows, aggregated per month -------
    query = session.query(tmall_flagship_store_visitors_from_search_word_day).filter(
        tmall_flagship_store_visitors_from_search_word_day.stat_time.between(previous_week_start, end_date))
    key_word_own_df = pd.read_sql(query.statement, con=engine.connect())
    # Basic cleanup of the raw SQL rows.
    key_word_own_df.drop(labels=["id"], inplace=True, axis=1)
    key_word_own_df["stat_time"] = key_word_own_df["stat_time"].astype(str)

    # NOTE(review): the stat_week tag looks unused in this monthly path —
    # confirm deploy_stat_week has no row-level side effects before removing.
    key_word_own_df = deploy_stat_week(key_word_own_df, key_word_own_df["stat_time"])
    # Build the "YYYY-M" month key (month deliberately not zero-padded, to
    # match the keys produced by gen_traffic_month_report).
    key_word_own_df["year"] = key_word_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    key_word_own_df["month"] = key_word_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    key_word_own_df["year"] = key_word_own_df["year"].astype(str)
    key_word_own_df["month"] = key_word_own_df["month"].astype(str)
    key_word_own_df["stat_ym"] = key_word_own_df["year"] + "-" + key_word_own_df["month"]

    # Collapse each raw search word into its brand/category class.
    key_word_own_df["search_word"] = key_word_own_df["search_word"].apply(
        lambda x: deploy_band_category(mode="本店", keyword_element=x))

    # Aggregate per (month, word class).
    key_word_own_sub1 = key_word_own_df.groupby(["stat_ym", "search_word"]).agg(
        {"visitors_count": "sum", "view_count": "sum",
         "guide_order_buyer_count": "sum",
         "good_cart_buyer_count": "sum"}).reset_index()
    # Renamed copy providing last-month values for the mom calculation.
    key_word_last_week = key_word_own_sub1.copy()
    key_word_last_week.rename(columns={"visitors_count": "visitors_count_last_month",
                                       "view_count": "view_count_last_month",
                                       "guide_order_buyer_count": "guide_order_buyer_count_last_month",
                                       "good_cart_buyer_count": "good_cart_buyer_count_last_month",
                                       "stat_ym": "last_ym"},
                              inplace=True)
    # Tag each month with its previous month and attach last-month data.
    key_word_own_sub1 = deploy_lastmonth_date(data=key_word_own_sub1,
                                              stat_month_column=key_word_own_sub1["stat_ym"])
    key_word_own_sub1 = pd.merge(left=key_word_own_sub1, right=key_word_last_week,
                                 how="left", on=["last_ym", "search_word"])
    # The previous-month key has served its purpose.
    key_word_own_sub1.drop(labels=["last_ym"], inplace=True, axis=1)

    # --- Part 3: last year's Taobao-search visitors (yoy input) ---------
    traffic_source_last_year = gen_traffic_month_report(start_date=start_date_last_year, end_date=end_date_last_year)
    traffic_source_last_year = traffic_source_last_year[
        (traffic_source_last_year["二级流量来源"] == "手淘搜索")
        & (traffic_source_last_year["三级流量来源"] == "汇总")
        & (traffic_source_last_year["一级流量来源"] == "平台流量")][["数据统计时间", "访客人数"]]
    traffic_source_last_year.rename(
        columns={"访客人数": "shoutao_search_visitors_last_year", "数据统计时间": "last_year_ym"}, inplace=True)
    traffic_source_last_year.reset_index(drop=True, inplace=True)

    # --- Part 4: last year's search-word aggregates (yoy input) ---------
    query = session.query(tmall_flagship_store_visitors_from_search_word_day).filter(
        tmall_flagship_store_visitors_from_search_word_day.stat_time.between(start_date_last_year, end_date_last_year))
    key_word_own_new = pd.read_sql(query.statement, con=engine.connect())
    # Same cleanup and "YYYY-M" month key as Part 2.
    key_word_own_new.drop(labels=["id"], inplace=True, axis=1)
    key_word_own_new["stat_time"] = key_word_own_new["stat_time"].astype(str)
    key_word_own_new["year"] = key_word_own_new["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    key_word_own_new["month"] = key_word_own_new["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    key_word_own_new["year"] = key_word_own_new["year"].astype(str)
    key_word_own_new["month"] = key_word_own_new["month"].astype(str)
    key_word_own_new["stat_ym"] = key_word_own_new["year"] + "-" + key_word_own_new["month"]

    # Collapse each raw search word into its brand/category class.
    key_word_own_new["search_word"] = key_word_own_new["search_word"].apply(
        lambda x: deploy_band_category(mode="本店", keyword_element=x))

    # Aggregate per (month, word class), renamed to *_last_year.
    key_word_own_last_year = key_word_own_new.groupby(["stat_ym", "search_word"]).agg(
        {"visitors_count": "sum", "view_count": "sum",
         "guide_order_buyer_count": "sum",
         "good_cart_buyer_count": "sum"}).reset_index()
    key_word_own_last_year.rename(columns={"visitors_count": "visitors_count_last_year",
                                           "view_count": "view_count_last_year",
                                           "guide_order_buyer_count": "guide_order_buyer_count_last_year",
                                           "good_cart_buyer_count": "good_cart_buyer_count_last_year",
                                           "stat_ym": "last_year_ym"},
                                  inplace=True)
    key_word_own_last_year = key_word_own_last_year[["last_year_ym",
                                                     "search_word",
                                                     "visitors_count_last_year",
                                                     "view_count_last_year",
                                                     "guide_order_buyer_count_last_year",
                                                     "good_cart_buyer_count_last_year"]]

    # --- Merge all parts ------------------------------------------------
    key_word_own_pro = pd.merge(key_word_own_sub1, tmall_flagship_store_traffic_source_month_stat, on=["stat_ym"],
                                how="left")
    # Tag each month with its year-earlier key, then attach the yoy inputs.
    key_word_own_pro = deploy_lastyear_ym(data=key_word_own_pro,
                                          stat_ym_column=key_word_own_pro["stat_ym"])
    key_word_own_pro = pd.merge(key_word_own_pro, traffic_source_last_year, on=["last_year_ym"], how="left")
    key_word_own_pro = pd.merge(key_word_own_pro, key_word_own_last_year, on=["last_year_ym", "search_word"],
                                how="left")
    key_word_own_pro.drop(labels=["last_year_ym"], inplace=True, axis=1)

    # --- Month-over-month change rates ----------------------------------
    key_word_own_pro["visitors_count_mom"] = (key_word_own_pro["visitors_count"] - key_word_own_pro[
        "visitors_count_last_month"]) / key_word_own_pro["visitors_count_last_month"]
    key_word_own_pro["view_count_mom"] = (key_word_own_pro["view_count"] - key_word_own_pro[
        "view_count_last_month"]) / key_word_own_pro["view_count_last_month"]
    key_word_own_pro["guide_order_buyer_count_mom"] = (key_word_own_pro["guide_order_buyer_count"] - key_word_own_pro[
        "guide_order_buyer_count_last_month"]) / key_word_own_pro["guide_order_buyer_count_last_month"]
    try:
        key_word_own_pro["good_cart_buyer_count_mom"] = (key_word_own_pro["good_cart_buyer_count"] - key_word_own_pro[
            "good_cart_buyer_count_last_month"]) / key_word_own_pro["good_cart_buyer_count_last_month"]
    except (KeyError, TypeError):
        # Presumably the cart-buyer data can be missing or non-numeric
        # upstream (original code used a bare except) — fall back to 0.
        key_word_own_pro["good_cart_buyer_count_mom"] = 0

    # Share of Taobao-search visitors, current and previous month, and mom.
    key_word_own_pro["brand_keyword_visitor_ratio"] = key_word_own_pro["visitors_count"] / key_word_own_pro[
        "shoutao_search_visitors"]
    key_word_own_pro["brand_keyword_visitor_ratio_last_month"] = key_word_own_pro["visitors_count_last_month"] / \
                                                                 key_word_own_pro[
                                                                     "shoutao_search_visitors_last_month"]
    key_word_own_pro["brand_keyword_visitor_ratio_mom"] = (key_word_own_pro["brand_keyword_visitor_ratio"] -
                                                           key_word_own_pro["brand_keyword_visitor_ratio_last_month"]) / \
                                                          key_word_own_pro["brand_keyword_visitor_ratio_last_month"]

    # --- Year-over-year change rates ------------------------------------
    key_word_own_pro["brand_keyword_visitor_ratio_last_year"] = key_word_own_pro["visitors_count_last_year"] / \
                                                                key_word_own_pro["shoutao_search_visitors_last_year"]
    key_word_own_pro["brand_keyword_visitor_ratio_yoy"] = (key_word_own_pro["brand_keyword_visitor_ratio"] -
                                                           key_word_own_pro["brand_keyword_visitor_ratio_last_year"]) / \
                                                          key_word_own_pro["brand_keyword_visitor_ratio_last_year"]
    key_word_own_pro["visitors_count_yoy"] = (key_word_own_pro["visitors_count"] - key_word_own_pro[
        "visitors_count_last_year"]) / key_word_own_pro["visitors_count_last_year"]
    key_word_own_pro["view_count_yoy"] = (key_word_own_pro["view_count"] - key_word_own_pro[
        "view_count_last_year"]) / key_word_own_pro["view_count_last_year"]
    key_word_own_pro["guide_order_buyer_count_yoy"] = (key_word_own_pro["guide_order_buyer_count"] - key_word_own_pro[
        "guide_order_buyer_count_last_year"]) / key_word_own_pro["guide_order_buyer_count_last_year"]
    try:
        key_word_own_pro["good_cart_buyer_count_yoy"] = (key_word_own_pro["good_cart_buyer_count"] - key_word_own_pro[
            "good_cart_buyer_count_last_year"]) / key_word_own_pro["good_cart_buyer_count_last_year"]
    except (KeyError, TypeError):
        # Same fallback as the mom branch above.
        key_word_own_pro["good_cart_buyer_count_yoy"] = 0

    # Drop the intermediate helper columns.
    key_word_own_pro.drop(labels=["visitors_count_last_month", "view_count_last_month",
                                  "guide_order_buyer_count_last_month", "good_cart_buyer_count_last_month",
                                  "brand_keyword_visitor_ratio_last_month",
                                  "shoutao_search_visitors_last_month"],
                          inplace=True, axis=1)
    key_word_own_pro.drop(labels=["brand_keyword_visitor_ratio_last_year", "shoutao_search_visitors_last_year",
                                  "visitors_count_last_year", "view_count_last_year",
                                  "guide_order_buyer_count_last_year",
                                  "good_cart_buyer_count_last_year"],
                          inplace=True, axis=1)

    # Keep only months from the first requested month onward; the extra
    # look-back month was loaded solely for the mom calculation.
    # (Fix: the previous filter also required month <= month_tag in later
    # years, wrongly dropping months past month_tag when the range spans
    # more than one year.)
    key_word_own_pro["year"] = key_word_own_pro["stat_ym"].apply(lambda x: int(str(x).split("-")[0]))
    key_word_own_pro["month"] = key_word_own_pro["stat_ym"].apply(lambda x: int(str(x).split("-")[1]))
    key_word_own_pro = key_word_own_pro[
        ((key_word_own_pro["month"] >= int(month_tag)) & (key_word_own_pro["year"] == int(year_tag))) |
        (key_word_own_pro["year"] > int(year_tag))]
    key_word_own_pro.drop(labels=["year", "month", "view_count"], inplace=True, axis=1)

    # Rename to the Chinese report headers.
    key_word_own_pro.rename(
        columns={"stat_ym": "数据统计时间", "search_word": "本店搜索词", "visitors_count": "带来的访客数",
                 "view_count": "带来的浏览量", "bounce_lose_rate": "跳失率",
                 "guide_order_buyer_count": "引导下单买家数",
                 "guide_order_conversion_rate": "引导下单转化率", "guide_payment_amount": "引导支付金额",
                 "good_cart_buyer_count": "加购人数", "shoutao_search_visitors": "手淘搜索访客数",
                 "brand_keyword_visitor_ratio": "访客数占手淘搜索访客数",
                 "visitors_count_mom": "带来的访客数-环比",
                 "view_count_mom": "带来的浏览量-环比",
                 "guide_order_buyer_count_mom": "引导下单买家数-环比",
                 "good_cart_buyer_count_mom": "加购人数-环比",
                 "brand_keyword_visitor_ratio_mom": "访客数占手淘搜索访客数-环比",

                 "brand_keyword_visitor_ratio_yoy": "访客数占手淘搜索访客数-同比",
                 "visitors_count_yoy": "带来的访客数-同比",
                 "view_count_yoy": "带来的浏览量-同比",
                 "guide_order_buyer_count_yoy": "引导下单买家数-同比",
                 "good_cart_buyer_count_yoy": "加购人数-同比"},
        inplace=True)

    return key_word_own_pro






### Own-store traffic sources
## Daily
def gen_traffic_day_report(start_date, end_date):
    """Build the daily own-store traffic-source report.

    Reads per-day traffic-source rows between start_date and end_date,
    recomputes per first-level-source "汇总" (total) rows, and derives the
    visitor share, payment share and payment conversion of every source.

    Args:
        start_date: "YYYY-MM-DD" string.
        end_date: "YYYY-MM-DD" string.

    Returns:
        A DataFrame with Chinese report headers, or None when either
        input is empty.
    """
    # Reject empty inputs up front.
    judge_string_empty(start=start_date, end=end_date)
    if (start_date == "") or (end_date == ""):
        return None

    # Pull the raw daily rows for the requested window.
    query = session.query(tmall_flagship_store_traffic_source_all_day).filter(
        tmall_flagship_store_traffic_source_all_day.stat_time.between(start_date, end_date))
    flow_own_df = pd.read_sql(query.statement, con=engine.connect())

    # Basic cleanup of the raw SQL rows.
    flow_own_df.drop(labels=["id"], inplace=True, axis=1)
    flow_own_df["stat_time"] = flow_own_df["stat_time"].astype(str)

    # Drop second-level "汇总" (summary) rows; totals are recomputed below.
    flow_own_df = flow_own_df[flow_own_df["traffic_second_source"] != "汇总"]

    # Per first-level-source daily totals. Third-level summary rows are
    # excluded first so nothing is double-counted.
    flow_first_all = flow_own_df.copy()
    flow_first_all = flow_first_all[flow_first_all["traffic_third_source"] != "汇总"]
    flow_first_all = flow_first_all.groupby(
        ["traffic_first_source", "stat_time"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum", "payment_buyer_count": "sum"}).reset_index()
    flow_first_all["traffic_second_source"] = "汇总"
    flow_first_all["traffic_third_source"] = "汇总"

    # Per (first, second, third source, day) metrics.
    flow_own_sub1 = flow_own_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "stat_time"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum", "payment_buyer_count": "sum"}).reset_index()

    # Detail rows plus the recomputed per-source totals.
    flow_all_df = pd.concat(objs=[flow_own_sub1, flow_first_all], axis=0)

    # Store-wide daily totals, used as denominators for the shares.
    flow_own_sub2 = flow_own_df.copy()
    flow_own_sub2 = flow_own_sub2[flow_own_sub2["traffic_third_source"] != "汇总"]
    flow_own_sub2 = flow_own_sub2.groupby(["stat_time"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum"}).reset_index()
    flow_own_sub2.rename(columns={"visitor_count": "visitor_count_all", "payment_amount": "payment_amount_all"},
                         inplace=True)

    flow_own_all = pd.merge(flow_all_df, flow_own_sub2, how="left", on=["stat_time"])

    # Visitor share of the day's total traffic.
    flow_own_all["source_visitor_percent"] = flow_own_all["visitor_count"] / flow_own_all["visitor_count_all"]
    # Payment-amount share of the day's total payments.
    flow_own_all["source_trans_percent"] = flow_own_all["payment_amount"] / flow_own_all["payment_amount_all"]
    # Paying buyers per visitor.
    flow_own_all["payment_conversion_rate"] = flow_own_all["payment_buyer_count"] / flow_own_all["visitor_count"]
    # Re-index and drop the denominator helper columns.
    flow_own_all.reset_index(drop=True, inplace=True)
    flow_own_all.drop(labels=["visitor_count_all", "payment_amount_all"], axis=1, inplace=True)

    # Own-store label column.
    flow_own_all["competing_store_name"] = "洁婷天猫旗舰店"

    # Temporarily rename "汇总" so the total rows sort before the details.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "汇总", "1A汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "汇总", "1A汇总",
                                                    flow_own_all["traffic_third_source"])
    flow_own_all.sort_values(by=["stat_time", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                             ascending=True, inplace=True)
    # Restore the original "汇总" labels after sorting.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "1A汇总", "汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "1A汇总", "汇总",
                                                    flow_own_all["traffic_third_source"])
    # Rename to the Chinese report headers.
    flow_own_all.rename(columns={"stat_time": "数据统计时间", "competing_store_name": "店铺名称",
                                 "traffic_first_source": "一级流量来源",
                                 "traffic_second_source": "二级流量来源", "traffic_third_source": "三级流量来源",
                                 "visitor_count": "访客人数", "payment_amount": "交易金额",
                                 "payment_buyer_count": "支付人数",
                                 "source_visitor_percent": "访客人数占比", "payment_conversion_rate": "支付转化率",
                                 "source_trans_percent": "交易金额占比"
                                 }, inplace=True)

    return flow_own_all


## Weekly
def gen_traffic_week_report(start_date, end_date):
    """Build the weekly own-store traffic-source report.

    Aggregates daily traffic-source rows into stat weeks, recomputes per
    first-level-source "汇总" (total) rows, and derives visitor/payment
    shares, payment conversion and week-over-week (mom) change rates.

    Args:
        start_date: "YYYY-MM-DD" string; snapped to whole weeks by
            ``week_adjust``.
        end_date: "YYYY-MM-DD" string.

    Returns:
        A DataFrame with Chinese report headers, or None when the adjusted
        range collapses to nothing.
    """
    # Reject empty inputs up front.
    judge_string_empty(start=start_date, end=end_date)

    # No history exists before 2023-12-10; clamp earlier starts to it.
    if start_date <= "2023-12-10":
        start_date = "2023-12-10"

    # Snap the requested range to whole stat weeks.
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)
    if (start_date == "") or (end_date == ""):
        return None

    print("sb::", start_date, end_date)

    # Load one extra week of history so the first requested week still has
    # a previous week available for its week-over-week figures.
    previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(7), "%Y-%m-%d")

    query = session.query(tmall_flagship_store_traffic_source_all_day).filter(
        tmall_flagship_store_traffic_source_all_day.stat_time.between(previous_week_start, end_date))
    flow_own_df = pd.read_sql(query.statement, con=engine.connect())

    # Basic cleanup, then tag every daily row with its stat week.
    flow_own_df.drop(labels=["id"], inplace=True, axis=1)
    flow_own_df["stat_time"] = flow_own_df["stat_time"].astype(str)
    flow_own_df = deploy_stat_week(flow_own_df, flow_own_df["stat_time"])

    # Drop second-level "汇总" (summary) rows; totals are recomputed below.
    flow_own_df = flow_own_df[flow_own_df["traffic_second_source"] != "汇总"]

    # Per first-level-source weekly totals. Third-level summary rows are
    # excluded first so nothing is double-counted.
    flow_first_all = flow_own_df.copy()
    flow_first_all = flow_first_all[flow_first_all["traffic_third_source"] != "汇总"]
    flow_first_all = flow_first_all.groupby(
        ["traffic_first_source", "stat_week"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum", "payment_buyer_count": "sum"}).reset_index()
    flow_first_all["traffic_second_source"] = "汇总"
    flow_first_all["traffic_third_source"] = "汇总"

    # Part 1: per (first, second, third source, week) metrics plus totals.
    flow_own_sub1 = flow_own_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "stat_week"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum", "payment_buyer_count": "sum"}).reset_index()
    flow_own_sub1 = pd.concat(objs=[flow_own_sub1, flow_first_all], axis=0)

    # Renamed copy providing last-week values for the mom calculation.
    flow_own_lastweek = flow_own_sub1.copy()
    flow_own_lastweek.rename(columns={"visitor_count": "visitor_count_last_week",
                                      "payment_amount": "payment_amount_last_week",
                                      "payment_buyer_count": "payment_buyer_count_last_week",
                                      "stat_week": "last_week"},
                             inplace=True)
    flow_own_lastweek = flow_own_lastweek[["last_week",
                                           "traffic_first_source", "traffic_second_source", "traffic_third_source",
                                           "payment_amount_last_week", "visitor_count_last_week"]]
    # Tag every week with its previous week, then attach last-week data.
    flow_own_sub1 = deploy_lastweek_date(data=flow_own_sub1, stat_week_column=flow_own_sub1["stat_week"])
    flow_own_sub1 = pd.merge(left=flow_own_sub1, right=flow_own_lastweek, how="left", on=["last_week",
                                                                                          "traffic_first_source",
                                                                                          "traffic_second_source",
                                                                                          "traffic_third_source"])

    # Part 2: store-wide weekly totals, denominators for the shares.
    flow_own_sub2 = flow_own_df.copy()
    flow_own_sub2 = flow_own_sub2[flow_own_sub2["traffic_third_source"] != "汇总"]
    flow_own_sub2 = flow_own_sub2.groupby(["stat_week"]).agg(
        {"visitor_count": "sum", "payment_amount": "sum"}).reset_index()
    flow_own_sub2.rename(columns={"visitor_count": "visitor_count_all", "payment_amount": "payment_amount_all"},
                         inplace=True)

    # Merge the two parts.
    flow_own_all = pd.merge(flow_own_sub1, flow_own_sub2, how="left", on=["stat_week"])

    # Visitor share of the week's total traffic.
    flow_own_all["source_visitor_percent"] = flow_own_all["visitor_count"] / flow_own_all["visitor_count_all"]
    # Payment-amount share of the week's total payments.
    flow_own_all["source_trans_percent"] = flow_own_all["payment_amount"] / flow_own_all["payment_amount_all"]
    # Paying buyers per visitor.
    flow_own_all["payment_conversion_rate"] = flow_own_all["payment_buyer_count"] / flow_own_all["visitor_count"]
    # Re-index and drop the denominator helper columns.
    flow_own_all.reset_index(drop=True, inplace=True)
    flow_own_all.drop(labels=["visitor_count_all", "payment_amount_all"], axis=1, inplace=True)

    # Week-over-week change rates, then drop the helper columns.
    flow_own_all["visitor_count_mom"] = (flow_own_all["visitor_count"] - flow_own_all[
        "visitor_count_last_week"]) / flow_own_all["visitor_count_last_week"]
    flow_own_all["payment_amount_mom"] = (flow_own_all["payment_amount"] - flow_own_all[
        "payment_amount_last_week"]) / flow_own_all["payment_amount_last_week"]
    flow_own_all.drop(labels=["visitor_count_last_week", "payment_amount_last_week", "last_week"], axis=1, inplace=True)

    # Keep only weeks fully inside the requested range; the extra
    # look-back week was loaded solely for the mom calculation.
    flow_own_all["start_date"] = flow_own_all["stat_week"].apply(lambda x: str(x).split("~")[0])
    flow_own_all["end_date"] = flow_own_all["stat_week"].apply(lambda x: str(x).split("~")[1])
    flow_own_all = flow_own_all[(flow_own_all["start_date"] >= start_date) &
                                (flow_own_all["end_date"] <= end_date)]
    flow_own_all.drop(labels=["start_date", "end_date"], inplace=True, axis=1)

    # Own-store label column.
    flow_own_all["competing_store_name"] = "洁婷天猫旗舰店"

    # Temporarily rename "汇总" so the total rows sort before the details.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "汇总", "1A汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "汇总", "1A汇总",
                                                    flow_own_all["traffic_third_source"])
    flow_own_all.sort_values(by=["stat_week", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                             ascending=True, inplace=True)
    # Restore the original "汇总" labels after sorting.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "1A汇总", "汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "1A汇总", "汇总",
                                                    flow_own_all["traffic_third_source"])
    # Rename to the Chinese report headers.
    flow_own_all.rename(columns={"stat_week": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                                 "traffic_first_source": "一级流量来源", "traffic_second_source": "二级流量来源",
                                 "traffic_third_source": "三级流量来源",
                                 "visitor_count": "访客人数", "payment_amount": "交易金额",
                                 "source_visitor_percent": "来源访客人数占比",
                                 "source_trans_percent": "该来源交易金额占比", "payment_conversion_rate": "支付转化率",
                                 "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                 "payment_buyer_count": "支付人数"
                                 },
                        inplace=True)

    return flow_own_all


## Monthly
def gen_traffic_month_report(start_date, end_date):
    judge_string_empty(start=start_date, end=end_date)

    # 如果查询的时间早于数据库中有历史数据的最早时间，则需要对输入的日期进行调整
    if start_date <= "2023-12-10":
        # 给输入的起始日期重新赋值
        start_date = "2023-12-10"
    else:
        pass

    # 调用函数，调整输入的日期字符串
    start_date, end_date = month_adjust(start_date=start_date, end_date=end_date)

    # 判断返回的start_date和end_date是否为空
    if (start_date == "") or (end_date == ""):
        return None
    else:
        pass
    # 打印一下，便于检查
    print("sb::", start_date, end_date)
    # 数据读取
    # 这个地方需要读取所有的历史数据了，或者多读几周，因为要算环比
    # week_range = start_date + "~" + end_date

    previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(31), "%Y-%m-%d")
    previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")

    start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
    end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
    month_tag = start_date.split("-")[1]
    year_tag = start_date.split("-")[0]


    # query = session.query(tmall_flagship_store_traffic_source_all_day).filter(tmall_flagship_store_traffic_source_all_day.stat_time == '2023.12.24')
    query = session.query(tmall_flagship_store_traffic_source_all_day).filter(
        tmall_flagship_store_traffic_source_all_day.stat_time.between(previous_week_start, previous_week_end))
    flow_own_df = pd.read_sql(query.statement, con=engine.connect())

    # 把拿到的SQL里面的数据，预处理一下
    flow_own_df.drop(labels=["id"], inplace=True, axis=1)
    flow_own_df["stat_time"] = flow_own_df["stat_time"].astype(str)

    # 数据处理
    flow_own_df = deploy_stat_week(flow_own_df, flow_own_df["stat_time"])
    flow_own_df["year"] = flow_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    flow_own_df["month"] = flow_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    flow_own_df["year"] = flow_own_df["year"].astype(str)
    flow_own_df["month"] = flow_own_df["month"].astype(str)
    flow_own_df["stat_ym"] = flow_own_df["year"] + "-" + flow_own_df["month"]

    # 剔除汇总行记录
    # flow_own_df = flow_own_df[flow_own_df["traffic_third_source"] != "汇总"]
    flow_own_df = flow_own_df[flow_own_df["traffic_second_source"] != "汇总"]
    # flow_own_df = flow_own_df[flow_own_df["traffic_third_source"] != "-"]

    # 分别计算每类渠道的汇总数据
    # 第一级渠道
    flow_first_all = flow_own_df.copy()
    flow_first_all = flow_first_all[flow_first_all["traffic_third_source"] != "汇总"]
    flow_first_all = flow_first_all.groupby(
        ["traffic_first_source", "stat_ym"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum, "payment_buyer_count": np.sum}).reset_index()
    flow_first_all["traffic_second_source"] = "汇总"
    flow_first_all["traffic_third_source"] = "汇总"


    # 第一部分数据
    # 本店-月-数据报表 tmall_flagship_store_traffic_source_month_stat
    flow_own_sub1 = flow_own_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "stat_ym"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum, "payment_buyer_count": np.sum}).reset_index()
    # 合并一下汇总的数据和分渠道的数据
    flow_own_sub1 = pd.concat(objs=[flow_own_sub1, flow_first_all], axis=0)

    # 复制一个，用来计算环比的大表，并且更改列名
    flow_own_last_week = flow_own_sub1.copy()
    flow_own_last_week.rename(columns={"visitor_count": "visitor_count_last_month",
                                       "payment_amount": "payment_amount_last_month",
                                       "payment_buyer_count": "payment_buyer_count_last_month",
                                       "stat_ym": "last_ym"},
                              inplace=True)
    flow_own_last_week = flow_own_last_week[["last_ym",
                                             "traffic_first_source", "traffic_second_source", "traffic_third_source",
                                             "visitor_count_last_month", "payment_amount_last_month"]]
    # 给sub1分配上周的标签，然后匹配上周的数据
    flow_own_sub1 = deploy_lastmonth_date(data=flow_own_sub1,
                                          stat_month_column=flow_own_sub1["stat_ym"])
    # print(key_word_own_sub1)
    # print(key_word_last_week)
    # 根据last week和search word匹配上周的相关数据
    flow_own_sub1 = pd.merge(left=flow_own_sub1, right=flow_own_last_week,
                             how="left", on=["last_ym", "traffic_first_source",
                                             "traffic_second_source", "traffic_third_source"])
    # 匹配到数据之后，去掉上周的那一列
    flow_own_sub1.drop(labels=["last_ym"], inplace=True, axis=1)


    # 第二部分数据
    # 去掉汇总行
    flow_own_sub2 = flow_own_df.copy()
    flow_own_sub2 = flow_own_sub2[flow_own_sub2["traffic_third_source"] != "汇总"]
    flow_own_sub2 = flow_own_sub2.groupby(["stat_ym"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum}).reset_index()
    flow_own_sub2.rename(columns={"visitor_count": "visitor_count_all", "payment_amount": "payment_amount_all"},
                         inplace=True)

    # 合并两部分数据
    flow_own_all = pd.merge(flow_own_sub1, flow_own_sub2, how="left", on=["stat_ym"])
    # 流量访客来源占比
    flow_own_all["source_visitor_percent"] = flow_own_all["visitor_count"] / flow_own_all["visitor_count_all"]
    # 流量支付金额占比
    flow_own_all["source_trans_percent"] = flow_own_all["payment_amount"] / flow_own_all["payment_amount_all"]
    # 支付转化率
    flow_own_all["payment_conversion_rate"] = flow_own_all["payment_buyer_count"] / flow_own_all["visitor_count"]
    # 重新分配索引
    flow_own_all.reset_index(inplace=True)
    # 剔除多出来的两列
    flow_own_all.drop(labels=["index", "visitor_count_all", "payment_amount_all"], axis=1, inplace=True)

    # 计算环比
    flow_own_all["visitor_count_mom"] = (flow_own_all["visitor_count"] - flow_own_all[
        "visitor_count_last_month"]) / flow_own_all["visitor_count"]
    flow_own_all["payment_amount_mom"] = (flow_own_all["payment_amount"] - flow_own_all[
        "payment_amount_last_month"]) / flow_own_all["payment_amount"]
    flow_own_all.drop(labels=["visitor_count_last_month", "payment_amount_last_month"], axis=1, inplace=True)

    # flow_own_all # .to_excel(output_path + "天猫旗舰店、竞店流量来源统计.xlsx", index=False)
    # 匹配要计算环比的上周的数据
    # flow_own_all # .to_excel(output_path + "天猫旗舰店、竞店流量来源统计.xlsx", index=False)
    '''# 匹配要计算环比的上周的数据
    flow_own_all_monthly = get_pre_month_flow_data(flow_own_all.copy(), flow_own_all["month"], "visitor_count")
    flow_own_all_monthly = get_pre_month_flow_data(flow_own_all_monthly, flow_own_all["month"], "payment_amount")
    # 计算环比
    flow_own_all_monthly["visitor_count_mom"] = (flow_own_all_monthly["visitor_count"] - flow_own_all_monthly["pre_month_visitor_count"])/flow_own_all_monthly["pre_month_visitor_count"]
    flow_own_all_monthly["payment_amount_mom"] = (flow_own_all_monthly["payment_amount"] - flow_own_all_monthly["pre_month_payment_amount"])/flow_own_all_monthly["pre_month_payment_amount"]
    flow_own_all_monthly.drop(labels=["pre_month_visitor_count","pre_month_payment_amount"], axis=1, inplace=True)
    # 需要再筛选一下，输入的对应日期内的数据
    flow_own_all_monthly = flow_own_all_monthly[flow_own_all_monthly["month"] >= month_tag]

    flow_own_all_monthly["stat_ym"] = flow_own_all_monthly["year"] + "-" + flow_own_all_monthly["month"]
    flow_own_all_monthly.drop(labels=["year", "month"], inplace=True, axis=1)'''

    # 需要再筛选一下，输入的对应日期内的数据
    flow_own_all["year"] = flow_own_all["stat_ym"].apply(lambda x: str(x).split("-")[0])
    flow_own_all["month"] = flow_own_all["stat_ym"].apply(lambda x: str(x).split("-")[1])
    flow_own_all = flow_own_all[(flow_own_all["month"] >= month_tag) & (flow_own_all["year"] >= year_tag)]
    flow_own_all.drop(labels=["year", "month"], inplace=True, axis=1)

    flow_own_all["competing_store_name"] = "洁婷天猫旗舰店"

    # 排序之前先把汇总变一下，看看能不能简单的把汇总放在最前面
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "汇总", "1A汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "汇总", "1A汇总",
                                                    flow_own_all["traffic_third_source"])
    # 更改排序
    flow_own_all.sort_values(by=["stat_ym", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                             ascending=True, inplace=True)
    # 排序之后，再把汇总的值改回来
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "1A汇总", "汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "1A汇总", "汇总",
                                                    flow_own_all["traffic_third_source"])
    # 改名
    flow_own_all.rename(columns={"stat_ym": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                                 "traffic_first_source": "一级流量来源", "traffic_second_source": "二级流量来源",
                                 "traffic_third_source": "三级流量来源",
                                 "visitor_count": "访客人数", "payment_amount": "交易金额",
                                 "source_visitor_percent": "来源访客人数占比",
                                 "source_trans_percent": "该来源交易金额占比", "payment_conversion_rate": "支付转化率",
                                 "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                 "payment_buyer_count": "支付人数"
                                 },
                        inplace=True)
    return flow_own_all


## Yearly report
def gen_traffic_year_report(start_date, end_date):
    """Build the yearly traffic-source report for the own Tmall flagship store.

    Aggregates the daily traffic-source rows to calendar years, rebuilds
    first-level-channel summary rows, computes per-source share columns
    and year-over-year change ratios, then relabels the columns with the
    Chinese display headers used downstream.

    :param start_date: "YYYY-MM-DD" start of the requested period.
    :param end_date: "YYYY-MM-DD" end of the requested period.
    :return: pandas.DataFrame with Chinese headers, or None when the
        adjusted date range is empty.
    """
    # Validate the incoming date strings.
    judge_string_empty(start=start_date, end=end_date)

    # If the query starts before the earliest data in the database,
    # clamp the start date to that earliest day.
    if start_date <= "2023-12-10":
        # Re-assign the start date.
        start_date = "2023-12-10"
    else:
        pass

    # Normalise the input range to whole calendar years.
    start_date, end_date = year_adjust(start_date=start_date, end_date=end_date)

    # year_adjust signals an unusable range with empty strings.
    if (start_date == "") or (end_date == ""):
        return None
    else:
        pass
    # Debug trace for manual checking.
    print("sb::", start_date, end_date)

    # Data read: pull about one extra year of history so the
    # year-over-year ratios can be computed for the first year.
    # week_range = start_date + "~" + end_date


    previous_week_start = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(366), "%Y-%m-%d")
    previous_week_end = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")

    # NOTE(review): start_date_adjust, end_date_adjust, month_tag and
    # previous_week_end are computed but never used in this function.
    start_date_adjust = datetime.strftime(datetime.strptime(start_date, "%Y-%m-%d"), "%Y-%m-%d")
    end_date_adjust = datetime.strftime(datetime.strptime(end_date, "%Y-%m-%d"), "%Y-%m-%d")
    month_tag = start_date.split("-")[1]
    year_tag = start_date.split("-")[0]


    # query = session.query(tmall_flagship_store_traffic_source_all_day).filter(tmall_flagship_store_traffic_source_all_day.stat_time == '2023.12.24')
    query = session.query(tmall_flagship_store_traffic_source_all_day).filter(
        tmall_flagship_store_traffic_source_all_day.stat_time.between(previous_week_start, end_date))
    flow_own_df = pd.read_sql(query.statement, con=engine.connect())

    # Pre-process the rows fetched from SQL: drop the surrogate key and
    # normalise the date column to text.
    flow_own_df.drop(labels=["id"], inplace=True, axis=1)
    flow_own_df["stat_time"] = flow_own_df["stat_time"].astype(str)

    # Derive week / year / month labels from the stat_time string
    # (year and month end up as non-zero-padded strings).
    flow_own_df = deploy_stat_week(flow_own_df, flow_own_df["stat_time"])
    flow_own_df["year"] = flow_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[0]))
    flow_own_df["month"] = flow_own_df["stat_time"].apply(lambda x: int(re.split("-", x)[1]))
    flow_own_df["year"] = flow_own_df["year"].astype(str)
    flow_own_df["month"] = flow_own_df["month"].astype(str)

    # Drop the pre-aggregated second-level "汇总" (summary) rows;
    # the totals are recomputed below from the detail rows.
    # flow_own_df = flow_own_df[flow_own_df["traffic_third_source"] != "汇总"]
    flow_own_df = flow_own_df[flow_own_df["traffic_second_source"] != "汇总"]
    # flow_own_df = flow_own_df[flow_own_df["traffic_third_source"] != "-"]

    # Compute the per-channel summary data.
    # First-level channel totals per year, tagged "汇总" on levels 2/3.
    flow_first_all = flow_own_df.copy()
    flow_first_all = flow_first_all[flow_first_all["traffic_third_source"] != "汇总"]
    flow_first_all = flow_first_all.groupby(
        ["traffic_first_source", "year"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum, "payment_buyer_count": np.sum}).reset_index()
    flow_first_all["traffic_second_source"] = "汇总"
    flow_first_all["traffic_third_source"] = "汇总"


    # Part one: per-channel yearly detail
    # (store yearly report, tmall_flagship_store_traffic_source_year_stat).
    flow_own_sub1 = flow_own_df.groupby(
        ["traffic_first_source", "traffic_second_source", "traffic_third_source", "year"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum, "payment_buyer_count": np.sum}).reset_index()
    # Stack the first-level summaries on top of the detail rows.
    flow_own_sub1 = pd.concat(objs=[flow_own_sub1, flow_first_all], axis=0)

    # Build a lookup table holding the previous year's figures.
    # NOTE(review): "payment_user_count" does not appear among the
    # aggregated columns above (they use payment_buyer_count), so that
    # rename entry looks like a no-op — confirm the intended column name.
    flow_own_last_year = flow_own_sub1.copy()
    flow_own_last_year.rename(columns={"visitor_count": "visitor_count_last_year",
                                       "payment_amount": "payment_amount_last_year",
                                       "payment_user_count": "payment_user_count_last_year",
                                       "year": "last_year"},
                              inplace=True)
    flow_own_last_year = flow_own_last_year[["last_year", "visitor_count_last_year", "payment_amount_last_year",
                                             "traffic_first_source", "traffic_second_source", "traffic_third_source"]]
    # Tag each row with its previous year, then join last year's data.
    flow_own_sub1 = deploy_lastyear_date(data=flow_own_sub1, stat_year_column=flow_own_sub1["year"])
    flow_own_sub1 = pd.merge(left=flow_own_sub1, right=flow_own_last_year,
                             how="left", on=["traffic_first_source",
                                             "traffic_second_source",
                                             "traffic_third_source",
                                             "last_year"])

    # Part two: store-wide yearly totals (summary rows excluded).
    flow_own_sub2 = flow_own_df.copy()
    flow_own_sub2 = flow_own_sub2[flow_own_sub2["traffic_third_source"] != "汇总"]
    flow_own_sub2 = flow_own_sub2.groupby(["year"]).agg(
        {"visitor_count": np.sum, "payment_amount": np.sum}).reset_index()
    flow_own_sub2.rename(columns={"visitor_count": "visitor_count_all", "payment_amount": "payment_amount_all"},
                         inplace=True)

    # Join the two parts on the year key.
    flow_own_all = pd.merge(flow_own_sub1, flow_own_sub2, how="left", on=["year"])

    # Share of visitors coming from this source.
    flow_own_all["source_visitor_percent"] = flow_own_all["visitor_count"] / flow_own_all["visitor_count_all"]
    # Share of payment amount coming from this source.
    flow_own_all["source_trans_percent"] = flow_own_all["payment_amount"] / flow_own_all["payment_amount_all"]
    # Payment conversion rate.
    flow_own_all["payment_conversion_rate"] = flow_own_all["payment_buyer_count"] / flow_own_all["visitor_count"]

    # Year-over-year ratios; note the denominator is the CURRENT year's
    # value, matching the sibling report builders in this file.
    flow_own_all["visitor_count_mom"] = (flow_own_all["visitor_count"] - flow_own_all[
        "visitor_count_last_year"]) / flow_own_all["visitor_count"]
    flow_own_all["payment_amount_mom"] = (flow_own_all["payment_amount"] - flow_own_all[
        "payment_amount_last_year"]) / flow_own_all["payment_amount"]
    flow_own_all.drop(labels=["visitor_count_last_year", "payment_amount_last_year", "last_year"], axis=1, inplace=True)

    # Rebuild a clean index.
    flow_own_all.reset_index(inplace=True)
    # Drop the helper columns (the old index plus the store-wide totals).
    flow_own_all.drop(labels=["index", "visitor_count_all", "payment_amount_all"], axis=1, inplace=True)
    # (Superseded pre-year matching logic kept below for reference.)
    '''# 匹配要计算环比的上周的数据
    flow_own_all_yearly = get_pre_year_flow_data(flow_own_all.copy(), flow_own_all["year"], "visitor_count")
    flow_own_all_yearly = get_pre_year_flow_data(flow_own_all_yearly, flow_own_all["year"], "payment_amount")
    # 计算环比
    flow_own_all_yearly["visitor_count_mom"] = (flow_own_all_yearly["visitor_count"] - flow_own_all_yearly["pre_year_visitor_count"])/flow_own_all_yearly["pre_year_visitor_count"]
    flow_own_all_yearly["payment_amount_mom"] = (flow_own_all_yearly["payment_amount"] - flow_own_all_yearly["pre_year_payment_amount"])/flow_own_all_yearly["pre_year_payment_amount"]
    flow_own_all_yearly.drop(labels=["pre_year_visitor_count","pre_year_payment_amount"], axis=1, inplace=True)
    # 需要再筛选一下，输入的对应日期内的数据
    flow_own_all_yearly = flow_own_all_yearly[flow_own_all_yearly["year"] >= year_tag] '''

    # Trim back to the requested years (string compare is fine for
    # 4-digit years of equal length).
    flow_own_all = flow_own_all[flow_own_all["year"] >= year_tag]

    # flow_own_all_monthly["stat_ym"] = flow_own_all_monthly["year"] + "-" + flow_own_all_monthly["month"]
    # flow_own_all_yearly.drop(labels=["year", "month"], inplace=True, axis=1)
    # Tag the rows with the own-store name.
    flow_own_all["competing_store_name"] = "洁婷天猫旗舰店"

    # Temporarily rename "汇总" so a plain ascending sort places the
    # summary rows first inside each group.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "汇总", "1A汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "汇总", "1A汇总",
                                                    flow_own_all["traffic_third_source"])
    # Sort for presentation.
    flow_own_all.sort_values(by=["year", "traffic_first_source", "traffic_second_source", "traffic_third_source"],
                             ascending=True, inplace=True)
    # Restore the summary label after sorting.
    flow_own_all["traffic_second_source"] = np.where(flow_own_all["traffic_second_source"] == "1A汇总", "汇总",
                                                     flow_own_all["traffic_second_source"])
    flow_own_all["traffic_third_source"] = np.where(flow_own_all["traffic_third_source"] == "1A汇总", "汇总",
                                                    flow_own_all["traffic_third_source"])
    # Relabel with the Chinese display headers expected downstream.
    flow_own_all.rename(columns={"year": "数据统计时间", "competing_store_name": "天猫旗舰竞店店铺名称",
                                 "traffic_first_source": "一级流量来源", "traffic_second_source": "二级流量来源",
                                 "traffic_third_source": "三级流量来源",
                                 "visitor_count": "访客人数", "payment_amount": "交易金额",
                                 "source_visitor_percent": "来源访客人数占比",
                                 "source_trans_percent": "该来源交易金额占比", "payment_conversion_rate": "支付转化率",
                                 "visitor_count_mom": "访客人数环比", "payment_amount_mom": "交易金额环比",
                                 "payment_buyer_count": "支付人数"
                                 },
                        inplace=True)

    return flow_own_all





### Weekly statistics for the Tmall flagship store's top-20 ranked goods.
# The input is a week range; several extra fields were added for the
# frontend to pick up.
def gen_top20_goods_report(start_date, end_date):
    """Return the weekly top-20 goods ranking with Chinese display headers.

    Validates the input dates, clamps them to the earliest available
    history, snaps them to whole report weeks, then reads the weekly
    top-20 table and relabels its columns. Returns None when the
    adjusted range is empty.
    """
    # Validate the raw inputs first.
    judge_string_empty(start=start_date, end=end_date)

    # History begins on 2023-12-10; clamp anything earlier to that day.
    if start_date <= "2023-12-10":
        start_date = "2023-12-10"

    # Snap the range onto whole report weeks.
    start_date, end_date = week_adjust(start_date=start_date, end_date=end_date)

    # week_adjust reports an unusable range via empty strings.
    if "" in (start_date, end_date):
        return None

    # tmall_flagship_store_visitors_from_search_word_week_stat

    # Part one: weekly payment-amount ranking of the top-20 goods.
    stat_table = tm_top_twenty_goods_top_traffic_source_week_stat
    query = session.query(stat_table).filter(
        stat_table.stat_time.between(start_date, end_date))
    goods_rank_df = pd.read_sql(query.statement, con=engine.connect())

    # Light pre-processing: drop bookkeeping columns, date column to text.
    goods_rank_df = goods_rank_df.drop(columns=["id", "create_time"])
    goods_rank_df["stat_time"] = goods_rank_df["stat_time"].astype(str)

    # Map the database column names onto the Chinese display headers.
    display_headers = {
        "stat_time": "数据统计时间", "good_id": "商品ID", "good_name": "商品名称", "sixty_nine_code": "69码",
        "good_actual_price": "实际到手价", "good_initial_price": "开单价",
        "good_big_promotion_price": "大促底价",
        "break_small_promotion_price_difference": "破大促价差",
        "good_giveaway_item_price_ratio": "赠品费率",
        "traffic_first_source": "核心活动动作一级", "traffic_second_source": "核心活动动作二级",
        "traffic_third_source": "核心活动动作三级", "good_payment_amount": "支付金额",
        "good_visitor_count": "商品访客数",
        "good_payment_conversion_rate": "支付转化率", "good_payment_buyer_count": "支付买家数",
        "good_payment_good_count": "支付商品件数",
        "good_avg_order_price": "客单价", "payment_amount_share": "销售占比",
        "GMV_weekly": "周GMV",
        "payment_amount": "流量来源支付金额", "visitor_count": "流量来源访客数",
        "payment_conversion_rate": "流量来源支付转化率", "payment_buyer_count": "流量来源支付买家数",
        "payment_good_count": "流量来源支付商品件数",
    }
    return goods_rank_df.rename(columns=display_headers)