import calendar
import datetime
import json
import time

from collections import defaultdict
from zoneinfo import ZoneInfo

import clickhouse_driver
import numpy as np
import pandas as pd
import pymysql
from loguru import logger
from sqlalchemy import create_engine


logger.remove(0)
logger.add(r"G:\Code\ac_lingxing_api\all_ana\01.asin_ana\out.log")

## * Database connections — basic utility classes

class mysql_db:
    def __init__(self, database:str):
        user="user_rpa"
        password="123456"
        # host="192.168.8.10" # 西安数据库
        host="10.20.30.1" # 河津数据库
        port=3306
        self.engine = create_engine(f'mysql+pymysql://{user}:{password}@{host}:{port}/{database}?charset=utf8mb4')
        self.conn = pymysql.connect(user=user, password=password, host=host, port=port, database=database, charset='utf8mb4')
        self.cur = self.conn.cursor()

    def read_data(self, sql):
        query = pd.read_sql(sql, con=self.engine)
        if 'asin' in query.columns:
            query = tools.replace_asin_with_mapping(query, tools.shared_df)
        return query
    def write_data(self, df: pd.DataFrame, table_name):
        df.to_sql(table_name, self.engine, if_exists="append", index=False)
        

class click_db:
    def __init__(self):
        self.client = clickhouse_driver.Client(
            # host="192.168.8.10",# 西安运行时，使用该host
            host="10.20.30.1", # 河津运行时，使用该host
            port=9000,
            user="default",
            password="123456",
            database="keyword"
        )

    def read_data(self, sql):
        data, columns = self.client.execute(sql, columnar=True, with_column_types=True)
        df = pd.DataFrame({col[0]: d for d, col in zip(data, columns)})
        if 'asin' in df.columns:
            df = tools.replace_asin_with_mapping(df, tools.shared_df)
        return df

class tools:
    
    
    shared_df = None
    @classmethod
    def load_data(cls):
        if cls.shared_df is None:
            db = mysql_db("data_operations_report")
            sql = f"""
                    select old_asin, new_asin
                    from ac_asin_merge_renovation_table
                """
            cls.shared_df = db.read_data(sql).drop_duplicates()
    
    @staticmethod
    def get_sid_by_country(country, db):
        sql = f"select lx_sid from store_info where lx_country = '{country}'"
        sid_df = db.read_data(sql)
        return list(sid_df['lx_sid'])
    @staticmethod
    def replace_asin_with_mapping(data1, data2, column_A='asin', old_asin_col='old_asin', new_asin_col='new_asin'):
        """
        根据数据2中的对应关系替换数据1中列A的ASIN值
        
        参数:
        data1: 包含需要替换ASIN的数据框
        data2: 包含旧ASIN与新ASIN对应关系的数据框
        column_A: data1中需要替换的列名（默认为'A'）
        old_asin_col: data2中旧ASIN的列名
        new_asin_col: data2中新ASIN的列名
        
        返回:
        替换后的数据框
        """
        
        # 创建映射字典
        asin_mapping = dict(zip(data2[old_asin_col], data2[new_asin_col]))
        
        # 复制原始数据
        result_data = data1.copy()
        
        # 进行替换，如果找不到对应关系则保持原值
        result_data[column_A] = result_data[column_A].map(lambda x: asin_mapping.get(x, x))
        
        return result_data

    

## * ASIN search-query performance lookup & analysis.

class operations_data:
    
    def __init__(self):
        
        self.opera_db = mysql_db("data_operations_report")
        self.md_ad = mysql_db("ad_api")
        self.md_base = mysql_db("ac_base_data")
        self.md_kw =  mysql_db("keyword")
        self.md_stock = mysql_db("stock_sales")
        self.cd_kw = click_db()
        logger.success("所有数据库链接创建完成")
        
    def get_sid_by_country(self, country):
        sql = f"select lx_sid from store_info where lx_country = '{country}'"
        sid_df = self.md_base.read_data(sql)
        logger.success("通过国家获取到了sid")
        return list(sid_df['lx_sid'])
    
    ## * 1.1 从搜索目录绩效表中获取数据
    def read_st_data_by_asin(self, asin_str, country, etime):
        
        sql = f"""
            select asin, category, impressions_count, clicks_count, clicks_ctr, cart_adds_count, purchases_count, purchases_conversion_rate, report_date_format
            from search_term_performance
            where asin in ({asin_str})
            and country  = '{country}'
            and report_date_format = '{etime}'
        """
        df = self.md_kw.read_data(sql)
        df = df.groupby(['asin', 'category', 'report_date_format'])[['impressions_count', 'clicks_count', 'cart_adds_count', 'purchases_count']].sum().reset_index()
        
        df['clicks_ctr'] = df.apply(lambda row: 0 if row["impressions_count"] == 0 else row["clicks_count"] / row["impressions_count"], axis=1)
        df['purchases_conversion_rate'] = df.apply(lambda row: 0 if row["clicks_count"] == 0 else row["purchases_count"] / row["clicks_count"], axis=1)
        
        logger.info(f"搜索目录绩效数据：", df)
        return df
    ## *搜索目录绩效表数据处理
    def st_data_handler(self, asin_str, country, end_date):
        ## *通过国家获取 sid列表
        # sid_lst = tools.get_sid_by_country(country, self.db)
        df = self.read_st_data_by_asin(asin_str, country,end_date)
        
        ## *生成转化率
        df['cart_adds_rate'] = df.apply(lambda row: 0 if row['clicks_count'] == 0 else row['purchases_count'] / row['clicks_count'], axis=1)
        df_colums = {
            "report_date_format":"etime",
            "impressions_count":"impressions",
            "clicks_count":"click",
            "clicks_ctr":"ctr",
            "cart_adds_count":"cart_adds",
            "purchases_count":"purchases",
            "purchases_conversion_rate":"cvr",
        }
        df = df.rename(columns=df_colums).drop_duplicates()
        df['update_time'] = datetime.datetime.now()
        df['country'] = country
        
        
        self.opera_db.write_data(df, "ac_product_all_data")
    
    ## * 1.2 asin广告数据分析
    ## 根据asin获与sid，获取指定时间内的广告数据
    def get_ad_data_by_asin(self, asin_str, sid_str, start_date, end_date):
        sql = f"""select ad_id, asin, ad_group_id, impressions, clicks, cost, report_date,  units,same_units, sales,same_sales, sid from sp_product_api 
                where asin in ({asin_str}) 
                and sid in ({sid_str}) 
                and report_date between "{start_date}" and "{end_date}" """
        ad_data = self.md_ad.read_data(sql).fillna(0)
        logger.info(f"通过asin获取广告组id：{ad_data}")
        return ad_data
    
    ## * 1.2 根据asin获取销售额
    def get_sales_by_asin(self, asin_str, sid_str, start_date, end_date):
        
        
        
        sql = f"""
            SELECT id, asin, amount
            FROM (
                SELECT id, asin, amount,
                    ROW_NUMBER() OVER (PARTITION BY asin, seller_sku, r_date ORDER BY get_date DESC) as rn
                FROM sales_30day_detail
                WHERE asin in ({asin_str})  and sid in ({sid_str}) 
                and r_date BETWEEN '{start_date}' AND '{end_date}'
            ) t
            WHERE rn = 1
        """
        
        sales_data = self.md_stock.read_data(sql).fillna(0)
        sales_data = sales_data.groupby(['asin'])[['amount']].sum().reset_index()
        
        return sales_data
        
    ## * 1.2 广告数据处理
    def ad_data_handler(self, asin_str, country, sid_str, start_date, end_date):
        
        ad_data = self.get_ad_data_by_asin(asin_str, sid_str, start_date, end_date)
        one_week_data = ad_data.groupby(["asin"])[["impressions", "clicks", "cost", "units","same_units", "sales", "same_sales"]].sum().reset_index()
        one_week_data['etime'] = end_date
        sales_data = self.get_sales_by_asin(asin_str, sid_str, start_date, end_date).fillna(0)
        one_week_data = one_week_data.merge(sales_data, how="left", on="asin")
        one_week_data['ctr'] = one_week_data.apply(lambda row: 0 if row['impressions'] == 0 else row['clicks'] / row['impressions'], axis=1)
        one_week_data['cvr'] = one_week_data.apply(lambda row: 0 if row['clicks'] == 0 else  row['units'] / row['clicks'], axis=1)
        one_week_data['acos'] = one_week_data.apply(lambda row: 0 if row['sales'] == 0 else  row['cost'] / row['sales'], axis=1)
        one_week_data['acoas'] = one_week_data.apply(lambda row: 0 if row['amount'] == 0 else  row['cost'] / row['amount'], axis=1)

        one_week_data = one_week_data[['asin','etime','cost','impressions','clicks','same_units','units','same_sales','sales','amount','ctr','cvr','acos','acoas']]
        one_week_data['update_time'] = datetime.datetime.now()
        one_week_data['etime'] = end_date
        one_week_data["country"] = country
        self.opera_db.write_data(one_week_data, "ac_product_advertising_data")
    
    
    ## * 根据 传入的asin，找到 该asin下的核心竞品asin
    def get_competing_products_top10(self, asin_str):
        asin_lst = []
        sql = f"""
            select asin, competitor_asin
            from competitor_products_info_test
            where is_competitor = 1 
            and asin in ({asin_str})
        """
        competitor_asin_df = self.md_kw.read_data(sql)
        return competitor_asin_df  # 两列值，本asin，竞品asin
    
    
    ## * 核心市场竞品前十名的数据，1.3部分和4.2,4.1部分都要用。
    def get_top10_asin_data(self, asin_str, country, report_date):
        # 先获取asin的所有竞品asin
        ## * 这里需要 竞品库的完成。返回的格式为：本asin，竞品asin 数据。
        asin_df = self.get_competing_products_top10(asin_str) # 两列值，本asin，竞品asin
        
        
        core_marketasin_lst = list(set(asin_df["competitor_asin"].tolist()))
        core_marketasin_str = ",".join(["'" + str(asin) + "'" for asin in core_marketasin_lst])

        sql = f"""
                select keyword, asin, impression_count, impression_share, click_count, click_share
                from top10_kw_week
                where asin in ({core_marketasin_str})
                and report_date_format  = '{report_date}'
        """
        core_marketasin_df = self.cd_kw.read_data(sql)
        asin_df = asin_df.drop_duplicates()
        core_marketasin_df = core_marketasin_df.drop_duplicates()
        
        core_marketasin_df = core_marketasin_df.rename(columns={"asin":"competitor_asin"})
        core_marketasin_df = core_marketasin_df.merge(asin_df, how="left", on="competitor_asin").drop_duplicates()
        return core_marketasin_df
    
    ##* 获取自己asin的前10名数据
    def get_top10_our_asin_data(self, asin_str, country, report_date):
        sql_our = f"""
                select keyword, asin, impression_count, impression_share, click_count, click_share
                from top10_kw_week
                where asin in ({asin_str})
                and report_date_format  = '{report_date}'
        """
        asin_top10_df = self.cd_kw.read_data(sql_our)
        
        asin_top10_df = asin_top10_df.drop_duplicates()
        asin_top10_df = asin_top10_df.groupby("asin")[['impression_count','click_count']].sum().reset_index().rename(columns={"impression_count":"asin_impression","click_count":"asin_click"})
        return asin_top10_df
        
    
    ##*  1.3 核心市场
    def asin_view_core_market(self, asin_str, country, etime):
        
        
        # asin下，竞品asin、关键词的数量
        asin_count = self.top10_core_marketasin_asin_df.groupby('asin')[["competitor_asin"]].nunique().reset_index()
        kw_count = self.top10_core_marketasin_asin_df.groupby('asin')[["keyword"]].nunique().reset_index()
        # asin下，其他数据的求和数据。
        asin_top10_data = self.top10_core_marketasin_asin_df.groupby('asin')[["impression_count", "click_count"]].sum().reset_index()
        asin_top10_data = asin_top10_data.rename(columns={
            "impression_count":"market_impression",
            "click_count":"market_click"
        })
        
        asin_our_df = self.get_top10_our_asin_data(asin_str, country, etime)
        
        
        asin_data = asin_top10_data.merge(asin_our_df, how="left", on=['asin']).merge(asin_count, how="left", on=['asin']).merge(kw_count, how="left", on=['asin'])
        asin_data["market_average_impression"] = asin_data["market_impression"] // asin_data["competitor_asin"]
        asin_data["market_average_click"] = asin_data["market_click"] // asin_data["competitor_asin"]
        
        asin_data["etime"] = etime
        asin_data['update_time'] = datetime.datetime.now()
        asin_data["country"] = country
        
        self.opera_db.write_data(asin_data, "ac_core_market_data")
        

    
    
    
    ## * 根据传入的asin找 关键词
    def get_kw_by_asin(self, asin_str, country):
        sql = f"""
            select asin, keyword, keyword_chinese
            from keyword_info_test
            where asin in ({asin_str})
            and country = '{country}'
        """
        asin_keyword_df = self.md_kw.read_data(sql)
        
        return asin_keyword_df
    
    ## * 4. 关键词分析
    def kw_ana_all(self, asin_str, sid_str, report_date, country):
        # 关键词数据
        asin_keyword_df = self.get_kw_by_asin(asin_str, country).drop_duplicates() ##*  asin, keyword, keyword_chinese
        kw_lst = list(set(asin_keyword_df['keyword'].tolist()))
        kw_str = ",".join(["'" + str(kw) + "'" for kw in kw_lst])
        # 4.1 整体数据
        kw_data_df = self.kw_ana_asin_view(asin_str, country, report_date, kw_str)
        asin_keyword_df = asin_keyword_df.merge(kw_data_df, how="left", on="keyword")
        asin_keyword_df = asin_keyword_df.fillna(0)
        
        
        # 4.2 本asin数据
        our_asin_data_df = self.asin_ana_asin_view(asin_str, country, report_date, kw_lst)
        asin_keyword_df = asin_keyword_df.merge(our_asin_data_df, how="left", on=["asin", "keyword"])
        
        # 4.3 前10名和 4.4 第一
        data_all_df = self.top10_asin_data(kw_lst)
        asin_keyword_df = asin_keyword_df.merge(data_all_df, how="left", on=["asin", "keyword"])
        asin_keyword_df['update_time'] = datetime.datetime.now()
        asin_keyword_df['etime'] = report_date
        asin_keyword_df['country'] = country
        asin_keyword_df['cvr'] = asin_keyword_df.apply(lambda row: row["cvr"] / 100 , axis=1)
        asin_keyword_df.fillna(0)
        self.opera_db.write_data(asin_keyword_df, "ac_product_kw_ana_data")
        return asin_keyword_df[['keyword', 'cvr']]
    
    ## * 4.1 asin视图 关键词整体数据
    def kw_ana_asin_view(self, asin_str, country, report_date, kw_str):
        
        
        sql_kw = f"""
            select keyword, search_query_count, impression_total, click_total, click_rate, cart_total, cart_rate, purchase_total, purchase_rate
            from asin_view_week
            where keyword in ({kw_str})
            and report_date_format  = '{report_date}'
        
        """
        sql_kw_df = self.cd_kw.read_data(sql_kw).drop_duplicates()
        sql_kw_df['views_density'] = sql_kw_df.apply(lambda row: 0 if row['search_query_count'] == 0 else row['impression_total'] // row['search_query_count'], axis=1)
        sql_kw_df["search_cvr"] = sql_kw_df.apply(lambda row: 0 if row['search_query_count'] == 0 else row['purchase_total'] / row['search_query_count'], axis=1)
        sql_kw_df["click_rate"] = sql_kw_df.apply(lambda row:  row['click_rate'] / 100, axis=1)
        sql_kw_df["cart_rate"] = sql_kw_df.apply(lambda row:  row['cart_rate'] / 100, axis=1)
        sql_kw_df["purchase_rate"] = sql_kw_df.apply(lambda row:  row['purchase_rate'] / 100, axis=1)
        
        new_clo = {
            "impression_total":"impression",
            "click_total":"clicks",
            "click_rate":"ctr",
            "cart_total":"carts",
            "purchase_total":"purchase",
            "purchase_rate":"cvr"
        }
        sql_kw_df = sql_kw_df.rename(columns=new_clo)
        return sql_kw_df
        
    
    ## *  4.3 前10名竞品的数据, 4.4 第一名竞品的数据
    def top10_asin_data(self, kw_lst):
        top10_core_marketasin_asin_df_kw = self.top10_core_marketasin_asin_df[self.top10_core_marketasin_asin_df['keyword'].isin(kw_lst)]
        top10_data = top10_core_marketasin_asin_df_kw.groupby(["asin", "keyword"])[['impression_count', "impression_share", "click_count", "click_share"]].sum().reset_index()
        top10_data_asin_count = top10_core_marketasin_asin_df_kw.groupby(["asin",  "keyword"])['competitor_asin'].nunique().reset_index()
        top10_data = top10_data.merge(top10_data_asin_count,  how="left", on=["asin", "keyword"])
        top10_data = top10_data.rename(columns={
            "competitor_asin":"competitor_asin_count",
            "impression_count":"top10_impression_count",
            "impression_share":"top10_impression_share",
            "click_count":"top10_click_count",
            "click_share":"top10_click_share",
        })
        top10_data['top10_ctr'] = top10_data.apply(lambda row: 0 if row['top10_impression_count'] == 0 else row['top10_click_count']/row['top10_impression_count'], axis=1)
        # self.opera_db.write_data(top10_data, "ac_product_top10_asin_data")
        top10_data['top10_impression_share'] = top10_data.apply(lambda row: row["top10_impression_share"] / 100 , axis=1)
        top10_data['top10_click_share'] = top10_data.apply(lambda row: row["top10_click_share"] / 100 , axis=1)
        
        no1_imp_data = top10_core_marketasin_asin_df_kw.groupby(["asin", "keyword"])[['impression_count', "impression_share"]].max().reset_index()
        
        no1_click_data = top10_core_marketasin_asin_df_kw.groupby(["asin", "keyword"])[['click_count', "click_share"]].max().reset_index()
        no1_data = no1_imp_data.merge(no1_click_data,  how="left", on=["asin", "keyword"])
        no1_data = no1_data.rename(columns={
            "impression_count":"no1_impression_count",
            "impression_share":"no1_impression_share",
            "click_count":"no1_click_count",
            "click_share":"no1_click_share",
        })
        no1_data['no1_impression_share'] = no1_data.apply(lambda row: row["no1_impression_share"] / 100 , axis=1)
        no1_data['no1_click_share'] = no1_data.apply(lambda row: row["no1_click_share"] / 100 , axis=1)
        data_all_df = top10_data.merge(no1_data, how="left", on=["asin", "keyword"])
        # self.opera_db.write_data(top10_data, "ac_product_no1_asin_data")
        
        return data_all_df
        
    
    def get_rank(self, data1, data2, count_name):
        """data1: 竞品的数据;data2:本产品的数据;count_name：统计的字段"""
        
        combined_data = pd.concat([
            data1[['asin', 'keyword', count_name]].assign(source='data1'),
            data2[['asin', 'keyword', count_name]].assign(source='data2')
            
        ], ignore_index=True)

        # 直接按组计算排名（不排序）
        combined_data['rank'] = combined_data.groupby(['asin', 'keyword'])[count_name].rank(method='min', ascending=False)
        rank = count_name + "_rank"
        # 提取数据2的排名
        result = combined_data[combined_data['source'] == 'data2'][['asin', 'keyword', count_name, "rank"]].reset_index(drop=True)
        result[rank] = result["rank"].astype(int)
        
        return result
    
    ## *  4.2 本产品asin的数据
    def asin_ana_asin_view(self, asin_str, country, report_date, kw_lst):
        kw_str = ",".join(["'" + str(kw) + "'" for kw in kw_lst])
        sql_asin = f"""
            select keyword, asin, impression_asin_count, impression_asin_ratio, click_asin_count, click_asin_ratio, cart_asin_count, cart_asin_ratio, purchase_asin_count, purchase_asin_ratio
            from asin_view_week
            where asin in ({asin_str}) and keyword in ({kw_str})
            and report_date_format = '{report_date}'
        """
        sql_asin_df = self.cd_kw.read_data(sql_asin).drop_duplicates()
        sql_asin_df = sql_asin_df.fillna(0)
        # self.opera_db.write_data(sql_asin_df, "ac_product_our_asin_view_data")
        our_asin_impression_data = sql_asin_df.groupby(["asin", "keyword"])[['impression_asin_count', "click_asin_count", "impression_asin_ratio", "click_asin_ratio", "cart_asin_count", "purchase_asin_count"]].sum().reset_index()
        
        our_asin_impression_data = our_asin_impression_data.rename(columns={"impression_asin_count":"impression_count", "click_asin_count":"click_count"})
        
        asin_imp_rank = self.get_rank(self.top10_core_marketasin_asin_df, our_asin_impression_data, "impression_count").drop_duplicates()
        asin_click_rank = self.get_rank(self.top10_core_marketasin_asin_df, our_asin_impression_data, "click_count").drop_duplicates()
        
        our_asin_impression_data = our_asin_impression_data.merge(asin_imp_rank[['asin', "keyword","impression_count_rank"]], how='left', on=["asin", "keyword"])
        our_asin_impression_data = our_asin_impression_data.merge(asin_click_rank[['asin', "keyword","click_count_rank"]], how='left', on=["asin", "keyword"])
        
        our_asin_impression_data = our_asin_impression_data.rename(columns={
            "impression_count":"impression_asin_count", "click_count":"click_asin_count"
        })
        our_asin_impression_data["impression_asin_ratio"] = our_asin_impression_data.apply(lambda row:  float(row['impression_asin_ratio']) / 100 , axis=1)
        our_asin_impression_data["click_asin_ratio"] = our_asin_impression_data.apply(lambda row:  row['click_asin_ratio'] / 100 , axis=1)
        our_asin_impression_data['asin_ctr'] = our_asin_impression_data.apply(lambda row: 0 if row['impression_asin_count'] == 0 else row['click_asin_count'] / row['impression_asin_count'], axis=1)
        our_asin_impression_data['asin_cart_ratio'] = our_asin_impression_data.apply(lambda row: 0 if int(row['click_asin_count']) == 0 else row['cart_asin_count'] / row['click_asin_count'], axis=1)
        our_asin_impression_data['asin_cvr'] = our_asin_impression_data.apply(lambda row: 0 if row['click_asin_count'] == 0 else row['purchase_asin_count'] / row['click_asin_count'], axis=1)
        # self.opera_db.write_data(our_asin_impression_data, "ac_product_our_asin_data")
        
        return our_asin_impression_data
        
        
        
        
        
        
    ## 根据asin获与sid，获取指定时间内的广告数据
    def get_ad_product_data_by_asin2(self, asin_str, sid_str, etime):
        sql = f"""select campaign_id, ad_group_id, sid,  asin
                from sp_product_ads_data_api 
                where asin in ({asin_str}) 
                and sid in ({sid_str}) 
                and update_time = '{etime}'
                """
        ad_data = self.md_ad.read_data(sql)
        return ad_data.drop_duplicates()
    ## 根据广告组id，拿关键词数据
    def get_ad_kw_data_by_ad_group_id(self, ad_group_id_str, etime):
        
        sql = f"""select campaign_id, ad_group_id, sid, keyword_text, match_type, bid
                from sp_keywords_data_api 
                where ad_group_id in ({ad_group_id_str})
                and update_time = '{etime}'  
                """
        ad_data = self.md_ad.read_data(sql)
        return ad_data.drop_duplicates()
    # 广告活动数据
    def get_campaign_report_data(self, campaign_id_lst, start_date, end_date):
        campaign_id_str = ",".join(["'" + str(campaign_id) + "'" for campaign_id in campaign_id_lst])

        sql = f"""
                select impressions,clicks, cost, campaign_id, units, sales  from sp_campaign_api
                where campaign_id in ({campaign_id_str})
                and report_date between "{start_date}" and "{end_date}" 
                and cost > 0
        """
        ad_data = self.md_ad.read_data(sql)
        return ad_data
    #根据广告活动id找广告活动名字，筛选开启的活动
    def get_ad_name_by_id(self, campaign_id_lst, etime):
        campaign_id_str = ",".join(["'" + str(campaign_id) + "'" for campaign_id in campaign_id_lst])
        sql = f"""
            select campaign_id, name, daily_budget, bidding, state
            from sp_campaigns_data_api
            where (campaign_id in ({campaign_id_str}) or state = 'enabled')
            and update_time = '{etime}'  
        """
        ad_data = self.md_ad.read_data(sql)
        return ad_data
    
    def bidding_handler(self, ad_data_df):
        # 把竞价策略这列单独拿出来
        list_temp = ad_data_df['bidding'].values.tolist()
        # 把str转换成字典格式
        list_temp = [json.loads(t) for t in list_temp]
        # 拿出竞价策略，
        temp = pd.DataFrame(list_temp)
        #把后面的百分比再做处理。
        list_temp2 = temp["adjustments"].values.tolist()
        # 把竞价策略全部拆开。

        adjustments_lst = []

        for t_lst in list_temp2:
            predicate_set = {"placementTop", "siteAmazonBusiness","placementProductPage","placementRestOfSearch"}
            temp_lst = dict()
            if t_lst != []:
                for t in t_lst:
                    predicate = t['predicate']
                    predicate_set = predicate_set - {predicate}
                    temp_lst[predicate] = str(t['percentage']) + "%"
            for predicate in predicate_set:
                temp_lst[predicate] = "0%"
            adjustments_lst.append(temp_lst)
        adjustments_df = pd.DataFrame(adjustments_lst)
        ad_data_df['strategy'] = temp['strategy']
        ad_data_df['placementProductPage'] = adjustments_df['placementProductPage']
        ad_data_df['siteAmazonBusiness'] = adjustments_df['siteAmazonBusiness']
        ad_data_df['placementRestOfSearch'] = adjustments_df['placementRestOfSearch']
        ad_data_df['placementTop'] = adjustments_df['placementTop']
        return ad_data_df
    
    
    # 获取广告位的数据。
    def get_splacement_data(self, campaign_id_lst, start_date, end_date):
        campaign_id_str = ",".join(["'" + str(campaign_id) + "'" for campaign_id in campaign_id_lst])

        sql = f"""
                select impressions,clicks, cost, campaign_id, units, sales, placement_type  from sp_splacement_api
                where campaign_id in ({campaign_id_str})
                and report_date between "{start_date}" and "{end_date}" 
        """
        ad_data = self.md_ad.read_data(sql)
        return ad_data
    
    
    ##*   5.0 广告数据汇总
    def ad_data_asin(self, asin_str, sid_str, country, stime, etime, kw_cvr:pd.DataFrame):
        # 根据asin找广告组
        ad_data_pro = self. get_ad_product_data_by_asin2(asin_str, sid_str, etime) ## * campaign_id, ad_group_id, sid, sku, asin
        campaign_id_lst = list(set(ad_data_pro['campaign_id'].tolist()))
        ad_data_campaign_report_data = self.get_campaign_report_data(campaign_id_lst, stime, etime) ##*  impressions,clicks, cost, campaign_id, units, sales
        
        # 找广告活动的相关设置
        ad_camp_data = self.get_ad_name_by_id(campaign_id_lst, etime)  ##* campaign_id, name, daily_budget, bidding, state
        
        
        
        # 广告组过滤掉 
        ad_data_pro = ad_data_pro[ad_data_pro['campaign_id'].isin(ad_camp_data['campaign_id'].tolist())]
        campaign_id_lst =  list(set(ad_data_pro['campaign_id'].tolist()))
        #根据asin找关键词
        asin_kw_df = self.get_kw_by_asin(asin_str, country)  ## * asin, keyword, keyword_chinese
        
        ad_group_id_lst = list(set(ad_data_pro['ad_group_id'].tolist()))
        ad_group_id_str = ",".join(["'" + str(ad_group_id) + "'" for ad_group_id in ad_group_id_lst])
        
        
        
        # 通过广告组id，找该id对应的投放词，和竞价
        ad_kw_data = self.get_ad_kw_data_by_ad_group_id(ad_group_id_str, etime) ##* campaign_id, ad_group_id, sid, keyword_text, match_type, bid
        # 把asin 与 投放词匹配起来
        ad_data_kw_group = ad_data_pro.merge(ad_kw_data, how="left", on=['campaign_id', "ad_group_id", "sid"])  ## * campaign_id, ad_group_id, sid, sku, asin,keyword_text, match_type, bid
        
        
        # 把竞价策略里的数据进行梳理。
        ad_camp_data = self.bidding_handler(ad_camp_data) ##* campaign_id, name, daily_budget, bidding, strategy, placementProductPage, siteAmazonBusiness, placementRestOfSearch, placementTop
        
        
        
        ad_data = asin_kw_df.merge(ad_data_kw_group, how="left", left_on=['keyword',"asin"], right_on=["keyword_text","asin"])  ##*  asin, keyword, keyword_chinese, campaign_id, ad_group_id, sid, sku, asin, keyword_text, match_type, bid
        # asin-》关键词-》广告活动的基础设置数据。
        ad_data = ad_data.merge(ad_camp_data, how="left", on='campaign_id')##*  asin, keyword, keyword_chinese, campaign_id, ad_group_id, sid, sku, asin, keyword_text, match_type, bid,  name, daily_budget, bidding, strategy, placementProductPage, siteAmazonBusiness, placementRestOfSearch, placementTop
        
        ##* 接下来就是对广告活动数据与各个位置的数据报告进行整合。
        
        ad_data_camp_splacement = self.get_splacement_data( campaign_id_lst, stime, etime) ##* impressions,clicks, cost, campaign_id, units, sales, placement_type
        
        
        
        ad_data_campaign_report_data = ad_data_campaign_report_data.groupby("campaign_id")[["impressions", "clicks", "cost","units", "sales"]].sum().reset_index() ## campaign_id	impressions	clicks	cost	units	sales
        
        
        ##*  asin, keyword, keyword_chinese, campaign_id, ad_group_id, sid, sku, asin, keyword_text, match_type, bid,  name, 
        ##* daily_budget, bidding, strategy, placementProductPage, siteAmazonBusiness, placementRestOfSearch, placementTop,impressions	clicks	cost	units	sales
        ad_data = ad_data.merge(ad_data_campaign_report_data, how="left", on="campaign_id")
        ad_data['1day_cost'] = ad_data.apply(lambda row: row['cost']/7, axis=1)
        ad_data['7day_cpc'] = ad_data.apply(lambda row: 0 if row['clicks'] == 0 else row['cost']/row['clicks'], axis=1)
        ad_data['ctr'] = ad_data.apply(lambda row: row['clicks']/row['impressions'], axis=1)
        ad_data['acos'] = ad_data.apply(lambda row: np.nan if row['sales'] == 0 else row['cost']/row['sales'], axis=1)
        ad_data['cvr'] = ad_data.apply(lambda row: 0 if row['clicks'] == 0 else row['units']/row['clicks'], axis=1)
        

        splacement_dict = {
            "top":"TOP OF SEARCH ON-AMAZON",
            "other":"OTHER ON-AMAZON",
            "page":"DETAIL PAGE ON-AMAZON",
            "business":"SITE AMAZON BUSINESS"
        }
        
        for spla, spla_name in splacement_dict.items():
            temp_splacement_page = ad_data_camp_splacement[ad_data_camp_splacement['placement_type'] == spla_name]
            temp_splacement_page = temp_splacement_page.groupby('campaign_id')[['impressions', 'clicks', 'cost', 'units', 'sales']].sum().reset_index()
            ##* temp_splacement_page  campaign_id,'impressions', 'clicks', 'cost', 'units', 'sales'
            temp_splacement_page[f'{spla}_7day_cpc'] = temp_splacement_page.apply(lambda row: 0 if row['clicks'] in [0, np.nan] else row['cost']/row['clicks'], axis=1)
            temp_splacement_page[f'{spla}_ctr'] = temp_splacement_page.apply(lambda row: 0 if row['impressions'] in [0, np.nan] else row['clicks']/row['impressions'], axis=1)
            temp_splacement_page[f'{spla}_cvr'] = temp_splacement_page.apply(lambda row: 0 if row['clicks'] in [0, np.nan] else row['units']/row['clicks'], axis=1)
            temp_splacement_page[f'{spla}_acos'] = temp_splacement_page.apply(lambda row: 0 if row['sales'] in [0, np.nan] else row['cost']/row['sales'], axis=1)
            rename_dic = {
                "impressions":f"{spla}_impressions",
                "clicks":f"{spla}_clicks",
                "cost":f"{spla}_cost",
                "units":f"{spla}_units",
                "sales":f"{spla}_sales"
            }
            temp_splacement_page = temp_splacement_page.rename(columns=rename_dic)
            ad_data = ad_data.merge(temp_splacement_page, how="left", on="campaign_id")
            
        ad_data = ad_data.fillna(0)
        
        ad_data['etime'] = etime
        ad_data["country"] = country
        kw_cvr = kw_cvr.rename(columns={"cvr":"kw_average_cvr"}).drop_duplicates()
        ad_data = ad_data.merge(kw_cvr, how="left", on="keyword")
        self.opera_db.write_data(ad_data, "ac_product_ad_detail_data")
        

        
        
        
        
        
    
    def get_last_week_date(self):
        now = datetime.datetime.now()
        sql = f"""
            select stime,etime  from ac_amz_week_calendar
            where stime < '{now}'
            and etime > '{now}'
        """
        df = self.md_base.read_data(sql)
        stime = (df['stime'][0].to_pydatetime() - datetime.timedelta(days=7)).strftime("%Y-%m-%d")
        etime = (df['etime'][0].to_pydatetime() - datetime.timedelta(days=7)).strftime("%Y-%m-%d")
        return stime, etime
    
    
    
        
    
    def main(self):
        ##* 获取上周的开始时间和截止时间 
        stime, etime = self.get_last_week_date()
        ##* 获取 asin 与国家
        country_en = "US"
        country_zh = "美国"
        asin_lst = ["B09JZDLDD6", "B0DX79LMRN", 'B0DSHWDY62']
        new_asin_df  = tools.shared_df[tools.shared_df['new_asin'].isin(asin_lst)]
        
        asin_lst = list(set(asin_lst + new_asin_df['old_asin'].tolist()))
        
        sid_lst = self.get_sid_by_country(country_zh)
        stime, etime = "2025-07-06", "2025-07-12"
        ##* 转换成字符串
        asin_str = ",".join(["'" + str(asin) + "'" for asin in asin_lst])
        sid_str = ",".join(["'" + str(sid) + "'" for sid in sid_lst])
        
        
        
        sql = "select stime, etime from ac_amz_week_calendar where year = 2025 and week < 31 and week  > 20"
        df = self.md_base.read_data(sql)
        for index, row in df.iterrows():

            stime, etime = row['stime'].strftime("%Y-%m-%d"), row['etime'].strftime("%Y-%m-%d")
            
        
            # ##* 1.1 生成 本产品上周整体数据- 已完成
            # self.st_data_handler(asin_str, country_zh, etime)
            
            # ##* 1.2 生成本产品上周广告数据
            # self.ad_data_handler(asin_str, country_zh, sid_str, stime, etime)
            # ## 获取自己与竞品的数据，和前10名竞品的数据。
            self.top10_core_marketasin_asin_df = self.get_top10_asin_data(asin_str, country_zh, etime)
            
            # ##* 1.3 精准市场上周数据
            # self.asin_view_core_market(asin_str, country_zh, etime)
            

            
            # ##* 4.关键词分析
            kw_cvr = self.kw_ana_all(asin_str, sid_str, etime, country_zh)
            # ##* 5.0 广告数据
            self.ad_data_asin(asin_str, sid_str, country_zh, stime, etime, kw_cvr)

    # def test_main(self):
    #     stime, etime = "2025-07-06", "2025-07-12"
        
    #     country_en = "US"
    #     country_zh = "美国"
    #     asin_lst = ["B09JZDLDD6", "B09Q5N8ZHB"]
    #     sid_lst = self.get_sid_by_country(country_zh)
    #     ##* 转换成字符串
    #     asin_str = ",".join(["'" + str(asin) + "'" for asin in asin_lst])
    #     sid_str = ",".join(["'" + str(sid) + "'" for sid in sid_lst])
        
        
    #     self.asin_view_core_market(asin_str, sid_str, stime, etime)

import time
tools.load_data()
start_time = time.time()
opd = operations_data()
    

opd.main()
end = time.time()

print(f"一共用了{end-start_time}秒")