import pandas as pd
import numpy as np
import logging
from datetime import datetime
import os
import glob

# Logging: timestamped INFO messages to the console.
logging.basicConfig(
    handlers=[logging.StreamHandler()],
    format='%(asctime)s %(levelname)s %(message)s',
    level=logging.INFO,
)

# Today's date string; it appears in both input and output filenames.
now_str = datetime.now().strftime('%Y-%m-%d')

# ========== Path configuration ==========
# All inputs and outputs live in the same desktop folder.
ks_dir = r'C:/Users/86187/Desktop/file'   # KS direct-sign operations report
cpa_dir = r'C:/Users/86187/Desktop/file'  # CPA new-user LT export
csv_dir = r'C:/Users/86187/Desktop/file'  # spend CSV files

# KS direct-sign operations report workbook.
ks_filename = '快手直签运营报表.xlsx'
ks_path = f'{ks_dir}/{ks_filename}'

# Today's CPA new-user LT export workbook.
cpa_filename = f'CPA拉新-LT数据导出-{now_str}.xlsx'
cpa_path = f'{cpa_dir}/{cpa_filename}'

# Backfilled output workbook.
output_filename = f'快手直签运营报表_回填_{now_str}.xlsx'
output_path = f'{ks_dir}/{output_filename}'

# 1. Load the KS direct-sign operations report (every cell read as a string).
logging.info(f"正在读取快手直签运营报表: {ks_path}")
ks_df = pd.read_excel(ks_path, dtype=str)

# 2. Load today's CPA new-user LT export from sheet 'sheet-1'.
logging.info(f"正在读取CPA拉新-LT数据导出表: {cpa_path}")
cpa_df = pd.read_excel(cpa_path, sheet_name='sheet-1', dtype=str)

# 3. Normalize the CPA join keys: a canonical 'YYYY-MM-DD' date string, and a
#    channel ID stripped of stray apostrophes (Excel text markers).
cpa_df['日期'] = pd.to_datetime(cpa_df['日期'], errors='coerce')
cpa_df['日期_str'] = cpa_df['日期'].dt.strftime('%Y-%m-%d')
cpa_df['渠道ID'] = cpa_df['渠道ID'].str.replace("'", "")

# 4. Resolve source/target column names by spreadsheet position (0-based:
#    A=0, so N=13, T=19, AB=27, AH=33, AC=28, S=18, M=12, L=11, K=10).
n_to_t_cols = cpa_df.columns[13:20]    # CPA N~T  -> KS AB~AH (7 columns)
ab_to_ah_cols = ks_df.columns[27:34]
ac_col, s_col = cpa_df.columns[28], ks_df.columns[18]    # CPA AC -> KS S
m_col, ks_m_col = cpa_df.columns[12], ks_df.columns[12]  # CPA M  -> KS M
l_col, ks_k_col = cpa_df.columns[11], ks_df.columns[10]  # CPA L  -> KS K

logging.info(f"CPA表N~T列: {list(n_to_t_cols)}，KS表AB~AH列: {list(ab_to_ah_cols)}，CPA表AC列: {ac_col}，KS表S列: {s_col}")

# 5. Backfill loop: for each KS row whose channel ID is numeric, find the CPA
# row with the same (date, channel ID) and copy values across. Rows with a
# non-numeric channel ID are skipped entirely and keep their original data.
for idx, row in ks_df.iterrows():
    # Normalize the KS row's date to 'YYYY-MM-DD' so it compares with cpa_df['日期_str'].
    a_value = pd.to_datetime(row['日期'], errors='coerce')
    a_value_str = a_value.strftime('%Y-%m-%d') if not pd.isna(a_value) else ''
    # Strip stray apostrophes (Excel text markers) from the channel ID.
    e_value = str(row['渠道ID']).replace("'", "") if pd.notnull(row['渠道ID']) else ''
    # Only rows whose channel ID parses as a number participate in the backfill.
    try:
        float(e_value)
        is_numeric = True
    except (ValueError, TypeError):
        is_numeric = False
    if not is_numeric:
        continue  # skip this row, keep its original data
    logging.info(f"第{idx+1}行：日期={a_value_str}，渠道ID={e_value}")

    # Locate the CPA row with the same date and channel ID; first match wins.
    match = cpa_df[
        (cpa_df['日期_str'] == a_value_str) &
        (cpa_df['渠道ID'] == e_value)
    ]
    if not match.empty:
        # Copy CPA columns N~T into KS columns AB~AH; any cell that does not
        # parse as a number becomes NaN so the later pd.to_numeric pass is clean.
        values = match.iloc[0][n_to_t_cols].values
        fill_values = []
        for i, v in enumerate(values):
            try:
                float_v = float(v)
                fill_values.append(float_v)
            except (ValueError, TypeError):
                fill_values.append(np.nan)
        # Pad with NaN or trim so the assignment covers exactly the AB~AH width.
        if len(fill_values) < len(ab_to_ah_cols):
            fill_values += [np.nan] * (len(ab_to_ah_cols) - len(fill_values))
        elif len(fill_values) > len(ab_to_ah_cols):
            fill_values = fill_values[:len(ab_to_ah_cols)]
        ks_df.loc[idx, ab_to_ah_cols] = fill_values

        # CPA column AC -> KS column S (copied verbatim, no numeric coercion here).
        ac_value = match.iloc[0][ac_col]
        ks_df.loc[idx, s_col] = ac_value
        logging.info(f"  匹配到，回填AC列({ac_col})到KS表S列({s_col})，值为：{ac_value}")

        # CPA column M -> KS column M.
        m_value = match.iloc[0][m_col]
        ks_df.loc[idx, ks_m_col] = m_value
        logging.info(f"  匹配到，回填M列({m_col})到KS表M列({ks_m_col})，值为：{m_value}")

        # CPA column L -> KS column K.
        l_value = match.iloc[0][l_col]
        ks_df.loc[idx, ks_k_col] = l_value
        logging.info(f"  匹配到，回填L列({l_col})到KS表K列({ks_k_col})，值为：{l_value}")
    else:
        # No CPA match: deliberately blank out AB~AH for this row.
        ks_df.loc[idx, ab_to_ah_cols] = [np.nan]*len(ab_to_ah_cols)
        logging.info("  未匹配到，AB~AH列回填为空")

# --- Spend backfill setup: target KS columns and the latest report date ---
ks_g_col, ks_account_id_col = '消耗', '账户ID'  # spend (G) and account-ID columns
# Parse the date column once into a helper column so the newest date can be found.
ks_df['日期_dt'] = pd.to_datetime(ks_df['日期'], errors='coerce')
latest_date = ks_df['日期_dt'].max()
# Compact YYYYMMDD form used in the spend CSV filename; empty when no valid date.
if pd.isna(latest_date):
    latest_date_str = ''
else:
    latest_date_str = latest_date.strftime('%Y%m%d')
# 3. Locate the spend CSV exported for a given date.
def find_csv_file(date_str, search_path=None):
    """Return the path of the spend CSV for *date_str* (YYYYMMDD), or None.

    Matches files named ``material_new_mcc_report_<date>-<date>_*.csv`` in
    *search_path*. When *search_path* is None, the module-level ``csv_dir``
    is used (resolved at call time, not at def time — the original early-bound
    default broke if ``csv_dir`` changed or was undefined at import).
    When several files match, the lexicographically first is returned;
    glob.glob's own order is OS-dependent, so the result is sorted for
    determinism.
    """
    if search_path is None:
        search_path = csv_dir
    pattern = f"{search_path}/material_new_mcc_report_{date_str}-{date_str}_*.csv"
    files = sorted(glob.glob(pattern))
    return files[0] if files else None
csv_file = find_csv_file(latest_date_str, search_path=csv_dir)
if csv_file:
    logging.info(f"找到消耗数据文件: {csv_file}")
    # 4. The export is GBK-encoded with 6 preamble rows before the header.
    csv_df = pd.read_csv(csv_file, encoding='gbk', dtype=str, skiprows=6)
    # 5. Resolve CSV columns by position.
    csv_account_id_col = csv_df.columns[1]  # column B: account ID
    csv_cost_col = csv_df.columns[6]        # column G: spend
    # 6. Only rows on the KS sheet's latest date are updated.
    latest_rows = ks_df[ks_df['日期_dt'] == latest_date]
    logging.info(f"消耗回填调试：ks表最新日期={latest_date}，该日期下数据条数={len(latest_rows)}，账户ID列表={latest_rows[ks_account_id_col].tolist()}")
    # Log the CSV sample once (the original logged it once per row).
    logging.info(f"csv账户ID前5行: {csv_df[csv_account_id_col].head().tolist()}")
    logging.info(f"csv消耗前5行: {csv_df[csv_cost_col].head().tolist()}")
    # Fallback account-detail workbook: loaded lazily at most once (the
    # original re-read the Excel file from disk for every unmatched row).
    detail_df = None
    detail_load_failed = False
    for idx, row in latest_rows.iterrows():
        account_id = row[ks_account_id_col]
        row_date = row['日期']
        if pd.isna(account_id) or account_id == '':
            logging.info(f"跳过空账户ID，行索引={idx}，日期={row_date}")
            continue
        logging.info(f"消耗匹配调试：ks表日期={row_date}，账户ID={account_id}，csv账户ID列名={csv_account_id_col}，csv消耗列名={csv_cost_col}")
        match = csv_df[csv_df[csv_account_id_col] == str(account_id)]
        if not match.empty:
            cost_value = match.iloc[0][csv_cost_col]
            ks_df.loc[idx, ks_g_col] = cost_value
            logging.info(f"  回填消耗：日期={row_date}，账户ID={account_id}，消耗={cost_value}")
            continue
        # Not in the CSV: fall back to the account-level detail workbook.
        if detail_df is None and not detail_load_failed:
            detail_path = f"{csv_dir}/账户维度明细报表{latest_date.strftime('%Y-%m-%d')}.xlsx"
            try:
                detail_df = pd.read_excel(detail_path, dtype=str)
            except Exception as e:
                detail_load_failed = True
                logging.error(f"  读取账户维度明细报表失败：{detail_path}，错误：{e}")
        if detail_df is not None:
            detail_account_id_col = detail_df.columns[1]  # column B: account ID
            detail_cost_col = '总消耗'  # total-spend column, addressed by header
            match_detail = detail_df[detail_df[detail_account_id_col] == str(account_id)]
            if not match_detail.empty:
                detail_cost = match_detail.iloc[0][detail_cost_col]
                ks_df.loc[idx, ks_g_col] = detail_cost
                logging.info(f"  二次回填消耗：日期={row_date}，账户ID={account_id}，总消耗={detail_cost}")
            else:
                logging.warning(f"  消耗未匹配到：日期={row_date}，账户ID={account_id}，csv账户ID列表={csv_df[csv_account_id_col].tolist()}，明细表账户ID列表={detail_df[detail_account_id_col].tolist()}")
else:
    logging.warning(f"未找到消耗数据文件，日期参数: {latest_date_str}")
# 6. Persist: coerce every backfilled column to numeric so Excel stores real
# numbers instead of strings, then write the output workbook.
for col in ab_to_ah_cols:
    ks_df[col] = pd.to_numeric(ks_df[col], errors='coerce')
ks_df[s_col] = pd.to_numeric(ks_df[s_col], errors='coerce')      # S column
ks_df[ks_m_col] = pd.to_numeric(ks_df[ks_m_col], errors='coerce')  # M column
ks_df[ks_k_col] = pd.to_numeric(ks_df[ks_k_col], errors='coerce')  # K column
ks_df[ks_g_col] = pd.to_numeric(ks_df[ks_g_col], errors='coerce')  # G (spend) column
# Bug fix: '日期_dt' is an internal helper column added during the spend
# backfill; the original wrote it into the output workbook. Drop it here
# (errors='ignore' keeps the save working even if it was never created).
ks_df.drop(columns=['日期_dt'], errors='ignore').to_excel(output_path, index=False)
logging.info(f"回填完成，已保存为 {output_path}")