import zipfile
import re
import json
import uuid
import pandas as pd
import numpy as np
import gzip
from io import BytesIO, TextIOWrapper
from ecr_billing_tools import generate_last_folder, find_last_folder, print_and_write, upload_df_to_s3, convert_size, get_UsageDate, get_csv_filename

def process_patch(last_cur_prefix_month, last_cur_prefix_base, link_cur_prefix_base, last_cur_bucket, link_cur_bucket, manifest_json_name, s3, chunksize=100000, exclude_account_id='054309941891'):
    """Build and upload the monthly CUR "patch".

    Loads the payer CUR CSVs for the latest folder of the billing month,
    drops SppDiscount/Credit lines and the excluded account, builds the
    reconciliation rows from the link-account CURs, then uploads both CSVs
    plus a rewritten manifest to a new timestamped folder.

    Parameters
    ----------
    last_cur_prefix_month : str
        Billing period folder name, e.g. '20240601-20240701'.
    last_cur_prefix_base, link_cur_prefix_base : str
        S3 key prefixes for the payer CUR and the link-account CURs.
    last_cur_bucket, link_cur_bucket : str
        Source/target bucket names.
    manifest_json_name : str
        File name of the CUR manifest JSON.
    s3 : boto3 S3 client.
    chunksize : int
        Rows per pandas chunk when streaming the CSVs.
    exclude_account_id : str
        Usage account whose line items are dropped from the payer CUR
        (previously hard-coded in the filter).
    """
    print_and_write("-" * 88)
    print_and_write("开始处理 CSV 补丁文件")
    # e.g. cur-to-ob/20240601-20240701/
    last_cur_prefix_parent = f'{last_cur_prefix_base}{last_cur_prefix_month}/'
    # Billing month prefix, e.g. 202406
    link_cur_prefix = f'{link_cur_prefix_base}{last_cur_prefix_month[:6]}'

    # Latest existing folder of the billing month, e.g.
    # cur-to-ob/20240601-20240701/20240609T042719Z/ — source of the files to process.
    last_cur_prefix = find_last_folder(last_cur_prefix_parent, last_cur_bucket, s3)
    # Latest folder advanced by one hour, e.g.
    # cur-to-ob/20240601-20240701/20240609T050000Z/ — upload destination.
    new_last_cur_prefix = generate_last_folder(last_cur_prefix_parent, last_cur_bucket, s3)

    # Guard clause: destination folder already exists, nothing to do.
    if last_cur_prefix == new_last_cur_prefix:
        print_and_write(f's3://{last_cur_bucket}/{new_last_cur_prefix} 文件夹已存在，本次不做任何操作。')
        print_and_write("-" * 88)
        return

    print_and_write("开始处理 CSV 补丁文件")
    # The two CSV files to process.
    csv_filenames = [get_csv_filename(1), get_csv_filename(2)]

    # Load the payer CUR, then drop SPP/credit lines and the excluded account.
    df_payer_origin = process_csv_in_chunks(last_cur_bucket, last_cur_prefix, csv_filenames, chunksize, s3)
    df_payer_filtered = df_payer_origin[
        ~(df_payer_origin['lineItem/LineItemType'].isin(['SppDiscount', 'Credit']))
        & (df_payer_origin['lineItem/UsageAccountId'] != exclude_account_id)]
    print('-' * 88)
    print_and_write(f'df_payer_origin 处理完毕。')

    # Build the reconciliation rows from the link-account CUR zips.
    df_payer_monthly = process_link_cur_from_s3(link_cur_bucket, f'{link_cur_prefix}', df_payer_origin, chunksize, s3)
    print('-' * 88)
    print_and_write(f'df_payer_monthly 处理完毕。')

    # Upload both CSVs, then the rewritten manifest JSON.
    upload_df_to_s3(df_payer_filtered, last_cur_bucket, new_last_cur_prefix, 1, s3)
    upload_df_to_s3(df_payer_monthly, last_cur_bucket, new_last_cur_prefix, 2, s3)
    process_manifestjson(last_cur_bucket, manifest_json_name, last_cur_prefix, new_last_cur_prefix, last_cur_prefix_parent, manifest_json_name, s3)

# Read the payer CUR CSVs in chunks; used to build the new monthly billing structure.
def process_csv_in_chunks(last_cur_bucket, last_cur_prefix, csv_filenames, chunksize, s3):
    """Stream gzip-compressed CSVs from S3 and return them as one DataFrame.

    Parameters
    ----------
    last_cur_bucket : str
        Bucket holding the CUR files.
    last_cur_prefix : str
        Key prefix (folder) containing the files.
    csv_filenames : list[str]
        File names (appended to the prefix) to download and parse.
    chunksize : int
        Rows per pandas chunk.
    s3 : boto3 S3 client.

    Returns
    -------
    pandas.DataFrame
        All rows of all files, concatenated with a fresh index.
    """
    # Account-id columns must stay strings so leading zeros are preserved.
    string_cols = ['bill/PayerAccountId', 'lineItem/UsageAccountId']
    dtype_map = {col: str for col in string_cols}
    chunks = []
    for csv_filename in csv_filenames:
        # Fetch the gzipped CSV object from S3.
        response = s3.get_object(Bucket=last_cur_bucket, Key=f'{last_cur_prefix}{csv_filename}')
        data = response['Body'].read()
        with gzip.GzipFile(fileobj=BytesIO(data)) as gz:
            # Collect chunks and concatenate once at the end instead of
            # re-concatenating inside the loop (which is quadratic).
            chunks.extend(pd.read_csv(TextIOWrapper(gz), chunksize=chunksize, dtype=dtype_map, low_memory=False))
    if not chunks:
        return pd.DataFrame()
    return pd.concat(chunks, ignore_index=True)

def process_link_cur_from_s3(bucket_name, prefix, df_payer_origin, chunksize, s3):
    """Build the reconciliation patch rows from zipped link-account CURs.

    Downloads each zip under *prefix*, extracts the DISCOUNT / Enterprise
    Support lines from the per-account CSVs inside, then reshapes them into
    a DataFrame whose columns match *df_payer_origin* (missing columns are
    filled with NaN).

    Parameters
    ----------
    bucket_name : str
        Bucket holding the link-account zip files.
    prefix : str
        Key prefix of the billing month, e.g. 'link-cur/202406'.
    df_payer_origin : pandas.DataFrame
        Payer CUR; only its column layout is used here.
    chunksize : int
        Rows per pandas chunk when parsing each CSV.
    s3 : boto3 S3 client.

    Returns
    -------
    pandas.DataFrame
        Patch rows aligned to the payer CUR columns.
    """
    # NOTE(review): list_objects_v2 returns at most 1000 keys per call and
    # no pagination is done here — confirm the month never exceeds that.
    extracted_frames = []
    response = s3.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
    for obj in response.get('Contents', []):
        key = obj['Key']
        print_and_write(f'开始处理: {key}，文件大小：{convert_size(obj["Size"])}')
        s3_response = s3.get_object(Bucket=bucket_name, Key=key)
        zip_content = BytesIO(s3_response['Body'].read())

        with zipfile.ZipFile(zip_content, 'r') as zf:
            for file_name in zf.namelist():
                # Only files named like '<12-digit-account>-cur-with-tags-YYYY-MM.csv'.
                if re.match(r'^\d{12}-cur-with-tags-\d{4}-\d{2}\.csv$', file_name):
                    link_account_id = file_name[:12]
                    print_and_write(f'开始处理csv: {file_name}')
                    with zf.open(file_name) as csv_file:
                        extracted_frames.append(process_link_cur_csv_in_zip(csv_file, link_account_id, chunksize))

    # Concatenate once (not inside the loop, which would be quadratic);
    # keep the seed frame so the three columns exist even with no matches.
    df_reconciliation = pd.DataFrame(columns=['LinkAccountID', 'ItemDescription', 'UnBlendedCost'])
    if extracted_frames:
        df_reconciliation = pd.concat([df_reconciliation] + extracted_frames, ignore_index=True)

    # Rename the extracted columns to their CUR equivalents.
    df_reconciliation.rename(columns={'LinkAccountID': 'lineItem/UsageAccountId',
                'ItemDescription': 'lineItem/LineItemDescription',
                'UnBlendedCost': 'lineItem/UnblendedCost'}, inplace=True)
    # Build the constant-per-row columns the CUR format requires.
    UsageStartDate, UsageEndDate = get_UsageDate()
    n_rows = len(df_reconciliation)
    start_date = pd.Series([UsageStartDate] * n_rows, name='lineItem/UsageStartDate')
    end_date = pd.Series([UsageEndDate] * n_rows, name='lineItem/UsageEndDate')
    billing_period_start_date = start_date.copy(deep=True)
    billing_period_start_date.name = 'bill/BillingPeriodStartDate'
    billing_period_end_date = end_date.copy(deep=True)
    billing_period_end_date.name = 'bill/BillingPeriodEndDate'
    currency_code = pd.Series(['USD'] * n_rows, name='lineItem/CurrencyCode')
    # Each patch row gets a fresh unique line-item id.
    line_item_id = pd.Series([str(uuid.uuid4()) for _ in range(n_rows)], name='identity/LineItemId')
    # Derive lineItem/ProductCode from the description text.
    product_code = np.where(df_reconciliation['lineItem/LineItemDescription'] == 'DISCOUNT', 'DISCOUNT',
                    np.where(df_reconciliation['lineItem/LineItemDescription'] == 'AWS Enterprise Support', 'PLES',
                    np.where(df_reconciliation['lineItem/LineItemDescription'] == 'Enterprise Support', 'PLES', '')))
    product_code = pd.Series(product_code, name='lineItem/ProductCode')
    # lineItem/UsageType mirrors lineItem/ProductCode.
    usage_type = product_code.copy(deep=True)
    usage_type.name = 'lineItem/UsageType'
    # Assemble the new columns in front of the extracted ones.
    new_df = pd.concat([start_date, end_date, currency_code, line_item_id, product_code, usage_type, billing_period_start_date, billing_period_end_date,  df_reconciliation], axis=1)
    # All patch rows carry the synthetic line-item type.
    new_df.insert(2, 'lineItem/LineItemType', 'ECR_Reconciliation')
    # Align to the payer CUR column layout; absent columns become NaN.
    all_columns = df_payer_origin.columns
    patch_df = new_df.reindex(all_columns, axis=1, fill_value=np.nan)
    return patch_df

def process_link_cur_csv_in_zip(csv_file, link_account_id, chunksize):
    """Extract DISCOUNT / Enterprise Support rows from one link-account CSV.

    Parameters
    ----------
    csv_file : binary file-like
        Open CSV stream (e.g. a member of a ZipFile).
    link_account_id : str
        12-digit account id, inserted as the first output column.
    chunksize : int
        Rows per pandas chunk.

    Returns
    -------
    pandas.DataFrame
        Columns ['LinkAccountID', 'ItemDescription', 'UnBlendedCost'];
        empty DataFrame when nothing matched.
    """
    # Read both columns as strings (costs are kept as text on purpose here).
    string_cols = ['ItemDescription', 'UnBlendedCost']
    matched = []
    # Removed a dead gzip.GzipFile writer the original wrapped this loop in:
    # nothing was ever written to it.
    for chunk in pd.read_csv(TextIOWrapper(csv_file), chunksize=chunksize,
                             dtype={col: str for col in string_cols}, low_memory=False):
        desc = chunk['ItemDescription']
        # na=False: rows with a missing description would otherwise yield NaN
        # in the mask and break the boolean indexing.
        mask = (desc.str.contains('DISCOUNT', case=True, na=False)
                | desc.str.contains('AWS Enterprise Support', case=True, na=False)
                | desc.str.contains('Enterprise Support', case=True, na=False))
        matched.append(chunk.loc[mask, string_cols])
    extracted_data = pd.DataFrame()
    final_df = pd.concat(matched, ignore_index=True) if matched else pd.DataFrame()
    if not final_df.empty:
        extracted_data = final_df[['ItemDescription', 'UnBlendedCost']]
        extracted_data.insert(0, 'LinkAccountID', link_account_id)
    return extracted_data

def process_manifestjson(bucket_name, manifest_key, last_cur_prefix, new_last_cur_prefix, parent_prefix, manifest_json_name, s3):
    """Rewrite the CUR manifest so it points at the two patched CSVs.

    Downloads the manifest from the previous folder, replaces its
    ``reportKeys`` list with the two new CSV keys, and uploads it both into
    the new timestamped folder and into the billing-month parent folder.

    Parameters
    ----------
    bucket_name : str
        Bucket holding the manifest.
    manifest_key : str
        Manifest file name under *last_cur_prefix* to download.
    last_cur_prefix, new_last_cur_prefix, parent_prefix : str
        Source folder, new target folder, and billing-month parent folder.
    manifest_json_name : str
        Manifest file name used for the two uploads.
        NOTE(review): callers currently pass the same value for
        *manifest_key* and *manifest_json_name*; both are kept for
        interface compatibility.
    s3 : boto3 S3 client.
    """
    # Download the previous manifest (e.g. ob-payer-cur-Manifest.json).
    manifest_obj = s3.get_object(Bucket=bucket_name, Key=f'{last_cur_prefix}{manifest_key}')
    manifest_data = json.loads(manifest_obj['Body'].read())
    # Replace reportKeys with exactly the two freshly uploaded CSV keys.
    manifest_data["reportKeys"] = [
        f'{new_last_cur_prefix}{get_csv_filename(1)}',
        f'{new_last_cur_prefix}{get_csv_filename(2)}',
    ]
    # Serialize once; the same body is uploaded to both destinations.
    body = json.dumps(manifest_data)
    s3.put_object(Bucket=bucket_name, Key=f'{new_last_cur_prefix}{manifest_json_name}', Body=body)
    print_and_write('-'*88)
    print_and_write(f'json上传完毕 s3://{bucket_name}/{new_last_cur_prefix}{manifest_json_name}')
    s3.put_object(Bucket=bucket_name, Key=f'{parent_prefix}{manifest_json_name}', Body=body)
    print_and_write('-'*88)
    print_and_write(f'json上传完毕 s3://{bucket_name}/{parent_prefix}{manifest_json_name}')
