import os
import math
import gzip
import boto3
from io import BytesIO
from boto3.s3.transfer import TransferConfig
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta

def get_csv_filename(number):
    """Return the gzipped CSV part name for the given index, e.g. 'ob-payer-cur-00001.csv.gz'."""
    return 'ob-payer-cur-0000{}.csv.gz'.format(number)

# 方便查看文件大小，输出转换
def convert_size(size_bytes):
    if size_bytes == 0:
        return "0B"
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return "%s %s" % (s, size_name[i])

# Build the link_cur download month (previous month) and the folder-name span
# covering previous-month-start to current-month-start.
# Input: 20240701
# download_month format: '2024/06'
# last_cur_prefix_month format: 20240601-20240701
def generate_date_range(input_date_str=None):
    """Return (previous month as 'YYYY/MM', 'YYYYMMDD-YYYYMMDD' span string).

    input_date_str: date in YYYYMMDD form; defaults to today.
    """
    if input_date_str is None:
        input_date_str = datetime.now().strftime("%Y%m%d")
    anchor = datetime.strptime(input_date_str, "%Y%m%d")
    month_start = anchor.replace(day=1)  # first day of the input month
    # Stepping back one day from the month start lands in the previous month.
    prev_month_start = (month_start - timedelta(days=1)).replace(day=1)
    span = f"{prev_month_start.strftime('%Y%m%d')}-{month_start.strftime('%Y%m%d')}"
    return prev_month_start.strftime('%Y/%m'), span

# Month of the link CUR to download; defaults to last month.
# Return format: 2024/06
# NOTE: if specified manually, follow the return format.
def get_cur_folder(month=None):
    """Return the CUR month folder; month (if given) is passed through unchanged."""
    if month is not None:
        return month
    # First day of this month minus one day falls inside the previous month.
    return (datetime.now().replace(day=1) - timedelta(days=1)).strftime('%Y/%m')

# Return timestamps for midnight on the 1st of the previous month and the 1st
# of the target month; the target month can be adjusted via the parameter.
# Output format: 2024-07-01T00:00:00Z, 2024-08-01T00:00:00Z
def get_UsageDate(month=None):
    """Return (first_of_previous_month, first_of_target_month) as 'YYYY-MM-DDTHH:MM:SSZ'.

    month: optional 1-based month number; values outside 1..12 roll the year
    (e.g. 13 -> January of next year). Defaults to the current month.
    """
    now = datetime.now()
    if month is None:
        # Use the current month's first day at midnight.
        window_end = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    else:
        year_shift, month_index = divmod(month - 1, 12)
        window_end = now.replace(year=now.year + year_shift, month=month_index + 1,
                                 day=1, hour=0, minute=0, second=0, microsecond=0)
    # window_end is always a day-1 midnight, so one day back then day=1 gives
    # exactly the first day of the previous month at midnight.
    window_start = (window_end - timedelta(days=1)).replace(day=1)
    fmt = '%Y-%m-%dT%H:%M:%SZ'
    return window_start.strftime(fmt), window_end.strftime(fmt)

def timestamp():
    """Return the current wall-clock time as 'HH:MM:SS'."""
    return f"{datetime.now():%H:%M:%S}"

def create_folder_if_not_exists(folder_path):
    """Create folder_path (including parents) if it does not already exist.

    Uses makedirs(exist_ok=True) so that a folder created concurrently between
    the existence check and the mkdir call no longer raises FileExistsError.
    """
    if not os.path.exists(folder_path):
        print_and_write(f"文件夹不存在: {folder_path} 创建新文件夹...")
        os.makedirs(folder_path, exist_ok=True)

def print_and_write(text, file_full_path='py_log', enable_log=False):
    """Print a timestamped message and optionally append it to a log file.

    text: message to emit.
    file_full_path: log file path used when enable_log is True.
    enable_log: when True, also append the line to file_full_path.
    """
    # Format the timestamp once so stdout and the log file always agree
    # (the original formatted it twice and could straddle a second boundary).
    line = f'{datetime.now().strftime("%Y-%m-%d %H:%M:%S")} - {text}'
    print(line)
    if enable_log:
        # Explicit UTF-8 so non-ASCII log text is portable across platforms.
        with open(file_full_path, 'a', encoding='utf-8') as f:
            f.write(line + '\n')


# Upload a local file to S3.
def upload_localfile_to_s3(s3, file_path, bucket_name, prefix, object_name=None):
    """Upload file_path to s3://bucket_name/<prefix><object_name> via multipart transfer.

    object_name defaults to the basename of file_path. Errors are logged, not raised.
    """
    object_name = os.path.basename(file_path) if object_name is None else object_name
    try:
        # Multipart settings: threshold and chunk size are 25 KB (1024 * 25 bytes).
        transfer_config = TransferConfig(
            multipart_threshold=1024 * 25,
            max_concurrency=10,
            multipart_chunksize=1024 * 25,
            use_threads=True,
        )
        s3.upload_file(
            file_path, bucket_name, prefix + object_name,
            Config=transfer_config,
        )
        print_and_write(f"上传文件: '{file_path}' 至 s3://{bucket_name}/{prefix}{object_name}'")
    except Exception as e:
        print_and_write(f"上传文件至 s3 错误: {e}")


# Build the monthly billing folder prefix: the most recent folder, rounded up
# to the next full hour unless it is already on an exact hour.
# Return format: 20240601T000000Z/
def generate_last_folder(last_cur_prefix, bucket_name, s3):
    """Return the folder prefix to use: the latest one if it falls on an exact
    hour, otherwise the next whole hour after it."""
    latest = find_last_folder(last_cur_prefix, bucket_name, s3)
    stamp = latest.replace(last_cur_prefix, '')
    parsed = datetime.strptime(stamp[:-1], "%Y%m%dT%H%M%SZ")
    # A folder at minute 0 / second 0 is treated as final; anything else is
    # likely a duplicate generation, so round up to the next whole hour.
    if parsed.minute == 0 and parsed.second == 0:
        return latest
    rounded = (parsed + timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
    return last_cur_prefix + rounded.strftime("%Y%m%dT%H%M%SZ") + "/"

# Scan second-level S3 "folders" named like 20240702T101010Z/ and return the newest.
def find_last_folder(last_cur_prefix, bucket_name, s3):
    """Return last_cur_prefix joined with the most recent timestamp-named sub-folder."""
    paginator = s3.get_paginator('list_objects_v2')
    subfolders = []
    for page in paginator.paginate(Bucket=bucket_name, Prefix=last_cur_prefix, Delimiter='/'):
        for cp in page.get('CommonPrefixes', []):
            subfolders.append(cp['Prefix'].replace(last_cur_prefix, ''))
    newest = None
    if subfolders:
        # Parse the trailing-slash-stripped name and keep the latest timestamp;
        # max() keeps the first occurrence on ties, matching a strict > scan.
        newest = max(subfolders, key=lambda name: datetime.strptime(name[:-1], "%Y%m%dT%H%M%SZ"))
    return f'{last_cur_prefix}{newest}'

# Check whether an S3 object exists.
def check_s3file_exists(bucket_name, file_key, s3):
    """Return True if head_object succeeds for the key, False on any error."""
    try:
        # head_object raises (404 / 403 / etc.) when the object is unreachable.
        s3.head_object(Bucket=bucket_name, Key=file_key)
    except Exception:
        return False
    return True


# Convert a DataFrame to gzipped CSV and upload it to S3.
def upload_df_to_s3(df, bucket_name, upload_prefix, filename_index, s3):
    """Serialize df as gzip-compressed CSV and upload it to
    s3://bucket_name/<upload_prefix><part-file-name>.

    filename_index: index passed to get_csv_filename to build the object name.
    """
    filename = get_csv_filename(filename_index)  # computed once, used twice below

    csv_buffer = BytesIO()
    df.to_csv(csv_buffer, index=False, header=True)

    gzip_buffer = BytesIO()
    with gzip.GzipFile(fileobj=gzip_buffer, mode='w') as f:
        f.write(csv_buffer.getvalue())
    gzip_buffer.seek(0)

    print_and_write('-' * 88)
    s3.upload_fileobj(gzip_buffer, bucket_name, f'{upload_prefix}{filename}')
    # Report completion only AFTER the upload call succeeds — the original
    # printed "上传完毕" before upload_fileobj, so a failed upload still logged success.
    print_and_write(f"上传完毕 s3://{bucket_name}/{upload_prefix}{filename}")


# Assume an IAM role and return temporary credentials.
def assume_role(role_arn, session_name):
    """Return the 'Credentials' dict from an STS assume-role call, or None on failure."""
    sts = boto3.client('sts')
    try:
        resp = sts.assume_role(
            RoleArn=role_arn,
            RoleSessionName=session_name
        )
    except ClientError as e:
        print(f"Error assuming role: {e}")
        return None
    return resp['Credentials']

# Create a new boto3 session from temporary credentials.
def create_session_with_credentials(credentials):
    """Return a boto3.Session built from an STS Credentials dict, or None when absent."""
    if not credentials:
        print("No credentials available to create session.")
        return None
    return boto3.Session(
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken']
    )

# Fetch the identity of the current caller.
def get_caller_identity(session):
    """Return the STS get-caller-identity response for the session, or None on failure."""
    sts = session.client('sts')
    try:
        return sts.get_caller_identity()
    except ClientError as e:
        print(f"Error getting caller identity: {e}")
        return None

# Recursively copy every object under one S3 prefix to another.
def s3_copy_recursive(source_bucket, source_prefix, target_bucket, target_prefix, s3):
    """Copy all objects under source_prefix into target_prefix, preserving the
    key suffix relative to the source prefix. Per-object errors are logged."""
    print_and_write('-' * 88)
    print_and_write(f"准备复制 s3://{source_bucket}/{source_prefix} -> s3://{target_bucket}/{target_prefix}")
    prefix_len = len(source_prefix)
    pages = s3.get_paginator('list_objects_v2').paginate(Bucket=source_bucket, Prefix=source_prefix)
    for page in pages:
        for entry in page.get('Contents', []):
            source_key = entry['Key']
            # Re-root the key: strip the source prefix, prepend the target prefix.
            target_key = target_prefix + source_key[prefix_len:]
            try:
                print_and_write(f"Copying {source_key} to {target_key}")
                s3.copy_object(
                    CopySource={'Bucket': source_bucket, 'Key': source_key},
                    Bucket=target_bucket,
                    Key=target_key,
                )
            except ClientError as e:
                print_and_write(f"Error copying {source_key} to {target_key}: {e}")
    print_and_write("复制完毕")
    print_and_write('-' * 88)

# Print every object key in an S3 bucket under an optional prefix.
def list_s3_objects(s3, bucket_name, prefix=""):
    """Print the key of each object found under prefix in bucket_name."""
    pages = s3.get_paginator('list_objects_v2').paginate(Bucket=bucket_name, Prefix=prefix)
    for page in pages:
        for entry in page.get('Contents', []):
            print(entry['Key'])