from aws_cdk import (
    aws_autoscaling as autoscaling,
    aws_ec2 as ec2,
    aws_ecs as ecs,
    aws_s3 as s3,
    aws_events as events,
    aws_events_targets as targets,
    aws_ecs_patterns as ecs_patterns,
    aws_sqs as sqs,
    App, CfnOutput, Stack,
    RemovalPolicy,
    Duration
)
from constructs import Construct
import os

class S3EventbridgeEcsStack(Stack):
    """CDK stack wiring S3 "Object Created" events to a Fargate service.

    Data flow: objects uploaded to the asset bucket emit EventBridge events,
    an EventBridge rule forwards them to an SQS queue, and a network
    load-balanced Fargate service polls the queue and serves HTTP on port 80.
    """

    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        # Default the deployment env from the CDK CLI environment variables,
        # but let an explicit `env` kwarg from the caller take precedence.
        # NOTE: the previous code passed `env=...` AND `**kwargs` to
        # super().__init__, which raised "got multiple values for keyword
        # argument 'env'" whenever the caller supplied env; setdefault avoids
        # the duplicate keyword while preserving the same defaulting behavior.
        kwargs.setdefault('env', {
            'account': os.environ['CDK_DEFAULT_ACCOUNT'],
            'region': os.environ['CDK_DEFAULT_REGION']
        })
        super().__init__(scope, id, **kwargs)

        # Asset bucket: short-lived demo bucket, fully cleaned up on stack
        # destroy (auto_delete_objects empties it so DESTROY can succeed).
        asset_bucket = s3.Bucket(
            self, "AssetBucket",
            removal_policy=RemovalPolicy.DESTROY,
            auto_delete_objects=True,  # allow bucket deletion during cleanup
            lifecycle_rules=[s3.LifecycleRule(
                # Expire objects after a day; this is transient demo data.
                expiration=Duration.days(1)
            )],
            # Required so S3 publishes object-level events to EventBridge.
            event_bridge_enabled=True
        )

        # Networking: a 2-AZ VPC hosting the ECS cluster.
        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, 'Ec2Cluster',
            vpc=vpc
        )

        # SQS queue that buffers S3 events for the service to consume.
        # Visibility timeout (300s) should exceed the consumer's per-message
        # processing time so messages aren't redelivered mid-processing.
        queue = sqs.Queue(
            self, "S3EventsQueue",
            visibility_timeout=Duration.seconds(300),
            retention_period=Duration.days(1)
        )

        # Minimal Fargate task definition (0.25 vCPU / 512 MiB).
        task_definition = ecs.FargateTaskDefinition(
            self, "TaskDef",
            memory_limit_mib=512,
            cpu=256
        )

        # Container image is built from the local app directory at synth time.
        task_definition.add_container(
            "web",
            image=ecs.ContainerImage.from_asset(
                directory="../s3-eventbridge-ecs/app"
            ),
            port_mappings=[ecs.PortMapping(container_port=80)],
            # The app discovers its resources via environment variables.
            environment={
                "S3_BUCKET_NAME": asset_bucket.bucket_name,
                "SQS_QUEUE_URL": queue.queue_url,
                "AWS_DEFAULT_REGION": self.region
            },
            logging=ecs.LogDriver.aws_logs(stream_prefix="S3Events")
        )

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self, "FargateService",
            cluster=cluster,
            task_definition=task_definition,
            public_load_balancer=True
        )
        # NLBs don't manage security groups, so the task's security group
        # must explicitly allow inbound HTTP from the internet.
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.tcp(80),
            "Allow HTTP traffic"
        )

        # Grant the task role permission to consume queue messages and to
        # read the referenced S3 objects.
        queue.grant_consume_messages(fargate_service.task_definition.task_role)
        asset_bucket.grant_read(fargate_service.task_definition.task_role)

        # EventBridge rule: match "Object Created" events scoped to this
        # bucket (S3 puts the bucket ARN in the event's `resources` field).
        event_rule = events.Rule(
            self,
            "ecsRule",
            description="Rule to send S3 events to SQS",
            event_pattern=events.EventPattern(
                source=["aws.s3"],
                detail_type=["Object Created"],
                resources=[asset_bucket.bucket_arn]
            ),
        )

        # Route matched events into the queue (target grants EventBridge
        # permission to send messages automatically).
        event_rule.add_target(targets.SqsQueue(queue))

        # Stack outputs for quick access after deployment.
        CfnOutput(
            self, "LoadBalancerDNS",
            value=fargate_service.load_balancer.load_balancer_dns_name
        )
        CfnOutput(
            self, "BucketName",
            value=asset_bucket.bucket_name
        )
        CfnOutput(
            self, "QueueURL",
            value=queue.queue_url
        )
 