This message was deleted.
# aws
s
This message was deleted.
b
can you share the code you currently have?
f
Check if you have any notifications in your service in the AWS console. I'm also struggling with my service not getting updated, and it seems to be related to the message: "Reason: You've reached the limit on the number of tasks you can run concurrently."
I've been reading in some places online that AWS is limiting to two tasks until you request an increase 🤷
g
here is the code of my pulumi client:
Copy code
def create_container_service(self, vpc, vpc_subnet, cluster, image, security_group, service_name):
    """Provision a Fargate-backed ECS service fronted by an Application Load Balancer.

    Creates, in order: an ALB, an IP-mode target group, a listener on port
    8080, a task-execution IAM role with the managed ECS policy attached, a
    Fargate task definition, and the ECS service itself. Finally exports the
    service URL (``service.<service_name>.url``).

    Args:
        vpc: VPC id the target group is created in.
        vpc_subnet: subnet ids used by both the ALB and the service tasks.
        cluster: ECS cluster (id/ARN) the service runs in.
        image: container-definitions JSON string (or a Pulumi Output
            resolving to one) passed straight to the task definition.
        security_group: security-group id applied to the ALB and the tasks.
        service_name: prefix used to name every created resource.
    """
    alb = aws.lb.LoadBalancer(service_name + '-app-lb',
                              security_groups=[security_group],
                              subnets=vpc_subnet,
                              )

    atg = aws.lb.TargetGroup(service_name + '-app-tg',
                             port=8080,
                             protocol='HTTP',
                             target_type='ip',  # Fargate (awsvpc) tasks register by IP, not instance id
                             vpc_id=vpc,
                             )

    wl = aws.lb.Listener(service_name + '-web',
                         load_balancer_arn=alb.arn,
                         port=8080,
                         default_actions=[aws.lb.ListenerDefaultActionArgs(
                             type='forward',
                             target_group_arn=atg.arn,
                         )],
                         )

    # IAM role assumed by the ECS agent to pull the image and ship logs.
    # FIX: the principal was the Slack-mangled string
    # '<http://ecs-tasks.amazonaws.com|ecs-tasks.amazonaws.com>', which AWS
    # rejects as an invalid service principal; it must be the bare hostname.
    role = aws.iam.Role(service_name + '-task-exec-role',
                        assume_role_policy=json.dumps({
                            'Version': '2008-10-17',
                            'Statement': [{
                                'Sid': '',
                                'Effect': 'Allow',
                                'Principal': {
                                    'Service': 'ecs-tasks.amazonaws.com'
                                },
                                'Action': 'sts:AssumeRole',
                            }]
                        }),
                        )

    aws.iam.RolePolicyAttachment(service_name + '-task-exec-policy',
                                 role=role.name,
                                 policy_arn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy',
                                 )

    task_definition = aws.ecs.TaskDefinition(service_name + '-app-task',
                                             family='fargate-task-definition',
                                             cpu='256',
                                             memory='512',
                                             network_mode='awsvpc',  # required for FARGATE
                                             requires_compatibilities=[
                                                 'FARGATE'],
                                             execution_role_arn=role.arn,
                                             container_definitions=image,
                                             )

    aws.ecs.Service(service_name + '-svc',
                    cluster=cluster,
                    desired_count=2,
                    launch_type='FARGATE',
                    task_definition=task_definition.arn,
                    network_configuration=aws.ecs.ServiceNetworkConfigurationArgs(
                        # NOTE(review): public IPs let tasks reach the image
                        # registry without a NAT gateway — confirm this is intended.
                        assign_public_ip=True,
                        subnets=vpc_subnet,
                        security_groups=[security_group],
                    ),
                    load_balancers=[aws.ecs.ServiceLoadBalancerArgs(
                        target_group_arn=atg.arn,
                        container_name=service_name,
                        container_port=8080,
                    )],
                    # The listener must exist before the service tries to
                    # register targets with the target group.
                    opts=ResourceOptions(depends_on=[wl]),
                    )
    export('service.' + service_name + '.url', alb.dns_name.apply(lambda url: 'http://' + url + ':8080'))
every time I call this function I’m passing a new freshly created
container_definitions
including the new version of the code uploaded to ECR:
Copy code
# Build-and-push the application image, then return the ECS
# container-definitions JSON (app container + FireLens log router).
# NOTE(review): this is a fragment — the enclosing `def` line is not
# visible here, so the surrounding signature/indentation is as pasted.
image = docker.Image(name,
                        build=code_path,
                        image_name=repository,
                        registry=credentials.apply(getRegistryInfo),
                        )

    # Export the image names so they show up in `pulumi stack output`.
    export('baseImageName', image.base_image_name)
    export('imageName', image.image_name)
    # Resolve the Output values, then render the two container definitions
    # as a single JSON string for aws.ecs.TaskDefinition.
    # NOTE(review): BUG — `args['datadogApiKey']` below will raise KeyError:
    # Output.all(...) only supplies 'image' and 'envVars'. Add
    # `datadogApiKey=<secret>` to the Output.all(...) call (or drop the key).
    return Output.all(image=image.image_name, envVars=envVars).apply(
      lambda args: json.dumps([{
        'name': name,
        'image': args['image'],
        # Datadog autodiscovery labels for the app container.
        'dockerLabels': {
            'com.datadoghq.ad.instances': '[{\"host\": \"%%host%%\", \"port\": 8080}]',
            'com.datadoghq.ad.check_names': '[\"'+name+'\"]',
            'com.datadoghq.ad.init_configs': '[{}]'
        },
        'portMappings': [{
          'containerPort': 8080,
          'hostPort': 8080,
          'protocol': 'tcp'
        },
        ],
        'secrets': args['envVars'],
        # Route container logs through FireLens to Datadog.
        'logConfiguration': { 
          'logDriver': 'awsfirelens',
          'options': {
              'Name': 'datadog',
              'apiKey': args['datadogApiKey'],
              'dd_service': 'my-httpd-service',
              'dd_source': 'httpd',
              'dd_tags': 'project:example',
              'TLS': 'on',
              'provider': 'ecs'
          }
        }
      },
      {
        # Sidecar: fluent-bit log router required by the awsfirelens driver.
        'essential': True,
        'image': 'amazon/aws-for-fluent-bit:latest',
        'name': name+'-log_router',
        'firelensConfiguration': {
          'type': 'fluentbit',
          'options': {
            'enable-ecs-log-metadata': 'true'
          }
        }
    }]
      )
    )