# aws
i
Hi all, anyone got any suggestions? I am trying to use Pulumi to recreate a Terraform workflow that I have. In my Terraform I use the terraform-aws-modules/vpc/aws module, which allows me to easily add a secondary CIDR block as well as dictate the CIDR blocks for the subnets within it. I am trying to create a networking custom resource here in Pulumi for future re-usability. Below is the code I have so far, but I can’t seem to figure out how I can accomplish the same thing. I am adding the secondary CIDR block, but that happens after the awsx.ec2.Vpc resource and therefore I am not able to have my isolated subnet utilize my secondary CIDR block.
from dataclasses import dataclass

import pulumi
import pulumi_aws as aws
import pulumi_awsx as awsx

@dataclass
class VpcArgs:
    vpc_cidr_block: str
    vpc_cidr_block_secondary: str
    instance_tenancy: str
    enable_dns_hostnames: bool
    enable_dns_support: bool
    owner: str


class Vpc(pulumi.ComponentResource):
    def __init__(self, name: str, args: VpcArgs, opts: pulumi.ResourceOptions = None) -> None:
        super().__init__("awsCustomNetworking:index:Vpc", name, None, opts)

        self.name = name
        self.args = args

        self.vpc = awsx.ec2.Vpc(
            f"{name}-vpc",
            awsx.ec2.VpcArgs(
                cidr_block=args.vpc_cidr_block,
                subnet_specs=[
                    awsx.ec2.SubnetSpecArgs(
                        type=awsx.ec2.SubnetType.PUBLIC,
                        cidr_mask=28,
                    ),

                    awsx.ec2.SubnetSpecArgs(
                        type=awsx.ec2.SubnetType.PRIVATE,
                        cidr_mask=28,
                    ),
                    awsx.ec2.SubnetSpecArgs(
                        type=awsx.ec2.SubnetType.ISOLATED,
                        cidr_mask=28,
                    ),
                ],

                nat_gateways=awsx.ec2.NatGatewayConfigurationArgs(
                    strategy=awsx.ec2.NatGatewayStrategy.ONE_PER_AZ
                ),
                tags={
                    "Name": f"{name}-vpc",
                    "Owner": f"{args.owner}",
                }
            ),
            opts=pulumi.ResourceOptions(parent=self),
        )

        # Associate the secondary CIDR block with the VPC created above
        self.secondary_cidr = aws.ec2.VpcIpv4CidrBlockAssociation(
            f"{name}-secondary-cidr",
            vpc_id=self.vpc.vpc_id,
            cidr_block=args.vpc_cidr_block_secondary,
            opts=pulumi.ResourceOptions(
                parent=self,
                depends_on=[self.vpc],
            ),
        )

        self.eip = aws.ec2.Eip(
            f"{name}-eip",
            tags={
                "Name": f"{name}-eip",
                "Owner": f"{args.owner}",
            },
            opts=pulumi.ResourceOptions(
                parent=self,
                depends_on=[self.vpc],
            ),
        )

        self.nat_gateway = aws.ec2.NatGateway(
            f"{name}-nat-gateway",
            aws.ec2.NatGatewayArgs(
                subnet_id=self.vpc.public_subnet_ids[0],
                allocation_id=self.eip.allocation_id,
                tags={
                    "Name": f"{name}-nat-gateway",
                    "Owner": f"{args.owner}",
                }
            ),
            pulumi.ResourceOptions(
                parent=self,
                depends_on=[self.vpc],
            )
        )

        self.register_outputs({})
I’d love to be able to use awsx components as some of the heavy lifting is taken care of, but so far the only thing I can think of is using pure pulumi_aws components and building each piece myself: creating the VPC, then adding the additional CIDR block, followed by the subnets, then everything else like the NAT gateway and internet gateway.
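Roughly, that pure pulumi_aws approach might look something like this minimal sketch (the names and CIDRs below are placeholders, not values from the component above):

import pulumi_aws as aws

# Placeholder CIDRs for illustration only
vpc = aws.ec2.Vpc(
    "vpc",
    cidr_block="10.0.0.0/16",
    enable_dns_hostnames=True,
    enable_dns_support=True,
)

secondary = aws.ec2.VpcIpv4CidrBlockAssociation(
    "secondary-cidr",
    vpc_id=vpc.id,
    cidr_block="10.1.0.0/22",
)

public = aws.ec2.Subnet("public", vpc_id=vpc.id, cidr_block="10.0.0.0/20", map_public_ip_on_launch=True)
private = aws.ec2.Subnet("private", vpc_id=vpc.id, cidr_block="10.0.16.0/20")
# The isolated subnet is carved from the secondary block; referencing the
# association's cidr_block output also gives Pulumi the right ordering.
isolated = aws.ec2.Subnet("isolated", vpc_id=vpc.id, cidr_block=secondary.cidr_block)

igw = aws.ec2.InternetGateway("igw", vpc_id=vpc.id)
eip = aws.ec2.Eip("nat-eip")
nat = aws.ec2.NatGateway("nat", subnet_id=public.id, allocation_id=eip.allocation_id)

public_rt = aws.ec2.RouteTable(
    "public",
    vpc_id=vpc.id,
    routes=[aws.ec2.RouteTableRouteArgs(cidr_block="0.0.0.0/0", gateway_id=igw.id)],
)
private_rt = aws.ec2.RouteTable(
    "private",
    vpc_id=vpc.id,
    routes=[aws.ec2.RouteTableRouteArgs(cidr_block="0.0.0.0/0", nat_gateway_id=nat.id)],
)
aws.ec2.RouteTableAssociation("public", route_table_id=public_rt.id, subnet_id=public.id)
aws.ec2.RouteTableAssociation("private", route_table_id=private_rt.id, subnet_id=private.id)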
b
@icy-dress-83371 how would this look in terraform? How would you instantiate the module?
i
My invocation of the module would look something like this:
locals {
  cluster_name = "${var.name}-${var.environment}"
  region       = var.aws_region
  sliced_azs   = slice(data.aws_availability_zones.available_azs.zone_ids, 0, var.number_of_azs)
  subnet_group_count  = 3

  tags = {
    Environment         = var.environment
  }
}

data "aws_availability_zones" "available_azs" {
  state = "available"
  exclude_names = [ "us-east-1e" ] #us-east-1e was not supporting EKS deployment
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  name                  = "${var.name}-${var.environment}-vpc"
  cidr                  = var.primary_vpc_cidr
  secondary_cidr_blocks = [var.secondary_vpc_cidr]
  azs                   = local.sliced_azs
  enable_nat_gateway    = true
  single_nat_gateway    = true
  enable_dns_hostnames  = true

  private_subnets = [
    for netnumber in range(0, length(local.sliced_azs)): 
        cidrsubnet(var.primary_vpc_cidr, ceil(log(length(local.sliced_azs) * local.subnet_group_count, 2)), netnumber) 
  ]

  public_subnets = [
    for netnumber in range(length(local.sliced_azs), length(local.sliced_azs) * 2): 
        cidrsubnet(var.primary_vpc_cidr, ceil(log(length(local.sliced_azs) * local.subnet_group_count, 2)), netnumber)
  ]

  intra_subnets = [
    for netnumber in range(length(local.sliced_azs) * 2, length(local.sliced_azs) * 3):
        cidrsubnet(var.secondary_vpc_cidr, ceil(log(length(local.sliced_azs) * local.subnet_group_count, 2)), netnumber)
  ]

  tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
  }
  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}-primary" = "shared"
    "kubernetes.io/role/elb"                              = "1"
  }
  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}-primary" = "shared"
    "kubernetes.io/role/internal-elb"                     = "1"
  }
}
This allowed me to set up both the public and private subnets in my primary VPC CIDR, where I will have LBs, NAT and internet gateways, etc. Then I would set up my EKS cluster later to be within the intra subnets, which use my secondary CIDR. This was to help me get ahead of IP exhaustion issues in EKS clusters.
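To make the cidrsubnet math concrete, with assumed numbers (3 AZs and /16 blocks): subnet_group_count = 3 gives 9 subnets, so the newbits value is ceil(log(9, 2)) = 4 and every subnet is a /20. The same carving expressed in Python:

import ipaddress
import math

azs = 3
subnet_group_count = 3
newbits = math.ceil(math.log(azs * subnet_group_count, 2))  # ceil(log(9, 2)) = 4

primary = ipaddress.ip_network("10.0.0.0/16")      # placeholder primary CIDR
secondary = ipaddress.ip_network("100.64.0.0/16")  # placeholder secondary CIDR

primary_subnets = list(primary.subnets(prefixlen_diff=newbits))      # /20s of the primary
secondary_subnets = list(secondary.subnets(prefixlen_diff=newbits))  # /20s of the secondary

private = [str(n) for n in primary_subnets[0:azs]]            # netnumbers 0-2
public = [str(n) for n in primary_subnets[azs:azs * 2]]       # netnumbers 3-5
intra = [str(n) for n in secondary_subnets[azs * 2:azs * 3]]  # netnumbers 6-8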
b
ah, I see. We don’t support secondary CIDR blocks on awsx.ec2.Vpc as a native input. However, you should be able to do this, because it’s just a resource: https://github.com/terraform-aws-modules/terraform-aws-vpc/blob/master/main.tf#L53
give me half an hour and I’ll see if I can come up with something
i
Yeah, I have tried to add it as an additional component in my code above as you can see, and that works fine, but then there is a circular dependency. The aws.ec2.VpcIpv4CidrBlockAssociation requires a vpc_id, but in order for the VPC to be built (at least using awsx.ec2.Vpc) I need to provide subnet specs. I also don’t see how I would set the actual CIDR for each subnet, when it only seems to allow a cidr_mask.
b
You will likely have to define the intra subnet yourself, not as an isolated subnet within the awsx.ec2.Vpc resource
i
Maybe I am just looking at this the wrong way, and am trying to use awsx when I shouldn’t be? Is there a major benefit to trying to use the awsx components rather than just rolling my own with aws classic?
b
awsx is meant to solve the problem of a fairly simple VPC with public and private subnets. For your use case, you can bootstrap the public and private with awsx and then add your intra subnets
something like this
"""An AWS Python Pulumi program"""

import pulumi
import pulumi_aws as aws
import pulumi_awsx as awsx

vpc = awsx.ec2.Vpc(
    "vpc",
    cidr_block="10.0.0.0/16",
    subnet_specs=[
        awsx.ec2.SubnetSpecArgs(
            cidr_mask=22,
            type="public",
        ),
        awsx.ec2.SubnetSpecArgs(
            cidr_mask=22,
            type="private",
        ),
    ],
)

secondary_block = aws.ec2.VpcIpv4CidrBlockAssociation(
    "secondaryBlock",
    vpc_id=vpc.vpc_id,
    # the secondary block must not overlap the primary 10.0.0.0/16
    cidr_block="10.1.0.0/22",
)

intra = aws.ec2.Subnet(
    "intra",
    vpc_id=vpc.vpc_id,
    cidr_block=secondary_block.cidr_block,
)

intra_rt = aws.ec2.RouteTable(
    "intra",
    vpc_id=vpc.vpc_id,
)

intra_rt_associ = aws.ec2.RouteTableAssociation(
    "intra",
    route_table_id=intra_rt.id,
    subnet_id=intra.id,
)
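And if you want one intra subnet per AZ, as in the Terraform module invocation above, a rough sketch building on the same snippet, in place of the single intra subnet (the AZ count, secondary CIDR and /24 prefix are my assumptions; vpc, secondary_block and intra_rt come from the code above):

import ipaddress

# one intra subnet per AZ instead of the single subnet above
azs = aws.get_availability_zones(state="available").names[:3]
# must match secondary_block's cidr_block
intra_cidrs = list(ipaddress.ip_network("10.1.0.0/22").subnets(new_prefix=24))

for i, az in enumerate(azs):
    subnet = aws.ec2.Subnet(
        f"intra-{i}",
        vpc_id=vpc.vpc_id,
        availability_zone=az,
        cidr_block=str(intra_cidrs[i]),
        # wait for the secondary CIDR association before creating the subnet
        opts=pulumi.ResourceOptions(depends_on=[secondary_block]),
    )
    aws.ec2.RouteTableAssociation(
        f"intra-{i}",
        route_table_id=intra_rt.id,
        subnet_id=subnet.id,
    )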
i
Yeah, that makes sense. I guess my only question is: is it worth bootstrapping the first portions with awsx? I guess the only thing I am really gaining is the automatic creation of the route tables and NAT for the public and private subnets.
b
if you’re happy creating your own vpc, you’ll get more flexibility doing it yourself
i
Yeah, it seems so. I think I was putting too much stake in the awsx side of things. Coming from a terraform world, I saw it as the alternative to TF modules. But I think it is really more of an “Easy Button” for the most basic implementation of that resource.
b
personally I never used terraform modules, they broke all the time and they essentially just plumb every possible API element through to the user, so it ended up being harder to understand what was happening. But that’s just me..
i
Yeah, sometimes they aren’t great. The one I used the most was the VPC module, but I think that was just because I didn’t want to write all the rest myself.
Thanks for the insight, and I appreciate your time. BTW… Epic last name…
b
ha! Briggs originates where I grew up, so I run across a lot of them. Where are you based?
i
Toronto, Canada, but my family comes from Scotland
b
welcome to the community in any case! Let me know how else we can help, and sorry for the delay initially