stocky-petabyte-29883
04/12/2023, 2:55 PM
export const cluster = new eks.Cluster(`${stack}-eks-cluster`, {
    name: `${stack}-eks-cluster`,
    skipDefaultNodeGroup: true,
    version: eksConfig.version,
    createOidcProvider: true,
    privateSubnetIds: vpc.privateSubnetIds,
    publicSubnetIds: vpc.publicSubnetIds,
    deployDashboard: false,
    storageClasses: eksConfig.volumeType,
    vpcId: vpc.id,
    useDefaultVpcCni: true,
    instanceRoles: [role],
    nodeAssociatePublicIpAddress: false,
    kubernetesServiceIpAddressRange: eksConfig.kubernetesServiceIpAddressRange,
    tags: {
        Environment: stack,
    },
});
eksConfig.nodegroup.forEach((nodeGroupDetail, index) => {
    eks.createManagedNodeGroup(`${stack}-managed-ng-${index}`, {
        cluster: cluster,
        nodeGroupName: `${stack}-managed-ng-${index}`,
        capacityType: nodeGroupDetail.capacityType,
        instanceTypes: [nodeGroupDetail.type],
        scalingConfig: {
            desiredSize: nodeGroupDetail.desiredCapacity,
            minSize: nodeGroupDetail.minSize,
            maxSize: nodeGroupDetail.maxSize,
        },
        labels: Object.fromEntries(nodeGroupDetail.labels.map(({ key, value }) => [key, value])),
        nodeRole: role,
    }, cluster);
});
I am expecting the nodes in the node group to use the private subnets, but it isn't working that way.
Am I missing something here?

billowy-army-68599
04/12/2023, 2:58 PM
nodeAssociatePublicIpAddress: false,
This is for the default node group, and you're creating another managed node group.
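A minimal sketch of the fix this implies, assuming the optional subnetIds argument on createManagedNodeGroup (its docstring is quoted later in this thread) and the vpc.privateSubnetIds export from the VPC definition below; it would replace the body of the nodegroup forEach above:

// Sketch, not the thread's verbatim code: pass the private subnets explicitly
// so the managed node group never falls back to the full cluster subnet list.
eks.createManagedNodeGroup(`${stack}-managed-ng-${index}`, {
    cluster: cluster,
    nodeGroupName: `${stack}-managed-ng-${index}`,
    subnetIds: vpc.privateSubnetIds, // restrict node placement to private subnets
    capacityType: nodeGroupDetail.capacityType,
    instanceTypes: [nodeGroupDetail.type],
    scalingConfig: {
        desiredSize: nodeGroupDetail.desiredCapacity,
        minSize: nodeGroupDetail.minSize,
        maxSize: nodeGroupDetail.maxSize,
    },
    nodeRole: role,
}, cluster);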
stocky-petabyte-29883
04/12/2023, 2:59 PM

billowy-army-68599
04/12/2023, 3:00 PM
skipDefaultNodeGroup: true
stocky-petabyte-29883
04/12/2023, 3:01 PM

billowy-army-68599
04/12/2023, 3:07 PM

stocky-petabyte-29883
04/12/2023, 3:08 PM
export const vpc = new awsx.ec2.Vpc(`${stack}-eks-vpc`, {
    numberOfAvailabilityZones: eksConfig.vpc.length,
    cidrBlock: eksConfig.cidrRange,
    numberOfNatGateways: eksConfig.vpc.length,
    subnets: eksConfig.vpc.map((instance, index) => {
        return <awsx.ec2.VpcSubnetArgs[]>[{
            type: "private",
            name: `${stack}-private-eks-subnet-${index}`,
            location: {
                availabilityZone: instance.zoneName,
                cidrBlock: instance.privateCidr,
            },
            tags: {
                Environment: stack,
                "kubernetes.io/role/internal-elb": "1",
            },
        },
        {
            type: "public",
            name: `${stack}-public-eks-subnet-${index}`,
            location: {
                availabilityZone: instance.zoneName,
                cidrBlock: instance.publicCidr,
            },
            tags: {
                Environment: stack,
                "kubernetes.io/role/elb": "1",
            },
        }];
    }).flatMap(s => s),
});
billowy-army-68599
04/12/2023, 3:10 PM

stocky-petabyte-29883
04/12/2023, 3:16 PM

billowy-army-68599
04/12/2023, 3:22 PM

stocky-petabyte-29883
04/12/2023, 3:23 PM

billowy-army-68599
04/12/2023, 3:31 PM

stocky-petabyte-29883
04/12/2023, 3:57 PM

billowy-army-68599
04/12/2023, 4:51 PM

stocky-petabyte-29883
04/12/2023, 4:52 PM

billowy-army-68599
04/12/2023, 7:44 PM

stocky-petabyte-29883
04/12/2023, 8:08 PM
/**
 * Make subnetIds optional, since the cluster is required and it contains it.
 *
 * Default subnetIds is chosen from the following list, in order, if the
 * subnetIds arg is not set:
 *   - core.subnetIds
 *   - core.privateSubnetIds
 *   - core.publicSubnetIds
 *
 * This default logic is based on the existing subnet IDs logic of this
 * package: https://git.io/JeM11
 */
subnetIds?: pulumi.Input<pulumi.Input<string>[]>;
I haven't set core.subnetIds explicitly; I guess since subnetIds has all the values, it uses all the subnets for the managed node group.
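Put differently: with the subnetIds arg unset, the lookup falls through to core.subnetIds, which here contains every subnet, public ones included. A hypothetical illustration of that fallback order (SubnetSources and resolveSubnets are made-up names, not pulumi-eks APIs):

// Made-up illustration of the documented fallback chain; none of these
// identifiers exist in pulumi-eks itself.
type SubnetSources = {
    subnetIds?: string[];     // explicit arg on the managed node group
    coreSubnetIds?: string[]; // core.subnetIds: every cluster subnet
    privateIds?: string[];    // core.privateSubnetIds
    publicIds?: string[];     // core.publicSubnetIds
};

function resolveSubnets(s: SubnetSources): string[] | undefined {
    return s.subnetIds ?? s.coreSubnetIds ?? s.privateIds ?? s.publicIds;
}

// No explicit subnetIds, so the all-subnet list wins, public subnets included:
console.log(resolveSubnets({ coreSubnetIds: ["subnet-private-1", "subnet-public-1"] }));

Passing subnetIds explicitly, as sketched earlier in the thread, short-circuits the chain at the first step.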
billowy-army-68599
04/12/2023, 8:16 PM

stocky-petabyte-29883
04/12/2023, 8:17 PM