sparse-intern-71089
09/14/2020, 3:38 PM

faint-motherboard-95438
09/15/2020, 2:53 PM
If you console.log Pulumi output values, you won't like the result. You can try to use apply, though, something like frontend.spec.apply(spec => console.log(spec.clusterIP)). The Pulumi examples are tested and should work as expected. What's the complete update output?
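A minimal sketch of that suggestion, assuming frontend is a k8s Service already defined elsewhere in the program:
import * as k8s from "@pulumi/kubernetes";

declare const frontend: k8s.core.v1.Service; // assumed to exist elsewhere in the program

// console.log(frontend.spec) prints Pulumi's Output wrapper, not the resolved value.
// Unwrap the value inside apply() once the resource has been created:
frontend.spec.apply(spec => console.log(spec.clusterIP));

// Or surface it as a stack output instead of logging it:
export const clusterIp = frontend.spec.apply(spec => spec.clusterIP);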
hundreds-receptionist-31352
09/15/2020, 5:49 PM

hundreds-receptionist-31352
09/15/2020, 5:51 PM
const namespaceName = clusterSvcsNamespace.metadata.apply(m => m.name);
const nginx = new k8s.helm.v2.Chart("ingress", {
repo: "stable",
chart: "nginx-ingress",
namespace: namespaceName,
values : {
controller: {
kind: "DaemonSet",
image: {
registry: "<http://registry.hub.docker.com|registry.hub.docker.com>",
repository: "roquesao/nginx",
tag: "0.21.0",
runAsUser: "33"
},
service: {
externalTrafficPolicy: "Local",
annotations: {
"<http://service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled|service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled>": "True",
"<http://service.beta.kubernetes.io/aws-load-balancer-internal|service.beta.kubernetes.io/aws-load-balancer-internal>": "true",
"<http://service.beta.kubernetes.io/aws-load-balancer-type|service.beta.kubernetes.io/aws-load-balancer-type>": "nlb"
}
}
}
}
}, { providers: { "kubernetes": eksCluster.provider }});
const name = namespaceName + "/ingress-nginx-ingress-controller";
const frontend = nginx.getResource("v1/Service", name);
const frontendIp = frontend.status.loadBalancer.ingress[0].ip;
I have followed the Pulumi example but in my case it didn't work, not sure why. The NLB is created but I can't get the output.
faint-motherboard-95438
09/15/2020, 6:04 PM

faint-motherboard-95438
09/15/2020, 6:07 PM
For the getResource method, instead of concatenating the namespace and the service name, you should try:
const frontend = nginx.getResource("v1/Service", namespaceName, "ingress-nginx-ingress-controller");
Also ensure the service name you're inputting here is correct.
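A small aside on why the concatenation misbehaves, assuming the namespaceName Output from the snippet above:
import * as pulumi from "@pulumi/pulumi";

declare const namespaceName: pulumi.Output<string>; // from clusterSvcsNamespace.metadata.apply(...) above

// namespaceName is an Output, so "+" runs before the value is known and does not
// produce the literal namespace string:
// const name = namespaceName + "/ingress-nginx-ingress-controller";

// If a combined "<namespace>/<name>" string is ever needed, build it from the Output:
const qualifiedName = pulumi.interpolate`${namespaceName}/ingress-nginx-ingress-controller`;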
hundreds-receptionist-31352
09/15/2020, 6:45 PM

faint-motherboard-95438
09/16/2020, 4:06 AM
How are you using frontendIp? Are you exporting it immediately for stack output? Is it inside a ComponentResource? Are you trying to use it elsewhere in your code?
hundreds-receptionist-31352
09/16/2020, 12:33 PM

faint-motherboard-95438
09/16/2020, 2:36 PM
You can use the frontend resource, but you need to add, in its CustomResourceOptions, a dependsOn like { dependsOn: [frontend] }, so you're ensuring it is provisioned and you can query its properties. You can't only wait on the Chart provisioning at the moment; a fix is ongoing though.
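A minimal sketch of that dependsOn pattern, assuming the frontend Service obtained via getResource above and a hypothetical consumer resource:
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";

declare const frontend: pulumi.Output<k8s.core.v1.Service>; // from nginx.getResource(...) above

// Reading a property gives an Output that resolves once the Service exists:
const frontendIp = frontend.status.loadBalancer.ingress[0].ip;

// Any resource that consumes frontendIp should also declare the dependency explicitly
// (hypothetical consumer, shown only to illustrate where dependsOn goes):
// new some.Resource("consumer", { address: frontendIp }, { dependsOn: [frontend] });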
hundreds-receptionist-31352
09/16/2020, 3:26 PM

faint-motherboard-95438
09/16/2020, 3:27 PM

hundreds-receptionist-31352
09/16/2020, 3:35 PM
"use strict";
import * as helm from "@pulumi/kubernetes/helm";
import * as eks from "@pulumi/eks";
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as k8s from "@pulumi/kubernetes";
import AWS from 'aws-sdk'; // To set the AWS credentials and AWS Region.
AWS.config.update({
region: 'us-east-1'
});
function getLoadBalancer():any {
var elb1 = new AWS.ELBv2();
const params={}
return new Promise(function(resolve, reject) {
elb1.describeLoadBalancers(params, function(err: any, data: unknown) {
if (err) {
reject(err); // an error occurred
}
else {
resolve(data);
}
});
});
}
export async function getLB() {
let result = await getLoadBalancer();
return result.LoadBalancers[0].LoadBalancerArn;
}
async function installNginxIngressController(eksCluster: eks.Cluster ) {
const clusterSvcsNamespace = new k8s.core.v1.Namespace("nginx-ingress-namespace", undefined, { provider: eksCluster.provider});
const namespaceName = clusterSvcsNamespace.metadata.apply(m => m.name);
const nginx = new k8s.helm.v2.Chart("ingress", {
repo: "stable",
chart: "nginx-ingress",
namespace: namespaceName,
values : {
controller: {
kind: "DaemonSet",
image: {
registry: "<http://registry.hub.docker.com|registry.hub.docker.com>",
repository: "roquesao/nginx",
tag: "0.21.0",
runAsUser: "33"
},
service: {
externalTrafficPolicy: "Local",
annotations: {
"<http://service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled|service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled>": "True",
"<http://service.beta.kubernetes.io/aws-load-balancer-internal|service.beta.kubernetes.io/aws-load-balancer-internal>": "true",
"<http://service.beta.kubernetes.io/aws-load-balancer-type|service.beta.kubernetes.io/aws-load-balancer-type>": "nlb",
"<http://service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags|service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags>": "apigateway-vpc-link=True"
}
},
podAnnotations: {
"<http://ad.datadoghq.com/nginx-ingress-controller.init_configs|ad.datadoghq.com/nginx-ingress-controller.init_configs>": "[{},{}]",
"<http://ad.datadoghq.com/nginx-ingress-controller.check_names|ad.datadoghq.com/nginx-ingress-controller.check_names>": '["nginx","nginx_ingress_controller"]',
"<http://ad.datadoghq.com/nginx-ingress-controller.instances|ad.datadoghq.com/nginx-ingress-controller.instances>": '[{"nginx_status_url":"<http://%%host%>%/nginx_status"},{"prometheus_url":"<http://%%host%>%:10254/metrics"}]',
"<http://ad.datadoghq.com/nginx-ingress-controller.logs|ad.datadoghq.com/nginx-ingress-controller.logs>" : '[{"service": "controller","source":"nginx-ingress-controller"}]',
"<http://prometheus.io/port|prometheus.io/port>": "10254",
"<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
}
}
}
}, { providers: { "kubernetes": eksCluster.provider }});
return nginx;
}
export async function VpcLink(eksCluster: eks.Cluster) {
const nginx= await installNginxIngressController(eksCluster);
const VpcLink = new aws.apigateway.VpcLink("vpc-eks", {
description: "vpc-eks",
targetArn: getLB(),
},{dependsOn:nginx });
}
hundreds-receptionist-31352
09/16/2020, 3:36 PM

hundreds-receptionist-31352
09/16/2020, 3:36 PM

hundreds-receptionist-31352
09/16/2020, 3:37 PM
pulumi:pulumi:Stack (eks-aws-eks):
TypeError: Cannot read property 'LoadBalancerArn' of undefined
faint-motherboard-95438
09/16/2020, 3:51 PM
• You don't need aws-sdk for credentials and config, Pulumi has a provider to configure this
• You should use a ComponentResource to group what you're trying to achieve here
• Avoid export if you are not actually using the function elsewhere
• You are mixing `async`/`await`, Promise and Pulumi components, that can't end well
• You are mixing Pulumi with aws-sdk to manage your resources (used in getLoadBalancer), that won't work; rely only on Pulumi if you want to use it.
I don't have the time to rewrite your code to show you, but that's basically what you should do here (starting with a ComponentResource object and dropping the aws-sdk stuff)
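For the first point, a short sketch of the provider-based alternative to AWS.config.update, reusing the us-east-1 region from the code above:
import * as aws from "@pulumi/aws";

// Either set the region in stack config: `pulumi config set aws:region us-east-1`,
// or create an explicit provider and hand it to the resources that need it:
const awsProvider = new aws.Provider("aws-us-east-1", { region: "us-east-1" });

// e.g. new aws.apigateway.VpcLink("vpc-eks", { ... }, { provider: awsProvider });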
hundreds-receptionist-31352
09/16/2020, 4:14 PM

faint-motherboard-95438
09/17/2020, 5:19 AM

faint-motherboard-95438
09/17/2020, 5:20 AM
// import * as aws from "@pulumi/aws"
import * as k8s from "@pulumi/kubernetes"
import * as pulumi from "@pulumi/pulumi"
export interface NginxIngressArgs {
namespaceName?: string
}
export class NginxIngress extends pulumi.ComponentResource {
public readonly frontendIp: pulumi.Output<string>
public readonly service: pulumi.Output<k8s.core.v1.Service>
constructor (
name: string,
args: NginxIngressArgs,
opts?: pulumi.ComponentResourceOptions,
) {
super(`${pulumi.getProject()}:NginxIngress`, name, {}, opts)
const { namespaceName = "default" } = args
if (namespaceName !== "default") {
const namespace = new k8s.core.v1.Namespace("nginx-ingress-namespace", {
metadata: {
name: namespaceName,
},
}, { parent: this })
}
const nginx = new k8s.helm.v2.Chart("ingress", {
repo: "stable",
chart: "nginx-ingress",
namespace: namespaceName,
values : {
controller: {
kind: "DaemonSet",
image: {
registry: "<http://registry.hub.docker.com|registry.hub.docker.com>",
repository: "roquesao/nginx",
tag: "0.21.0",
runAsUser: "33",
},
service: {
externalTrafficPolicy: "Local",
annotations: {
"<http://service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled|service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled>": "True",
"<http://service.beta.kubernetes.io/aws-load-balancer-internal|service.beta.kubernetes.io/aws-load-balancer-internal>": "true",
"<http://service.beta.kubernetes.io/aws-load-balancer-type|service.beta.kubernetes.io/aws-load-balancer-type>": "nlb",
"<http://service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags|service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags>": "apigateway-vpc-link=True",
},
},
podAnnotations: {
"<http://ad.datadoghq.com/nginx-ingress-controller.init_configs|ad.datadoghq.com/nginx-ingress-controller.init_configs>": "[{},{}]",
"<http://ad.datadoghq.com/nginx-ingress-controller.check_names|ad.datadoghq.com/nginx-ingress-controller.check_names>": '["nginx","nginx_ingress_controller"]',
"<http://ad.datadoghq.com/nginx-ingress-controller.instances|ad.datadoghq.com/nginx-ingress-controller.instances>": '[{"nginx_status_url":"<http://%%host%>%/nginx_status"},{"prometheus_url":"<http://%%host%>%:10254/metrics"}]',
"<http://ad.datadoghq.com/nginx-ingress-controller.logs|ad.datadoghq.com/nginx-ingress-controller.logs>" : '[{"service": "controller","source":"nginx-ingress-controller"}]',
"<http://prometheus.io/port|prometheus.io/port>": "10254",
"<http://prometheus.io/scrape|prometheus.io/scrape>": "true",
},
},
},
}, { parent: this })
this.service = nginx.getResource(
"v1/Service",
namespaceName,
"ingress-nginx-ingress-controller",
)
// const vpcLink = new aws.apigateway.VpcLink("vpc-eks", {
// description: "vpc-eks",
// targetArn: ???, // No idea how to get this one
// },{ dependsOn: [this.service] })
this.frontendIp = this.service.status.loadBalancer.ingress[0].ip
this.registerOutputs({
service: this.service,
})
}
}
faint-motherboard-95438
09/17/2020, 5:20 AM
const cluster = new eks.Cluster(...)
const providers = {
kubernetes: cluster.provider,
}
const ingressController = new NginxIngress("ingress-controller", {
namespaceName: "nginx",
}, { providers })
// do something with `ingressController.frontendIp`
// that's an output and you need to `dependsOn: [ingressController.service]` wherever you use it
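For example, a minimal way to surface it as a stack output (a sketch, assuming the snippet above):
// Expose the controller's address as a stack output; downstream resources should
// add { dependsOn: [ingressController.service] } as noted above.
export const ingressFrontendIp = ingressController.frontendIp;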