thankful-notebook-32588
11/14/2023, 9:01 AMnpm fund
for details
found 0 vulnerabilities
apple@APPLEs-MacBook-Pro k8sNov0723 % pulumi up
Previewing update (dev)
View in Browser (Ctrl+O): https://app.pulumi.com/Dhanesh/k8sGCP/dev/previews/d0f6f6fe-8b49-4444-8334-c5d379e68751
Type Name Plan
pulumipulumiStack k8sGCP-dev
+ ├─ pulumiproviderskubernetes k8s-provider create
+ ├─ kubernetescore/v1Service nginx-service create
+ └─ kubernetesapps/v1Deployment nginx-deployment create
Outputs:
+ nginxLoadBalancerAddress: outputstring
Resources:
+ 3 to create
6 unchanged
Do you want to perform this update? yes
Updating (dev)
View in Browser (Ctrl+O): https://app.pulumi.com/Dhanesh/k8sGCP/dev/updates/3
Type Name Status Info
*pulumipulumiStack k8sGCP-dev failed 1 error*
+ ├─ pulumiproviderskubernetes k8s-provider created (1s)
+ ├─ kubernetescore/v1Service nginx-service created (72s)
+ └*─ kubernetesapps/v1Deployment nginx-deployment creating failed 1 error*
Diagnostics:
pulumipulumiStack (k8sGCP-dev):
error: update failed
kubernetesapps/v1Deployment (nginx-deployment):
error: 4 errors occurred:
* resource default/nginx-deployment was successfully created, but the Kubernetes API server reported that it failed to fully initialize or become live: 'nginx-deployment' timed out waiting to be Ready
* [MinimumReplicasUnavailable] Deployment does not have minimum availability.
* Minimum number of live Pods was not attained
* [Pod nginx-deployment-68b9d8bc68-5frrb]: containers with unready status: [nginx] -- [ImagePullBackOff] Back-off pulling image "nginx:1.19.10"
Outputs:
clusterId : "projects/k8snov0723/locations/us-central1/clusters/gke-cluster-a740275"
clusterName : "gke-cluster-a740275"
kubeconfig : [secret]
networkId : "projects/k8snov0723/global/networks/gke-network-8f00948"
networkName : "gke-network-8f00948"
+ nginxLoadBalancerAddress: "35.239.14.51"
Resources:
+ 2 created
6 unchanged
Duration: 10m13s
What's wrong here? The index.ts file follows below:
#######################################################################################################################
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as k8s from "@pulumi/kubernetes";
// Read provider-scoped (gcp:*) configuration values.
const providerCfg = new pulumi.Config("gcp");
const gcpProject = providerCfg.require("project");
// Fall back to us-central1 when gcp:region is not configured.
const gcpRegion = providerCfg.get("region") || "us-central1";

// Read stack-level configuration values, with defaults.
const cfg = new pulumi.Config();
// Number of nodes to create per zone in the node pool (default: 1).
const nodesPerZone = cfg.getNumber("nodesPerZone") || 1;
// Provision the VPC that will host the GKE cluster.
// Subnets are created explicitly below rather than auto-created.
const gkeNetwork = new gcp.compute.Network("gke-network", {
    description: "A virtual network for your GKE cluster(s)",
    autoCreateSubnetworks: false,
});
// Carve a node subnet out of the VPC created above.
// privateIpGoogleAccess lets private-IP nodes reach Google APIs.
const gkeSubnet = new gcp.compute.Subnetwork("gke-subnet", {
    network: gkeNetwork.id,
    ipCidrRange: "10.128.0.0/12",
    privateIpGoogleAccess: true,
});
// Create a new GKE cluster
const gkeCluster = new gcp.container.Cluster("gke-cluster", {
addonsConfig: {
dnsCacheConfig: {
enabled: true,
},
},
binaryAuthorization: {
evaluationMode: "PROJECT_SINGLETON_POLICY_ENFORCE",
},
datapathProvider: "ADVANCED_DATAPATH",
description: "A GKE cluster",
initialNodeCount: 1,
ipAllocationPolicy: {
clusterIpv4CidrBlock: "/14",
servicesIpv4CidrBlock: "/20",
},
location: gcpRegion,
masterAuthorizedNetworksConfig: {
cidrBlocks: [{
cidrBlock: "0.0.0.0/0",
displayName: "All networks",
}],
},
network: gkeNetwork.name,
networkingMode: "VPC_NATIVE",
privateClusterConfig: {
enablePrivateNodes: true,
enablePrivateEndpoint: false,
masterIpv4CidrBlock: "10.100.0.0/28",
},
removeDefaultNodePool: true,
releaseChannel: {
channel: "STABLE",
},
subnetwork: gkeSubnet.name,
workloadIdentityConfig: {
workloadPool: ${gcpProject}.svc.id.goog
,
},
});
// Create a service account for the node pool
const gkeNodepoolSa = new gcp.serviceaccount.Account("gke-nodepool-sa", {
accountId: pulumi.interpolate ${gkeCluster.name}-np-1-sa
,
displayName: "Nodepool 1 Service Account",
});
// Attach a node pool to the cluster (the default pool was removed above).
// Nodes run as the dedicated service account with the broad cloud-platform
// scope; fine-grained access is governed by IAM on that account.
const gkeNodepool = new gcp.container.NodePool("gke-nodepool", {
    cluster: gkeCluster.id,
    nodeCount: nodesPerZone,
    nodeConfig: {
        serviceAccount: gkeNodepoolSa.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
});
// Build a kubeconfig string for accessing the cluster. This is a YAML
// template literal: its exact whitespace is significant, and it uses the
// gke-gcloud-auth-plugin exec credential helper for authentication.
// pulumi.interpolate resolves the embedded Outputs (CA cert, endpoint, name).
const clusterKubeconfig = pulumi.interpolate `apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ${gkeCluster.masterAuth.clusterCaCertificate}
server: https://${gkeCluster.endpoint}
name: ${gkeCluster.name}
contexts:
- context:
cluster: ${gkeCluster.name}
user: ${gkeCluster.name}
name: ${gkeCluster.name}
current-context: ${gkeCluster.name}
kind: Config
preferences: {}
users:
- name: ${gkeCluster.name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: gke-gcloud-auth-plugin
installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
provideClusterInfo: true
`;
// Export stack outputs for use elsewhere (CLI `pulumi stack output`,
// stack references, CI pipelines).
export const networkName = gkeNetwork.name;
export const networkId = gkeNetwork.id;
export const clusterName = gkeCluster.name;
export const clusterId = gkeCluster.id;
// The kubeconfig contains credentials material; it is exported as a value
// and shows as [secret] in the update log above.
export const kubeconfig = clusterKubeconfig;
const k8sProvider = new k8s.Provider("k8s-provider",{
kubeconfig: kubeconfig
});
const nginxDeployment = new k8s.apps.v1.Deployment("nginx-deployment", {
metadata: {
name: "nginx-deployment",
labels: { app: "nginx" },
},
spec: {
replicas: 2,
selector: { matchLabels: { app: "nginx" } },
template: {
metadata: { labels{app "nginx"} },
spec: {
containers:[
{
name: "nginx",
image: "nginx:1.19.10",
ports: [{ containerPort: 80 }],
},
],
},
},
},
}, { provider: k8sProvider});
// Expose the nginx deployment externally through a LoadBalancer service.
const nginxService = new k8s.core.v1.Service(
    "nginx-service",
    {
        metadata: { name: "nginx-service" },
        spec: {
            type: "LoadBalancer",
            selector: { app: "nginx" },
            ports: [{ port: 80, targetPort: 80 }],
        },
    },
    { provider: k8sProvider },
);

// External IP assigned to the service by the GCP load balancer
// (pulumi lifts the property access through the Output).
export const nginxLoadBalancerAddress = nginxService.status.loadBalancer.ingress[0].ip;
############################################################################################################salmon-account-74572
11/14/2023, 1:56 PM