white-chef-55657
05/07/2022, 6:24 PM
Using the AdministratorAccess policy:
import * as pulumi from "@pulumi/pulumi";
import * as awsx from "@pulumi/awsx";
import * as aws from "@pulumi/aws";
import * as eks from "@pulumi/eks";

export const createCluster = (stackName: string) => {
    const vpc = new awsx.ec2.Vpc(`${stackName}-eks-cluster`, {
        numberOfAvailabilityZones: "all",
    });

    const rolesSSOAdmin = pulumi.output(aws.iam.getRoles({
        nameRegex: ".*AWSReservedSSO_AWSAdministratorAccess.*",
    }));

    const cluster = new eks.Cluster(stackName, {
        fargate: true,
        vpcId: vpc.id,
        createOidcProvider: true,
        privateSubnetIds: vpc.privateSubnetIds,
        publicSubnetIds: vpc.publicSubnetIds,
        enabledClusterLogTypes: [
            "api",
            "audit",
            "authenticator",
            "controllerManager",
            "scheduler",
        ],
        roleMappings: [
            {
                groups: ["system:masters"],
                roleArn: rolesSSOAdmin.arns[0],
                username: "pulumi:admin-usr",
            },
        ],
    });

    return cluster;
};
error:
eks:index:VpcCni (prod-vpc-cni):
error: Command failed: kubectl apply -f /var/folders/07/53v8pkz52xd_324b5n15b9l40000gn/T/tmp-369306DJMe7fe12yG.tmp
error: You must be logged in to the server (the server has asked for the client to provide credentials)
ideas?

billowy-army-68599
It's likely the kubectl configuration that gets created. When an EKS cluster is created, it also creates a kubeconfig and a provider. I would export the kubeconfig from the EKS cluster and verify your AWS credentials have adequate access. The roleMappings are usually to blame.
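To export it, something like this works (a minimal sketch; the ./cluster import path and the "kubeconfig" export name are just placeholders for however you wire up createCluster):

// index.ts -- sketch only, reusing the createCluster helper posted above
import * as pulumi from "@pulumi/pulumi";
import { createCluster } from "./cluster"; // placeholder module path

const cluster = createCluster(pulumi.getStack());

// Export the generated kubeconfig so you can test it directly:
//   pulumi stack output kubeconfig --show-secrets > kubeconfig.json
//   KUBECONFIG=./kubeconfig.json kubectl get nodes
export const kubeconfig = cluster.kubeconfig;

If kubectl works with that exported kubeconfig but your own AWS credentials still get "You must be logged in to the server", the roleMappings entry (and the SSO role ARN it resolves to) is the place to look.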