This message was deleted.
# kubernetes
s
This message was deleted.
s
This is my `eks.py`:
import pulumi
import pulumi_eks as eks

# Retrieve configuration values from Pulumi configuration
# (namespace "pulumi-ec2"; require() raises a ConfigMissingError if the key is absent).
config_ec2 = pulumi.Config("pulumi-ec2")
# EC2 instance type for the EKS worker nodes — consumed by create_eks_cluster().
eks_instance_type = config_ec2.require("eks-cluster_instance_type")
# Kubernetes version for the EKS control plane — consumed by create_eks_cluster().
eks_cluster_version = config_ec2.require("eks-cluster_version")

# Create an EKS cluster
def create_eks_cluster(private_subnets, public_subnets, vpc_id, eks_worker_role, eks_cluster_role):
    """Create an EKS cluster in the given VPC and export its kubeconfig.

    Args:
        private_subnets: subnet resources whose ``.id`` gives the private subnet IDs.
        public_subnets: subnet resources whose ``.id`` gives the public subnet IDs.
        vpc_id: ID of the VPC to place the cluster in.
        eks_worker_role: IAM role assumed by the worker nodes.
        eks_cluster_role: IAM role assumed by the EKS control plane.

    Returns:
        The ``eks.Cluster`` resource. (Bug fix: this function previously had no
        return statement, so the ``eks_cluster = create_eks_cluster(...)``
        assignment in __main__.py received ``None``.)
    """
    eks_cluster = eks.Cluster("eks-cluster",
                              vpc_id=vpc_id,
                              private_subnet_ids=[subnet.id for subnet in private_subnets],
                              public_subnet_ids=[subnet.id for subnet in public_subnets],
                              create_oidc_provider=False,  # check
                              skip_default_node_group=False,  # check
                              # NOTE(review): when instance_role is supplied, pulumi-eks
                              # manages the aws-auth ConfigMap mapping for that role itself;
                              # the explicit role_mappings entry below duplicates it and is a
                              # plausible cause of nodes failing to join — TODO confirm
                              # against the pulumi-eks Cluster docs and try removing it.
                              instance_role=eks_worker_role,  # check
                              service_role=eks_cluster_role, # check
                              #instance_profile_name=iam_instance_profile,
                              role_mappings=[
                                  {
                                      'groups': ['system:bootstrappers', 'system:nodes'],
                                      'rolearn': eks_worker_role.arn,
                                      'username': 'system:node:{{EC2PrivateDNSName}}',
                                  }
                              ],
                              vpc_cni_options=eks.VpcCniOptionsArgs(
                                  warm_ip_target=5,
                              ),
                              instance_type=eks_instance_type,
                              node_associate_public_ip_address=False,
                              desired_capacity=3,
                              min_size=1,
                              max_size=3,
                              endpoint_public_access=True,
                              version=eks_cluster_version,
                              enabled_cluster_log_types=["api", "audit", "authenticator"],
                              tags={'Name': 'pulumi-eks-cluster'})

    # Export the cluster's kubeconfig.
    pulumi.export("kubeconfig", eks_cluster.kubeconfig)
    pulumi.export('cluster-name', eks_cluster.eks_cluster.name)

    # Bug fix: return the cluster so the caller's assignment is not None.
    return eks_cluster
and this is my `iam.py`:
import pulumi_aws as aws
from pulumi_aws import iam
from pulumi import log
import json


# Func to create an IAM role for SSM
def create_iam_role_ssm():
    """Provision an EC2 IAM role with SSM core access and wrap it in an instance profile.

    Returns:
        The aws.iam.InstanceProfile bound to the newly created EC2 role.
    """
    # Trust policy: only EC2 instances may assume this role.
    ec2_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "sts:AssumeRole",
                "Principal": {"Service": "ec2.amazonaws.com"},
                "Effect": "Allow",
                "Sid": "",
            }
        ],
    }

    ec2_role = aws.iam.Role("ec2Role", assume_role_policy=json.dumps(ec2_trust_policy))

    # Attach the AWS-managed policy that enables Systems Manager management.
    aws.iam.RolePolicyAttachment(
        "rolePolicyAttachment",
        policy_arn="arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
        role=ec2_role.name,
    )

    # Instance profile through which EC2 instances carry the role.
    return aws.iam.InstanceProfile("instanceProfile", role=ec2_role.name)


def eks_cluster_role():
    """Create the IAM role assumed by the EKS control plane, with its managed policies attached.

    Returns:
        The iam.Role resource named 'EKS-Cluster-Role'.
    """
    log.info('[base.iam.eks_cluster_role]')

    # Trust policy: only the EKS service may assume this role.
    trust_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Action': 'sts:AssumeRole',
                'Principal': {'Service': 'eks.amazonaws.com'},
                'Effect': 'Allow',
                'Sid': '',
            }
        ],
    }

    cluster_role = iam.Role(
        'eks-iam-role',
        name='EKS-Cluster-Role',
        assume_role_policy=json.dumps(trust_policy),
    )

    # AWS-managed policies required by the EKS control plane.
    attachments = [
        ('eks-service-policy-attachment', 'arn:aws:iam::aws:policy/AmazonEKSServicePolicy'),
        ('eks-cluster-policy-attachment', 'arn:aws:iam::aws:policy/AmazonEKSClusterPolicy'),
    ]
    for resource_name, policy_arn in attachments:
        iam.RolePolicyAttachment(resource_name, role=cluster_role.id, policy_arn=policy_arn)

    return cluster_role

def eks_worker_role():
    """Create the IAM role assumed by the EKS worker nodes, with node policies attached.

    Returns:
        The iam.Role resource named 'EKS-Worker-Role'.
    """
    log.info('[base.iam.eks_worker_role]')

    # Trust policy: only EC2 instances may assume this role.
    trust_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Action': 'sts:AssumeRole',
                'Principal': {'Service': 'ec2.amazonaws.com'},
                'Effect': 'Allow',
                'Sid': '',
            }
        ],
    }

    worker_role = iam.Role(
        'ec2-nodegroup-iam-role',
        name='EKS-Worker-Role',
        assume_role_policy=json.dumps(trust_policy),
    )

    # AWS-managed policies worker nodes need: node operation, VPC CNI networking,
    # and read-only pulls from ECR.
    attachments = [
        ('eks-workernode-policy-attachment', 'arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy'),
        ('eks-cni-policy-attachment', 'arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy'),
        ('ec2-container-ro-policy-attachment', 'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly'),
    ]
    for resource_name, policy_arn in attachments:
        iam.RolePolicyAttachment(resource_name, role=worker_role.id, policy_arn=policy_arn)

    return worker_role
I do create all the necessary IAM roles and policies, and I even see them attached in the AWS Console (to both the workers and the control plane), but the nodes aren't joining the cluster 😞
my `__main__.py`:
# Create EKS cluster
# NOTE(review): these assignments rebind the names eks_worker_role and
# eks_cluster_role from the imported functions to their return values,
# shadowing the functions. It works because each is called only once,
# but distinct variable names (e.g. worker_role) would be clearer.
eks_worker_role = eks_worker_role()
eks_cluster_role = eks_cluster_role()
# NOTE(review): confirm create_eks_cluster actually returns the cluster —
# in the eks.py shown above it has no return statement, so eks_cluster
# would be None here.
eks_cluster = create_eks_cluster(vpc_resources['private_subnets'],
                                 vpc_resources['public_subnets'],
                                 vpc_resources['vpc'].id,
                                 eks_worker_role,
                                 eks_cluster_role)