able-policeman-41860
06/21/2023, 9:09 AM

import * as pulumi from "@pulumi/pulumi";
import * as resources from "@pulumi/azure-native/resources";
import * as network from "@pulumi/azure-native/network";
import * as containerservice from "@pulumi/azure-native/containerservice";
import * as kubernetes from "@pulumi/kubernetes";
// Grab some values from the Pulumi stack configuration (or use defaults)
const projCfg = new pulumi.Config();
const numWorkerNodes = projCfg.getNumber("numWorkerNodes") || 1;
const k8sVersion = projCfg.get("kubernetesVersion") || "1.26.3";
const prefixForDns = projCfg.get("prefixForDns") || "pulumi";
const nodeVmSize = projCfg.get("nodeVmSize") || "standard_B2s";
// The next two configuration values are required (no default can be provided)
const mgmtGroupId = projCfg.require("mgmtGroupId");
const sshPubKey = projCfg.require("sshPubKey");
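// The two required values can be set through the Pulumi CLI before deploying,
// e.g. (placeholder values):
//   pulumi config set mgmtGroupId <azure-ad-group-object-id>
//   pulumi config set sshPubKey "ssh-rsa AAAA..."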
// Create a new Azure Resource Group
const resourceGroup = new resources.ResourceGroup("resourceGroup", {});
// Create a new Azure Virtual Network
const virtualNetwork = new network.VirtualNetwork("virtualNetwork", {
    addressSpace: {
        addressPrefixes: ["10.0.0.0/16"],
    },
    resourceGroupName: resourceGroup.name,
});
// Create a subnet in the virtual network
const subnet1 = new network.Subnet("subnet1", {
    addressPrefix: "10.0.0.0/22",
    resourceGroupName: resourceGroup.name,
    virtualNetworkName: virtualNetwork.name,
});
// Create an Azure Kubernetes Cluster
const managedCluster = new containerservice.ManagedCluster("managedCluster", {
    resourceGroupName: resourceGroup.name,
    addonProfiles: {},
    agentPoolProfiles: [{
        availabilityZones: ["1", "2", "3"],
        count: numWorkerNodes,
        enableNodePublicIP: false,
        mode: "System",
        name: "systempool",
        osType: "Linux",
        osDiskSizeGB: 30,
        type: "VirtualMachineScaleSets",
        vmSize: nodeVmSize,
        vnetSubnetID: subnet1.id,
    }],
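    // NOTE: "0.0.0.0/0" authorizes every IP to reach the API server; restrict
    // this range (or enable a private cluster) for anything beyond a demo.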
    apiServerAccessProfile: {
        authorizedIPRanges: ["0.0.0.0/0"],
        enablePrivateCluster: false,
    },
    dnsPrefix: prefixForDns,
    enableRBAC: true,
    identity: {
        type: "SystemAssigned",
    },
    kubernetesVersion: k8sVersion,
    linuxProfile: {
        adminUsername: "azureuser",
        ssh: {
            publicKeys: [{
                keyData: sshPubKey,
            }],
        },
    },
    networkProfile: {
        networkPlugin: "azure",
        networkPolicy: "azure",
        serviceCidr: "10.96.0.0/16",
        dnsServiceIP: "10.96.0.10",
    },
    aadProfile: {
        enableAzureRBAC: true,
        managed: true,
        adminGroupObjectIDs: [mgmtGroupId],
    },
});
// Build a Kubeconfig to access the cluster
const creds = containerservice.listManagedClusterUserCredentialsOutput({
    resourceGroupName: resourceGroup.name,
    resourceName: managedCluster.name,
});
const encoded = creds.kubeconfigs[0].value;
const decoded = encoded.apply(enc => Buffer.from(enc, "base64").toString());
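// NOTE: the decoded kubeconfig grants full access to the cluster; wrapping it
// with pulumi.secret(decoded) would keep it encrypted in stack state/outputs.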
// Deploy the Percona XtraDB Cluster container image to the Kubernetes cluster
const k8sProvider = new kubernetes.Provider("k8sProvider", {
    kubeconfig: decoded,
});
const perconaXtradbDeployment = new kubernetes.apps.v1.Deployment("percona-xtradb-deployment", {
    metadata: {
        name: "percona-xtradb",
    },
    spec: {
        replicas: 1,
        selector: {
            matchLabels: {
                app: "percona-xtradb",
            },
        },
        template: {
            metadata: {
                labels: {
                    app: "percona-xtradb",
                },
            },
            spec: {
                containers: [
                    {
                        name: "percona-xtradb",
                        image: "percona/percona-xtradb-cluster:8.0",
                        ports: [
                            {
                                containerPort: 3306,
                            },
                        ],
                        env: [
                            {
                                name: "MYSQL_ROOT_PASSWORD",
                                value: "kjhkjhjkhkhllg",
                            },
                        ],
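                        // NOTE: a hard-coded root password ends up in plain
                        // text in the Pulumi state and the cluster spec; a
                        // Kubernetes Secret referenced via
                        // valueFrom.secretKeyRef is safer.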
                        resources: {
                            requests: {
                                memory: "1Gi",
                            },
                            limits: {
                                memory: "1.5Gi",
                            },
                        },
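                        // NOTE: these requests/limits bound only this pod;
                        // node RAM is also consumed by the kubelet and the
                        // kube-system pods (CNI, metrics, etc.), so small
                        // B-series nodes fill up quickly.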
                    },
                ],
            },
        },
    },
}, { provider: k8sProvider });
const perconaXtradbService = new kubernetes.core.v1.Service("percona-xtradb-service", {
    metadata: {
        name: "percona-xtradb",
    },
    spec: {
        selector: {
            app: "percona-xtradb",
        },
        ports: [
            {
                port: 3306,
                targetPort: 3306,
            },
        ],
        type: "LoadBalancer",
    },
}, { provider: k8sProvider });
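// NOTE: type "LoadBalancer" gives the service a public Azure IP, exposing
// MySQL port 3306 to the internet; "ClusterIP" would keep it cluster-internal.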
// Export some values for use elsewhere
export const rgName = resourceGroup.name;
export const networkName = virtualNetwork.name;
export const clusterName = managedCluster.name;
export const kubeconfig = decoded;
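For convenience, the service's external IP could also be exported once Azure assigns it (a sketch using Pulumi's lifted property access; the ingress entry may be briefly undefined while the load balancer is being provisioned):

// Hypothetical extra export: the public IP of the LoadBalancer service.
export const mysqlServiceIP = perconaXtradbService.status.loadBalancer.ingress[0].ip;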
Is there anything wrong with this code structure?
After deploying this, the created cluster uses a high amount of RAM. What could be the reason for that?