ancient-night-64850
05/20/2021, 4:28 PMwide-holiday-45783
05/20/2021, 5:26 PMmagnificent-scientist-71902
05/20/2021, 5:59 PMmagnificent-scientist-71902
05/20/2021, 6:07 PMmagnificent-scientist-71902
05/20/2021, 6:10 PMimport * as pulumi from "@pulumi/pulumi";
import * as k8s from "@pulumi/kubernetes";
let pulumiConfig = new pulumi.Config();
export type LagoonCoreConfig = {
kubeconfig: any;
gitlab: {
token: pulumi.Output<string>;
}
elasticsearch: {
install: boolean;
adminUsername: string;
adminPassword: pulumi.Output<string>;
}
kibana: {
install: boolean;
externalHost: pulumi.Output<string>;
accountUsername: string;
accountPassword: pulumi.Output<string>;
},
registry: {
hostname: pulumi.Output<string>;
harborAdminPassword: pulumi.Output<string>;
},
dnsBaseName: any;
}
const clusterStackRef = new pulumi.StackReference(pulumiConfig.require("clusterStackRef"));
const dnsBaseName = pulumiConfig.get("dnsBaseName") || clusterStackRef.getOutput("clusterDnsName");
export const lagoonconfig: LagoonCoreConfig = {
// Infrastructure / Networking
kubeconfig: clusterStackRef.getOutput("kubeconfig"),
gitlab: {
token: pulumiConfig.requireSecret("gitlabToken"),
},
elasticsearch: {
install: true,
adminUsername: 'admin',
adminPassword: pulumiConfig.requireSecret("elasticsearchAdminPassword")
},
kibana: {
install: true,
externalHost: pulumi.interpolate `kibana.${dnsBaseName}`,
accountUsername: "kibanaserver",
accountPassword: pulumiConfig.requireSecret("kibanaPassword")
},
registry: {
hostname: pulumi.interpolate `registry.${dnsBaseName}`,
harborAdminPassword: pulumiConfig.requireSecret("harborAdminPassword")
},
dnsBaseName: dnsBaseName
};
// Create the k8s provider with the kubeconfig.
export const k8sProvider = new k8s.Provider("k8sProvider", { kubeconfig: lagoonconfig.kubeconfig });
export default lagoonconfig;
And the file that deploys the Harbor chart is below. Every time I do a pulumi up, the secret in 'harborAdminPassword' is different.
import * as k8s from "@pulumi/kubernetes";
import { k8sProvider, lagoonconfig } from "../config";
const hostname = lagoonconfig.registry.hostname;
const url = `https://${hostname}`;
export type HarborOutput = {
chart: k8s.helm.v3.Chart;
registryUrl: string;
};
const ns = new k8s.core.v1.Namespace("registry", {
metadata: { name: "registry" },
}, { provider: k8sProvider })
export const createRegistry = (): HarborOutput => {
const registryChart = new k8s.helm.v3.Chart("registry",
{
namespace: ns.metadata.name,
chart: "harbor",
version: "1.5.2",
fetchOpts: { repo: "https://helm.goharbor.io" },
values: {
harborAdminPassword: lagoonconfig.registry.harborAdminPassword,
expose: {
tls: {
enabled: true,
certSource: "secret",
secret: {
secretName: "registry-harbor-tls",
}
},
ingress: {
annotations: {
"<http://kubernetes.io/ingress.class|kubernetes.io/ingress.class>": "nginx",
"<http://kubernetes.io/tls-acme|kubernetes.io/tls-acme>": "true"
},
hosts: {
core: lagoonconfig.registry.hostname
}
}
},
externalUrl: url,
clair: {
enabled: false
},
notary: {
enabled: false
},
trivy: {
enabled: true
}
},
},
{providers: {kubernetes: k8sProvider}},
);
return {
chart: registryChart,
registryUrl: url
};
};
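If the diff that keeps showing up is on the chart-rendered Secret holding HARBOR_ADMIN_PASSWORD, one likely cause (an assumption, not something visible in the snippet above) is that the Harbor chart generates random internal secrets (core secret, XSRF key, jobservice and registry secrets) on every template render, and helm.v3.Chart re-renders on every update. A minimal sketch of pinning those values from stack config so the rendered manifests stay stable; the config keys are hypothetical and the chart value names should be verified against chart 1.5.2:

```typescript
import * as pulumi from "@pulumi/pulumi";

const cfg = new pulumi.Config();

// Extra values to merge into the chart's existing `values` block.
// Note: the chart expects `secretKey` to be exactly 16 characters and `core.xsrfKey` 32.
export const pinnedHarborSecrets = {
    secretKey: cfg.requireSecret("harborSecretKey"),
    core: {
        secret: cfg.requireSecret("harborCoreSecret"),
        xsrfKey: cfg.requireSecret("harborXsrfKey"),
    },
    jobservice: {
        secret: cfg.requireSecret("harborJobserviceSecret"),
    },
    registry: {
        secret: cfg.requireSecret("harborRegistrySecret"),
    },
};
```

These keys would sit alongside `harborAdminPassword` in the existing `values` object, e.g. `values: { harborAdminPassword: ..., ...pinnedHarborSecrets, ... }`.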
salmon-egg-38815
05/21/2021, 4:25 AMusing System.Threading.Tasks;
using Pulumi;
using Azure = Pulumi.AzureNative;
using AzureAD = Pulumi.AzureAD;
class WorkingStack : Stack
{
public WorkingStack()
{
var subscriptionId = "12345678-1234-1234-1234-1234567890ab";
var existingRgId = $"/subscriptions/{subscriptionId}/resourceGroups/some-existing-rg";
var rgContributorId = $"{existingRgId}/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c";
var contributorRoleDef = Azure.Authorization.RoleDefinition.Get("contributorRoleDef",rgContributorId);
var globalAdmins = AzureAD.Group.Get("globalAdmins","f22ac475-c1e3-4e21-b9a8-4f50f473278c");
var assignment1 = new Azure.Authorization.RoleAssignment("assignment1", new Azure.Authorization.RoleAssignmentArgs
{
PrincipalId = globalAdmins.Id,
RoleDefinitionId = contributorRoleDef.Id,
Scope = existingRgId
});
}
}
but if I try to do the same thing with a created resource group:
class BrokenStack : Stack
{
public BrokenStack()
{
var rg = new Azure.Resources.ResourceGroup("rg", new Azure.Resources.ResourceGroupArgs
{
Location = "UK South"
});
var rgContributorId = rg.Id.Apply(id => $"{id}/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c");
var contributorRoleDef = Azure.Authorization.RoleDefinition.Get("contributorRoleDef",rgContributorId);
var globalAdmins = AzureAD.Group.Get("grp","f22ac475-c1e3-4e21-b9a8-4f50f473278c");
var assignment1 = new Azure.Authorization.RoleAssignment("assignment1", new Azure.Authorization.RoleAssignmentArgs
{
PrincipalId = globalAdmins.Id,
RoleDefinitionId = contributorRoleDef.Id,
Scope = rg.Id
});
}
}
then it fails:
Diagnostics:
azure-native:authorization:RoleDefinition (contributorRoleDef):
error: azure-native:authorization:RoleDefinition resource 'contributorRoleDef' has a problem: missing required property 'scope'
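A possible workaround, sketched in TypeScript for consistency with the other snippets on this page (in C# the change amounts to passing `rgContributorId` directly as `RoleDefinitionId` instead of calling `RoleDefinition.Get`): the `Get` most likely fails because the provider has to parse the `scope` out of a concrete resource ID, and the ID built from a resource group created in the same program is still an unknown Output at preview time. Since Contributor is a built-in role with a fixed GUID, the lookup can be skipped entirely:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure-native";
import * as azuread from "@pulumi/azuread";

// Well-known GUID of the built-in Contributor role.
const contributorRoleGuid = "b24988ac-6180-42a0-ab88-20f7382dd24c";

const rg = new azure.resources.ResourceGroup("rg", { location: "UK South" });

const globalAdmins = azuread.Group.get("globalAdmins", "f22ac475-c1e3-4e21-b9a8-4f50f473278c");

// Build the role definition ID from the new resource group's ID; no lookup is needed,
// so nothing has to resolve before the resource group exists.
const assignment1 = new azure.authorization.RoleAssignment("assignment1", {
    principalId: globalAdmins.id,
    roleDefinitionId: pulumi.interpolate`${rg.id}/providers/Microsoft.Authorization/roleDefinitions/${contributorRoleGuid}`,
    scope: rg.id,
});
```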
hundreds-kite-52072
05/24/2021, 12:52 PMhundreds-kite-52072
05/24/2021, 6:33 PMpulumi up
I'm getting the following error: error: Error: invocation of aws:ec2/getVpc:getVpc returned an error: invoking aws:ec2/getVpc:getVpc: 1 error occurred:
* no matching VPC found
Searching for that online takes me down a rabbit hole of setting up a VPC, which I'm nervous about getting into: there's quite a bit of stuff at the next level of abstraction down that I don't understand, and I don't want to do anything that will break any existing infrastructure. Any suggestions on how to work around / fix this error?
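One low-risk direction to try (a sketch only; it assumes the failing lookup is searching for a default VPC that doesn't exist in this account/region): look the existing VPC up explicitly by ID from stack config, so nothing new gets created or changed. `vpcId` is a hypothetical config key:

```typescript
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";

const cfg = new pulumi.Config();

// Look up the VPC you already have instead of relying on a default-VPC search.
const existingVpc = aws.ec2.getVpc({ id: cfg.require("vpcId") });

export const vpcCidr = existingVpc.then(vpc => vpc.cidrBlock);
```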
nutritious-church-27230
05/24/2021, 7:44 PMimportant-sandwich-62391
05/25/2021, 12:17 AMstraight-tailor-56799
05/25/2021, 11:04 PMsubprocess.Popen
. This shell script does gcloud auth
, pulumi login
and pulumi new
Back in the Python application I am trying to use Pulumi Automation API, and it's complaining about missing Google credentials. Note: I am using Google Storage as my managed backend… Am I doing anything fundamentally wrong? How do I address this? I want my Python application (using Flask) to create and destroy stacks using the Automation API (with my gs backend URL).
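A sketch of the likely fix (shown with the TypeScript Automation API for consistency with the other snippets here; the Python LocalWorkspaceOptions exposes the same env_vars and work_dir settings): credentials and `pulumi login` state established by a shell script in a separate subprocess don't carry over to the processes the Automation API spawns, so pass them explicitly on the workspace. Paths, bucket and stack names below are placeholders:

```typescript
import * as auto from "@pulumi/pulumi/automation";

async function deploy() {
    const stack = await auto.LocalWorkspace.createOrSelectStack(
        { stackName: "dev", workDir: "/path/to/pulumi/project" },   // placeholder project dir
        {
            envVars: {
                // Make Google credentials visible to the `pulumi` processes the API spawns.
                GOOGLE_APPLICATION_CREDENTIALS: "/path/to/service-account.json",
                // Point at the self-managed GCS backend instead of relying on `pulumi login`.
                PULUMI_BACKEND_URL: "gs://my-pulumi-state-bucket",
                // Self-managed backends need a passphrase (or another secrets provider).
                PULUMI_CONFIG_PASSPHRASE: process.env.PULUMI_CONFIG_PASSPHRASE ?? "",
            },
        },
    );
    await stack.up({ onOutput: console.log });
}

deploy();
```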
many-salesmen-89069
05/26/2021, 10:04 AMmy_dir/
Pulumi.yaml
setup.py // calls `setup(packages=find_packages())`
my_package/
__init__.py
__main__.py // does `from .pulumi import *`
pulumi.py // contains AWS resource created with Pulumi
Now when I run pulumi up
in my_dir
, I get the following error:
ImportError: can't find '__main__' module in '.'
Which tells me that Pulumi tries to import my_dir/__main__.py
and ignores setup.py
. How can I fix this? Is there any documentation that I’m missing?
colossal-vr-62639
05/26/2021, 7:26 PMDiagnostics:
aws:ec2:Vpc (vpc):
error: 1 error occurred:
* error configuring Terraform AWS Provider: AWS account ID not previously found and failed retrieving via all available methods. See https://www.terraform.io/docs/providers/aws/index.html#skip_requesting_account_id for workaround and implications. Errors: 2 errors occurred:
* error calling sts:GetCallerIdentity: InvalidClientTokenId: The security token included in the request is invalid.
status code: 403, request id: 52427706-bd0f-49de-ae8c-d82b557e5cb5
* failed getting account information via iam:ListRoles: InvalidClientTokenId: The security token included in the request is invalid.
status code: 403, request id: c2f043c0-16d0-436a-9e5d-65238953123d
prehistoric-london-9917
05/26/2021, 11:35 PMgetLoadBalancer
to retrieve the provisioned ALB, but this races with the ALB controller fully provisioning it, so I get a “load balancer not found” error. I’ve tried wrapping the Ingress resource in an all
, but that doesn’t seem to help.
Any suggestions on what I can do to make the getLoadBalancer
function to wait for the load balancer to provision?
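One possible sequencing trick, sketched under two assumptions: `ingress` is the Ingress that the AWS Load Balancer Controller reconciles, and the ALB's name can be recovered from the first DNS label of its hostname by stripping the numeric suffix the controller appends (adjust that to your naming scheme). Doing the lookup inside `apply()` means `getLoadBalancer` only runs once the Ingress status actually reports a hostname:

```typescript
import * as aws from "@pulumi/aws";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";

export function getProvisionedAlb(
    ingress: k8s.networking.v1.Ingress,
): pulumi.Output<aws.lb.GetLoadBalancerResult> {
    return ingress.status.apply(status => {
        // The hostname is only populated once the controller has created the ALB.
        const hostname = status.loadBalancer!.ingress![0].hostname!;
        // Assumed convention: "<alb-name>-<digits>.<region>.elb.amazonaws.com".
        const name = hostname.split(".")[0].replace(/-\d+$/, "");
        return aws.lb.getLoadBalancer({ name });
    });
}
```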
quiet-architect-74241
05/27/2021, 3:51 PMvar webapp1 = new WebApp(name,
new WebAppArgs
{
Kind = "app,linux",
SiteConfig = new SiteConfigArgs
{
Cors = new CorsSettingsArgs
{
AllowedOrigins = {
// how do I specify WebApp2 here?
},
SupportCredentials = false
}
},
});
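One way to express this, sketched in TypeScript for consistency with the other snippets on this page (the C# equivalent would be `Webapp2.DefaultHostName.Apply(host => $"https://{host}")` as an entry in `AllowedOrigins`): reference the second app's `defaultHostName` output, so the value is filled in once that app exists and the dependency is tracked. Resource names and SKUs below are placeholders:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as resources from "@pulumi/azure-native/resources";
import * as web from "@pulumi/azure-native/web";

const rg = new resources.ResourceGroup("rg");

const plan = new web.AppServicePlan("plan", {
    resourceGroupName: rg.name,
    kind: "Linux",
    reserved: true,
    sku: { name: "B1", tier: "Basic" },
});

const webapp2 = new web.WebApp("webapp2", {
    resourceGroupName: rg.name,
    serverFarmId: plan.id,
});

const webapp1 = new web.WebApp("webapp1", {
    resourceGroupName: rg.name,
    serverFarmId: plan.id,
    kind: "app,linux",
    siteConfig: {
        cors: {
            // webapp2.defaultHostName is an Output<string>; interpolate builds the origin from it.
            allowedOrigins: [pulumi.interpolate`https://${webapp2.defaultHostName}`],
            supportCredentials: false,
        },
    },
});
```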
hundreds-kite-52072
05/28/2021, 6:24 PMmammoth-doctor-29598
05/31/2021, 1:06 AMResourceInitializationError: failed to invoke EFS utils commands to set up EFS volumes: stderr: b'mount.nfs4: Connection reset by peer' : unsuccessful EFS utils command execution; code: 32
I googled, and some people suggested creating a security group with ingress on 2049/tcp, but this doesn't seem to fix the error.
I've tried many setups, but no matter how I configure the VPC, targets or mount points, it doesn't seem to work. Any idea of what I could try?
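For reference, a sketch of the security-group wiring that usually resolves "Connection reset by peer" on EFS mounts from ECS/Fargate (the config keys below are placeholders): the mount target's security group has to allow NFS (2049/tcp) specifically from the tasks' security group, and there has to be a mount target in every subnet/AZ the tasks can run in:

```typescript
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";

const cfg = new pulumi.Config();
const vpcId = cfg.require("vpcId");                              // placeholder
const taskSecurityGroupId = cfg.require("taskSecurityGroupId");  // SG attached to the ECS tasks
const privateSubnetIds = cfg.requireObject<string[]>("privateSubnetIds");

const fileSystem = new aws.efs.FileSystem("app-data");

const efsSg = new aws.ec2.SecurityGroup("efs-sg", {
    vpcId: vpcId,
    ingress: [{
        protocol: "tcp",
        fromPort: 2049,
        toPort: 2049,
        // Key point: allow NFS from the tasks' security group, not from a guessed CIDR.
        securityGroups: [taskSecurityGroupId],
    }],
    egress: [{ protocol: "-1", fromPort: 0, toPort: 0, cidrBlocks: ["0.0.0.0/0"] }],
});

// One mount target per subnet the tasks can be placed in.
const mountTargets = privateSubnetIds.map((subnetId, i) =>
    new aws.efs.MountTarget(`efs-mt-${i}`, {
        fileSystemId: fileSystem.id,
        subnetId: subnetId,
        securityGroups: [efsSg.id],
    }));
```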
mammoth-doctor-29598
05/31/2021, 1:14 AMmammoth-doctor-29598
05/31/2021, 1:38 AMcolossal-vr-62639
05/31/2021, 5:49 PMRegistering resource monitor end: t=pulumi:pulumi:Stack, name=axiom.saas-manage-dev, custom=False, remote=False
adorable-continent-4073
05/31/2021, 10:18 PMbrash-airline-37413
06/01/2021, 4:53 AMnarrow-battery-21100
06/01/2021, 11:13 AMType Name Plan Info
pulumi:pulumi:Stack psrc-dev
+- ├─ azure-native:storage:Blob zip replace [diff: ~source]
~ └─ azure-native:web:WebApp fnapp57 update [diff: ~siteConfig]
Resources:
~ 1 to update
+-1 to replace
2 changes. 8 unchanged
Do you want to perform this update? yes
Updating (dev)
View Live: https://app.pulumi.com/markymark/psrc/dev/updates/3
Type Name Status Info
pulumi:pulumi:Stack psrc-dev
+- └─ azure-native:storage:Blob zip replaced [diff: ~source]
Outputs: ...
Resources:
+-1 replaced
9 unchanged
hundreds-kite-52072
06/01/2021, 2:29 PMrhythmic-kite-60258
06/02/2021, 9:31 PMthankful-oxygen-71474
06/03/2021, 7:48 AMWhen I add eslint
to my project and format all files, I run the command pulumi up
and receive multiple errors like this:
aws:lambda:Function (dev-v1-internal-verify):
error: 1 error occurred:
* updating urn:pulumi:dev::aok-multi-pulumi::aws:lambda/function:Function::dev-v1-internal-verify: 1 error occurred:
* error modifying Lambda Function (dev-v1-internal-verify-cb6dab7) Code: RequestEntityTooLargeException:
status code: 413, request id: eadb73d4-c9b3-4946-8a4a-ed2743cde46b
Can anyone help?
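A hedged guess plus a sketch: RequestEntityTooLargeException on the code update usually means the zipped function code has grown past the size allowed for inline upload (pulling eslint and its dependencies into the packaged node_modules would do that). Staging the archive in S3 and pointing the function at it avoids the inline limit; names and paths below are placeholders, and pruning dev dependencies from the deployed archive helps regardless:

```typescript
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";

const artifactBucket = new aws.s3.Bucket("lambda-artifacts");

// Upload the zipped code to S3 instead of sending it inline with the API call.
const verifyCode = new aws.s3.BucketObject("verify-code", {
    bucket: artifactBucket.id,
    source: new pulumi.asset.FileArchive("./dist/verify"),   // placeholder build output
});

const role = new aws.iam.Role("verify-role", {
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{ Effect: "Allow", Action: "sts:AssumeRole", Principal: { Service: "lambda.amazonaws.com" } }],
    }),
});

const verifyFn = new aws.lambda.Function("dev-v1-internal-verify", {
    runtime: "nodejs14.x",
    handler: "index.handler",
    role: role.arn,
    s3Bucket: artifactBucket.id,
    s3Key: verifyCode.key,
});
```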
wide-activity-54187
06/04/2021, 12:55 PMstraight-teacher-66836
06/04/2021, 2:02 PMstraight-airplane-54654
06/08/2021, 12:09 PMsalmon-mechanic-4571
06/08/2021, 12:54 PMsalmon-mechanic-4571
06/08/2021, 12:54 PMbrave-planet-10645
06/08/2021, 12:57 PMsalmon-mechanic-4571
06/08/2021, 12:57 PMbrave-planet-10645
06/08/2021, 1:37 PMsalmon-mechanic-4571
06/08/2021, 1:42 PMbrave-planet-10645
06/08/2021, 1:51 PMcwd
option needs to point at the folder that contains (in your case) the __main__.py
file
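A minimal sketch of that point (TypeScript Automation API shown for consistency; the Python work_dir argument behaves the same way). The path and stack name are placeholders:

```typescript
import * as auto from "@pulumi/pulumi/automation";

async function main() {
    const stack = await auto.LocalWorkspace.createOrSelectStack({
        stackName: "dev",
        workDir: "/path/to/project",   // the folder containing Pulumi.yaml and __main__.py
    });
    await stack.up({ onOutput: console.log });
}

main();
```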
salmon-mechanic-4571
06/08/2021, 1:57 PMbrave-planet-10645
06/08/2021, 2:02 PMsalmon-mechanic-4571
06/08/2021, 2:04 PM