elegant-crayon-4967
11/22/2022, 11:43 PM
adamant-leather-41068
11/23/2022, 12:37 AM
How do I attach a policyAttachment to an EC2 instance?
I have a RolePolicyAttachment as documented:
const test_attach = new aws.iam.RolePolicyAttachment("test-attach", {
role: role.name,
policyArn: policy.arn,
});
and an aws.ec2.Instance.
What is the correct way to attach them together?
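For context, a RolePolicyAttachment only binds the policy to the role; the role is then associated with an EC2 instance through an instance profile. A minimal sketch, reusing the role from above (the AMI ID, instance type, and resource names here are illustrative):
import * as aws from "@pulumi/aws";

// An instance profile wraps the role so EC2 can assume it.
const testProfile = new aws.iam.InstanceProfile("test-profile", {
    role: role.name,
});

// The instance picks up the role (and its attached policies) via the profile.
const testInstance = new aws.ec2.Instance("test-instance", {
    ami: "ami-0c55b159cbfafe1f0", // illustrative AMI ID
    instanceType: "t3.micro",
    iamInstanceProfile: testProfile.name,
});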
dry-beach-58138
11/23/2022, 2:34 AM
pulumi import kubernetes:networking/v1:Ingress my_portal frontend/my_portal
gives the error message
error: Preview failed: resource 'frontend/my_portal' does not exist
Any idea?
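Two hedged guesses at the failure: the Pulumi type token for Ingress uses the full API group (networking.k8s.io, not networking), and Kubernetes object names cannot contain underscores, so the live Ingress is probably not named my_portal. Assuming the live object is actually frontend/my-portal, the import would look like:
pulumi import kubernetes:networking.k8s.io/v1:Ingress my_portal frontend/my-portal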
flaky-arm-38472
11/23/2022, 3:30 AM
quaint-salesclerk-22166
11/23/2022, 8:30 AM
thousands-lizard-52998
11/23/2022, 9:06 AM
important-holiday-25047
11/23/2022, 2:14 PM
development/rabbitmq violates plan: properties changed:
At first it was just the Erlang cookie that had changed; after a pulumi refresh it's more:
error: resource urn:pulumi:Development::Cloud::kubernetes:helm.sh/v3:Chart$kubernetes:apps/v1:StatefulSet::development/rabbitmq violates plan: properties changed:
~~spec[...]: the old and new StatefulSet specs are identical except for the pod template annotation
checksum/secret: {0c8c4dcfdfcceeb8d55ca3d08ee5fa2815b03c1505fa60a83406922bb2dd8428}
             != {bc2150c097b4a1af2d5acd584ff944c61404fe43403d0f1aeeff50fba12d4c42}
(the remaining fields, podManagementPolicy, replicas, selector, serviceName, the template labels, node affinity, the rabbitmq container's env/image/probes/ports/volumeMounts, securityContext, serviceAccountName, volumes, and updateStrategy, match on both sides; full dump trimmed)
If I run pulumi up locally, a rabbitmq pod is created and can be viewed in Kubernetes, but the pod still reports errors even though it is marked as running:
Warning Unhealthy 38s kubelet Readiness probe failed: Error:
RabbitMQ on node rabbit@rabbitmq-0.rabbitmq-headless.development.svc.cluster.local is not running or has not fully booted yet (check with is_booting)
Locally, the redis pod is stuck in pulumi up with:
Finding Pods to direct traffic to
Does anybody have any idea what could be wrong here?
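A hedged guess at the plan violation: the Bitnami chart generates a random Erlang cookie on each render unless one is supplied, which also changes the checksum/secret annotation between plan and apply. A minimal sketch of pinning the cookie through chart values (the repo URL, namespace, and the config key erlangCookie are assumptions, not taken from the thread):
import * as pulumi from "@pulumi/pulumi";
import * as k8s from "@pulumi/kubernetes";

const cfg = new pulumi.Config();

// Supplying auth.erlangCookie stops the chart from regenerating the
// secret (and its checksum annotation) on every render.
const rabbitmq = new k8s.helm.v3.Chart("rabbitmq", {
    chart: "rabbitmq",
    version: "11.1.1",
    fetchOpts: { repo: "https://charts.bitnami.com/bitnami" },
    namespace: "development",
    values: {
        auth: { erlangCookie: cfg.requireSecret("erlangCookie") },
    },
});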
lemon-church-28946
11/23/2022, 3:00 PM
LocalWorkspace
◦ Stack
◦ pulumi.automation
• I've tried pulumi.automation.create_stack and importing an export via stack.new_stack, but it errors out because it requires a --force flag, as the exported stack is still deployed.
pulumi.automation.errors.CommandError:
code: 255
stdout:
stderr: warning: A new version of Pulumi is available. To upgrade from version '3.44.2' to '3.47.2', visit https://pulumi.com/docs/reference/install/ for manual instructions and release notes.
error: 'stack-name-redactified' still has resources; removal rejected. Possible actions:
- Make sure that 'stack-name-redactified' is the stack that you want to destroy
- Run `pulumi destroy` to delete the resources, then run `pulumi stack rm`
- Run `pulumi stack rm --force` to override this error
Am I making this more complicated than necessary? Is there a way of passing the --force flag such that the methods will acknowledge it?
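One way to avoid needing --force at all is the order the rejection message suggests: destroy the stack's resources first, then remove the now-empty stack. A minimal sketch in the Node Automation API; the Python SDK mirrors these calls as select_stack, destroy, and remove_stack (stackName and workDir are placeholders):
import { LocalWorkspace } from "@pulumi/pulumi/automation";

async function teardown(stackName: string, workDir: string) {
    const stack = await LocalWorkspace.selectStack({ stackName, workDir });
    await stack.destroy({ onOutput: console.log }); // delete the resources first
    await stack.workspace.removeStack(stackName);   // then the empty stack can be removed
}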
salmon-motherboard-78006
11/23/2022, 3:41 PM
clever-glass-42863
11/23/2022, 4:13 PM
hallowed-printer-89159
11/23/2022, 5:07 PM
// Create a Key to allow SSH connection
const sshKeyPrivate = new tls.PrivateKey("bastion", {
algorithm: "RSA",
})
export const privateKey = sshKeyPrivate.privateKeyPem;
const privateKeyInterP = pulumi.interpolate `${privateKey}`;
fs.writeFileSync("key.pem", privateKeyInterP);
and this is the error for key.pem:
Calling [toString] on an [Output<T>] is not supported.
To get the value of an Output<T> as an Output<string> consider either:
1: o.apply(v => `prefix${v}suffix`)
2: pulumi.interpolate `prefix${v}suffix`
See https://pulumi.io/help/outputs for more details.
This function may throw in a future version of @pulumi/pulumi.
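The failure is expected: privateKeyPem is an Output<string> whose value is unknown at program time, and pulumi.interpolate also returns an Output, so fs.writeFileSync receives an unresolved value. A minimal sketch of the usual workaround, writing the file inside apply once the value resolves:
import * as fs from "fs";
import * as tls from "@pulumi/tls";

const sshKeyPrivate = new tls.PrivateKey("bastion", { algorithm: "RSA" });

// apply runs once the secret value is known, so fs receives a plain string.
sshKeyPrivate.privateKeyPem.apply(pem => {
    fs.writeFileSync("key.pem", pem, { mode: 0o600 });
});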
broad-dog-22463
11/23/2022, 5:26 PM
thousands-pizza-93362
11/23/2022, 8:00 PM
quaint-match-50796
11/23/2022, 8:18 PM
resource urn:pulumi:dev::us-cluster-1::kubernetes:helm.sh/v3:Release::kube-prometheus-stack violates plan: properties changed:
++name[{kube-prometheus-stack-63216a72}!={kube-prometheus-stack-73270e19}],
++resourceNames[{map[
Alertmanager.monitoring.coreos.com/monitoring.coreos.com/v1:{[{monitoring/kube-prometheus-stack-6321-alertmanager}]}
ClusterRole.rbac.authorization.k8s.io/rbac.authorization.k8s.io/v1:{[{kube-prometheus-stack-6321-admission} {kube-prometheus-stack-6321-operator} {kube-prometheus-stack-6321-prometheus} {kube-prometheus-stack-63216a72-grafana-clusterrole} {kube-prometheus-stack-63216a72-kube-state-metrics}]}
ClusterRoleBinding.rbac.authorization.k8s.io/rbac.authorization.k8s.io/v1:{[{kube-prometheus-stack-6321-admission} {kube-prometheus-stack-6321-operator} {kube-prometheus-stack-6321-prometheus} {kube-prometheus-stack-63216a72-grafana-clusterrolebinding} {kube-prometheus-stack-63216a72-kube-state-metrics}]}
ConfigMap/v1:{[{monitoring/kube-prometheus-stack-6321-alertmanager-overview} {monitoring/kube-prometheus-stack-6321-apiserver} {monitoring/kube-prometheus-stack-6321-cluster-total} {monitoring/kube-prometheus-stack-6321-controller-manager} {monitoring/kube-prometheus-stack-6321-etcd} {monitoring/kube-prometheus-stack-6321-grafana-datasource} {monitoring/kube-prometheus-stack-6321-grafana-overview} {monitoring/kube-prometheus-stack-6321-k8s-coredns} {monitoring/kube-prometheus-stack-6321-k8s-resources-cluster} {monitoring/kube-prometheus-stack-6321-k8s-resources-namespace} {monitoring/kube-prometheus-stack-6321-k8s-resources-node} {monitoring/kube-prometheus-stack-6321-k8s-resources-pod} {monitoring/kube-prometheus-stack-6321-k8s-resources-workload} {monitoring/kube-prometheus-stack-6321-k8s-resources-workloads-namespace} {monitoring/kube-prometheus-stack-6321-kubelet} {monitoring/kube-prometheus-stack-6321-namespace-by-pod} {monitoring/kube-prometheus-stack-6321-namespace-by-workload} {monitoring/kube-prometheus-stack-6321-node-cluster-rsrc-use} {monitoring/kube-prometheus-stack-6321-node-rsrc-use} {monitoring/kube-prometheus-stack-6321-nodes} {monitoring/kube-prometheus-stack-6321-nodes-darwin} {monitoring/kube-prometheus-stack-6321-persistentvolumesusage} {monitoring/kube-prometheus-stack-6321-pod-total} {monitoring/kube-prometheus-stack-6321-prometheus} {monitoring/kube-prometheus-stack-6321-proxy} {monitoring/kube-prometheus-stack-6321-scheduler} {monitoring/kube-prometheus-stack-6321-workload-total} {monitoring/kube-prometheus-stack-63216a72-grafana} {monitoring/kube-prometheus-stack-63216a72-grafana-config-dashboards} {monitoring/kube-prometheus-stack-63216a72-grafana-test}]}
CustomResourceDefinition.apiextensions.k8s.io/apiextensions.k8s.io/v1:{[{alertmanagerconfigs.monitoring.coreos.com}
And they continue... They go over the Slack message limit.
After reverting to 3.47.1, everything is fine again.
curved-magazine-50014
11/23/2022, 8:49 PM
little-cartoon-10569
11/23/2022, 8:58 PM
When I run pulumi refresh --diff [--json] > file, it responds with the error message
error: --yes or --skip-preview must be passed in to proceed when running in non-interactive mode
Can anyone suggest a workaround to get the details into a file for easier viewing, without doing the actual refresh?
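A possible workaround, since the error appears only because redirecting output makes the run non-interactive: on Linux, the util-linux script command keeps a pseudo-terminal while logging everything to a file, so you can review the diff and then decline the confirmation prompt, leaving the state untouched:
script -c "pulumi refresh --diff" refresh-diff.txt
(answer "no" at the confirmation prompt; the diff is saved in refresh-diff.txt)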
rough-jordan-15935
11/24/2022, 6:20 AM
rough-jordan-15935
11/24/2022, 6:55 AM
quaint-salesclerk-22166
11/24/2022, 8:53 AM
getManagedDatabases to list all databases from a given managed instance, something like the implemented function for Snowflake: https://www.pulumi.com/registry/packages/snowflake/api-docs/getdatabases/. Is there an easy way to gather all managed databases?
green-kilobyte-86931
11/24/2022, 1:21 PM
strong-match-67698
11/24/2022, 4:24 PM
thousands-lizard-52998
11/25/2022, 5:30 AM
fast-island-38778
11/25/2022, 7:13 AM
clever-painter-96148
11/25/2022, 9:08 AM
echoing-dress-72742
11/25/2022, 9:22 AM
wet-noon-14291
11/25/2022, 8:52 PM
fierce-dinner-64337
11/25/2022, 11:48 PM
freezing-activity-93019
11/26/2022, 4:08 PM
Diagnostics:
pulumi:pulumi:Stack (pulumi-kubernetes-pulumi-kubernetes):
error: an unhandled error occurred: program exited with non-zero exit code: 2
panic: fatal: An assertion has failed: {<nil> <nil> <nil> <nil> <nil> <nil> <nil> <nil> <nil> <nil> flux-system flux-system <nil> <nil> <nil> <nil>}: cannot assign v1.objectMetaPtrType to v1.ObjectMeta
goroutine 42 [running]:
github.com/pulumi/pulumi/sdk/v3/go/common/util/contract.failfast(...)
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/common/util/contract/failfast.go:23
github.com/pulumi/pulumi/sdk/v3/go/common/util/contract.Assertf(0x20?, {0x248475a?, 0x240c660?}, {0xc000c87068?, 0xc000c78b01?, 0x98?})
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/common/util/contract/assert.go:33 +0xed
github.com/pulumi/pulumi/sdk/v3/go/pulumi.marshalInputImpl({0x1e67920?, 0xc00046a200?}, {0x279c0a8?, 0x1b22fe0?}, 0x1, 0x58?)
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/rpc.go:391 +0xa09
github.com/pulumi/pulumi/sdk/v3/go/pulumi.marshalInput(...)
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/rpc.go:214
github.com/pulumi/pulumi/sdk/v3/go/pulumi.marshalInputs.func1({0x198ccc5, 0x8}, {0x1e67920?, 0xc00046a200?}, {0x279c0a8?, 0x1b22fe0?})
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/rpc.go:134 +0x97
github.com/pulumi/pulumi/sdk/v3/go/pulumi.marshalInputs({0x272a400, 0xc00053eaf0})
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/rpc.go:184 +0xa18
github.com/pulumi/pulumi/sdk/v3/go/pulumi.(*Context).prepareResourceInputs(0xc00019a580, {0x278fe68?, 0xc00046a300?}, {0x272a400, 0xc00053eaf0}, {0x2483e58?, 0x19?}, 0xc000246b40, 0xc00011c480, 0x0, ...)
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/context.go:1280 +0x1a5
github.com/pulumi/pulumi/sdk/v3/go/pulumi.(*Context).registerResource.func1()
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/context.go:832 +0x271
created by github.com/pulumi/pulumi/sdk/v3/go/pulumi.(*Context).registerResource
        github.com/pulumi/pulumi/sdk/v3@v3.48.0/go/pulumi/context.go:819 +0x845
freezing-activity-93019
11/26/2022, 4:08 PM
icy-controller-6092
11/27/2022, 11:53 PM
@pulumi/aws package? https://github.com/hashicorp/terraform-provider-aws/pull/27965
shy-kite-40307
11/28/2022, 11:12 PM
icy-controller-6092
11/29/2022, 6:43 AM