diff --git a/.github/actions/pre-init-distributed/action.yaml b/.github/actions/pre-init-distributed/action.yaml index 78ce41770f..fcbb48d92b 100644 --- a/.github/actions/pre-init-distributed/action.yaml +++ b/.github/actions/pre-init-distributed/action.yaml @@ -93,7 +93,6 @@ runs: shell: bash -e {0} run: rm -f target/debug/bolt target/release/bolt - # Run `bolt config generate` so that the `Check` job can start working sooner - name: Generate Bolt Config shell: bash -e {0} run: nix-shell --pure --run "bolt config generate ci" diff --git a/docs/benchmarks/START_TIMES.md b/docs/benchmarks/START_TIMES.md new file mode 100644 index 0000000000..aef8320163 --- /dev/null +++ b/docs/benchmarks/START_TIMES.md @@ -0,0 +1,105 @@ +# Start times + +## Low-end machine + +> System +> +> - Debian GNU/Linux 11 +> - Shared VM, 4 VCPUs (of AMD EPYC 7713 16-Core 2GHz) +> - 8GB memory + +### `nix-shell` setup time (fresh) + +- Before building `bolt`: 1m31s +- Building `bolt`: 2m15s + +### Services (Minimal setup) + +| step | up | +| ------------------ | ----- | +| k8s-cluster | 20s | +| k8s-infra | 2m31s | +| redis | 1s | +| cockroach | 1s | +| clickhouse | 1s | +| s3 | 24s | +| infra-artifacts | 50s | +| migrate | 62s | +| up (containerized) | 7s | +| total | 5m17s | + +### `k8s-infra` breakdown + +_Note, these are not additive as they run in parallel_ + +_First loki, promtail, and prometheus are provisioned then the rest follow_ + +| service | up | +| -------------- | ----- | +| promtail | 3s | +| prometheus | 43s | +| loki | 1m14s | +| k8s_dashboard | 3s | +| traefik tunnel | 20s | +| traefik | 20s | +| traffic_server | 26s | +| nats | 27s | +| imagor | 29s | +| minio | 35s | +| nomad_server | 46s | +| clickhouse | 47s | +| redis | 51s | +| nsfw_api | 56s | +| cockroachdb | 1m6s | + +## Higher-end machine + +> System +> +> - Debian GNU/Linux 11 +> - AMD EPYC 7713 16-Core 2GHz +> - 32GB memory + +### Services (Complex setup) + +_This setup uses postgres as the terraform config storage method, adding overhead to each step_ + +| step | up | destroy | +| ------------------ | -------- | -------- | +| k8s-cluster | 27s | 16s | +| k8s-infra | 2m34s | - | +| tls | 4m29s | 5s | +| redis | 11s | - | +| cockroach | 10s | - | +| clickhouse | 10s | - | +| vector | 19s | - | +| pools | 2m43s | 1m57s | +| dns | 2m48s | 9s | +| better uptime | untested | untested | +| cf-workers | 15s | 6s | +| cf-tunnels | 18s | 12s | +| s3 | 35s | - | +| infra-artifacts | 35s | - | +| migrate | 58s | - | +| up (containerized) | 7s | - | +| total | 17m2s | 2m40s | + +### `k8s-infra` breakdown + +| service | up | +| -------------- | ----- | +| promtail | 6s | +| prometheus | 48s | +| loki | 1m20s | +| k8s_dashboard | 6s | +| imagor | 8s | +| traefik | 12s | +| traefik tunnel | 14s | +| traffic_server | 16s | +| minio | 22s | +| nats | 28s | +| clickhouse | 30s | +| redis | 33s | +| nsfw_api | 36s | +| nomad_server | 46s | +| cockroachdb | 49s | diff --git a/errors/feature/disabled.md b/errors/feature/disabled.md new file mode 100644 index 0000000000..236d8a18a8 --- /dev/null +++ b/errors/feature/disabled.md @@ -0,0 +1,9 @@ +--- +name = "FEATURE_DISABLED" +description = "The given feature is disabled: {feature}" +http_status = 400 +--- + +# Feature Disabled + +A feature required to use/access this resource is disabled. 
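The `{feature}` placeholder in the error description is filled in at the call site. A rough sketch of how a service might surface this error, assuming the `ensure_with!` macro and `GlobalResult` type from `global_error` as used in `lib/pools/src/pools.rs` later in this diff (the `require_clickhouse` helper name is hypothetical):

```rust
use global_error::{ensure_with, GlobalResult};

// Hypothetical helper: bail with FEATURE_DISABLED (HTTP 400) when the
// ClickHouse feature flag is off, mirroring the check added to
// `PoolsInner::clickhouse` in this patch.
fn require_clickhouse() -> GlobalResult<()> {
	// `CLICKHOUSE_DISABLED` is set in the service env when the namespace
	// config omits ClickHouse (see `lib/bolt/core/src/context/service.rs`).
	ensure_with!(
		std::env::var("CLICKHOUSE_DISABLED").is_err(),
		FEATURE_DISABLED,
		feature = "Clickhouse"
	);

	Ok(())
}
```

In this patch the same check gates the ClickHouse pool: when `CLICKHOUSE_DISABLED` is set, `PoolsInner::clickhouse` returns this error rather than `Error::MissingClickHousePool`.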
diff --git a/infra/tf/k8s_infra/clickhouse.tf b/infra/tf/k8s_infra/clickhouse.tf index f1f8c21db1..24abddea61 100644 --- a/infra/tf/k8s_infra/clickhouse.tf +++ b/infra/tf/k8s_infra/clickhouse.tf @@ -1,5 +1,5 @@ locals { - clickhouse_k8s = var.clickhouse_provider == "kubernetes" + clickhouse_enabled = var.clickhouse_enabled && var.clickhouse_provider == "kubernetes" service_clickhouse = lookup(var.services, "clickhouse", { count = 1 resources = { @@ -10,7 +10,7 @@ locals { } module "clickhouse_secrets" { - count = local.clickhouse_k8s ? 1 : 0 + count = local.clickhouse_enabled ? 1 : 0 source = "../modules/secrets" @@ -20,7 +20,7 @@ module "clickhouse_secrets" { } resource "kubernetes_namespace" "clickhouse" { - count = local.clickhouse_k8s ? 1 : 0 + count = local.clickhouse_enabled ? 1 : 0 metadata { name = "clickhouse" @@ -28,6 +28,8 @@ resource "kubernetes_namespace" "clickhouse" { } resource "kubernetes_priority_class" "clickhouse_priority" { + count = local.clickhouse_enabled ? 1 : 0 + metadata { name = "clickhouse-priority" } @@ -36,12 +38,11 @@ resource "kubernetes_priority_class" "clickhouse_priority" { } resource "helm_release" "clickhouse" { + count = local.clickhouse_enabled ? 1 : 0 depends_on = [null_resource.daemons] - count = local.clickhouse_k8s ? 1 : 0 - name = "clickhouse" - namespace = kubernetes_namespace.clickhouse[0].metadata.0.name + namespace = kubernetes_namespace.clickhouse.0.metadata.0.name chart = "../../helm/clickhouse" # repository = "oci://registry-1.docker.io/bitnamicharts" # chart = "clickhouse" @@ -56,7 +57,7 @@ resource "helm_release" "clickhouse" { replicaCount = 1 } - priorityClassName = kubernetes_priority_class.clickhouse_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.clickhouse_priority.0.metadata.0.name resources = var.limit_resources ? { limits = { memory = "${local.service_clickhouse.resources.memory}Mi" @@ -121,7 +122,7 @@ resource "helm_release" "clickhouse" { # Admin auth auth = { username = "default" - password = module.clickhouse_secrets[0].values["clickhouse/users/default/password"] + password = module.clickhouse_secrets.0.values["clickhouse/users/default/password"] } metrics = { @@ -129,7 +130,7 @@ resource "helm_release" "clickhouse" { serviceMonitor = { enabled = true - namespace = kubernetes_namespace.clickhouse[0].metadata.0.name + namespace = kubernetes_namespace.clickhouse.0.metadata.0.name } # TODO: @@ -142,18 +143,17 @@ resource "helm_release" "clickhouse" { } data "kubernetes_secret" "clickhouse_ca" { - count = local.clickhouse_k8s ? 1 : 0 - + count = local.clickhouse_enabled ? 1 : 0 depends_on = [helm_release.clickhouse] metadata { name = "clickhouse-crt" - namespace = kubernetes_namespace.clickhouse[0].metadata.0.name + namespace = kubernetes_namespace.clickhouse.0.metadata.0.name } } resource "kubernetes_config_map" "clickhouse_ca" { - for_each = local.clickhouse_k8s ? toset(["rivet-service", "bolt", "vector"]) : toset([]) + for_each = local.clickhouse_enabled ? 
toset(["rivet-service", "bolt", "vector"]) : toset([]) metadata { name = "clickhouse-ca" @@ -161,7 +161,7 @@ resource "kubernetes_config_map" "clickhouse_ca" { } data = { - "ca.crt" = data.kubernetes_secret.clickhouse_ca[0].data["ca.crt"] + "ca.crt" = data.kubernetes_secret.clickhouse_ca.0.data["ca.crt"] } } diff --git a/infra/tf/k8s_infra/grafana.tf b/infra/tf/k8s_infra/grafana.tf index 6340bbc22c..38cf626c7e 100644 --- a/infra/tf/k8s_infra/grafana.tf +++ b/infra/tf/k8s_infra/grafana.tf @@ -8,10 +8,10 @@ locals { } resource "kubernetes_config_map" "grafana_dashboard" { - for_each = local.grafana_dashboards + for_each = var.prometheus_enabled ? local.grafana_dashboards : {} metadata { - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name name = "prometheus-rivet-${each.key}" labels = { grafana_dashboard = "1" diff --git a/infra/tf/k8s_infra/imagor.tf b/infra/tf/k8s_infra/imagor.tf index df96e08573..8ccd2b8fa0 100644 --- a/infra/tf/k8s_infra/imagor.tf +++ b/infra/tf/k8s_infra/imagor.tf @@ -49,12 +49,16 @@ module "imagor_secrets" { } resource "kubernetes_namespace" "imagor" { + count = var.imagor_enabled ? 1 : 0 + metadata { name = "imagor" } } resource "kubernetes_priority_class" "imagor_priority" { + count = var.imagor_enabled ? 1 : 0 + metadata { name = "imagor-priority" } @@ -63,11 +67,12 @@ resource "kubernetes_priority_class" "imagor_priority" { } resource "kubernetes_deployment" "imagor" { + count = var.imagor_enabled ? 1 : 0 depends_on = [null_resource.daemons, module.docker_auth] metadata { name = "imagor" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name } spec { @@ -87,7 +92,7 @@ resource "kubernetes_deployment" "imagor" { } spec { - priority_class_name = kubernetes_priority_class.imagor_priority.metadata.0.name + priority_class_name = kubernetes_priority_class.imagor_priority.0.metadata.0.name # MARK: Docker auth image_pull_secrets { @@ -168,9 +173,11 @@ resource "kubernetes_deployment" "imagor" { } resource "kubernetes_secret" "imagor_secret_env" { + count = var.imagor_enabled ? 1 : 0 + metadata { name = "imagor-secret-env" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name } data = { @@ -180,13 +187,15 @@ resource "kubernetes_secret" "imagor_secret_env" { } resource "kubernetes_service" "imagor" { + count = var.imagor_enabled ? 1 : 0 + metadata { name = "imagor" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name } spec { selector = { - "app.kubernetes.io/name" = kubernetes_deployment.imagor.metadata.0.name + "app.kubernetes.io/name" = kubernetes_deployment.imagor.0.metadata.0.name } port { @@ -198,6 +207,7 @@ resource "kubernetes_service" "imagor" { } resource "kubectl_manifest" "imagor_traefik_service" { + count = var.imagor_enabled ? 
1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -206,7 +216,7 @@ resource "kubectl_manifest" "imagor_traefik_service" { metadata = { name = "imagor" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -215,7 +225,7 @@ resource "kubectl_manifest" "imagor_traefik_service" { spec = { mirroring = { name = "imagor" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name port = 8000 } } @@ -223,7 +233,7 @@ resource "kubectl_manifest" "imagor_traefik_service" { } resource "kubectl_manifest" "imagor_ingress" { - for_each = local.entrypoints + for_each = var.imagor_enabled ? local.entrypoints : {} depends_on = [helm_release.traefik] @@ -233,7 +243,7 @@ resource "kubectl_manifest" "imagor_ingress" { metadata = { name = "imagor-${each.key}" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -251,13 +261,13 @@ resource "kubectl_manifest" "imagor_ingress" { middlewares = [ for mw in preset.middlewares: { name = mw - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name } ] services = [{ kind = "TraefikService" name = "imagor" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name }] } ] @@ -269,6 +279,7 @@ resource "kubectl_manifest" "imagor_ingress" { # MARK: Middleware resource "kubectl_manifest" "imagor_cors" { + count = var.imagor_enabled ? 1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -277,7 +288,7 @@ resource "kubectl_manifest" "imagor_cors" { metadata = { name = "imagor-cors" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -294,6 +305,7 @@ resource "kubectl_manifest" "imagor_cors" { } resource "kubectl_manifest" "imagor_cors_game" { + count = var.imagor_enabled ? 1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -302,7 +314,7 @@ resource "kubectl_manifest" "imagor_cors_game" { metadata = { name = "imagor-cors-game" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -319,6 +331,7 @@ resource "kubectl_manifest" "imagor_cors_game" { } resource "kubectl_manifest" "imagor_cdn_retry" { + count = var.imagor_enabled ? 1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -327,7 +340,7 @@ resource "kubectl_manifest" "imagor_cdn_retry" { metadata = { name = "imagor-cdn-retry" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -343,6 +356,7 @@ resource "kubectl_manifest" "imagor_cdn_retry" { } resource "kubectl_manifest" "imagor_cdn_cache_control" { + count = var.imagor_enabled ? 
1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -351,7 +365,7 @@ resource "kubectl_manifest" "imagor_cdn_cache_control" { metadata = { name = "imagor-cdn-cache-control" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -368,6 +382,7 @@ resource "kubectl_manifest" "imagor_cdn_cache_control" { } resource "kubectl_manifest" "imagor_cdn" { + count = var.imagor_enabled ? 1 : 0 depends_on = [helm_release.traefik] yaml_body = yamlencode({ @@ -376,7 +391,7 @@ resource "kubectl_manifest" "imagor_cdn" { metadata = { name = "imagor-cdn" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } @@ -387,11 +402,11 @@ resource "kubectl_manifest" "imagor_cdn" { middlewares = [ { name = "imagor-cdn-retry" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name }, { name = "imagor-cdn-cache-control" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name } ] } @@ -401,10 +416,13 @@ resource "kubectl_manifest" "imagor_cdn" { resource "kubectl_manifest" "imagor_preset_middlewares" { depends_on = [helm_release.traefik] - for_each = { - for index, preset in var.imagor_presets: - preset.key => preset - } + for_each = ( + var.imagor_enabled ? { + for index, preset in var.imagor_presets: + preset.key => preset + } : + {} + ) yaml_body = yamlencode({ apiVersion = "traefik.io/v1alpha1" @@ -412,7 +430,7 @@ resource "kubectl_manifest" "imagor_preset_middlewares" { metadata = { name = "imagor-${each.key}-path" - namespace = kubernetes_namespace.imagor.metadata[0].name + namespace = kubernetes_namespace.imagor.0.metadata[0].name labels = { "traefik-instance" = "main" } diff --git a/infra/tf/k8s_infra/init.tf b/infra/tf/k8s_infra/init.tf index 71fc32e36b..5e344926b2 100644 --- a/infra/tf/k8s_infra/init.tf +++ b/infra/tf/k8s_infra/init.tf @@ -15,13 +15,14 @@ module "docker_auth" { source = "../modules/k8s_auth" namespaces = [ - for x in [ - kubernetes_namespace.traffic_server, + for x in flatten([ + [kubernetes_namespace.traffic_server, # kubernetes_namespace.redis_exporter, kubernetes_namespace.rivet_service, - kubernetes_namespace.imagor, - kubernetes_namespace.nsfw_api - ]: + ], + var.imagor_enabled ? [kubernetes_namespace.imagor.0] : [], + var.nsfw_api_enabled ? [kubernetes_namespace.nsfw_api.0] : [] + ]) : x.metadata.0.name ] authenticate_all_docker_hub_pulls = var.authenticate_all_docker_hub_pulls diff --git a/infra/tf/k8s_infra/k8s_dashboard.tf b/infra/tf/k8s_infra/k8s_dashboard.tf index aacb6c4158..85806efad6 100644 --- a/infra/tf/k8s_infra/k8s_dashboard.tf +++ b/infra/tf/k8s_infra/k8s_dashboard.tf @@ -1,14 +1,17 @@ resource "kubernetes_namespace" "k8s_dashboard" { + count = var.k8s_dashboard_enabled ? 1 : 0 + metadata { name = "kubernetes-dashboard" } } resource "helm_release" "k8s_dashboard" { + count = var.k8s_dashboard_enabled ? 
1 : 0 depends_on = [null_resource.daemons] name = "kubernetes-dashboard" - namespace = kubernetes_namespace.k8s_dashboard.metadata.0.name + namespace = kubernetes_namespace.k8s_dashboard.0.metadata.0.name repository = "https://kubernetes.github.io/dashboard/" chart = "kubernetes-dashboard" # Version 7 doesn't seem to work @@ -22,13 +25,17 @@ resource "helm_release" "k8s_dashboard" { } resource "kubernetes_service_account" "admin_user" { + count = var.k8s_dashboard_enabled ? 1 : 0 + metadata { - namespace = kubernetes_namespace.k8s_dashboard.metadata.0.name + namespace = kubernetes_namespace.k8s_dashboard.0.metadata.0.name name = "admin-user" } } resource "kubernetes_cluster_role_binding" "admin_user" { + count = var.k8s_dashboard_enabled ? 1 : 0 + metadata { name = "admin-user" } @@ -41,7 +48,7 @@ resource "kubernetes_cluster_role_binding" "admin_user" { subject { kind = "ServiceAccount" - namespace = kubernetes_namespace.k8s_dashboard.metadata.0.name + namespace = kubernetes_namespace.k8s_dashboard.0.metadata.0.name name = "admin-user" } } diff --git a/infra/tf/k8s_infra/loki.tf b/infra/tf/k8s_infra/loki.tf index 75500a6f54..603c9f0475 100644 --- a/infra/tf/k8s_infra/loki.tf +++ b/infra/tf/k8s_infra/loki.tf @@ -9,12 +9,16 @@ locals { } resource "kubernetes_namespace" "loki" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "loki" } } resource "kubernetes_priority_class" "loki_priority" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "loki-priority" } @@ -23,14 +27,16 @@ resource "kubernetes_priority_class" "loki_priority" { } resource "helm_release" "loki" { + count = var.prometheus_enabled ? 1 : 0 + name = "loki" - namespace = kubernetes_namespace.loki.metadata.0.name + namespace = kubernetes_namespace.loki.0.metadata.0.name repository = "https://grafana.github.io/helm-charts" chart = "loki" version = "5.36.0" values = [yamlencode({ global = { - priorityClassName = kubernetes_priority_class.loki_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.loki_priority.0.metadata.0.name } loki = { auth_enabled = false @@ -86,10 +92,10 @@ resource "helm_release" "loki" { } monitoring = { dashboards = { - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } rules = { - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } } })] diff --git a/infra/tf/k8s_infra/minio.tf b/infra/tf/k8s_infra/minio.tf index ebc0077837..ab35ab19fb 100644 --- a/infra/tf/k8s_infra/minio.tf +++ b/infra/tf/k8s_infra/minio.tf @@ -66,13 +66,13 @@ resource "helm_release" "minio" { } metrics = { serviceMonitor = { - enabled = true + enabled = var.prometheus_enabled namespace = kubernetes_namespace.minio[0].metadata.0.name } # TODO: # prometheusRule = { - # enabled = true + # enabled = var.prometheus_enabled # namespace = kubernetes_namespace.prometheus.metadata.0.name # } } diff --git a/infra/tf/k8s_infra/nomad.tf b/infra/tf/k8s_infra/nomad.tf index daa91b2d2a..51d35426e4 100644 --- a/infra/tf/k8s_infra/nomad.tf +++ b/infra/tf/k8s_infra/nomad.tf @@ -71,6 +71,8 @@ locals { } resource "kubernetes_namespace" "nomad" { + count = var.edge_enabled ? 1 : 0 + metadata { name = "nomad" } @@ -78,8 +80,10 @@ resource "kubernetes_namespace" "nomad" { # Create a new config map for each version of the config so the stateful set can roll back gracefully. resource "kubernetes_config_map" "nomad_server" { + count = var.edge_enabled ? 
1 : 0 + metadata { - namespace = kubernetes_namespace.nomad.metadata.0.name + namespace = kubernetes_namespace.nomad.0.metadata.0.name name = "nomad-server-configmap-${local.nomad_checksum_configmap}" labels = { app = "nomad-server" @@ -91,8 +95,10 @@ resource "kubernetes_config_map" "nomad_server" { # Expose service resource "kubernetes_service" "nomad_server" { + count = var.edge_enabled ? 1 : 0 + metadata { - namespace = kubernetes_namespace.nomad.metadata.0.name + namespace = kubernetes_namespace.nomad.0.metadata.0.name name = "nomad-server" labels = { name = "nomad-server" @@ -119,10 +125,10 @@ resource "kubernetes_service" "nomad_server" { } resource "kubernetes_service" "nomad_server_indexed" { - count = local.nomad_server_count + count = var.edge_enabled ? local.nomad_server_count : 0 metadata { - namespace = kubernetes_namespace.nomad.metadata.0.name + namespace = kubernetes_namespace.nomad.0.metadata.0.name name = "nomad-server-${count.index}" labels = { name = "nomad-server-${count.index}" @@ -150,6 +156,7 @@ resource "kubernetes_service" "nomad_server_indexed" { } resource "kubectl_manifest" "nomad_server_monitor" { + count = var.edge_enabled && var.prometheus_enabled ? 1 : 0 depends_on = [kubernetes_stateful_set.nomad_server] yaml_body = yamlencode({ @@ -158,7 +165,7 @@ resource "kubectl_manifest" "nomad_server_monitor" { metadata = { name = "nomad-server-service-monitor" - namespace = kubernetes_namespace.nomad.metadata.0.name + namespace = kubernetes_namespace.nomad.0.metadata.0.name } spec = { @@ -181,6 +188,8 @@ resource "kubectl_manifest" "nomad_server_monitor" { } resource "kubernetes_priority_class" "nomad_priority" { + count = var.edge_enabled ? 1 : 0 + metadata { name = "nomad-priority" } @@ -189,10 +198,11 @@ resource "kubernetes_priority_class" "nomad_priority" { } resource "kubernetes_stateful_set" "nomad_server" { + count = var.edge_enabled ? 1 : 0 depends_on = [null_resource.daemons] metadata { - namespace = kubernetes_namespace.nomad.metadata.0.name + namespace = kubernetes_namespace.nomad.0.metadata.0.name name = "nomad-server-statefulset" labels = { app = "nomad-server" @@ -207,7 +217,7 @@ resource "kubernetes_stateful_set" "nomad_server" { } } - service_name = kubernetes_service.nomad_server.metadata.0.name + service_name = kubernetes_service.nomad_server.0.metadata.0.name template { metadata { @@ -221,7 +231,7 @@ resource "kubernetes_stateful_set" "nomad_server" { } spec { - priority_class_name = kubernetes_priority_class.nomad_priority.metadata.0.name + priority_class_name = kubernetes_priority_class.nomad_priority.0.metadata.0.name security_context { run_as_user = 0 @@ -377,14 +387,14 @@ resource "kubernetes_stateful_set" "nomad_server" { volume { name = "nomad-config" config_map { - name = kubernetes_config_map.nomad_server.metadata.0.name + name = kubernetes_config_map.nomad_server.0.metadata.0.name } } volume { name = "traefik-config" config_map { - name = kubernetes_config_map.nomad_server_sidecar_traefik_config.metadata[0].name + name = kubernetes_config_map.nomad_server_sidecar_traefik_config.0.metadata[0].name } } } @@ -410,9 +420,11 @@ resource "kubernetes_stateful_set" "nomad_server" { # Build Traefik config for the sidecar that forwards traffic to other Nomad leaders. resource "kubernetes_config_map" "nomad_server_sidecar_traefik_config" { + count = var.edge_enabled ? 
1 : 0 + metadata { name = "nomad-server-sidecar-traefik" - namespace = kubernetes_namespace.nomad.metadata[0].name + namespace = kubernetes_namespace.nomad.0.metadata.0.name } data = { diff --git a/infra/tf/k8s_infra/nsfw_api.tf b/infra/tf/k8s_infra/nsfw_api.tf index a7017c9e98..1bc2770b23 100644 --- a/infra/tf/k8s_infra/nsfw_api.tf +++ b/infra/tf/k8s_infra/nsfw_api.tf @@ -9,12 +9,16 @@ locals { } resource "kubernetes_namespace" "nsfw_api" { + count = var.nsfw_api_enabled ? 1 : 0 + metadata { name = "nsfw-api" } } resource "kubernetes_priority_class" "nsfw_api_priority" { + count = var.nsfw_api_enabled ? 1 : 0 + metadata { name = "nsfw-api-priority" } @@ -23,11 +27,12 @@ resource "kubernetes_priority_class" "nsfw_api_priority" { } resource "kubernetes_deployment" "nsfw_api" { + count = var.nsfw_api_enabled ? 1 : 0 depends_on = [null_resource.daemons, module.docker_auth] metadata { name = "nsfw-api" - namespace = kubernetes_namespace.nsfw_api.metadata[0].name + namespace = kubernetes_namespace.nsfw_api.0.metadata[0].name } spec { @@ -47,7 +52,7 @@ resource "kubernetes_deployment" "nsfw_api" { } spec { - priority_class_name = kubernetes_priority_class.nsfw_api_priority.metadata.0.name + priority_class_name = kubernetes_priority_class.nsfw_api_priority.0.metadata.0.name # MARK: Docker auth image_pull_secrets { @@ -85,13 +90,15 @@ resource "kubernetes_deployment" "nsfw_api" { } resource "kubernetes_service" "nsfw_api" { + count = var.nsfw_api_enabled ? 1 : 0 + metadata { name = "nsfw-api" - namespace = kubernetes_namespace.nsfw_api.metadata[0].name + namespace = kubernetes_namespace.nsfw_api.0.metadata[0].name } spec { selector = { - "app.kubernetes.io/name" = kubernetes_deployment.nsfw_api.metadata.0.name + "app.kubernetes.io/name" = kubernetes_deployment.nsfw_api.0.metadata.0.name } port { diff --git a/infra/tf/k8s_infra/outputs.tf b/infra/tf/k8s_infra/outputs.tf index 5430f7f2d4..516ecae404 100644 --- a/infra/tf/k8s_infra/outputs.tf +++ b/infra/tf/k8s_infra/outputs.tf @@ -1,7 +1,15 @@ output "traefik_external_ip" { - value = var.deploy_method_cluster ? data.kubernetes_service.traefik.status[0].load_balancer[0].ingress[0].hostname : var.public_ip + value = ( + var.deploy_method_cluster ? + data.kubernetes_service.traefik.status[0].load_balancer[0].ingress[0].hostname : + var.public_ip + ) } output "traefik_tunnel_external_ip" { - value = var.deploy_method_cluster ? data.kubernetes_service.traefik_tunnel.status[0].load_balancer[0].ingress[0].hostname : var.public_ip + value = ( + var.deploy_method_cluster && var.edge_enabled ? + data.kubernetes_service.traefik_tunnel.0.status[0].load_balancer[0].ingress[0].hostname : + var.public_ip + ) } diff --git a/infra/tf/k8s_infra/prometheus.tf b/infra/tf/k8s_infra/prometheus.tf index ff9b5afb7d..59e7735187 100644 --- a/infra/tf/k8s_infra/prometheus.tf +++ b/infra/tf/k8s_infra/prometheus.tf @@ -81,6 +81,8 @@ module "alertmanager_secrets" { } resource "kubernetes_namespace" "prometheus" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "prometheus" } @@ -88,6 +90,8 @@ resource "kubernetes_namespace" "prometheus" { # Set a high priority for Node Exporter so it can run on all nodes resource "kubernetes_priority_class" "node_exporter_priority" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "node-exporter-priority" } @@ -95,6 +99,8 @@ resource "kubernetes_priority_class" "node_exporter_priority" { } resource "kubernetes_priority_class" "prometheus_priority" { + count = var.prometheus_enabled ? 
1 : 0 + metadata { name = "prometheus-priority" } @@ -102,10 +108,11 @@ resource "kubernetes_priority_class" "prometheus_priority" { } resource "helm_release" "prometheus" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.vpa] name = "prometheus" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name repository = "https://prometheus-community.github.io/helm-charts" chart = "kube-prometheus-stack" version = "51.5.1" @@ -117,7 +124,7 @@ resource "helm_release" "prometheus" { cpu = "${local.service_node_exporter.resources.cpu}m" } } : null - priorityClassName = kubernetes_priority_class.node_exporter_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.node_exporter_priority.0.metadata.0.name affinity = { nodeAffinity = { requiredDuringSchedulingIgnoredDuringExecution = { @@ -265,7 +272,7 @@ resource "helm_release" "prometheus" { } } - priorityClassName = kubernetes_priority_class.prometheus_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.prometheus_priority.0.metadata.0.name resources = var.limit_resources ? { limits = { memory = "${local.service_prometheus.resources.memory}Mi" diff --git a/infra/tf/k8s_infra/prometheus_rules.tf b/infra/tf/k8s_infra/prometheus_rules.tf index a1a50dc34d..1560f9aa16 100644 --- a/infra/tf/k8s_infra/prometheus_rules.tf +++ b/infra/tf/k8s_infra/prometheus_rules.tf @@ -74,6 +74,7 @@ locals { # Useful: https://github.com/kubernetes/kube-state-metrics/blob/main/docs/pod-metrics.md resource "kubectl_manifest" "pod_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -81,7 +82,7 @@ resource "kubectl_manifest" "pod_rules" { kind = "PrometheusRule" metadata = { name = "pod-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -110,6 +111,7 @@ resource "kubectl_manifest" "pod_rules" { } resource "kubectl_manifest" "pvc_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -117,7 +119,7 @@ resource "kubectl_manifest" "pvc_rules" { kind = "PrometheusRule" metadata = { name = "persistent-volume-claim-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -151,6 +153,7 @@ resource "kubectl_manifest" "pvc_rules" { } resource "kubectl_manifest" "host_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -158,7 +161,7 @@ resource "kubectl_manifest" "host_rules" { kind = "PrometheusRule" metadata = { name = "host-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -194,6 +197,7 @@ resource "kubectl_manifest" "host_rules" { } resource "kubectl_manifest" "chirp_rules" { + count = var.prometheus_enabled ? 
1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -201,7 +205,7 @@ resource "kubectl_manifest" "chirp_rules" { kind = "PrometheusRule" metadata = { name = "chirp-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -251,6 +255,7 @@ resource "kubectl_manifest" "chirp_rules" { resource "kubectl_manifest" "api_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -258,7 +263,7 @@ resource "kubectl_manifest" "api_rules" { kind = "PrometheusRule" metadata = { name = "api-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -346,6 +351,7 @@ resource "kubectl_manifest" "api_rules" { } resource "kubectl_manifest" "crdb_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -353,7 +359,7 @@ resource "kubectl_manifest" "crdb_rules" { kind = "PrometheusRule" metadata = { name = "crdb-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -390,6 +396,7 @@ resource "kubectl_manifest" "crdb_rules" { } resource "kubectl_manifest" "nomad_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -397,7 +404,7 @@ resource "kubectl_manifest" "nomad_rules" { kind = "PrometheusRule" metadata = { name = "nomad-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ @@ -488,6 +495,7 @@ resource "kubectl_manifest" "nomad_rules" { } resource "kubectl_manifest" "traefik_rules" { + count = var.prometheus_enabled ? 1 : 0 depends_on = [helm_release.prometheus] yaml_body = yamlencode({ @@ -495,7 +503,7 @@ resource "kubectl_manifest" "traefik_rules" { kind = "PrometheusRule" metadata = { name = "traefik-rules" - namespace = kubernetes_namespace.prometheus.metadata.0.name + namespace = kubernetes_namespace.prometheus.0.metadata.0.name } spec = { groups = [ diff --git a/infra/tf/k8s_infra/promtail.tf b/infra/tf/k8s_infra/promtail.tf index 411bf93947..46bdf19455 100644 --- a/infra/tf/k8s_infra/promtail.tf +++ b/infra/tf/k8s_infra/promtail.tf @@ -9,12 +9,16 @@ locals { } resource "kubernetes_namespace" "promtail" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "promtail" } } resource "kubernetes_priority_class" "promtail_priority" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "promtail-priority" } @@ -22,8 +26,10 @@ resource "kubernetes_priority_class" "promtail_priority" { } resource "helm_release" "promtail" { + count = var.prometheus_enabled ? 1 : 0 + name = "promtail" - namespace = kubernetes_namespace.promtail.metadata.0.name + namespace = kubernetes_namespace.promtail.0.metadata.0.name repository = "https://grafana.github.io/helm-charts" chart = "promtail" version = "6.15.1" @@ -140,7 +146,7 @@ resource "helm_release" "promtail" { } } - priorityClassName = kubernetes_priority_class.promtail_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.promtail_priority.0.metadata.0.name resources = var.limit_resources ? 
{ limits = { memory = "${local.service_promtail.resources.memory}Mi" diff --git a/infra/tf/k8s_infra/redis.tf b/infra/tf/k8s_infra/redis.tf index 1ab9bef3c9..759a621807 100644 --- a/infra/tf/k8s_infra/redis.tf +++ b/infra/tf/k8s_infra/redis.tf @@ -45,7 +45,7 @@ module "redis_secrets" { } resource "kubernetes_namespace" "redis" { - depends_on = [ helm_release.prometheus ] + depends_on = [helm_release.prometheus] for_each = var.redis_dbs metadata { @@ -111,7 +111,7 @@ resource "helm_release" "redis" { metrics = { enabled = true serviceMonitor = { - enabled = true + enabled = var.prometheus_enabled namespace = kubernetes_namespace.redis[each.key].metadata.0.name } extraArgs = each.key == "chirp" ? { diff --git a/infra/tf/k8s_infra/traefik.tf b/infra/tf/k8s_infra/traefik.tf index d8c78ff4b5..a7fefb4fa0 100644 --- a/infra/tf/k8s_infra/traefik.tf +++ b/infra/tf/k8s_infra/traefik.tf @@ -204,7 +204,8 @@ resource "kubernetes_service" "traefik_headless" { } resource "kubectl_manifest" "traefik_service_monitor" { - depends_on = [helm_release.traefik] + count = var.prometheus_enabled ? 1 : 0 + depends_on = [null_resource.daemons, helm_release.traefik] yaml_body = yamlencode({ apiVersion = "monitoring.coreos.com/v1" diff --git a/infra/tf/k8s_infra/traefik_tunnel.tf b/infra/tf/k8s_infra/traefik_tunnel.tf index 816a4df866..9ddf18cc15 100644 --- a/infra/tf/k8s_infra/traefik_tunnel.tf +++ b/infra/tf/k8s_infra/traefik_tunnel.tf @@ -1,46 +1,49 @@ locals { # Specify what services to expose via the tunnel server - tunnel_services = { - # LEGACY: Addresses a random Nomad server. - "nomad" = { - service = "nomad-server" - service_namespace = kubernetes_namespace.nomad.metadata[0].name - service_port = 4647 - } - - # Addresses specific Nomad servers. - "nomad-server-0" = { - service = "nomad-server-0" - service_namespace = kubernetes_namespace.nomad.metadata[0].name - service_port = 4647 - } - "nomad-server-1" = { - service = "nomad-server-1" - service_namespace = kubernetes_namespace.nomad.metadata[0].name - service_port = 4647 - } - "nomad-server-2" = { - service = "nomad-server-2" - service_namespace = kubernetes_namespace.nomad.metadata[0].name - service_port = 4647 - } + tunnel_services = merge(flatten([ + [{ + "api-route" = { + service = "rivet-api-route" + service_namespace = kubernetes_namespace.rivet_service.metadata[0].name + service_port = 80 + }, + # LEGACY: Addresses a random Nomad server. + "nomad" = { + service = "nomad-server" + service_namespace = kubernetes_namespace.nomad.0.metadata[0].name + service_port = 4647 + } - "api-route" = { - service = "rivet-api-route" - service_namespace = kubernetes_namespace.rivet_service.metadata[0].name - service_port = 80 - } - "vector" = { - service = "vector" - service_namespace = kubernetes_namespace.vector.metadata[0].name - service_port = 6000 - } - "vector-tcp-json" = { - service = "vector" - service_namespace = kubernetes_namespace.vector.metadata[0].name - service_port = 6100 - } - } + # Addresses specific Nomad servers. + "nomad-server-0" = { + service = "nomad-server-0" + service_namespace = kubernetes_namespace.nomad.0.metadata[0].name + service_port = 4647 + } + "nomad-server-1" = { + service = "nomad-server-1" + service_namespace = kubernetes_namespace.nomad.0.metadata[0].name + service_port = 4647 + } + "nomad-server-2" = { + service = "nomad-server-2" + service_namespace = kubernetes_namespace.nomad.0.metadata[0].name + service_port = 4647 + } + }], + var.prometheus_enabled ? 
[{ + "vector" = { + service = "vector" + service_namespace = kubernetes_namespace.vector.0.metadata[0].name + service_port = 6000 + } + "vector-tcp-json" = { + service = "vector" + service_namespace = kubernetes_namespace.vector.0.metadata[0].name + service_port = 6100 + } + }] : [], + ])...) service_traefik_tunnel = lookup(var.services, "traefik-tunnel", { count = var.deploy_method_cluster ? 2 : 1 @@ -52,12 +55,16 @@ locals { } resource "kubernetes_namespace" "traefik_tunnel" { + count = var.edge_enabled ? 1 : 0 + metadata { name = "traefik-tunnel" } } resource "kubernetes_priority_class" "traefik_tunnel_priority" { + count = var.edge_enabled ? 1 : 0 + metadata { name = "traefik-tunnel-priority" } @@ -65,10 +72,12 @@ resource "kubernetes_priority_class" "traefik_tunnel_priority" { } resource "helm_release" "traefik_tunnel" { + count = var.edge_enabled ? 1 : 0 + depends_on = [null_resource.daemons] name = "traefik-tunnel" - namespace = kubernetes_namespace.traefik_tunnel.metadata.0.name + namespace = kubernetes_namespace.traefik_tunnel.0.metadata.0.name repository = "https://traefik.github.io/charts" chart = "traefik" version = "24.0.0" @@ -99,7 +108,7 @@ resource "helm_release" "traefik_tunnel" { } } - priorityClassName = kubernetes_priority_class.traefik_tunnel_priority.metadata.0.name + priorityClassName = kubernetes_priority_class.traefik_tunnel_priority.0.metadata.0.name tlsOptions = { "ingress-tunnel" = { @@ -169,11 +178,12 @@ resource "helm_release" "traefik_tunnel" { } resource "kubernetes_service" "traefik_tunnel_headless" { + count = var.edge_enabled ? 1 : 0 depends_on = [helm_release.traefik_tunnel] metadata { name = "traefik-headless" - namespace = kubernetes_namespace.traefik_tunnel.metadata.0.name + namespace = kubernetes_namespace.traefik_tunnel.0.metadata.0.name labels = { "app.kubernetes.io/name" = "traefik-headless" } @@ -207,7 +217,8 @@ resource "kubernetes_service" "traefik_tunnel_headless" { } resource "kubectl_manifest" "traefik_tunnel_service_monitor" { - depends_on = [helm_release.traefik_tunnel] + count = var.edge_enabled && var.prometheus_enabled ? 1 : 0 + depends_on = [null_resource.daemons, helm_release.traefik_tunnel] yaml_body = yamlencode({ apiVersion = "monitoring.coreos.com/v1" @@ -215,7 +226,7 @@ resource "kubectl_manifest" "traefik_tunnel_service_monitor" { metadata = { name = "traefik-service-monitor" - namespace = kubernetes_namespace.traefik_tunnel.metadata.0.name + namespace = kubernetes_namespace.traefik_tunnel.0.metadata.0.name } spec = { @@ -235,18 +246,18 @@ resource "kubectl_manifest" "traefik_tunnel_service_monitor" { } data "kubernetes_service" "traefik_tunnel" { + count = var.edge_enabled ? 1 : 0 depends_on = [helm_release.traefik_tunnel] metadata { name = "traefik-tunnel" - namespace = kubernetes_namespace.traefik_tunnel.metadata.0.name + namespace = kubernetes_namespace.traefik_tunnel.0.metadata.0.name } } resource "kubectl_manifest" "traefik_nomad_router" { depends_on = [helm_release.traefik_tunnel] - - for_each = local.tunnel_services + for_each = var.edge_enabled ? 
local.tunnel_services : {} yaml_body = yamlencode({ apiVersion = "traefik.io/v1alpha1" diff --git a/infra/tf/k8s_infra/vars.tf b/infra/tf/k8s_infra/vars.tf index 17a8821c84..5fb7b80fdd 100644 --- a/infra/tf/k8s_infra/vars.tf +++ b/infra/tf/k8s_infra/vars.tf @@ -59,13 +59,29 @@ variable "authenticate_all_docker_hub_pulls" { type = bool } +# MARK: Nomad +variable "edge_enabled" { + type = bool +} + +# MARK: NSFW API +variable "nsfw_api_enabled" { + type = bool +} + # MARK: Imagor +variable "imagor_enabled" { + type = bool +} + variable "imagor_presets" { type = any + default = {} } variable "imagor_cors_allowed_origins" { type = list(string) + default = [] } # MARK: CockroachDB @@ -76,6 +92,11 @@ variable "cockroachdb_provider" { # MARK: ClickHouse variable "clickhouse_provider" { type = string + nullable = true +} + +variable "clickhouse_enabled" { + type = bool } # MARK: Redis @@ -89,7 +110,7 @@ variable "redis_provider" { variable "redis_dbs" { type = map(object({ - persistent = bool + persistent = bool })) } @@ -111,6 +132,10 @@ variable "limit_resources" { type = bool } +variable "k8s_dashboard_enabled" { + type = bool +} + # MARK: S3 variable "s3_default_provider" { type = string @@ -132,3 +157,8 @@ variable "s3_buckets" { variable "cdn_cache_size_gb" { type = number } + +# MARK: Prometheus +variable "prometheus_enabled" { + type = bool +} diff --git a/infra/tf/k8s_infra/vector.tf b/infra/tf/k8s_infra/vector.tf index 77bacb9c3c..dd0d7b4587 100644 --- a/infra/tf/k8s_infra/vector.tf +++ b/infra/tf/k8s_infra/vector.tf @@ -1,4 +1,6 @@ resource "kubernetes_namespace" "vector" { + count = var.prometheus_enabled ? 1 : 0 + metadata { name = "vector" } diff --git a/infra/tf/tls/cloudflare.tf b/infra/tf/tls/cloudflare.tf index 7a12e956d2..c36cd02ac3 100644 --- a/infra/tf/tls/cloudflare.tf +++ b/infra/tf/tls/cloudflare.tf @@ -66,7 +66,8 @@ resource "cloudflare_origin_ca_certificate" "rivet_gg" { # Must be created in every namespace it is used in resource "kubernetes_secret" "ingress_tls_cert" { for_each = toset(flatten([ - ["traefik", "imagor", "rivet-service"], + ["traefik", "rivet-service"], + var.imagor_enabled ? ["imagor"] : [], local.has_minio ? ["minio"] : [] ])) @@ -84,7 +85,10 @@ resource "kubernetes_secret" "ingress_tls_cert" { } resource "kubernetes_secret" "ingress_tls_ca_cert" { - for_each = toset(["traefik", "imagor", "rivet-service"]) + for_each = toset(flatten([ + ["traefik", "rivet-service"], + var.imagor_enabled ? 
["imagor"] : [] + ])) metadata { name = "ingress-tls-cloudflare-ca-cert" diff --git a/infra/tf/tls/vars.tf b/infra/tf/tls/vars.tf index 543eb62f1a..7c9e6ea9cd 100644 --- a/infra/tf/tls/vars.tf +++ b/infra/tf/tls/vars.tf @@ -35,3 +35,7 @@ variable "s3_providers" { })) } +# MARK: Imagor +variable "imagor_enabled" { + type = bool +} diff --git a/infra/tf/vector/vars.tf b/infra/tf/vector/vars.tf index 7d033488dc..e184e4910f 100644 --- a/infra/tf/vector/vars.tf +++ b/infra/tf/vector/vars.tf @@ -5,6 +5,7 @@ variable "namespace" { # MARK: ClickHouse variable "clickhouse_provider" { type = string + nullable = true } variable "clickhouse_host" { diff --git a/infra/tf/vector/vector.tf b/infra/tf/vector/vector.tf index d966da848c..fbe71dcd36 100644 --- a/infra/tf/vector/vector.tf +++ b/infra/tf/vector/vector.tf @@ -7,7 +7,7 @@ locals { } }) - clickhouse_k8s = var.clickhouse_provider == "kubernetes" + clickhouse_k8s = var.clickhouse_enabled && var.clickhouse_provider == "kubernetes" } resource "kubernetes_priority_class" "vector_priority" { diff --git a/lib/bolt/config/src/ns.rs b/lib/bolt/config/src/ns.rs index f3d1afc9c0..9a7fe6c0d1 100644 --- a/lib/bolt/config/src/ns.rs +++ b/lib/bolt/config/src/ns.rs @@ -38,10 +38,12 @@ pub struct Namespace { #[serde(default)] pub cockroachdb: CockroachDB, #[serde(default)] - pub clickhouse: ClickHouse, + pub clickhouse: Option, #[serde(default)] pub traefik: Traefik, #[serde(default)] + pub prometheus: Option, + #[serde(default)] pub rust: Rust, #[serde(default)] pub rivet: Rivet, @@ -377,6 +379,8 @@ pub struct Kubernetes { pub provider: KubernetesProvider, #[serde(default)] pub health_checks: Option, + #[serde(default)] + pub dashboard_enabled: bool, } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -501,6 +505,10 @@ impl Default for Traefik { } } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct Prometheus {} + #[derive(Serialize, Deserialize, Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct Rust { @@ -638,7 +646,13 @@ pub struct Profanity { #[derive(Serialize, Deserialize, Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct Upload { - pub nsfw_error_verbose: bool, + pub nsfw_check: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct NsfwCheck { + pub error_verbose: bool, } #[derive(Serialize, Deserialize, Clone, Debug, Default)] @@ -663,14 +677,22 @@ pub enum DynamicServersBuildDeliveryMethod { #[serde(deny_unknown_fields)] pub struct Cdn { pub cache_size_gb: usize, + pub image_resizing: Option, } impl Default for Cdn { fn default() -> Self { - Cdn { cache_size_gb: 10 } + Cdn { + cache_size_gb: 10, + image_resizing: None, + } } } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct ImageResizing {} + #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub struct RivetBilling { diff --git a/lib/bolt/core/src/context/service.rs b/lib/bolt/core/src/context/service.rs index 8432b4aa66..2ef3934e21 100644 --- a/lib/bolt/core/src/context/service.rs +++ b/lib/bolt/core/src/context/service.rs @@ -901,7 +901,7 @@ impl ServiceContextData { self.config().cockroachdb.min_connections.to_string(), )); - if self.depends_on_prometheus_api() { + if project_ctx.ns().prometheus.is_some() && self.depends_on_prometheus_api() { env.push(( format!("PROMETHEUS_URL"), "http://prometheus-operated.prometheus.svc.cluster.local:9090".into(), @@ -1004,9 +1004,6 @@ impl ServiceContextData { if 
project_ctx.ns().rivet.profanity.filter_disable { env.push(("RIVET_PROFANITY_FILTER_DISABLE".into(), "1".into())); } - if project_ctx.ns().rivet.upload.nsfw_error_verbose { - env.push(("RIVET_UPLOAD_NSFW_ERROR_VERBOSE".into(), "1".into())); - } env.push(( "RIVET_DS_BUILD_DELIVERY_METHOD".into(), project_ctx @@ -1017,6 +1014,14 @@ impl ServiceContextData { .to_string(), )); + if let Some(nsfw_check) = &project_ctx.ns().rivet.upload.nsfw_check { + env.push(("RIVET_UPLOAD_NSFW_CHECK_ENABLED".into(), "1".into())); + + if nsfw_check.error_verbose { + env.push(("RIVET_UPLOAD_NSFW_ERROR_VERBOSE".into(), "1".into())); + } + } + // Sort env by keys so it's always in the same order env.sort_by_cached_key(|x| x.0.clone()); @@ -1160,17 +1165,21 @@ impl ServiceContextData { // ClickHouse if self.depends_on_clickhouse() { - let clickhouse_data = terraform::output::read_clickhouse(&project_ctx).await; - let username = "chirp"; - let password = project_ctx - .read_secret(&["clickhouse", "users", username, "password"]) - .await?; - let uri = format!( - "https://{}:{}@{}:{}", - username, password, *clickhouse_data.host, *clickhouse_data.port_https - ); + if project_ctx.ns().clickhouse.is_some() { + let clickhouse_data = terraform::output::read_clickhouse(&project_ctx).await; + let username = "chirp"; + let password = project_ctx + .read_secret(&["clickhouse", "users", username, "password"]) + .await?; + let uri = format!( + "https://{}:{}@{}:{}", + username, password, *clickhouse_data.host, *clickhouse_data.port_https + ); - env.push(("CLICKHOUSE_URL".into(), uri)); + env.push(("CLICKHOUSE_URL".into(), uri)); + } else { + env.push(("CLICKHOUSE_DISABLED".into(), "1".into())); + } } // Expose S3 endpoints to services that need them diff --git a/lib/bolt/core/src/dep/k8s/gen.rs b/lib/bolt/core/src/dep/k8s/gen.rs index 7acd8ab9e0..5182bb5d4c 100644 --- a/lib/bolt/core/src/dep/k8s/gen.rs +++ b/lib/bolt/core/src/dep/k8s/gen.rs @@ -594,24 +594,26 @@ pub async fn gen_svc(exec_ctx: &ExecServiceContext) -> Vec { })); // Monitor the service - specs.push(json!({ - "apiVersion": "monitoring.coreos.com/v1", - "kind": "ServiceMonitor", - "metadata": { - "name": service_name, - "namespace": "rivet-service" - }, - "spec": { - "selector": { - "matchLabels": { - "app.kubernetes.io/name": service_name - }, + if project_ctx.ns().prometheus.is_some() { + specs.push(json!({ + "apiVersion": "monitoring.coreos.com/v1", + "kind": "ServiceMonitor", + "metadata": { + "name": service_name, + "namespace": "rivet-service" }, - "endpoints": [ - { "port": "metrics" } - ], - } - })); + "spec": { + "selector": { + "matchLabels": { + "app.kubernetes.io/name": service_name + }, + }, + "endpoints": [ + { "port": "metrics" } + ], + } + })); + } // Build ingress router if matches!(run_context, RunContext::Service { .. 
}) { @@ -752,29 +754,30 @@ async fn build_volumes( } // Add ClickHouse CA - match project_ctx.ns().clickhouse.provider { - config::ns::ClickHouseProvider::Kubernetes {} => { - volumes.push(json!({ - "name": "clickhouse-ca", - "configMap": { + if let Some(clickhouse) = &project_ctx.ns().clickhouse { + match &clickhouse.provider { + config::ns::ClickHouseProvider::Kubernetes {} => { + volumes.push(json!({ "name": "clickhouse-ca", - "defaultMode": 420, - "items": [ - { - "key": "ca.crt", - "path": "clickhouse-ca.crt" - } - ] - } - })); - volume_mounts.push(json!({ - "name": "clickhouse-ca", - "mountPath": "/usr/local/share/ca-certificates/clickhouse-ca.crt", - "subPath": "clickhouse-ca.crt" - })); - } - config::ns::ClickHouseProvider::Managed { .. } => { + "configMap": { + "name": "clickhouse-ca", + "defaultMode": 420, + "items": [ + { + "key": "ca.crt", + "path": "clickhouse-ca.crt" + } + ] + } + })); + volume_mounts.push(json!({ + "name": "clickhouse-ca", + "mountPath": "/usr/local/share/ca-certificates/clickhouse-ca.crt", + "subPath": "clickhouse-ca.crt" + })); + } // Uses publicly signed cert + config::ns::ClickHouseProvider::Managed { .. } => {} } } diff --git a/lib/bolt/core/src/dep/terraform/gen.rs b/lib/bolt/core/src/dep/terraform/gen.rs index 68b98cd096..d7f32d834c 100644 --- a/lib/bolt/core/src/dep/terraform/gen.rs +++ b/lib/bolt/core/src/dep/terraform/gen.rs @@ -41,7 +41,14 @@ pub async fn project(ctx: &ProjectContext) { pub async fn gen_bolt_tf(ctx: &ProjectContext, plan_id: &str) -> Result<()> { // Configure the backend let backend = match ctx.ns().terraform.backend { - ns::TerraformBackend::Local {} => String::new(), + ns::TerraformBackend::Local {} => indoc!( + " + terraform { + backend \"local\" {} + } + " + ) + .to_string(), ns::TerraformBackend::Postgres {} => indoc!( " terraform { @@ -235,6 +242,9 @@ async fn vars(ctx: &ProjectContext) { let pools = super::pools::build_pools(&ctx).await.unwrap(); vars.insert("pools".into(), json!(&pools)); + // Edge nodes + vars.insert("edge_enabled".into(), json!(&!pools.is_empty())); + // Tunnels if let Some(ns::Dns { provider: Some(ns::DnsProvider::Cloudflare { access, .. 
}), @@ -369,32 +379,40 @@ async fn vars(ctx: &ProjectContext) { } // ClickHouse - match &config.clickhouse.provider { - ns::ClickHouseProvider::Kubernetes {} => { - vars.insert("clickhouse_provider".into(), json!("kubernetes")); - } - ns::ClickHouseProvider::Managed { tier } => { - vars.insert("clickhouse_provider".into(), json!("managed")); - match tier { - ns::ClickHouseManagedTier::Development {} => { - vars.insert("clickhouse_tier".into(), json!("development")); - } - ns::ClickHouseManagedTier::Production { - min_total_memory_gb, - max_total_memory_gb, - } => { - vars.insert("clickhouse_tier".into(), json!("production")); - vars.insert( - "clickhouse_min_total_memory_gb".into(), - json!(min_total_memory_gb), - ); - vars.insert( - "clickhouse_max_total_memory_gb".into(), - json!(max_total_memory_gb), - ); + vars.insert( + "clickhouse_enabled".into(), + json!(config.clickhouse.is_some()), + ); + if let Some(clickhouse) = &config.clickhouse { + match &clickhouse.provider { + ns::ClickHouseProvider::Kubernetes {} => { + vars.insert("clickhouse_provider".into(), json!("kubernetes")); + } + ns::ClickHouseProvider::Managed { tier } => { + vars.insert("clickhouse_provider".into(), json!("managed")); + match tier { + ns::ClickHouseManagedTier::Development {} => { + vars.insert("clickhouse_tier".into(), json!("development")); + } + ns::ClickHouseManagedTier::Production { + min_total_memory_gb, + max_total_memory_gb, + } => { + vars.insert("clickhouse_tier".into(), json!("production")); + vars.insert( + "clickhouse_min_total_memory_gb".into(), + json!(min_total_memory_gb), + ); + vars.insert( + "clickhouse_max_total_memory_gb".into(), + json!(max_total_memory_gb), + ); + } } } } + } else { + vars.insert("clickhouse_provider".into(), json!(null)); } if dep::terraform::cli::has_applied(ctx, "clickhouse_k8s").await @@ -412,6 +430,12 @@ async fn vars(ctx: &ProjectContext) { ); } + // Prometheus + vars.insert( + "prometheus_enabled".into(), + json!(config.prometheus.is_some()), + ); + // Redis services { let mut redis_dbs = HashMap::new(); @@ -564,19 +588,33 @@ async fn vars(ctx: &ProjectContext) { vars.insert("better_uptime".into(), json!(better_uptime.to_owned())); } - // Media presets + // Imagor vars.insert( - "imagor_presets".into(), - json!(media_resize::build_presets(ctx.ns_id()) - .into_iter() - .map(media_resize::ResizePresetSerialize::from) - .collect::>()), + "imagor_enabled".into(), + json!(config.rivet.cdn.image_resizing.is_some()), ); + + // NSFW API vars.insert( - "imagor_cors_allowed_origins".into(), - json!(ctx.imagor_cors_allowed_origins()), + "nsfw_api_enabled".into(), + json!(config.rivet.upload.nsfw_check.is_some()), ); + // Media presets + if config.rivet.cdn.image_resizing.is_some() { + vars.insert( + "imagor_presets".into(), + json!(media_resize::build_presets(ctx.ns_id()) + .into_iter() + .map(media_resize::ResizePresetSerialize::from) + .collect::>()), + ); + vars.insert( + "imagor_cors_allowed_origins".into(), + json!(ctx.imagor_cors_allowed_origins()), + ); + } + vars.insert("kubeconfig_path".into(), json!(ctx.gen_kubeconfig_path())); vars.insert( "k8s_storage_class".into(), @@ -586,6 +624,10 @@ async fn vars(ctx: &ProjectContext) { }), ); vars.insert("limit_resources".into(), json!(ctx.limit_resources())); + vars.insert( + "k8s_dashboard_enabled".into(), + json!(config.kubernetes.dashboard_enabled), + ); vars.insert( "cdn_cache_size_gb".into(), diff --git a/lib/bolt/core/src/tasks/check.rs b/lib/bolt/core/src/tasks/check.rs index a735ca3055..3be46551b6 100644 --- 
a/lib/bolt/core/src/tasks/check.rs +++ b/lib/bolt/core/src/tasks/check.rs @@ -199,7 +199,6 @@ pub async fn check_config_sync(ctx: &ProjectContext) { ctx.config_local()._1password.as_ref(), ctx.ns().secrets._1password.as_ref(), ) else { - eprintln!(); rivet_term::status::warn( "Warning", format!( diff --git a/lib/bolt/core/src/tasks/infra/mod.rs b/lib/bolt/core/src/tasks/infra/mod.rs index c22f19eb1e..5ec1b2fc57 100644 --- a/lib/bolt/core/src/tasks/infra/mod.rs +++ b/lib/bolt/core/src/tasks/infra/mod.rs @@ -39,7 +39,7 @@ pub enum PlanStepKind { impl PlanStepKind { async fn execute(&self, ctx: ProjectContext, opts: &ExecutePlanOpts) -> Result<()> { // Generate the project before each step since things likely changed between steps - tasks::gen::generate_project(&ctx, false).await; + tasks::gen::generate_project(&ctx, true).await; match self { PlanStepKind::Terraform { plan_id, .. } => { @@ -200,35 +200,39 @@ pub fn build_plan( } // ClickHouse - match ctx.ns().clickhouse.provider { - ns::ClickHouseProvider::Kubernetes {} => { - plan.push(PlanStep { - name_id: "clickhouse-k8s", - kind: PlanStepKind::Terraform { - plan_id: "clickhouse_k8s".into(), - needs_destroy: false, - }, - }); - } - ns::ClickHouseProvider::Managed { .. } => { - plan.push(PlanStep { - name_id: "clickhouse-managed", - kind: PlanStepKind::Terraform { - plan_id: "clickhouse_managed".into(), - needs_destroy: true, - }, - }); + if let Some(clickhouse) = &ctx.ns().clickhouse { + match &clickhouse.provider { + ns::ClickHouseProvider::Kubernetes {} => { + plan.push(PlanStep { + name_id: "clickhouse-k8s", + kind: PlanStepKind::Terraform { + plan_id: "clickhouse_k8s".into(), + needs_destroy: false, + }, + }); + } + ns::ClickHouseProvider::Managed { .. } => { + plan.push(PlanStep { + name_id: "clickhouse-managed", + kind: PlanStepKind::Terraform { + plan_id: "clickhouse_managed".into(), + needs_destroy: true, + }, + }); + } } } // Vector - plan.push(PlanStep { - name_id: "vector", - kind: PlanStepKind::Terraform { - plan_id: "vector".into(), - needs_destroy: false, - }, - }); + if ctx.ns().prometheus.is_some() { + plan.push(PlanStep { + name_id: "vector", + kind: PlanStepKind::Terraform { + plan_id: "vector".into(), + needs_destroy: false, + }, + }); + } // Pools if ctx.ns().dns.is_some() { diff --git a/lib/bolt/core/src/tasks/migrate.rs b/lib/bolt/core/src/tasks/migrate.rs index 52749f10c1..a5e24256a7 100644 --- a/lib/bolt/core/src/tasks/migrate.rs +++ b/lib/bolt/core/src/tasks/migrate.rs @@ -70,7 +70,7 @@ pub async fn check_all(ctx: &ProjectContext) -> Result<()> { check(ctx, &services[..]).await } -pub async fn check(_ctx: &ProjectContext, services: &[ServiceContext]) -> Result<()> { +pub async fn check(ctx: &ProjectContext, services: &[ServiceContext]) -> Result<()> { // Spawn Cockroach test container let crdb_port = utils::pick_port(); let crdb_container_id = if services @@ -215,6 +215,14 @@ pub async fn check(_ctx: &ProjectContext, services: &[ServiceContext]) -> Result database_url } RuntimeKind::ClickHouse { .. } => { + if ctx.ns().clickhouse.is_none() { + rivet_term::status::warn( + "Warning", + format!("Clickhouse is disabled. Skipping {}", svc.name()), + ); + continue; + } + // Build URL let db_name = svc.clickhouse_db_name(); let database_url = @@ -290,6 +298,14 @@ pub async fn up(ctx: &ProjectContext, services: &[ServiceContext]) -> Result<()> }); } RuntimeKind::ClickHouse { .. } => { + if ctx.ns().clickhouse.is_none() { + rivet_term::status::warn( + "Warning", + format!("Clickhouse is disabled. 
Skipping {}", svc.name()), + ); + continue; + } + let db_name = svc.clickhouse_db_name(); let query = formatdoc!( @@ -385,7 +401,13 @@ pub async fn up(ctx: &ProjectContext, services: &[ServiceContext]) -> Result<()> eprintln!(); rivet_term::status::progress("Running migrations", ""); - let migrations = futures_util::stream::iter(services.iter()) + + let filtered_services = services.iter().filter(|svc| { + ctx.ns().clickhouse.is_some() + || !matches!(&svc.config().runtime, RuntimeKind::ClickHouse { .. }) + }); + + let migrations = futures_util::stream::iter(filtered_services) .map(|svc| { let conn = conn.clone(); diff --git a/lib/chirp/worker/src/test.rs b/lib/chirp/worker/src/test.rs index 010afad424..5eb311ef25 100644 --- a/lib/chirp/worker/src/test.rs +++ b/lib/chirp/worker/src/test.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use global_error::GlobalResult; use rivet_operation::OperationContext; use rivet_pools::prelude::*; use uuid::Uuid; @@ -79,7 +80,7 @@ impl TestCtx { self.op_ctx.redis_user_presence().await } - pub async fn clickhouse(&self) -> Result { + pub async fn clickhouse(&self) -> GlobalResult { self.op_ctx.clickhouse().await } } diff --git a/lib/connection/src/lib.rs b/lib/connection/src/lib.rs index aa8501ce54..442f514397 100644 --- a/lib/connection/src/lib.rs +++ b/lib/connection/src/lib.rs @@ -89,7 +89,7 @@ impl Connection { self.client.perf() } - pub async fn clickhouse(&self) -> Result { + pub async fn clickhouse(&self) -> GlobalResult { self.pools.clickhouse() } } diff --git a/lib/operation/core/src/lib.rs b/lib/operation/core/src/lib.rs index 3036b11e57..90bb750ca1 100644 --- a/lib/operation/core/src/lib.rs +++ b/lib/operation/core/src/lib.rs @@ -284,7 +284,7 @@ where self.conn.perf() } - pub async fn clickhouse(&self) -> Result { + pub async fn clickhouse(&self) -> GlobalResult { self.conn.clickhouse().await } } diff --git a/lib/pools/src/pools.rs b/lib/pools/src/pools.rs index 7b3a4cba9e..b78d10b412 100644 --- a/lib/pools/src/pools.rs +++ b/lib/pools/src/pools.rs @@ -1,3 +1,4 @@ +use global_error::{ensure_with, GlobalResult}; use std::{collections::HashMap, sync::Arc, time::Duration}; use tokio_util::sync::{CancellationToken, DropGuard}; @@ -70,8 +71,17 @@ impl PoolsInner { self.redis("ephemeral") } - pub fn clickhouse(&self) -> Result { - self.clickhouse.clone().ok_or(Error::MissingClickHousePool) + pub fn clickhouse(&self) -> GlobalResult { + ensure_with!( + std::env::var("CLICKHOUSE_DISABLED").is_err(), + FEATURE_DISABLED, + feature = "Clickhouse" + ); + + self.clickhouse + .clone() + .ok_or(Error::MissingClickHousePool) + .map_err(Into::into) } } diff --git a/svc/pkg/job-run/ops/metrics-log/src/lib.rs b/svc/pkg/job-run/ops/metrics-log/src/lib.rs index 35f454adf5..42373ea5cb 100644 --- a/svc/pkg/job-run/ops/metrics-log/src/lib.rs +++ b/svc/pkg/job-run/ops/metrics-log/src/lib.rs @@ -3,12 +3,6 @@ use reqwest::StatusCode; use rivet_operation::prelude::*; use serde::Deserialize; -#[derive(Debug, thiserror::Error)] -enum Error { - #[error("prometheus error: {0}")] - PrometheusError(String), -} - #[derive(Debug, Deserialize)] struct PrometheusResponse { data: PrometheusData, @@ -44,7 +38,12 @@ impl QueryTiming { async fn handle( ctx: OperationContext, ) -> GlobalResult { - let prometheus_url = std::env::var("PROMETHEUS_URL")?; + let Ok(prometheus_url) = util::env::var("PROMETHEUS_URL") else { + // Prometheus disabled + return Ok(job_run::metrics_log::Response { + metrics: Vec::new(), + }); + }; let mut metrics = Vec::new(); @@ -133,12 +132,13 @@ async fn 
handle_request( let status = res.status(); let text = res.text().await?; - return Err(Error::PrometheusError(format!( - "failed prometheus request: ({}) {}", - status, text - )) - .into()); + bail_with!( + ERROR, + error = format!("failed prometheus request ({}):\n{}", status, text) + ); } - Ok(unwrap!(res.json::().await?.data.result.first()).clone()) + let body = res.json::().await?; + + Ok(unwrap!(body.data.result.first()).clone()) } diff --git a/svc/pkg/nsfw/ops/image-score/src/lib.rs b/svc/pkg/nsfw/ops/image-score/src/lib.rs index 10971ba492..e78fbe190c 100644 --- a/svc/pkg/nsfw/ops/image-score/src/lib.rs +++ b/svc/pkg/nsfw/ops/image-score/src/lib.rs @@ -36,6 +36,20 @@ enum ScorePrediction { async fn handle( ctx: OperationContext, ) -> GlobalResult { + // NSFW API disabled, return default response + if util::env::var("RIVET_UPLOAD_NSFW_CHECK_ENABLED").is_err() { + return Ok(nsfw::image_score::Response { + scores: ctx + .image_urls + .iter() + .map(|url| nsfw::image_score::response::ImageScore { + url: url.clone(), + score: 0.0, + }) + .collect::>(), + }); + } + let images = ctx .image_urls .iter() diff --git a/svc/pkg/user/worker/tests/admin_set.rs b/svc/pkg/user/worker/tests/admin_set.rs index 671baa0c0d..9700bf089d 100644 --- a/svc/pkg/user/worker/tests/admin_set.rs +++ b/svc/pkg/user/worker/tests/admin_set.rs @@ -1,6 +1,33 @@ use chirp_worker::prelude::*; +use proto::backend::pkg::*; #[worker_test] -async fn empty(_ctx: TestCtx) { - todo!(); +async fn admin_set(ctx: TestCtx) { + let user_res = op!([ctx] faker_user { }).await.unwrap(); + let user_id = user_res.user_id.unwrap(); + + // Turn user into admin + msg!([ctx] user::msg::admin_set(user_id) -> user::msg::update { + user_id: Some(user_id), + }) + .await + .unwrap(); + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_user.users + WHERE + user_id = $1 AND + is_admin = true + ) + ", + user_id.as_uuid(), + ) + .await + .unwrap(); + + assert!(exists, "user not made into an admin"); } diff --git a/svc/pkg/user/worker/tests/profile_set.rs b/svc/pkg/user/worker/tests/profile_set.rs index 1d15a23c26..dd05eb066f 100644 --- a/svc/pkg/user/worker/tests/profile_set.rs +++ b/svc/pkg/user/worker/tests/profile_set.rs @@ -3,7 +3,7 @@ use proto::backend::pkg::*; #[worker_test] async fn empty(ctx: TestCtx) { - let user_res = op!([ctx] faker_user { }).await.expect("user create"); + let user_res = op!([ctx] faker_user { }).await.unwrap(); let user_id = user_res.user_id.unwrap(); let display_name = util::faker::display_name();