# Values to be used elsewhere in the Helm configuration

# Values in this file are the defaults. Override these values in other environments via
# `values.<some environment>.yaml` files.

name: <%= Tpt::Rails.app_name %>

# ODEs use "integration" as the deploy_env
deploy_env: integration
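# As a sketch (file name and values are illustrative, not part of this repo), a
# `values.staging.yaml` layered on top of these defaults — typically via Helm's `-f`/`--values`
# flag, where later files win — would only list the keys it changes, e.g.:
#
#   deploy_env: staging
#   annotations:
#     costEnv: staging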

# These are added to Kubernetes in various ways as labels/etc
annotations:
  owner: <%= config.fetch(:team_name) %>
  # tpt/costEnv can be [development, integration, staging, production, uncategorized]
  costEnv: integration
  # tpt/costCategory can be [core_domains, data, easel, infrastructure, kubernetes, marketplace, search, schools, uncategorized]
  costCategory: uncategorized

# Docker image configuration for the pod
image: &appImage
  name: <%= Tpt::Rails.app_name %>
  repository: teacherspayteachers/<%= Tpt::Rails.app_name %>
  tag: latest # most recently pushed
  pullPolicy: Always # always pull, don't use a cached version

# Used by a Helm hook that sets up database schema and seeds.
# We don't override DB_MAX_EXECUTION_TIME here because it only applies to `select` queries and we
# plan to write migrations such that any `select` queries are fast.
# We specify the READ timeout because when testing that was the one that seemed to be used for
# `alter table` commands.
dbPrepare:
  command: "DB_READ_TIMEOUT=600 DB_WRITE_TIMEOUT=600 bin/rails db:prepare"
  enabled: true

revisionHistoryLimit: 5

# CI changes this before deploying
buildID: 1

imagePullSecrets:
  name: "tptreadonly-docker"

# Deployment params
rollingUpdate:
  maxUnavailable: 25% # X% of pods are allowed to be offline at once as we deploy
  maxSurge: 0 # Setting this above 0 will allow having extra pods online during deploy
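# Illustrative arithmetic (not a setting): with 4 replicas, maxUnavailable: 25% lets the rollout
# take at most 1 pod offline at a time, and maxSurge: 0 means no extra pod is created to
# compensate, so capacity briefly drops to 3 pods while each new pod comes up.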

mysql:
  enabled: true
  imageTag: 5.7.12
  podAnnotations:
    # istio is a service mesh. the istio sidecar interferes with how other pods connect to the MySQL
    # pod.
    sidecar.istio.io/inject: "false"
  # Leave "mysqlDatabase" commented out.
  # Specifying the db name causes our mysql chart to proactively create the database. This then
  # causes db:prepare to assume it needs to *run* all migrations, instead of creating the db +
  # bootstrapping the schema from db/schema.rb. The latter is the correct way to set up a new DB.
  # mysqlDatabase: &mysqlDatabase "<%= Tpt::Rails.app_name %>"
  # mysqlRootPassword is read by our ODE mysql chart
  mysqlRootPassword: &mysqlRootPassword "123456"
  persistence:
    enabled: false

redis:
  enabled: false
  # usePassword: false
  # cluster:
  #   enabled: false
  # persistence:
  #   enabled: true
  #   size: 10Gi

## Configure the service
## ref: kubernetes.io/docs/user-guide/services/
service:
  ## Specify a service type
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
  type: ClusterIP
  port: 80

## Configure resource requests and limits
## ref: kubernetes.io/docs/user-guide/compute-resources/
##
resources: &resources
  # requests is guaranteed. kubernetes will only put your pod on a node where it can guarantee these
  # amounts.
  requests: &requests
    memory: 512Mi
    cpu: 500m
  # limits is the point at which your pod gets killed for using too many resources. Normally these
  # should be set the same as "requests", but if you have a unique workload that requires burstable
  # limits then consult with CloudOps.
  limits: *requests
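# If a burstable configuration is ever approved by CloudOps, the override would look roughly like
# this (numbers are illustrative only):
#
#   limits:
#     memory: 1Gi
#     cpu: "1"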

app:
  # this configures your service to be available at <%= Tpt::Rails.app_name %>.<ingress_dns_name>
  ingress_dns_name: "ode.tptpm.info"

#
# Environment variables for the <%= Tpt::Rails.app_name %> service
#
# These values are used in templates/_env.yaml, with some of them (e.g. `valueFrom`, or any `{{…}}`
# templates) getting converted into other values.
# See also templates/_env.yaml for additional environment variables.
environment:
  APP_PORT: '3000'
  APP_NAME: <%= Tpt::Rails.app_name %>
  APP_ENV: integration
  DD_TRACE_DEBUG: '1'
  RAILS_ENV: 'production'
  RAILS_LOG_TO_STDOUT: '1'
  RAILS_SERVE_STATIC_FILES: '1'
  CLUSTER_SELECTOR: 'app={{ template "<%= Tpt::Rails.app_name %>.fullname" . }}'
  RACK_TIMEOUT_SERVICE_TIMEOUT: "20"
  RACK_TIMEOUT_TERM_ON_TIMEOUT: "1"

  APP_ENV_NAME: |-
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  KUBE_POD: |-
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  KUBE_NODE: |-
    valueFrom:
      fieldRef:
        fieldPath: spec.nodeName
  KUBE_POD_IP: |-
    valueFrom:
      fieldRef:
        fieldPath: status.podIP
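  # As a rough illustration (the exact output depends on templates/_env.yaml), a plain string key
  # is expected to render into the pod spec as
  #   - name: APP_PORT
  #     value: "3000"
  # while a |- block like APP_ENV_NAME above should pass its YAML through, e.g.
  #   - name: APP_ENV_NAME
  #     valueFrom:
  #       fieldRef:
  #         fieldPath: metadata.namespace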

  DATABASE_HOST: '{{ template "<%= Tpt::Rails.app_name %>.fullname" . }}-mysql'
  DATABASE_REPLICA_HOST: '$(DATABASE_HOST)'
  DATABASE_NAME: '<%= Tpt::Rails.app_name %>'
  DATABASE_PASSWORD: *mysqlRootPassword
  DATABASE_USER: root
  # Using "DB" prefix here because kube env vars can only reference other env vars that come before
  # them alphabetically.
  DB_URL:         "mysql2://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATABASE_HOST)/$(DATABASE_NAME)"
  DB_REPLICA_URL: "mysql2://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATABASE_REPLICA_HOST)/$(DATABASE_NAME)"
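  # With the defaults above, Kubernetes expands the $(VAR) references at container start, so
  # DB_URL resolves to roughly (illustrative; <fullname> stands for the rendered fullname template):
  #   mysql2://root:123456@<fullname>-mysql/<%= Tpt::Rails.app_name %>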

  DATADOG_TRACE_HOST: |-
    valueFrom:
      fieldRef:
        fieldPath: status.hostIP
  DATADOG_TRACE_PORT: "8126"
  DATADOG_TRACE_URL: "datadog://$(DATADOG_TRACE_HOST):$(DATADOG_TRACE_PORT)"

  DOGSTATSD_HOST: |-
    valueFrom:
      fieldRef:
        fieldPath: status.hostIP
  DOGSTATSD_PORT: "8125"
  DOGSTATSD_URL: "statsd://$(DOGSTATSD_HOST):$(DOGSTATSD_PORT)"

copy-secrets:
  enabled: true
  secrets:
    - <%= config.fetch(:secrets_name) %>

secrets:
  - name: SECRET_KEY_BASE
    valueFrom:
      secretKeyRef:
        name: <%= config.fetch(:secrets_name) %>
        key: <%= config.fetch(:secrets_key) %>

# liveness: Whether the pod is up.
# After `initialDelaySeconds`, if this is failing, Kube will crash+restart the pod.
livenessProbe:
  initialDelaySeconds: 120
  timeoutSeconds: 2
  periodSeconds: 15
  failureThreshold: 3
  successThreshold: 1

# readiness: Whether the pod is ready to receive traffic.
# If this fails, Kube will take the pod out of rotation until the pod becomes ready again.
readinessProbe:
  initialDelaySeconds: 5
  timeoutSeconds: 2
  periodSeconds: 15 # time between probes
  failureThreshold: 3 # consecutive failures required before the probe counts as failed
  successThreshold: 1 # consecutive successes required to mark the pod ready again
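# Illustrative timing (not a setting): with periodSeconds: 15 and failureThreshold: 3, a pod that
# stops responding is pulled from rotation after roughly 3 * 15 = 45 seconds of failed probes, and
# returns after a single successful probe (successThreshold: 1).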

# tests:
#   apiFunctional:
#     postman:
#       collection: 5817358-93d711ed-3181-480d-bba5-ab68b0a29eae
#       environment: 5817358-dccd7905-050c-4d93-a9bf-743eae208f46
#       folder: Working

autoscaler:
  minimumPods: 1
  maximumPods: 1
  targetCPUUtilizationPercentage: 70 # no effect if minimumPods == maximumPods
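# Sketch of the behavior once the pod counts differ (values are illustrative, and this assumes the
# chart renders these settings into a HorizontalPodAutoscaler): with minimumPods: 2 and
# maximumPods: 6, pods are added when average CPU usage exceeds 70% of the 500m CPU request
# defined in `resources` above, and removed again as load drops.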

# Database migration support.
db-migrate:
  enabled: false
  migration:
    command:
      - bin/rails
      - db:migrate
    image: *appImage
    env:
      - name: APP_ENV
        value: integration
      - name: APP_NAME
        value: <%= Tpt::Rails.app_name %>
      # The db-migrate chart sets the following environment variables, which we can use to construct
      # a Rails-friendly DB_URL.
      # - PROXY_HOST
      # - PROXY_PASSWORD
      # - PROXY_USER
      - name: DB_URL
        value: "mysql2://$(PROXY_USER):$(PROXY_PASSWORD)@$(PROXY_HOST):3306/<%= Tpt::Rails.app_name %>"
      # We don't override DB_MAX_EXECUTION_TIME here because it only applies to `select` queries and we
      # plan to write migrations such that any `select` queries are fast.
      # We specify the READ timeout because when testing that was the one that seemed to be used for
      # `alter table` commands.
      - name: DB_READ_TIMEOUT
        value: "604800"
      - name: DB_WRITE_TIMEOUT
        value: "604800"
      - name: RAILS_ENV
        value: production
      - name: SECRET_KEY_BASE
        valueFrom:
          secretKeyRef:
            name: <%= config.fetch(:secrets_name) %>
            key: <%= config.fetch(:secrets_key) %>
    resources: *resources
  proxy-osc:
    logLevel: debug
    target:
      # When host is not set, proxy-osc defaults to [release-name]-mysql
      # host:
      user: root
      password: *mysqlRootPassword
  schedule: "*/1 * * * *"