#! THIS FILE USES THE YTT (https://github.com/k14s/ytt/) FORMAT
#@ load("@ytt:data", "data")
#@ load("@ytt:assert", "assert")
#@ load("@ytt:struct", "struct")

#@ if hasattr(data.values.debug,"keep_compose_file_version_property") and data.values.debug.keep_compose_file_version_property == True:
version: '3.7'
#@ end
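#! A hedged sketch of how this template is typically rendered (the file names here are
#! illustrative only; use whatever names your checkout actually has, e.g. a values.yml next to this file):
#!
#!   ytt -f docker-compose.yml -f values.yml > docker-compose.rendered.yml
#!   docker compose -f docker-compose.rendered.yml up -d
#!
#! and an illustrative (not exhaustive) excerpt of the data values this template reads:
#!
#!   persistencepath: /srv/fxa
#!   fxa_version: latest
#!   domain:
#!     name: example.com
#!     content: www
#!     auth: auth
#!     oauth: oauth
#!     profile: profile
#!     sync: sync
#!     graphql: graphql
#!   secrets:
#!     authsecret: <random string>
#!     flowidkey: <random string>
#!     profileserver_authsecret_bearertoken: <random string>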
x-logging:
  &default-logging
  options:
    max-size: '5m'
    max-file: '3'
  driver: json-file
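#! The anchor above is reused by services below as `logging: *default-logging`; for illustration only,
#! that reference expands per service to the equivalent of:
#!
#!   logging:
#!     options:
#!       max-size: '5m'
#!       max-file: '3'
#!     driver: json-file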
services:
  #! #! #! #! #! #! #! #! #! #! #! #! #! #! #! dependencies #! #! #! #! #! #! #! #! #! #! #! #! #! #!
  mysqldb: #! used by fxa-profile-server fxa-db-migrations pushbox syncserver
    #! [TODO]: set a password
    #! since patcher 114->115.sql has `JSON` and key too long issue #10526 comment 1
    #! so we use mysql 5.7 (at least 5.7.8+ for `JSON`)
    #! so is a db upgrade necessary? via docker-compose exec mysqldb mysql_upgrade
    #! from mysql 5.7 to 8.0.16+ mysql_upgrade is not needed.
    image: mysql:8.4
    environment:
      - MYSQL_ALLOW_EMPTY_PASSWORD=true
      - MYSQL_ROOT_HOST=%
    expose:
      - "3306"
    volumes:
      - #@ "{persistencepath}/mysql_data:/var/lib/mysql/".format(persistencepath=data.values.persistencepath)
      #@ if data.values.option.sync.neverexpire == True:
      - ./_init/mysql/init_bso_ttl.sql:/tmp/common_init.sql:ro
      #@ else:
      - ./_init/mysql/init.sql:/tmp/common_init.sql:ro
      #@ end
    #@ if data.values.debug.deps_logs == False:
    logging:
      driver: "none"
    #@ else:
    logging: *default-logging
    #@ end
    restart: unless-stopped
    command:
      - "--event-scheduler=ON"
      - "--init-file=/tmp/common_init.sql"
      - "--authentication_policy=mysql_native_password"
      - "--mysql-native-password=ON"
  redis: #! used by fxa-profile-server (has prefix key), fxa-content-server (seems not used?), fxa-auth-server (has prefix key)
    image: redis:6.0-alpine
    expose:
      - "6379"
    #@ if data.values.debug.deps_logs == False:
    logging:
      driver: "none"
    #@ else:
    logging: *default-logging
    #@ end
    volumes:
      - type: tmpfs
        target: /data
    restart: unless-stopped
  #! using docker-compose-wait and service_completed_successfully to build wait chain.
  waitforinfra:
    image: ghcr.io/ufoscout/docker-compose-wait:2.12.1
    depends_on:
      - mysqldb
      - redis
    command: /wait
    environment:
      - WAIT_HOSTS=redis:6379,mysqldb:3306
      - WAIT_TIMEOUT=120

  waitfordeps:
    image: ghcr.io/ufoscout/docker-compose-wait:2.12.1
    depends_on:
      - fxa-auth-server
      - fxa-content-server
    command: /wait
    environment:
      - WAIT_HOSTS=fxa-auth-server:9000,fxa-content-server:3030
      - WAIT_TIMEOUT=120
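  #! Illustration only: any additional service can join the wait chain the same way the real
  #! services further down do, by depending on the completed wait containers (service name below is hypothetical):
  #!
  #!   my-extra-service:
  #!     depends_on:
  #!       waitforinfra:
  #!         condition: service_completed_successfully
  #!       waitfordeps:
  #!         condition: service_completed_successfully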
  #! #! #! #! #! #! internal dependencies
  browseridverifier.local: #! used by syncserver and fxa-auth-server's oauth-server
    #! uses fxa-auth-server (issuer)
    #! a tld-like name is required for fxa-auth-server config's url type check
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/browserid-verifier"
    expose:
      - "5050"
    environment:
      - PORT=5050
      - IP_ADDRESS=0.0.0.0
      - FORCE_INSECURE_LOOKUP_OVER_HTTP=false
      - HTTP_TIMEOUT=60
    restart: unless-stopped
    logging: *default-logging
    #! to override "npm" entrypoint and "start" command
    entrypoint: "node"
    command: "server.js"
  syncserver: #! used by fxa-content-server
    #! uses browserid-verifier / mysqldb
    #@ if hasattr(data.values.debug,"use_syncserver3") and data.values.debug.use_syncserver3 == True:
    image: ghcr.io/jackyzy823/syncserver3:03b9cfdf03e4ee6baf6f8952b7bb2d6af4e376a5
    #@ else:
    #! since the introduction of SYNCSERVER_OAUTH_VERIFIER , a forced docker image update is required!
    image: mozilla/syncserver@sha256:016162bf39d8486d5710b4ae5bcaaebd8bd60b55db6cf468b8f720b526bd68b7
    #@ end
    expose:
      - "5000"
    environment:
      #! [TODO][RETHINK] what should we wait for: nginx:443 or nginx:80 or the nginx_listener address (what if 0.0.0.0), or something further outside?? For now we choose 443/80 inside the docker network
      #! for audience display only . must be public , no visit
      - #@ "SYNCSERVER_PUBLIC_URL=https://{sync}.{domain_name}".format(sync=data.values.domain.sync, domain_name=data.values.domain.name)
      - SYNCSERVER_BROWSERID_VERIFIER=http://browseridverifier.local:5050
      #! auto generated via syncserver / ?? used for syncserver <-> tokenserver??
      #! secret is generated in code. see syncserver/_init__.py:57
      #! - SYNCSERVER_SECRET=${FXAHOST_SYNCSERVER_SECRET:=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 20`}
      - SYNCSERVER_SQLURI=mysql+pymysql://root@mysqldb/sync #! databasename: sync
      #! - SYNCSERVER_SQLURI=sqlite:////tmp/syncserver.db
      - SYNCSERVER_BATCH_UPLOAD_ENABLED=true
      - SYNCSERVER_FORCE_WSGI_ENVIRON=true #! because we have nginx proxying it
      - PORT=5000
      #! SYNCSERVER_IDENTITY_PROVIDER needs the content server, SYNCSERVER_OAUTH_VERIFIER needs the oauth-server
      #! prefer to be internal , has visit
      #! replace SYNCSERVER_IDENTITY_PROVIDER with SYNCSERVER_OAUTH_VERIFIER see https://github.com/mozilla-services/syncserver/pull/209
      #! and https://github.com/mozilla/fxa/pull/4689
      #! for fenix and firefox (in the future, to replace browserid) to use sync with oauth (verified by the self-hosted fxa)
      #! update : however it still needs a visit to the oauth server on init. see: https://github.com/mozilla-services/tokenserver/blob/942241b97c44defa77898e2432775db62a5da69e/tokenserver/verifiers.py#L207
      #! make this internal , requested at startup for the oauth url in /fxa-client-configuration
      #! [!!NOTE!!] since syncserver will visit contentserver/fxa-client-configuration's oauthserver
      #! a fully self-signed setup may encounter errors!! since oauth is verified via the public API
      #! so we decide to use SYNCSERVER_OAUTH_VERIFIER for the internal url?
      - SYNCSERVER_OAUTH_VERIFIER=http://fxa-auth-server:9000
      - SYNCSERVER_IDENTITY_PROVIDER=http://fxa-content-server:3030
    depends_on:
      mysqldb:
        condition: service_started
      fxa-auth-server:
        condition: service_started
      fxa-content-server:
        condition: service_started
      waitforinfra:
        condition: service_completed_successfully
      waitfordeps:
        condition: service_completed_successfully
    restart: unless-stopped
    logging: *default-logging
  #! #! #! #! #! #! #! #! #! #! #! #! #! #! #! #! #! #!
  fxa-db-migrations:
    #! use db-migrations for profile / oauth / auth , so they do not all run migrations at startup at the same time and take a lock
    #! TODO pruning is missing
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    working_dir: "/fxa/packages/db-migrations"
    depends_on:
      #! oops, library/mysql:5.7 does not have a healthcheck but mysql/mysql-server:5.7 has one
      #! we can add a healthcheck for library/mysql:5.7 by using mysql/mysql-server:5.7 's script
      mysqldb:
        condition: service_started
      waitforinfra:
        condition: service_completed_successfully
    command: sh -c "node ./bin/patcher.mjs"
    environment:
      - AUTH_MYSQL_HOST=mysqldb
      - PROFILE_MYSQL_HOST=mysqldb
      - OAUTH_MYSQL_HOST=mysqldb
      - PUSHBOX_MYSQL_HOST=mysqldb
    restart: "no"
    logging: *default-logging
#@ if data.values.mail.type == "localhelper":
|
|
#! #! for non-send-mail-just-check-code-in-log
|
|
fxa-auth-local-mail-helper:
|
|
#! [TODO] mails in const users ={} should be deleted
|
|
image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
|
|
stop_grace_period: 1s
|
|
working_dir: "/fxa/packages/fxa-auth-server/dist/packages/fxa-auth-server"
|
|
expose:
|
|
- "9999" #! smtp_port do not affect this
|
|
#! so you can curl http://127.0.0.1:9001/mail/<your-mail-addr-before-at-mark> <- connection will not close until get one
|
|
#! or curl -X DELETE http://127.0.0.1:9001/mail/<your-mail-addr-before-at-mark>
|
|
#@ if data.values.mail.localhelper.web:
|
|
ports:
|
|
- #@ "{}:9001".format(data.values.mail.localhelper.web)
|
|
#@ end
|
|
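    #! Illustration only (assumes mail.localhelper.web is set to something like "127.0.0.1:9001" in the data values):
    #!   curl http://127.0.0.1:9001/mail/alice            #! blocks until a mail for alice@<any domain> arrives, then prints it
    #!   curl -X DELETE http://127.0.0.1:9001/mail/alice  #! clears stored mail for that local part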
    environment:
      - NODE_ENV=dev #! #! to avoid missing pushbox.key etc. in fxa-auth-server\config\index.js line 1739
      - SMTP_PORT=9999 #! #! port on which the smtp server receives mail!!!
      #! check mail via API
      - MAILER_HOST=0.0.0.0
      - MAILER_PORT=9001 #! needs ports to the outside! for GET /mail/your-mail-address!
      - REDIS_HOST=redis
    #! because after v1.173.0 the docker image base changed and npm will always fail due to the lack of python for node-gyp to build the unixgram package.
    command: sh -c "mkdir -p test && cd test && cp /fxa/packages/fxa-auth-server/test/mail_helper.js . && npm init --yes && npm i mailparser@0.6.1 simplesmtp && node mail_helper.js"
    restart: unless-stopped
    logging: *default-logging
    depends_on:
      waitforinfra:
        condition: service_completed_successfully
  #@ elif data.values.mail.type == "localrelay":
  #! if your hosting allows you to send email via smtp/25
  exim-sender:
    image: elsdoerfer/exim-sender
    stop_grace_period: 1s
    environment:
      - ALLOWED_HOSTS="192.168.0.0/16;172.16.0.0/12;10.0.0.0/8"
      - #@ "PRIMARY_HOST={}".format(data.values.domain.name)
    expose:
      - "25"
    volumes:
      - type: tmpfs
        target: /var/spool/exim4
    #! "Allow AUTH command with any user/password"
    entrypoint: sh -c "sed -i '/begin authenticators/a PLAIN:\\ndriver = plaintext\\nserver_advertise_condition = yes\\nserver_condition = yes' /etc/exim4/exim4.conf && /exim"
    restart: unless-stopped
    logging: *default-logging
  #@ end
  fxa-auth-server:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-auth-server/dist/packages/fxa-auth-server"
    expose:
      - "9000"
    depends_on:
      redis:
        condition: service_started
      mysqldb:
        condition: service_started
      waitforinfra:
        condition: service_completed_successfully
      fxa-db-migrations:
        condition: service_completed_successfully
    volumes:
      #! since i do not know which client_id is really used
      #! i add all client_ids from dev.json
      #! 98e6508e88680e1a -> settings
      #! 5882386c6d801776 -> firefox and sync? <- but i have not seen it in enabledClientIds? because it is not a publicClient ?
      #!
      #! see lib/oauth/db/ -> preClients ?
      #@ if data.values.debug.auth_server_preverifed == True:
      - ./_init/auth/oauthserver-prod.json:/fxa/packages/fxa-auth-server/dist/packages/fxa-auth-server/config/test.json
      #@ else:
      - ./_init/auth/oauthserver-prod.json:/fxa/packages/fxa-auth-server/dist/packages/fxa-auth-server/config/prod.json
      #@ end
    environment:
      #@ if data.values.debug.auth_server_preverifed == True:
      - NODE_ENV=test
      #@ end
      #! since we do not use sns , we make this prod
      #! these two pair up?
      #! OAUTH_SERVER_SECRET_KEY = oauth.secretKey default megaz0rd dev.json: not found
      #! AUTH_SERVER_SECRETS = oauthServer.authServerSecrets default: [], dev.json ["megaz0rd", "whatever"]

      #! these two pair up?
      #! OAUTH_SERVER_SECRETS = oauth.jwtSecretKeys default ['megaz0rd'] dev.json: ["megaz0rd"]
      #! AUTH_SERVER_SHARED_SECRET = oauthServer.auth.jwtSecretKey default megaz0rd dev.json not found
      - #@ "OAUTH_SERVER_SECRET_KEY={authsecret}".format(authsecret=data.values.secrets.authsecret)
      - #@ "OAUTH_SERVER_SECRETS={authsecret}".format(authsecret=data.values.secrets.authsecret)
      - #@ "AUTH_SERVER_SHARED_SECRET={authsecret}".format(authsecret=data.values.secrets.authsecret)
      - #@ "AUTH_SERVER_SECRETS={authsecret},realwhatever".format(authsecret=data.values.secrets.authsecret)

      #! - JWT_ACCESS_TOKENS_ENABLED=false
      #! - JWT_ACCESS_TOKENS_ENABLED_CLIENT_IDS=98e6508e88680e1a

      #! same as the profile server's AUTH_SECRET_BEARER_TOKEN
      - #@ "PROFILE_SERVER_AUTH_SECRET_BEARER_TOKEN={profileserver_authsecret_bearertoken}".format(profileserver_authsecret_bearertoken=data.values.secrets.profileserver_authsecret_bearertoken)
      #! same as the content-server's
      - #@ "FLOW_ID_KEY={flowidkey}".format(flowidkey=data.values.secrets.flowidkey)
      - REDIS_HOST=redis
      - ACCESS_TOKEN_REDIS_HOST=redis #! new in v1.160.0
      - REFRESH_TOKEN_REDIS_HOST=redis #! new in v1.178.0
      - CUSTOMS_REDIS_HOST=redis
      - METRICS_REDIS_HOST=redis
      - AUTH_CACHE_REDIS_HOST=redis
      #! - RECOVERY_PHONE__ENABLED=false
      #! - RECOVERY_PHONE_REDIS_ENABLED=false
      #! even though it is not used, this should be set too.:(
      - RECOVERY_PHONE_REDIS_HOST=redis

      #! sqs for 3rd party rp
      #! sns for profile/sync/pushbox

      #! if (conf.has('snsTopicEndpoint') && conf.get('env') !== 'dev') {
      #!   throw new Error('snsTopicEndpoint is only allowed in dev env');
      #! }
      - SNS_TOPIC_ARN=disabled
      #! for oauth
      - DB=mysql
      - MYSQL_HOST=mysqldb

      #! replaces the httpdb backend , using makeMySqlConfig from fxa-shared/db
      - AUTH_MYSQL_HOST=mysqldb
      - IP_ADDRESS=0.0.0.0
      #!
      - SIGNIN_UNBLOCK_FORCED_EMAILS=^block.*@restmail\\.net$$
      - SIGNIN_CONFIRMATION_ENABLED=true
      - SIGNIN_CONFIRMATION_FORCE_EMAIL_REGEX=^sync.*@restmail\\.net$$
      #! SIGNIN_CONFIRMATION_SKIP_FOR_NEW_ACCOUNTS -> no signin code needed?
      #! SIGNIN_CONFIRMATION_SKIP_FOR_EMAIL_ADDRESS -> or this?

      - #@ "ISSUER={auth}.{domain_name}".format(auth=data.values.domain.auth,domain_name=data.values.domain.name)
      #! two places: config.publicUrl config.oauthserver.publicurl , no internal visit, safe for self sign
      - #@ "PUBLIC_URL=https://{auth}.{domain_name}".format(auth=data.values.domain.auth,domain_name=data.values.domain.name)
      #! two places: config.oauth.url (public) config.oauthServer.audience (public), no internal visit, safe for self sign
      - #@ "OAUTH_URL=https://{oauth}.{domain_name}".format(oauth=data.values.domain.oauth,domain_name=data.values.domain.name)

      #! must be public for oauthserver's GET /authorization to tell the user to redirect to the content server and log in
      - #@ "CONTENT_URL=https://{content}.{domain_name}".format(content=data.values.domain.content,domain_name=data.values.domain.name)
      #! for urls in email, public
      - #@ "CONTENT_SERVER_URL=https://{content}.{domain_name}".format(content=data.values.domain.content,domain_name=data.values.domain.name)

      #! introduced in 1.171.0 @2476a02ed , only for the audience in the jwt, no request made, see mozilla/fxa@2476a02ed , no internal visit, safe for self sign
      - #@ "SYNC_TOKENSERVER_URL=https://{sync}.{domain_name}/token".format(sync=data.values.domain.sync, domain_name=data.values.domain.name)
      #! lib/profile/client.js
      #! used for the subhub delete cache , could be an inner url
      - PROFILE_SERVER_URL=http://fxa-profile-server:1111
      #! oauthServer.openid.issuer is just an issuer? public, no internal visit, safe for self sign
      - #@ "FXA_OPENID_ISSUER=https://{content}.{domain_name}".format(content=data.values.domain.content,domain_name=data.values.domain.name)

      #! convict -> url must be a valid url (needs a tld)
      - VERIFICATION_URL=http://browseridverifier.local:5050/v2
      - PUSHBOX_ENABLED=true
      - PUSHBOX_MAX_TTL=12 months
      #! replaces the httpdb backend , direct call
      - PUSHBOX_MYSQL_HOST=mysqldb
      - CUSTOMS_SERVER_URL=none #! disable it

      #! npm run gen-keys -> however if the file exists it will fail
      - FXA_OPENID_KEYFILE=config/key.json
      - FXA_OPENID_NEWKEYFILE=config/newKey.json
      - FXA_OPENID_OLDKEYFILE=config/oldKey.json

      #! send mail via nodemailer to SMTP_HOST:SMTP_PORT
      #! see auth-server/lib/senders/index.js -> line 25 config.smtp.
      #! select_email_services.js ln 160 -> get mailer (own smtp server)
      #@ if data.values.mail.smtp_sender:
      - #@ "SMTP_SENDER={smtp_sender}".format(smtp_sender=data.values.mail.smtp_sender)
      #@ else:
      - #@ 'SMTP_SENDER=Firefox Accounts <no-reply@{domain_name}>'.format(domain_name=data.values.domain.name)
      #@ end

      #! this is for non-send-mail-just-check-code-in-log
      #@ if data.values.mail.type == "localhelper":
      - SMTP_HOST=fxa-auth-local-mail-helper
      - SMTP_PORT=9999
      #! since mozilla/fxa:pr#10532 , when there is no SMTP user:password they will use AWS SES.
      - SMTP_USER=local
      - SMTP_PASS=local
      #@ elif data.values.mail.type == "localrelay":
      #! really send mail from the self-hosted server if your provider allows smtp/25
      - SMTP_HOST=exim-sender
      - SMTP_PORT=25
      #! since mozilla/fxa:pr#10532 , when there is no SMTP user:password they will use AWS SES.
      - SMTP_USER=local
      - SMTP_PASS=local
      #@ else:
      #! 3rd party smtp relay service
      - #@ "SMTP_HOST={smtp_host}".format(smtp_host = data.values.mail.smtp_host or assert.fail("mail.smtp_host is a must"))
      - #@ "SMTP_PORT={smtp_port}".format(smtp_port = data.values.mail.smtp_port or assert.fail("mail.smtp_port is a must"))
      - #@ "SMTP_USER={smtp_user}".format(smtp_user = data.values.mail.smtp_user or assert.fail("mail.smtp_user is a must"))
      - #@ "SMTP_PASS={smtp_pass}".format(smtp_pass = data.values.mail.smtp_pass or assert.fail("mail.smtp_pass is a must"))
      - #@ "SMTP_SECURE={smtp_secure}".format(smtp_secure = data.values.mail.smtp_secure if data.values.mail.smtp_secure else False)
      #@ end
      - GEODB_ENABLED=false

      - LOG_LEVEL=WARN

      - LASTACCESSTIME_UPDATES_SAMPLE_RATE=1
      - ACCOUNT_EVENTS_ENABLED=false

      - FXA_EXPIRATION_ACCESS_TOKEN=7 days

      - AUTH_GLEAN_ENABLED=false
    #! from v1.173.0 the docker image does not have publickey.json and the other key-related json files
    #! but it needs openssl to genrsa 2048 . so ....
    #! it's not elegant but there is no better way to ....
    #! -m is important for passing ENVs
    #! from v1.188.0 the schema will not be created/updated by default . so we do this manually and ignore CREATE_MYSQL_SCHEMA
    command: |
      sh -c "( sed -i 's_@fxa/shared/pem-jwk_../../../libs/shared/pem-jwk/src/index.js_' scripts/gen_keys.js && node scripts/gen_keys.js ); node scripts/oauth_gen_keys.js ; node scripts/gen_vapid_keys.js && sed -i '/increment/a histogram: () => { },' bin/key_server.js && node bin/key_server.js"
    #! docker inspect to see the health result
    #! This looks buggy?
    #! healthcheck:
    #!   #! ttl 86400000 -> 1 day in ms
    #!   #! Note: prune-oauth-authorization-codes has a limit of 10000
    #!   test: ( node scripts/prune-tokens.js --maxTokenAge='1 month' --maxCodeAge='1 month'; node scripts/prune-oauth-authorization-codes.js --ttl 86400000 ) || exit 1
    #!   interval: 24h
    #!   retries: 0
    #!   timeout: 10m
    #!   start_period: 10m
    restart: unless-stopped
    logging: *default-logging
  #! fxa-event-broker:
  #!   image: mozilla/fxa-event-broker
  #!   depends_on:
  #!     #! - firestore
  #!     - pubsub
  #!     - goaws
  #!   environment:
  #!     - NODE_ENV=development #! just another word to avoid prod/dev
  #!     - FIRESTORE_ENABLED=false
  #!     #! - FIRESTORE_EMULATOR_HOST=http://firestore:9090
  #!     - PUBSUB_EMULATOR_HOST=http://pubsub:8085
  #!     - SERVICE_NOTIFICATION_QUEUE_URL=http://goaws:4100/serviceNotifications
  #!     #! goaws:4100 does not work because of the hardcoded localhost:4100 in workerDev.js
  #!   command: sh -c "rm /app/config/development.json && node /app/dist/bin/workerDev.js"

  #! fxa-profile-static and fxa-profile-worker should share /var/public
  fxa-profile-static:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-profile-server"
    volumes:
      - #@ "{persistencepath}/public:/fxa/packages/fxa-profile-server/var/public/:ro".format(persistencepath=data.values.persistencepath)
    expose:
      - "1112"
    environment:
      - HOST=0.0.0.0
      - IMG=local
    command: ["node","bin/_static.js"]
    restart: unless-stopped
    logging: *default-logging
  fxa-profile-worker-make-writable:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    user: root
    volumes:
      - #@ "{persistencepath}/public:/fxa/packages/fxa-profile-server/var/public/".format(persistencepath=data.values.persistencepath)
    command: [ "chmod", "a+w", "/fxa/packages/fxa-profile-server/var/public/" ]
    restart: "no"
    logging: *default-logging
  fxa-profile-worker:
    #! just computes the image and uploads to aws / local_public
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-profile-server"
    volumes:
      #! cannot write? i'm app but the folder belongs to node
      - #@ "{persistencepath}/public:/fxa/packages/fxa-profile-server/var/public/".format(persistencepath=data.values.persistencepath)
    expose:
      - "1113"
    environment:
      - WORKER_HOST=0.0.0.0
      - IMG=local
    depends_on:
      fxa-profile-worker-make-writable:
        condition: service_completed_successfully
    command: ["node","bin/worker.js"]
    restart: unless-stopped
    logging: *default-logging
  fxa-profile-server:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-profile-server"
    volumes:
      #! newly added (because the npm preinstall step was removed , so in the docker image there is no /app/var/public , even though it is not used )
      - #@ "{persistencepath}/public:/fxa/packages/fxa-profile-server/var/public/".format(persistencepath=data.values.persistencepath)
    expose:
      - "1111"
    depends_on:
      mysqldb:
        condition: service_started
      redis:
        condition: service_started
      waitforinfra:
        condition: service_completed_successfully
      fxa-db-migrations:
        condition: service_completed_successfully
    environment:
      #! same as the auth server's PROFILE_SERVER_AUTH_SECRET_BEARER_TOKEN
      - #@ "AUTH_SECRET_BEARER_TOKEN={profileserver_authsecret_bearertoken}".format(profileserver_authsecret_bearertoken=data.values.secrets.profileserver_authsecret_bearertoken)
      #! we do not need sqs events
      - EVENTS_ENABLED=false

      - HOST=0.0.0.0
      - DB=mysql
      #! does the last $ need escaping? yes
      - #@ "IMG_PROVIDERS_FXA=^https://{profile}.{domain_name}/img/a/[0-9a-f]{{32}}$$".format(profile=data.values.domain.profile , domain_name = data.values.domain.name)
      - #@ "IMG_URL=https://{profile}.{domain_name}/img/a/{{id}}".format(profile=data.values.domain.profile , domain_name = data.values.domain.name)
      #! - PUBLIC_URL -> <del>but not used in code? </del> now used. #! for the monogram avatar url see: https://github.com/mozilla/fxa/pull/7972
      - #@ "PUBLIC_URL=https://{profile}.{domain_name}".format(profile=data.values.domain.profile , domain_name = data.values.domain.name)
      - MYSQL_HOST=mysqldb
      #! merged from fxa-profile-server/config/development.json
      - IMG=local
      #! prefer the public url? needs /v1? could be inner
      - AUTH_SERVER_URL=http://fxa-auth-server:9000/v1
      #! prefer the public url? needs /v1? could be inner
      - OAUTH_SERVER_URL=http://fxa-auth-server:9000/v1
      - REDIS_HOST=redis

      - WORKER_URL=http://fxa-profile-worker:1113
    command: sh -c "sed -i 's|result.avatar.startsWith(monogramUrl)|result.avatar.startsWith(`$${monogramUrl}/v1/avatar/`)|' /fxa/packages/fxa-profile-server/lib/routes/profile.js && node bin/server.js"
    restart: unless-stopped
    logging: *default-logging
  fxa-content-server:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-content-server"
    expose:
      - "3030"
    depends_on:
      - fxa-auth-server
      - fxa-profile-server
      #! [TODO] wait for redis
      - redis
    volumes:
      - ./_init/content/contentserver-prod.json:/fxa/packages/fxa-content-server/config/prod.json

    environment:
      - CONFIG_FILES=/fxa/packages/fxa-content-server/config/prod.json #! load SCOPED_KEYS_VALIDATION for send
      - FXA_OAUTH_CLIENT_ID=ea3ca969f8c6bb0d #! this is important!! maybe...
      #! same as the auth-server's
      - #@ "FLOW_ID_KEY={flowidkey}".format(flowidkey=data.values.secrets.flowidkey)
      - FEATURE_FLAGS_REDIS_HOST=redis
      #! displayed in /.well-known/fxa-client-configuration -> sync_tokenserver_base_url
      #! todo pairing_server_base_uri is not right, see github/mozilla/fxa-pairing-channel

      #! i prefer the public url for the audience, no internal visit, safe for self sign
      - #@ "SYNC_TOKENSERVER_URL=https://{sync}.{domain_name}/token".format(sync=data.values.domain.sync, domain_name=data.values.domain.name)
      #! we now use www to represent content
      #! these are for the frontend, no internal visit, safe for self sign
      - #@ "PUBLIC_URL=https://{content}.{domain_name}".format(content=data.values.domain.content, domain_name=data.values.domain.name)
      - #@ "FXA_OAUTH_URL=https://{oauth}.{domain_name}".format(oauth=data.values.domain.oauth, domain_name=data.values.domain.name)
      - #@ "FXA_URL=https://{auth}.{domain_name}".format(auth=data.values.domain.auth, domain_name=data.values.domain.name)
      - #@ "FXA_PROFILE_URL=https://{profile}.{domain_name}".format(profile=data.values.domain.profile, domain_name=data.values.domain.name)
      - #@ "FXA_PROFILE_IMAGES_URL=https://{profile}.{domain_name}".format(profile=data.values.domain.profile, domain_name=data.values.domain.name)
      #@ if data.values.option.channelserver.enable == True:
      - #@ "PAIRING_SERVER_BASE_URI=wss://{channelserver}.{domain_name}".format(channelserver=data.values.domain.channelserver, domain_name=data.values.domain.name)
      #@ end

      - FXA_MARKETING_EMAIL_ENABLED=false #! no marketing

      - GEODB_ENABLED=false
      - LOG_LEVEL=WARN

      #! to minify js and css
      - NODE_ENV=production
      - STATIC_DIRECTORY=dist
      - PAGE_TEMPLATE_SUBDIRECTORY=dist
      #! may be helpful?
      - CSP_ENABLED=true

      #@ if data.values.mail.type == "localhelper":
      - FXA_MX_RECORD_VALIDATION=false #! you can use a mail address with any domain whatsoever if using localhelper
      #@ end

      #@ allowd_origins = []
      #@ if data.values.option.send.enable == True:
      #@ allowd_origins.append("https://{send}.{domain_name}".format(send=data.values.domain.send or assert.fail("domain.send is a must") , domain_name=data.values.domain.name) )
      #@ end
      #@ if len(allowd_origins) > 0:
      - #@ "ALLOWED_METRICS_FLOW_ORIGINS={}".format(",".join(allowd_origins))
      #@ end
      #! oauth_client_id <- so fxa-content-server (fxa settings) is an oauth client?
      #! client session secret

      #! frontend, no internal visit, safe for self sign
      - #@ "FXA_GQL_URL=https://{graphql}.{domain_name}".format(graphql=data.values.domain.graphql, domain_name=data.values.domain.name)
      - #@ "REDIRECT_CHECK_ALLOW_LIST=*.{domain_name}".format(domain_name=data.values.domain.name)

      #! We don't support auth with 3rd parties.
      - APPLE_AUTH_ENABLED=false
      - GOOGLE_AUTH_ENABLED=false

      - GLEAN_ENEABLED=false
      - GLEAN_UPLOAD_ENABLED=false
      - GLEAN_LOG_PINGS=false
      - CONTENT_SERVER_GLEAN_ENABLED=false

      #! TODO: check what this affects?
      #! v1 -> v2 upgrade detail: fxa-auth-client/lib/client.ts
      #! - ROLLOUT_KEY_STRETCH_V2=1

      #! if we don't use react in signup/signin -> sync will not be auto enabled.
      #! and old firefox (fx_desktop_v3) will not be able to pass the signup/sign in flow
      #! not sure if it is my config problem or FxA's problem
      #! so just keep the same as prod
      - REACT_CONVERSION_SIGNUP_ROUTES=true
      - REACT_CONVERSION_SIGNIN_ROUTES=true
      - REACT_CONVERSION_SIMPLE_ROUTES=true
      - REACT_CONVERSION_RESET_PASSWORD_ROUTES=true
      - REACT_CONVERSION_POST_VERIFY_THIRD_PARTY_AUTH=true

    #! since the docker image uses "cdn.accounts.firefox.com" , we should replace it to avoid CSP issues and keep full selfhosting
    #! the double dollar sign for PUBLIC_URL in the sed command is required so that docker-compose does not substitute it.
    command:
      - /bin/sh
      - -c
      - |
        sed -i "s|https://cdn.accounts.firefox.com|$${PUBLIC_URL}|g" ./dist/settings/prod/asset-manifest.json ./dist/settings/prod/index.html ./dist/settings/prod/static/js/main.* &&
        node server/bin/fxa-content-server.js
    restart: unless-stopped
    logging: *default-logging
  fxa-graphql-api:
    image: #@ "mozilla/fxa-mono:{version}".format(version=data.values.fxa_version or "latest")
    stop_grace_period: 1s
    working_dir: "/fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api"
    expose:
      - "8290"
    depends_on:
      fxa-auth-server:
        condition: service_started
      fxa-profile-server:
        condition: service_started
      redis:
        condition: service_started
      mysqldb:
        condition: service_started
      waitforinfra:
        condition: service_completed_successfully
    environment:
      - ACCESS_TOKEN_REDIS_HOST=redis
      - PROFILE_SERVER_URL=http://fxa-profile-server:1111/v1
      - CUSTOMS_SERVER_URL=none
      - #@ "CORS_ORIGIN=https://{content}.{domain_name}".format(content=data.values.domain.content, domain_name=data.values.domain.name)
      #! - AUTH_SERVER_URL=http://fxa-auth-server:9000/v1
      #! [WARNING] needs a visit and a mac signature, so it conflicts with an all-internal setup
      #! see graphql-api using fxa/packages/fxa-auth-client/lib/hawk.ts

      #@ if data.values.debug.full_self_sign_workaround == True:
      - #@ "AUTH_SERVER_PUBLIC_URL=https://{auth}.{domain_name}/v1".format(auth=data.values.domain.auth, domain_name=data.values.domain.name)
      - AUTH_SERVER_URL=http://fxa-auth-server:9000/v1
      #@ else:
      - #@ "AUTH_SERVER_URL=https://{auth}.{domain_name}/v1".format(auth=data.values.domain.auth, domain_name=data.values.domain.name)
      #@ end

      #! for legal docs; settings is served under the content-server
      - SETTINGS_SERVER_URL=http://fxa-content-server:3030

      - AUTH_MYSQL_HOST=mysqldb
      - PROFILE_MYSQL_HOST=mysqldb
      - OAUTH_MYSQL_HOST=mysqldb

      #! "disabled" does not actually work for SNS_TOPIC_ARN, and to keep fxa-graphql from crashing, SNS_TOPIC_ENDPOINT is needed as well.
      - SNS_TOPIC_ARN=disabled
      - SNS_TOPIC_ENDPOINT=disabled

      #! to find the correct path of the config and load the recoveryPhone redis host
      - CONFIG_FILES=/fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api/src/config/production.json
    #! the two dollar signs below are for docker-compose escaping.
    #@ if data.values.debug.full_self_sign_workaround == True:
    command:
      - /bin/sh
      - -c
      - |
        sed -i 's.authServer: {.authServer: {public_url:{doc:\"Public url (with v1) for Hawk Auth\",env:\"AUTH_SERVER_PUBLIC_URL\",default:\"http://localhost:9001/v1\"},.' /fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api/src/config.js &&
        sed -i 's/url, {/url, {public_url:authServerConfig.public_url,/' /fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api/src/backend/auth-client.service.js &&
        sed -i 's/30000;/30000;this.public_url = options.public_url || this.uri ;/' /fxa/packages/fxa-auth-client/dist/server/cjs/packages/fxa-auth-client/lib/client.js &&
        sed -i 's/hawk.header(method, this.url(path),/hawk.header(method, `$${this.public_url}$${path}`,/' /fxa/packages/fxa-auth-client/dist/server/cjs/packages/fxa-auth-client/lib/client.js &&
        sed -i 's/30000;/30000;this.public_url = options.public_url || this.uri ;/' /fxa/packages/fxa-auth-client/dist/server/esm/packages/fxa-auth-client/lib/client.js &&
        sed -i 's/hawk.header(method, this.url(path),/hawk.header(method, `$${this.public_url}$${path}`,/' /fxa/packages/fxa-auth-client/dist/server/esm/packages/fxa-auth-client/lib/client.js &&
        echo '{"recoveryPhone":{"redis":{"host": "redis"}}}' > /fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api/src/config/production.json &&
        node src/main.js
    #@ else:
    command:
      - /bin/sh
      - -c
      - |
        echo '{"recoveryPhone":{"redis":{"host": "redis"}}}' > /fxa/packages/fxa-graphql-api/dist/packages/fxa-graphql-api/src/config/production.json &&
        node src/main.js
    #@ end
    restart: unless-stopped
    logging: *default-logging
  #! todo replace nginx with traefik
  nginx:
    #! [TODO] auto renew cert!
    image: nginx:1.22.0-alpine
    #@ if not hasattr(data.values.nginx,"unix_socket") or data.values.nginx.unix_socket == False:
    ports:
      #! [TODO] map inner_listenr to $INNER_LISTENER in the nginx conf
      #! [TODO] syncserver && send 's WAIT_HOSTS need this too . so make a GLOBAL arg?
      - #@ "{listener}:{inner_listenr}".format(listener=data.values.nginx.listener , inner_listenr = 443 if data.values.nginx.ssl == True else 80)
      #! - "443:443" #! the real outer port
    #@ elif data.values.nginx.ssl == True:
    #@ assert.fail("can not combine ssl with unix socket!")
    #@ end
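    #! Illustration only: with the mapping above, nginx.listener is the host address to bind, e.g. in the data values:
    #!   nginx:
    #!     ssl: true
    #!     listener: "0.0.0.0"   #! published as 0.0.0.0:443 (or :80 when ssl is false)
    #! or, with unix_socket: true, nginx.listener is instead the host folder that receives nginx.sock
    #! (see the volumes/environment sections further down).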
    depends_on:
      - fxa-auth-server
      - fxa-profile-server
      - syncserver #! cyclic dependency? broken by not setting SYNCSERVER_IDENTITY_PROVIDER
      #! nope! nginx only needs this service's name to be resolvable (which it is once the container is up)
      #! sync -> depends on content(www) -> depends on nginx -> sync being up is enough

      - fxa-content-server
      - fxa-graphql-api
      #@ if data.values.option.channelserver.enable == True:
      - channelserver
      #@ end
      #@ if data.values.option.send.enable == True:
      - send
      #@ end
      #@ if data.values.option.notes.enable == True or data.values.option.webext_storagesync.enable == True :
      - kinto
      #@ end
    volumes:
      #! resolver 127.0.0.11 valid=30s; => to allow up without depends ?
      #! ############## common parts #############
      #@ if data.values.nginx.ssl == True:
      #@ if data.values.nginx.certs.profile.cert and data.values.nginx.certs.profile.key :
      - #@ "{}:/certs/profile.cer:ro".format(data.values.nginx.certs.profile.cert)
      - #@ "{}:/certs/profile.key:ro".format(data.values.nginx.certs.profile.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/profile.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/profile.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      #@ if data.values.nginx.certs.auth.cert and data.values.nginx.certs.auth.key :
      - #@ "{}:/certs/auth.cer:ro".format(data.values.nginx.certs.auth.cert)
      - #@ "{}:/certs/auth.key:ro".format(data.values.nginx.certs.auth.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/auth.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/auth.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      #@ if data.values.nginx.certs.oauth.cert and data.values.nginx.certs.oauth.key :
      - #@ "{}:/certs/oauth.cer:ro".format(data.values.nginx.certs.oauth.cert)
      - #@ "{}:/certs/oauth.key:ro".format(data.values.nginx.certs.oauth.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/oauth.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/oauth.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      #@ if data.values.nginx.certs.sync.cert and data.values.nginx.certs.sync.key :
      - #@ "{}:/certs/sync.cer:ro".format(data.values.nginx.certs.sync.cert)
      - #@ "{}:/certs/sync.key:ro".format(data.values.nginx.certs.sync.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/sync.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/sync.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      #@ if data.values.nginx.certs.content.cert and data.values.nginx.certs.content.key :
      - #@ "{}:/certs/content.cer:ro".format(data.values.nginx.certs.content.cert)
      - #@ "{}:/certs/content.key:ro".format(data.values.nginx.certs.content.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/content.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/content.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      #@ if data.values.nginx.certs.graphql.cert and data.values.nginx.certs.graphql.key:
      - #@ "{}:/certs/graphql.cer:ro".format(data.values.nginx.certs.graphql.cert)
      - #@ "{}:/certs/graphql.key:ro".format(data.values.nginx.certs.graphql.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/graphql.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/graphql.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end

      - ./_init/nginx/fxa.conf.tmpl:/etc/nginx/templates/fxa.conf.tmpl:ro
      #@ else:
      - ./_init/nginx/fxa_nossl.conf.tmpl:/etc/nginx/templates/fxa.conf.tmpl:ro
      #@ end
      #! ############## channelserver parts #############
      #@ if data.values.option.channelserver.enable == True:
      #@ if data.values.nginx.ssl == True:
      #@ if data.values.nginx.certs.channelserver.cert and data.values.nginx.certs.channelserver.key:
      - #@ "{}:/certs/channelserver.cer:ro".format(data.values.nginx.certs.channelserver.cert)
      - #@ "{}:/certs/channelserver.key:ro".format(data.values.nginx.certs.channelserver.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/channelserver.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/channelserver.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end
      - ./_init/nginx/channelserver.conf.tmpl:/etc/nginx/templates/channelserver.conf.tmpl:ro
      #@ else:
      #! ssl false
      - ./_init/nginx/channelserver_nossl.conf.tmpl:/etc/nginx/templates/channelserver.conf.tmpl:ro
      #@ end
      #@ end
      #! ############## send parts #############
      #@ if data.values.option.send.enable == True:
      #@ if data.values.nginx.ssl == True:
      #@ if data.values.nginx.certs.send.cert and data.values.nginx.certs.send.key :
      - #@ "{}:/certs/send.cer:ro".format(data.values.nginx.certs.send.cert)
      - #@ "{}:/certs/send.key:ro".format(data.values.nginx.certs.send.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/send.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/send.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end
      - ./_init/nginx/send.conf.tmpl:/etc/nginx/templates/send.conf.tmpl:ro
      #@ else:
      #! ssl false
      - ./_init/nginx/send_nossl.conf.tmpl:/etc/nginx/templates/send.conf.tmpl:ro
      #@ end
      #@ end
      #! ############## kinto parts #############
      #@ if data.values.option.notes.enable == True or data.values.option.webext_storagesync.enable == True:
      #@ if data.values.nginx.ssl == True:
      #@ if data.values.nginx.certs.kinto.cert and data.values.nginx.certs.kinto.key :
      - #@ "{}:/certs/kinto.cer:ro".format(data.values.nginx.certs.kinto.cert)
      - #@ "{}:/certs/kinto.key:ro".format(data.values.nginx.certs.kinto.key)
      #@ elif data.values.nginx.certs.wild.cert and data.values.nginx.certs.wild.key :
      - #@ "{}:/certs/kinto.cer:ro".format(data.values.nginx.certs.wild.cert)
      - #@ "{}:/certs/kinto.key:ro".format(data.values.nginx.certs.wild.key)
      #@ else :
      #@ assert.fail("NO CERT!!!")
      #@ end
      - ./_init/nginx/kinto.conf.tmpl:/etc/nginx/templates/kinto.conf.tmpl:ro
      #@ else:
      #! ssl false
      - ./_init/nginx/kinto_nossl.conf.tmpl:/etc/nginx/templates/kinto.conf.tmpl:ro
      #@ end
      #@ end
#@ if hasattr(data.values.nginx,"unix_socket") and data.values.nginx.unix_socket == True:
|
|
- #@ "{nginx_socket_folder}:/var/run/fxa".format(nginx_socket_folder=data.values.nginx.listener)
|
|
#@ end
|
|
    environment:
      #@ if hasattr(data.values.nginx,"unix_socket") and data.values.nginx.unix_socket == True:
      - #@ "NGINX_LISTENER={nginx_listener}".format(nginx_listener= "unix:/var/run/fxa/nginx.sock" )
      #@ elif data.values.nginx.ssl == False:
      - #@ "NGINX_LISTENER={nginx_listener}".format(nginx_listener=80)
      #@ end
      #! since nginx 1.19, conf templating from environment variables is implemented.
      - NGINX_ENVSUBST_TEMPLATE_SUFFIX=.tmpl
      #! for replacement in fxa.conf.tmpl
      - #@ "NGINX_DOMAIN_NAME={domain_name}".format(domain_name=data.values.domain.name)
      - #@ "CONTENT={content}".format(content=data.values.domain.content)
      - #@ "AUTH={auth}".format(auth=data.values.domain.auth)
      - #@ "OAUTH={oauth}".format(oauth=data.values.domain.oauth)
      - #@ "PROFILE={profile}".format(profile=data.values.domain.profile)
      - #@ "SYNC={sync}".format(sync=data.values.domain.sync)
      - #@ "GRAPHQL={graphql}".format(graphql=data.values.domain.graphql)
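      #! Illustration only, assuming the bundled fxa.conf.tmpl consumes these variables via nginx's
      #! envsubst templating, roughly like:
      #!   server_name ${CONTENT}.${NGINX_DOMAIN_NAME};
      #! (the authoritative contents live in ./_init/nginx/fxa.conf.tmpl)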
      #@ if data.values.option.channelserver.enable == True:
      - #@ "CHANNELSERVER={channelserver}".format(channelserver=data.values.domain.channelserver or assert.fail("domain.channelserver is a must"))
      #@ end

      #@ if data.values.option.send.enable == True:
      - #@ "SEND={send}".format(send=data.values.domain.send or assert.fail("domain.send is a must"))
      #@ end

      #@ if data.values.option.notes.enable == True or data.values.option.webext_storagesync.enable == True:
      - #@ "KINTO={kinto}".format(kinto=data.values.domain.kinto or assert.fail("domain.kinto is a must"))
      #@ end

    #@ if data.values.debug.deps_logs == False:
    logging: #! do not show logs
      driver: "none"
    #@ else:
    logging: *default-logging
    #@ end
    restart: unless-stopped
  #@ if data.values.option.channelserver.enable == True:
  channelserver:
    image: mozilla/channelserver@sha256:e0b4f3c2afa54cdc4d484edf44e4517bf6d9a6dc1e71a1b4986ac0c5e28620a4
    expose:
      - "8000"
    restart: unless-stopped
    logging: *default-logging
  #@ end
  #@ if data.values.option.send.enable == True:
  send:
    #! [TODO] TMP_DIR?
    image: mozilla/send
    depends_on:
      #! [TODO] wait for send-redis
      - send-redis
      #! see https://github.com/mozilla/send server/fxa.js -> depends on the content server too (same as syncserver)
      #! [TODO] wait for nginx and fxa-content-server
      - fxa-content-server
    environment:
      #! [TODO] internal; it is not requested at startup.
      - FXA_URL=http://fxa-content-server:3030
      #! must be public, for CSP
      - #@ "BASE_URL=https://{send}.{domain_name}".format(send=data.values.domain.send or assert.fail("domain.send is a must") , domain_name=data.values.domain.name)
      - #@ "FXA_CSP_CONTENT_URL=https://{content}.{domain_name}".format(content=data.values.domain.content, domain_name=data.values.domain.name)
      - #@ "FXA_CSP_OAUTH_URL=https://{oauth}.{domain_name}".format(oauth=data.values.domain.oauth, domain_name=data.values.domain.name)
      - #@ "FXA_CSP_PROFILE_URL=https://{profile}.{domain_name}".format(profile=data.values.domain.profile, domain_name=data.values.domain.name)
      - #@ "FXA_CSP_PROFILEIMAGE_URL=https://{profile}.{domain_name}".format(profile=data.values.domain.profile, domain_name=data.values.domain.name)
      #@ settings = struct.decode(data.values.option.send.settings)
      #@ for key in settings:
      #@ if settings.get(key):
      - #@ "{key}={val}".format(key=key.upper(), val= settings.get(key))
      #@ end
      #@ end
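      #! Illustration only: each non-empty key under option.send.settings in the data values is emitted
      #! by the loop above as an upper-cased environment variable, e.g. (hypothetical setting)
      #!   option:
      #!     send:
      #!       settings:
      #!         max_file_size: 2147483648
      #! would render as:
      #!   - MAX_FILE_SIZE=2147483648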
      #! otherwise the url protocol of a file depends on NODE_ENV being http/https
      - NODE_ENV=production
      - REDIS_HOST=send-redis
    restart: unless-stopped
  send-redis: #! used by send
    image: redis
    expose:
      - "6379"
    #@ if data.values.debug.deps_logs == False:
    logging: #! do not show logs
      driver: "none"
    #@ end
    volumes:
      - type: tmpfs
        target: /data
  #@ end
  #@ if data.values.option.notes.enable == True or data.values.option.webext_storagesync.enable == True:
  waitforpostgres:
    image: ghcr.io/ufoscout/docker-compose-wait:2.12.1
    depends_on:
      - postgresdb
    command: /wait
    environment:
      - WAIT_HOSTS=postgresdb:5432
      - WAIT_TIMEOUT=120
  kinto:
    image: #@ "kinto/kinto-server:{version}".format(version=data.values.option.kinto_version or "latest")
    depends_on:
      postgresdb:
        condition: service_started
      waitforpostgres:
        condition: service_completed_successfully
    expose:
      - "8888"
    environment:
      - PORT=8888
      - KINTO_STORAGE_BACKEND=kinto.core.storage.postgresql
      - KINTO_STORAGE_URL=postgresql://postgres:postgres@postgresdb/postgres
      - KINTO_CACHE_BACKEND=kinto.core.cache.postgresql
      - KINTO_CACHE_URL=postgresql://postgres:postgres@postgresdb/postgres
      #! the permission backend is necessary
      - KINTO_PERMISSION_BACKEND=kinto.core.permission.postgresql
      - KINTO_PERMISSION_URL=postgresql://postgres:postgres@postgresdb/postgres

      #! does read_env not work for this ? => no , if the .ini defines a KEY then the VALUE can be overridden in ENV
      #! kinto-fxa replaces "-" -> "_" and "." -> "_" see kinto/core/utils.py -> read_env

      #! must be public, see kinto-fxa: views/relier fxa_oauth_login , just for a 302 redirect, no internal visit, safe for self sign
      - #@ "FXA_OAUTH_OAUTH_URI=https://{oauth}.{domain_name}/v1".format(oauth=data.values.domain.oauth, domain_name=data.values.domain.name)

      #! #### notes config
      #@ if data.values.option.notes.enable == True:
      #@ client_ids = []
      #@ if data.values.option.notes.settings.client_id.webext:
      #@ client_ids.append(data.values.option.notes.settings.client_id.webext)
      #@ end
      #@ if data.values.option.notes.settings.client_id.android:
      #@ client_ids.append(data.values.option.notes.settings.client_id.android)
      #@ end
      - #@ "FXA_OAUTH_CLIENTS_NOTES_CLIENT_ID={val}".format(val= " ".join(client_ids) )
      #@ end

      #@ if data.values.option.webext_storagesync.enable == True:
      - #@ "FXA_OAUTH_CLIENTS_STORAGESYNC_CLIENT_ID={val}".format(val= data.values.option.webext_storagesync.settings.client_id or assert.fail("webext_storagesync client_id is a must") )
      #@ end

      #! [deprecated]
      #! #### notes config
      #! #@ notes_settings = struct.decode(data.values.option.notes.settings)
      #! #@ for key in notes_settings:
      #! #@ if notes_settings.get(key):
      #! - #@ "{key}={val}".format(key=key.upper().replace(".","_").replace("-","_"), val= notes_settings.get(key))
      #! #@ end
      #! #@ end
      #! [deprecated]
      #! #### webextension.storage.sync config
      #! #@ webext_storagesync_settings = struct.decode(data.values.option.webext_storagesync.settings)
      #! #@ for key in webext_storagesync_settings:
      #! #@ if webext_storagesync_settings.get(key):
      #! - #@ "{key}={val}".format(key=key.upper().replace(".","_").replace("-","_"), val= webext_storagesync_settings.get(key))
      #! #@ end
      #! #@ end

    volumes:
      - ./_init/kinto/kinto.ini:/etc/kinto/kinto.ini
    command: sh -c "pip install kinto-fxa && kinto migrate --ini $$KINTO_INI && kinto start --ini $$KINTO_INI --port $$PORT"
    restart: unless-stopped
  postgresdb:
    image: postgres
    expose:
      - "5432"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    volumes:
      - #@ "{persistencepath}/postgres_data:/var/lib/postgresql/data".format(persistencepath=data.values.persistencepath)
    #@ if data.values.debug.deps_logs == False:
    logging: #! do not show logs
      driver: "none"
    #@ end
    restart: unless-stopped
  #@ end
#! todo all events
#! todo auth -> prod , but snsTopicEndpoint needs dev!!!
#! remove auth's dev.json, unused.

#! issue 1. convict valid url for pushbox and browser-verify
#!   -> a docker-compose network alias may help
#! issue 2. geodata with proxy_pass 127.0.0.1?
#! issue 3. is fxa-event-broker necessary for self hosting?
#!   -> No

#! this is not needed
#! #! auth ->(send)-> sqs ->(get) -> event-broker -> relying party (3rd)

#! #! auth -> sns (via topic to all consumers)
#!   -> sync-tokenserver script: process_account_event.py (may need a manual run and config)
#!      ( delete / reset / passwordChange )
#!   -> pushbox (delete[which is account] / device:delete)
#!   -> profile (delete primaryEmailChanged profileDataChanged)
#!   ?-> content?
#! profile uses fxa-notifier-aws which cannot set an endpoint
#! tokenserver cannot set an endpoint either
#! what if we set goaws -> us-east-1.amazonaws.com //
#!
#! pushbox can be changed . but consuming is too slow and not necessary
#! so append it at your own risk
#! for pushbox
#! add
#!  fxa-auth-server
#!    environment:
#!      - NODE_ENV=dev #! to enable SNS_TOPIC_ENDPOINT
#!      - SNS_TOPIC_ENDPOINT=http://goaws:4100/
#!      - SNS_TOPIC_ARN=arn:aws:sns:local-01:000000000000:local-topic1
#! add
#!  pushbox.local
#!    environment:
#!      - AWS_LOCAL_SQS=http://goaws:4100 #! this sqs is for getting device/account delete messages, to delete those devices'/accounts' unused messages.
#!      - ROCKET_SQS_URL="http://goaws:4100/100010001000/local-queue3" #! #! local-queue3 is under local-topic1, defined by goaws/goaws.yaml
#!      - WAIT_HOSTS=mysqldb:3306,goaws:4100
#! add
#!  goaws: #! used by fxa-event-broker fxa-auth-server push.local
#!    image: pafortin/goaws
#!    expose:
#!      - "4100"
#!    logging:
#!      driver: "none"
#!
#!