Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
[job-run "mongo-backup"]
schedule= @every 24h
image= mongo:4.2
network= mongo_backup
volume= /backups:/tmp/backups
command= sh -c 'mongodump --uri=${OPENHIM_MONGO_URL} --gzip --archive=/tmp/backups/mongodump_$(date +%s).gz'
delete= true

[job-run "mongo-backup"]
schedule= @every 24h
image= mongo:4.2
network= mongo_backup
volume= /backups:/tmp/backups
command= sh -c 'mongodump --uri=${OPENHIM_MONGO_URL} --gzip --archive=/tmp/backups/mongodump_$(date +%s).gz'
delete= true

Backup & restore process.
terraform init
terraform apply

Apply complete! Resources: 5 added, 0 changed, 0 destroyed.
Outputs:
SUBNET_ID = "subnet-0004b0dacb5862d59"
VPC_ID = "vpc-067ab69f374ac9f47"

terraform init

PUBLIC_KEY_PATH - path to the user's public key file that gets injected into the servers created
PROJECT_NAME - unique project name that is used to identify each VPC and its resources
HOSTED_ZONE_ID - (only if you are creating domains, which by default you are) the hosted zone to use, this must be created in the AWS console
DOMAIN_NAME - the base domain name to use
SUBNET_ID - the subnet id to use, copy this from the previous step
VPC_ID - the VPC id to use, copy this from the previous step

PUBLIC_KEY_PATH = "/home/{user}/.ssh/id_rsa.pub"
PROJECT_NAME = "jembi_platform_dev_{user}"
HOSTED_ZONE_ID = "Z00782582NSP6D0VHBCMI"
DOMAIN_NAME = "{user}.jembi.cloud"
SUBNET_ID = "subnet-0004b0dacb5862d59"
VPC_ID = "vpc-067ab69f374ac9f47"

cat ~/.aws/credentials

[default]
aws_access_key_id = AKIA6FOPGN5TYHXXXXX
aws_secret_access_key = Qf7E+qcXXXXXXQh4XznN4MM8qR/VP/SXgXXXXX
[jembi-sandbox]
aws_access_key_id = AKIASOHFAV527JCXXXXX
aws_secret_access_key = YXFu3XxXXXXXTeNXdUtIg0gb9Ro7gJ89XXXXX
[jembi-icap]
aws_access_key_id = AKIAVFN7GJJFS6LXXXXX
aws_secret_access_key = b2I6jhwXXXXX4YehBCx/7rKl1JZjYdbtXXXXX

terraform apply -var-file my.tfvars

Apply complete! Resources: 13 added, 0 changed, 0 destroyed.
Outputs:
domains = {
"domain_name" = "{user}.jembi.cloud"
"node_domain_names" = [
"node-0.{user}.jembi.cloud",
"node-1.{user}.jembi.cloud",
"node-2.{user}.jembi.cloud",
]
"subdomain" = [
"*.{user}.jembi.cloud",
]
}
public_ips = [
"13.245.143.121",
"13.246.39.101",
"13.246.39.92",
]

terraform destroy -var-file my.tfvars

Various notes and guide
ansible-galaxy collection install community.docker

ssh-keyscan -H <host> >> ~/.ssh/known_hosts

ansible-playbook \
--ask-vault-pass \
--become \
--inventory=inventories/<INVENTORY> \
--user=ubuntu \
playbooks/<PLAYBOOK>.yml

ansible-playbook \
--ask-vault-pass \
--become \
--inventory=inventories/development \
--user=ubuntu \
playbooks/provision.yml

echo -n '<YOUR SECRET>' | ansible-vault encrypt_string

# Stop the server running in the container
docker exec -t <postgres_leader_container_id> pg_ctl stop -D /bitnami/postgresql/data
# Clear the contents of /bitnami/postgresql/data
docker exec -t --user root <postgres_leader_container_id> sh -c 'cd /bitnami/postgresql/data && rm -rf $(ls)'
# Copy over the base.tar file
sudo docker cp <backup_file>/base.tar <postgres_leader_container_id>:/bitnami/postgresql
# Extract the base.tar file
docker exec -t --user root <postgres_leader_container_id> sh -c 'tar -xf /bitnami/postgresql/base.tar --directory=/bitnami/postgresql/data'
# Copy over the pg_wal.tar file
sudo docker cp <backup_file>/pg_wal.tar <postgres_leader_container_id>:/bitnami/postgresql
# Extract pg_wal.tar
docker exec -t --user root <postgres_leader_container_id> sh -c 'tar -xf /bitnami/postgresql/pg_wal.tar --directory=/bitnami/postgresql/data/pg_wal'
# Copy conf dir over
docker exec -t --user root <postgres_leader_container_id> sh -c 'cp -r /bitnami/postgresql/conf/. /bitnami/postgresql/data'
# Set pg_wal.tar permissions
docker exec -t --user root <postgres_leader_container_id> sh -c 'cd /bitnami/postgresql/data/pg_wal && chown -v 1001 $(ls)'
# Start the server
docker exec -t <postgres_leader_container_id> pg_ctl start -D /bitnami/postgresql/data