From dd92012b31f10b02397367ce5c64e37e1e254127 Mon Sep 17 00:00:00 2001 From: ahgraber Date: Sat, 13 Feb 2021 15:25:43 -0500 Subject: [PATCH] initial commit (still not working) --- .github/CONTRIBUTING.md | 122 --- .github/FUNDING.yml | 3 - .github/ISSUE_TEMPLATE/config.yml | 13 - .github/ISSUE_TEMPLATE/issue.bug.md | 40 - .github/ISSUE_TEMPLATE/issue.feature.md | 25 - .github/PULL_REQUEST_TEMPLATE.md | 43 - .github/workflows/external_trigger.yml | 92 --- .../workflows/external_trigger_scheduler.yml | 43 - .github/workflows/greetings.yml | 13 - .github/workflows/package_trigger.yml | 38 - .../workflows/package_trigger_scheduler.yml | 50 -- .github/workflows/stale.yml | 23 - .gitignore | 0 Dockerfile | 215 ++--- Dockerfile.armhf => Dockerfile-swag | 2 +- Dockerfile.aarch64 | 154 ---- Jenkinsfile | 775 ------------------ README.md | 242 ++---- docker-compose.yaml | 12 + jenkins-vars.yml | 29 - package_versions.txt | 220 ----- readme-vars.yml | 168 ---- root/app/duckdns-txt | 8 - root/app/le-renew.sh | 19 +- root/defaults/502.html | 44 - root/defaults/authelia-location.conf | 11 - root/defaults/authelia-server.conf | 48 -- .../{dns-conf => credentials}/cloudflare.ini | 2 +- root/defaults/crontabs/.gitkeep | 0 root/defaults/crontabs/root | 9 + root/defaults/default | 155 ---- root/defaults/deploy/deploy-convert-certs.sh | 32 + root/defaults/dns-conf/aliyun.ini | 6 - root/defaults/dns-conf/cloudxns.ini | 4 - root/defaults/dns-conf/cpanel.ini | 6 - root/defaults/dns-conf/digitalocean.ini | 3 - root/defaults/dns-conf/dnsimple.ini | 3 - root/defaults/dns-conf/dnsmadeeasy.ini | 4 - root/defaults/dns-conf/domeneshop.ini | 4 - root/defaults/dns-conf/gandi.ini | 3 - root/defaults/dns-conf/gehirn.ini | 4 - root/defaults/dns-conf/google.json | 6 - root/defaults/dns-conf/hetzner.ini | 3 - root/defaults/dns-conf/inwx.ini | 6 - root/defaults/dns-conf/linode.ini | 3 - root/defaults/dns-conf/luadns.ini | 4 - root/defaults/dns-conf/netcup.ini | 3 - root/defaults/dns-conf/njalla.ini | 2 
- root/defaults/dns-conf/nsone.ini | 3 - root/defaults/dns-conf/ovh.ini | 6 - root/defaults/dns-conf/rfc2136.ini | 11 - root/defaults/dns-conf/route53.ini | 5 - root/defaults/dns-conf/sakuracloud.ini | 4 - root/defaults/dns-conf/transip.ini | 30 - root/defaults/fail2ban/fail2ban.local | 4 - .../fail2ban/filter.d/nginx-badbots.conf | 21 - .../fail2ban/filter.d/nginx-deny.conf | 15 - root/defaults/geoip2.conf | 123 --- root/defaults/index.html | 39 - root/defaults/jail.local | 57 -- root/defaults/ldap.conf | 92 --- root/defaults/nginx.conf | 120 --- root/defaults/proxy.conf | 30 - root/defaults/ssl.conf | 48 -- root/donate.txt | 1 - root/etc/cont-init.d/.gitkeep | 0 root/etc/cont-init.d/00-s6-secret-init.sh | 29 + root/etc/cont-init.d/01-add_user.sh | 38 + root/etc/cont-init.d/02-set-timezone.sh | 9 + root/etc/cont-init.d/10-permissions.sh | 15 + root/etc/cont-init.d/50-certbot.sh | 171 ++++ root/etc/cont-init.d/50-config | 356 -------- root/etc/cont-init.d/60-renew | 10 - root/etc/cont-init.d/60_renewal-init.sh | 15 + root/etc/cont-init.d/70-templates | 42 - root/etc/crontabs/.gitkeep | 0 root/etc/logrotate.d/fail2ban | 12 - root/etc/logrotate.d/lerotate | 11 - root/etc/services.d/.gitkeep | 0 root/etc/services.d/cron/finish | 3 + root/etc/services.d/cron/run | 8 + root/etc/services.d/fail2ban/run | 4 - scripts/archive/00_secret-init.sh | 36 + scripts/archive/01-add_user.sh | 38 + scripts/archive/init.sh | 15 + scripts/backups/deploy-convert-certs.sh | 30 + scripts/backups/root | 9 + scripts/buildx.sh | 23 + scripts/install-s6.sh | 29 + 89 files changed, 640 insertions(+), 3586 deletions(-) delete mode 100644 .github/CONTRIBUTING.md delete mode 100755 .github/FUNDING.yml delete mode 100755 .github/ISSUE_TEMPLATE/config.yml delete mode 100755 .github/ISSUE_TEMPLATE/issue.bug.md delete mode 100755 .github/ISSUE_TEMPLATE/issue.feature.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 .github/workflows/external_trigger.yml delete mode 100644 
.github/workflows/external_trigger_scheduler.yml delete mode 100644 .github/workflows/greetings.yml delete mode 100644 .github/workflows/package_trigger.yml delete mode 100644 .github/workflows/package_trigger_scheduler.yml delete mode 100644 .github/workflows/stale.yml create mode 100644 .gitignore mode change 100755 => 100644 Dockerfile rename Dockerfile.armhf => Dockerfile-swag (98%) delete mode 100755 Dockerfile.aarch64 delete mode 100644 Jenkinsfile create mode 100644 docker-compose.yaml delete mode 100644 jenkins-vars.yml delete mode 100755 package_versions.txt delete mode 100755 readme-vars.yml delete mode 100644 root/app/duckdns-txt delete mode 100644 root/defaults/502.html delete mode 100644 root/defaults/authelia-location.conf delete mode 100644 root/defaults/authelia-server.conf rename root/defaults/{dns-conf => credentials}/cloudflare.ini (83%) create mode 100644 root/defaults/crontabs/.gitkeep create mode 100644 root/defaults/crontabs/root delete mode 100644 root/defaults/default create mode 100644 root/defaults/deploy/deploy-convert-certs.sh delete mode 100644 root/defaults/dns-conf/aliyun.ini delete mode 100644 root/defaults/dns-conf/cloudxns.ini delete mode 100644 root/defaults/dns-conf/cpanel.ini delete mode 100644 root/defaults/dns-conf/digitalocean.ini delete mode 100644 root/defaults/dns-conf/dnsimple.ini delete mode 100644 root/defaults/dns-conf/dnsmadeeasy.ini delete mode 100644 root/defaults/dns-conf/domeneshop.ini delete mode 100644 root/defaults/dns-conf/gandi.ini delete mode 100644 root/defaults/dns-conf/gehirn.ini delete mode 100644 root/defaults/dns-conf/google.json delete mode 100644 root/defaults/dns-conf/hetzner.ini delete mode 100644 root/defaults/dns-conf/inwx.ini delete mode 100644 root/defaults/dns-conf/linode.ini delete mode 100644 root/defaults/dns-conf/luadns.ini delete mode 100644 root/defaults/dns-conf/netcup.ini delete mode 100644 root/defaults/dns-conf/njalla.ini delete mode 100644 root/defaults/dns-conf/nsone.ini delete 
mode 100644 root/defaults/dns-conf/ovh.ini delete mode 100644 root/defaults/dns-conf/rfc2136.ini delete mode 100644 root/defaults/dns-conf/route53.ini delete mode 100644 root/defaults/dns-conf/sakuracloud.ini delete mode 100644 root/defaults/dns-conf/transip.ini delete mode 100644 root/defaults/fail2ban/fail2ban.local delete mode 100644 root/defaults/fail2ban/filter.d/nginx-badbots.conf delete mode 100644 root/defaults/fail2ban/filter.d/nginx-deny.conf delete mode 100644 root/defaults/geoip2.conf delete mode 100644 root/defaults/index.html delete mode 100644 root/defaults/jail.local delete mode 100644 root/defaults/ldap.conf delete mode 100644 root/defaults/nginx.conf delete mode 100644 root/defaults/proxy.conf delete mode 100644 root/defaults/ssl.conf delete mode 100644 root/donate.txt create mode 100644 root/etc/cont-init.d/.gitkeep create mode 100644 root/etc/cont-init.d/00-s6-secret-init.sh create mode 100644 root/etc/cont-init.d/01-add_user.sh create mode 100644 root/etc/cont-init.d/02-set-timezone.sh create mode 100644 root/etc/cont-init.d/10-permissions.sh create mode 100644 root/etc/cont-init.d/50-certbot.sh delete mode 100644 root/etc/cont-init.d/50-config delete mode 100644 root/etc/cont-init.d/60-renew create mode 100644 root/etc/cont-init.d/60_renewal-init.sh delete mode 100644 root/etc/cont-init.d/70-templates create mode 100644 root/etc/crontabs/.gitkeep delete mode 100644 root/etc/logrotate.d/fail2ban delete mode 100644 root/etc/logrotate.d/lerotate create mode 100644 root/etc/services.d/.gitkeep create mode 100644 root/etc/services.d/cron/finish create mode 100644 root/etc/services.d/cron/run delete mode 100644 root/etc/services.d/fail2ban/run create mode 100644 scripts/archive/00_secret-init.sh create mode 100644 scripts/archive/01-add_user.sh create mode 100644 scripts/archive/init.sh create mode 100644 scripts/backups/deploy-convert-certs.sh create mode 100644 scripts/backups/root create mode 100755 scripts/buildx.sh create mode 100755 
scripts/install-s6.sh diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 100644 index 10ff51d..0000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1,122 +0,0 @@ -# Contributing to swag - -## Gotchas - -* While contributing make sure to make all your changes before creating a Pull Request, as our pipeline builds each commit after the PR is open. -* Read, and fill the Pull Request template - * If this is a fix for a typo in code or documentation in the README please file an issue - * If the PR is addressing an existing issue include, closes #\, in the body of the PR commit message -* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://discord.gg/YWrKVTn) - -## Common files - -| File | Use case | -| :----: | --- | -| `Dockerfile` | Dockerfile used to build amd64 images | -| `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures | -| `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures | -| `Jenkinsfile` | This file is a product of our builder and should not be edited directly. This is used to build the image | -| `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above, it only affects the build-process | -| `package_versions.txt` | This file is generated as a part of the build-process and should not be edited directly. It lists all the installed packages and their versions | -| `README.md` | This file is a product of our builder and should not be edited directly. This displays the readme for the repository and image registries | -| `readme-vars.yml` | This file is used to generate the `README.md` | - -## Readme - -If you would like to change our readme, please __**do not**__ directly edit the readme, as it is auto-generated on each commit. -Instead edit the [readme-vars.yml](https://github.com/linuxserver/docker-swag/edit/master/readme-vars.yml). 
- -These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an ansible play. -Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io/images/docker-swag) - -### Fixing typos or clarify the text in the readme - -There are variables for multiple parts of the readme, the most common ones are: - -| Variable | Description | -| :----: | --- | -| `project_blurb` | This is the short excerpt shown above the project logo. | -| `app_setup_block` | This is the text that shows up under "Application Setup" if enabled | - -### Parameters - -The compose and run examples are also generated from these variables. - -We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder. - -These are prefixed with `param_` for required parameters, or `opt_param` for optional parameters, except for `cap_add`. -Remember to enable param, if currently disabled. This differs between parameters, and can be seen in the reference file. - -Devices, environment variables, ports and volumes expects its variables in a certain way. - -### Devices - -```yml -param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -opt_param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -``` - -### Environment variables - -```yml -param_env_vars: - - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." } -opt_param_env_vars: - - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." 
} -``` - -### Ports - -```yml -param_ports: - - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } -opt_param_ports: - - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } -``` - -### Volumes - -```yml -param_volumes: - - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } -opt_param_volumes: - - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } -``` - -### Testing template changes - -After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR. - -## Dockerfiles - -We use multiple Dockerfiles in our repos, this is because sometimes some CPU architectures needs different packages to work. -If you are proposing additional packages to be added, ensure that you added the packages to all the Dockerfiles in alphabetical order. - -### Testing your changes - -``` -git clone https://github.com/linuxserver/docker-swag.git -cd docker-swag -docker build \ - --no-cache \ - --pull \ - -t linuxserver/swag:latest . -``` - -The ARM variants can be built on x86_64 hardware using `multiarch/qemu-user-static` -``` -docker run --rm --privileged multiarch/qemu-user-static:register --reset -``` - -Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. 
- -## Update the chagelog - -If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-swag/tree/master/root), add an entry to the changelog - -```yml -changelogs: - - { date: "DD.MM.YY:", desc: "Added some love to templates" } -``` diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100755 index dc31dc8..0000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,3 +0,0 @@ -github: linuxserver -open_collective: linuxserver -custom: ["https://supporters.eff.org/donate/support-work-on-certbot",] diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100755 index 333e042..0000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,13 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: Discord chat support - url: https://discord.gg/YWrKVTn - about: Realtime support / chat with the community and the team. - - - name: Discourse discussion forum - url: https://discourse.linuxserver.io - about: Post on our community forum. - - - name: Documentation - url: https://docs.linuxserver.io/images/docker-swag - about: Documentation - information about all of our containers. diff --git a/.github/ISSUE_TEMPLATE/issue.bug.md b/.github/ISSUE_TEMPLATE/issue.bug.md deleted file mode 100755 index 4b97561..0000000 --- a/.github/ISSUE_TEMPLATE/issue.bug.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- -[linuxserverurl]: https://linuxserver.io -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - - - - - ------------------------------- - -## Expected Behavior - - -## Current Behavior - - -## Steps to Reproduce - - -1. -2. -3. -4. 
- -## Environment -**OS:** -**CPU architecture:** x86_64/arm32/arm64 -**How docker service was installed:** - - - -## Command used to create docker container (run/create/compose/screenshot) - - -## Docker logs - diff --git a/.github/ISSUE_TEMPLATE/issue.feature.md b/.github/ISSUE_TEMPLATE/issue.feature.md deleted file mode 100755 index 20a91fd..0000000 --- a/.github/ISSUE_TEMPLATE/issue.feature.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project - ---- -[linuxserverurl]: https://linuxserver.io -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - - - - - - - - ------------------------------- - -## Desired Behavior - - -## Current Behavior - - -## Alternatives Considered - diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 98c4ffd..0000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,43 +0,0 @@ - - -[linuxserverurl]: https://linuxserver.io -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - - - - - - - - - - - - - - - - - ------------------------------- - - - [ ] I have read the [contributing](https://github.com/linuxserver/docker-swag/blob/master/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications - ------------------------------- - - - -## Description: - - -## Benefits of this PR and context: - - -## How Has This Been Tested? 
- - - - - -## Source / References: - diff --git a/.github/workflows/external_trigger.yml b/.github/workflows/external_trigger.yml deleted file mode 100644 index 81ea607..0000000 --- a/.github/workflows/external_trigger.yml +++ /dev/null @@ -1,92 +0,0 @@ -name: External Trigger Main - -on: - workflow_dispatch: - -jobs: - external-trigger-master: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.3 - - - name: External Trigger - if: github.ref == 'refs/heads/master' - run: | - if [ -n "${{ secrets.PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER }}" ]; then - echo "**** Github secret PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER is set; skipping trigger. ****" - exit 0 - fi - echo "**** External trigger running off of master branch. To disable this trigger, set a Github secret named \"PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER\". ****" - echo "**** Retrieving external version ****" - EXT_RELEASE=$(curl -sL "https://pypi.python.org/pypi/certbot/json" |jq -r '. | .info.version') - if [ -z "${EXT_RELEASE}" ] || [ "${EXT_RELEASE}" == "null" ]; then - echo "**** Can't retrieve external version, exiting ****" - FAILURE_REASON="Can't retrieve external version for swag branch master" - GHA_TRIGGER_URL="https://github.com/linuxserver/docker-swag/actions/runs/${{ github.run_id }}" - curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680, - "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n**Trigger URL:** '"${GHA_TRIGGER_URL}"' \n"}], - "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} - exit 1 - fi - EXT_RELEASE=$(echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g') - echo "**** External version: ${EXT_RELEASE} ****" - echo "**** Retrieving last pushed version ****" - image="linuxserver/swag" - tag="latest" - token=$(curl -sX GET \ - "https://ghcr.io/token?scope=repository%3Alinuxserver%2Fswag%3Apull" \ - | jq -r '.token') - 
multidigest=$(curl -s \ - --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \ - --header "Authorization: Bearer ${token}" \ - "https://ghcr.io/v2/${image}/manifests/${tag}" \ - | jq -r 'first(.manifests[].digest)') - digest=$(curl -s \ - --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \ - --header "Authorization: Bearer ${token}" \ - "https://ghcr.io/v2/${image}/manifests/${multidigest}" \ - | jq -r '.config.digest') - image_info=$(curl -sL \ - --header "Authorization: Bearer ${token}" \ - "https://ghcr.io/v2/${image}/blobs/${digest}" \ - | jq -r '.container_config') - IMAGE_RELEASE=$(echo ${image_info} | jq -r '.Labels.build_version' | awk '{print $3}') - IMAGE_VERSION=$(echo ${IMAGE_RELEASE} | awk -F'-ls' '{print $1}') - if [ -z "${IMAGE_VERSION}" ]; then - echo "**** Can't retrieve last pushed version, exiting ****" - FAILURE_REASON="Can't retrieve last pushed version for swag tag latest" - curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680, - "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n"}], - "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} - exit 1 - fi - echo "**** Last pushed version: ${IMAGE_VERSION} ****" - if [ "${EXT_RELEASE}" == "${IMAGE_VERSION}" ]; then - echo "**** Version ${EXT_RELEASE} already pushed, exiting ****" - exit 0 - elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then - echo "**** New version ${EXT_RELEASE} found; but there already seems to be an active build on Jenkins; exiting ****" - exit 0 - else - echo "**** New version ${EXT_RELEASE} found; old version was ${IMAGE_VERSION}. 
Triggering new build ****" - response=$(curl -iX POST \ - https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/buildWithParameters?PACKAGE_CHECK=false \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") - echo "**** Jenkins job queue url: ${response%$'\r'} ****" - echo "**** Sleeping 10 seconds until job starts ****" - sleep 10 - buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') - buildurl="${buildurl%$'\r'}" - echo "**** Jenkins job build url: ${buildurl} ****" - echo "**** Attempting to change the Jenkins job description ****" - curl -iX POST \ - "${buildurl}submitDescription" \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ - --data-urlencode "description=GHA external trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - --data-urlencode "Submit=Submit" - echo "**** Notifying Discord ****" - TRIGGER_REASON="A version change was detected for swag tag latest. 
Old version:${IMAGE_VERSION} New version:${EXT_RELEASE}" - curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, - "description": "**Build Triggered** \n**Reason:** '"${TRIGGER_REASON}"' \n**Build URL:** '"${buildurl}display/redirect"' \n"}], - "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} - fi diff --git a/.github/workflows/external_trigger_scheduler.yml b/.github/workflows/external_trigger_scheduler.yml deleted file mode 100644 index 632e265..0000000 --- a/.github/workflows/external_trigger_scheduler.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: External Trigger Scheduler - -on: - schedule: - - cron: '50 * * * *' - workflow_dispatch: - -jobs: - external-trigger-scheduler: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.3 - with: - fetch-depth: '0' - - - name: External Trigger Scheduler - run: | - echo "**** Branches found: ****" - git for-each-ref --format='%(refname:short)' refs/remotes - echo "**** Pulling the yq docker image ****" - docker pull ghcr.io/linuxserver/yq - for br in $(git for-each-ref --format='%(refname:short)' refs/remotes) - do - br=$(echo "$br" | sed 's|origin/||g') - echo "**** Evaluating branch ${br} ****" - ls_branch=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/jenkins-vars.yml \ - | docker run --rm -i --entrypoint yq ghcr.io/linuxserver/yq -r .ls_branch) - if [ "$br" == "$ls_branch" ]; then - echo "**** Branch ${br} appears to be live; checking workflow. ****" - if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then - echo "**** Workflow exists. Triggering external trigger workflow for branch ${br} ****." 
- curl -iX POST \ - -H "Authorization: token ${{ secrets.CR_PAT }}" \ - -H "Accept: application/vnd.github.v3+json" \ - -d "{\"ref\":\"refs/heads/${br}\"}" \ - https://api.github.com/repos/linuxserver/docker-swag/actions/workflows/external_trigger.yml/dispatches - else - echo "**** Workflow doesn't exist; skipping trigger. ****" - fi - else - echo "**** ${br} appears to be a dev branch; skipping trigger. ****" - fi - done diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml deleted file mode 100644 index b822f27..0000000 --- a/.github/workflows/greetings.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Greetings - -on: [pull_request_target, issues] - -jobs: - greeting: - runs-on: ubuntu-latest - steps: - - uses: actions/first-interaction@v1 - with: - issue-message: 'Thanks for opening your first issue here! Be sure to follow the [bug](https://github.com/linuxserver/docker-swag/blob/master/.github/ISSUE_TEMPLATE/issue.bug.md) or [feature](https://github.com/linuxserver/docker-swag/blob/master/.github/ISSUE_TEMPLATE/issue.feature.md) issue templates!' - pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-swag/blob/master/.github/PULL_REQUEST_TEMPLATE.md)!' - repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/package_trigger.yml b/.github/workflows/package_trigger.yml deleted file mode 100644 index a122e53..0000000 --- a/.github/workflows/package_trigger.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Package Trigger Main - -on: - workflow_dispatch: - -jobs: - package-trigger-master: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.3 - - - name: Package Trigger - if: github.ref == 'refs/heads/master' - run: | - if [ -n "${{ secrets.PAUSE_PACKAGE_TRIGGER_SWAG_MASTER }}" ]; then - echo "**** Github secret PAUSE_PACKAGE_TRIGGER_SWAG_MASTER is set; skipping trigger. 
****" - exit 0 - fi - if [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then - echo "**** There already seems to be an active build on Jenkins; skipping package trigger ****" - exit 0 - fi - echo "**** Package trigger running off of master branch. To disable, set a Github secret named \"PAUSE_PACKAGE_TRIGGER_SWAG_MASTER\". ****" - response=$(curl -iX POST \ - https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/buildWithParameters?PACKAGE_CHECK=true \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") - echo "**** Jenkins job queue url: ${response%$'\r'} ****" - echo "**** Sleeping 10 seconds until job starts ****" - sleep 10 - buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') - buildurl="${buildurl%$'\r'}" - echo "**** Jenkins job build url: ${buildurl} ****" - echo "**** Attempting to change the Jenkins job description ****" - curl -iX POST \ - "${buildurl}submitDescription" \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ - --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - --data-urlencode "Submit=Submit" diff --git a/.github/workflows/package_trigger_scheduler.yml b/.github/workflows/package_trigger_scheduler.yml deleted file mode 100644 index 61d7a9a..0000000 --- a/.github/workflows/package_trigger_scheduler.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Package Trigger Scheduler - -on: - schedule: - - cron: '03 5 * * 4' - workflow_dispatch: - -jobs: - package-trigger-scheduler: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.3 - with: - fetch-depth: '0' - - - name: Package Trigger Scheduler - run: | - echo "**** Branches found: ****" - git for-each-ref --format='%(refname:short)' refs/remotes - echo "**** Pulling the yq 
docker image ****" - docker pull ghcr.io/linuxserver/yq - for br in $(git for-each-ref --format='%(refname:short)' refs/remotes) - do - br=$(echo "$br" | sed 's|origin/||g') - echo "**** Evaluating branch ${br} ****" - ls_branch=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/jenkins-vars.yml \ - | docker run --rm -i --entrypoint yq ghcr.io/linuxserver/yq -r .ls_branch) - if [ "${br}" == "${ls_branch}" ]; then - echo "**** Branch ${br} appears to be live; checking workflow. ****" - if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/.github/workflows/package_trigger.yml > /dev/null 2>&1; then - echo "**** Workflow exists. Triggering package trigger workflow for branch ${br}. ****" - triggered_branches="${triggered_branches}${br} " - curl -iX POST \ - -H "Authorization: token ${{ secrets.CR_PAT }}" \ - -H "Accept: application/vnd.github.v3+json" \ - -d "{\"ref\":\"refs/heads/${br}\"}" \ - https://api.github.com/repos/linuxserver/docker-swag/actions/workflows/package_trigger.yml/dispatches - sleep 30 - else - echo "**** Workflow doesn't exist; skipping trigger. ****" - fi - else - echo "**** ${br} appears to be a dev branch; skipping trigger. 
****" - fi - done - echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****" - echo "**** Notifying Discord ****" - curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, - "description": "**Package Check Build(s) Triggered for swag** \n**Branch(es):** '"${triggered_branches}"' \n**Build URL:** '"https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-swag/activity/"' \n"}], - "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index 1806420..0000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Mark stale issues and pull requests - -on: - schedule: - - cron: "30 1 * * *" - -jobs: - stale: - - runs-on: ubuntu-latest - - steps: - - uses: actions/stale@v1 - with: - stale-issue-message: "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions." - stale-pr-message: "This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions." 
- stale-issue-label: 'no-issue-activity' - stale-pr-label: 'no-pr-activity' - days-before-stale: 30 - days-before-close: 365 - exempt-issue-labels: 'awaiting-approval,work-in-progress' - exempt-pr-labels: 'awaiting-approval,work-in-progress' - repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/Dockerfile b/Dockerfile old mode 100755 new mode 100644 index 4928ee2..034eec1 --- a/Dockerfile +++ b/Dockerfile @@ -1,154 +1,73 @@ -FROM ghcr.io/linuxserver/baseimage-alpine-nginx:3.13 - -# set version label +# FROM ubuntu:focal +FROM --platform=${TARGETPLATFORM:-linux/amd64} ubuntu:focal +ARG TARGETPLATFORM ARG BUILD_DATE -ARG VERSION -ARG CERTBOT_VERSION -LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="aptalca" -# environment settings -ENV DHLEVEL=2048 ONLY_SUBDOMAINS=false AWS_CONFIG_FILE=/config/dns-conf/route53.ini -ENV S6_BEHAVIOUR_IF_STAGE2_FAILS=2 +LABEL build_version="${TARGETPLATFORM} - ${BUILD_DATE}" -RUN \ - echo "**** install build packages ****" && \ - apk add --no-cache --virtual=build-dependencies \ - cargo \ - g++ \ - gcc \ - libffi-dev \ - openssl-dev \ - python3-dev && \ - echo "**** install runtime packages ****" && \ - apk add --no-cache --upgrade \ - curl \ - fail2ban \ - gnupg \ - memcached \ - nginx \ - nginx-mod-http-brotli \ - nginx-mod-http-dav-ext \ - nginx-mod-http-echo \ - nginx-mod-http-fancyindex \ - nginx-mod-http-geoip2 \ - nginx-mod-http-headers-more \ - nginx-mod-http-image-filter \ - nginx-mod-http-nchan \ - nginx-mod-http-perl \ - nginx-mod-http-redis2 \ - nginx-mod-http-set-misc \ - nginx-mod-http-upload-progress \ - nginx-mod-http-xslt-filter \ - nginx-mod-mail \ - nginx-mod-rtmp \ - nginx-mod-stream \ - nginx-mod-stream-geoip2 \ - nginx-vim \ - php7-bcmath \ - php7-bz2 \ - php7-ctype \ - php7-curl \ - php7-dom \ - php7-exif \ - php7-ftp \ - php7-gd \ - php7-iconv \ - php7-imap \ - php7-intl \ - 
php7-ldap \ - php7-mcrypt \ - php7-memcached \ - php7-mysqli \ - php7-mysqlnd \ - php7-opcache \ - php7-pdo_mysql \ - php7-pdo_odbc \ - php7-pdo_pgsql \ - php7-pdo_sqlite \ - php7-pear \ - php7-pecl-apcu \ - php7-pecl-redis \ - php7-pgsql \ - php7-phar \ - php7-posix \ - php7-soap \ - php7-sockets \ - php7-sodium \ - php7-sqlite3 \ - php7-tokenizer \ - php7-xml \ - php7-xmlreader \ - php7-xmlrpc \ - php7-xsl \ - php7-zip \ - py3-cryptography \ - py3-future \ - py3-pip \ - whois && \ - echo "**** install certbot plugins ****" && \ - if [ -z ${CERTBOT_VERSION+x} ]; then \ - CERTBOT="certbot"; \ - else \ - CERTBOT="certbot==${CERTBOT_VERSION}"; \ - fi && \ - pip3 install -U \ - pip && \ - pip3 install -U \ - ${CERTBOT} \ - certbot-dns-aliyun \ +# ENV TLD +# ENV SUBDOMAINS +ENV ONLY_SUBDOMAINS=false +ENV PROPAGATION=60 +ENV STAGING=false + +# install supporting packages +RUN apt-get update \ + && apt-get install --no-install-recommends -y \ + ca-certificates \ + cron \ + curl \ + gcc \ + libssl-dev \ + libffi-dev \ + openssl \ + python3 \ + python3-pip \ + tzdata \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + /tmp/* \ + /var/tmp/* + +# s6 overlay +COPY ./scripts/install-s6.sh /tmp/install-s6.sh +RUN chmod +x /tmp/install-s6.sh && /tmp/install-s6.sh "${TARGETPLATFORM}" && rm -f /tmp/install-s6 + +EXPOSE 80 443 + +# install certbot +RUN pip3 install \ + pip \ + && pip3 install \ + certbot \ certbot-dns-cloudflare \ - certbot-dns-cloudxns \ - certbot-dns-cpanel \ - certbot-dns-digitalocean \ - certbot-dns-dnsimple \ - certbot-dns-dnsmadeeasy \ - certbot-dns-domeneshop \ - certbot-dns-google \ - certbot-dns-hetzner \ - certbot-dns-inwx \ - certbot-dns-linode \ - certbot-dns-luadns \ - certbot-dns-netcup \ - certbot-dns-njalla \ - certbot-dns-nsone \ - certbot-dns-ovh \ - certbot-dns-rfc2136 \ - certbot-dns-route53 \ - certbot-dns-transip \ - certbot-plugin-gandi \ - cryptography \ - requests && \ - echo "**** remove unnecessary fail2ban filters ****" && \ - rm 
\ - /etc/fail2ban/jail.d/alpine-ssh.conf && \ - echo "**** copy fail2ban default action and filter to /default ****" && \ - mkdir -p /defaults/fail2ban && \ - mv /etc/fail2ban/action.d /defaults/fail2ban/ && \ - mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \ - echo "**** copy proxy confs to /default ****" && \ - mkdir -p /defaults/proxy-confs && \ - curl -o \ - /tmp/proxy.tar.gz -L \ - "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \ - tar xf \ - /tmp/proxy.tar.gz -C \ - /defaults/proxy-confs --strip-components=1 --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \ - echo "**** configure nginx ****" && \ - rm -f /etc/nginx/conf.d/default.conf && \ - curl -o \ - /defaults/dhparams.pem -L \ - "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" && \ - echo "**** cleanup ****" && \ - apk del --purge \ - build-dependencies && \ - for cleanfiles in *.pyc *.pyo; \ - do \ - find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \ - ; done && \ - rm -rf \ - /tmp/* \ - /root/.cache + cryptography \ + requests \ + && for cleanfiles in *.pyc *.pyo; \ + do \ + find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \ + ; done \ + && rm -rf \ + /tmp/* \ + /root/.cache + +RUN mkdir -p \ + /app \ + /config \ + /defaults \ + /letsencrypt \ + /etc/letsencrypt/live \ + /etc/letsencrypt/renewal-hooks/deploy + +VOLUME /config +VOLUME /letsencrypt + + +RUN groupmod -g 1000 users && \ + useradd -u 911 -U -d /config -s /bin/false abc && \ + usermod -G users abc -# add local files COPY root/ / +# RUN chmod -R +x /app + +ENTRYPOINT [ "/init" ] \ No newline at end of file diff --git a/Dockerfile.armhf b/Dockerfile-swag similarity index 98% rename from Dockerfile.armhf rename to Dockerfile-swag index 681d520..4928ee2 100755 --- a/Dockerfile.armhf +++ b/Dockerfile-swag @@ -1,4 +1,4 @@ -FROM ghcr.io/linuxserver/baseimage-alpine-nginx:arm32v7-3.13 +FROM 
ghcr.io/linuxserver/baseimage-alpine-nginx:3.13 # set version label ARG BUILD_DATE diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 deleted file mode 100755 index e57d2b2..0000000 --- a/Dockerfile.aarch64 +++ /dev/null @@ -1,154 +0,0 @@ -FROM ghcr.io/linuxserver/baseimage-alpine-nginx:arm64v8-3.13 - -# set version label -ARG BUILD_DATE -ARG VERSION -ARG CERTBOT_VERSION -LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="aptalca" - -# environment settings -ENV DHLEVEL=2048 ONLY_SUBDOMAINS=false AWS_CONFIG_FILE=/config/dns-conf/route53.ini -ENV S6_BEHAVIOUR_IF_STAGE2_FAILS=2 - -RUN \ - echo "**** install build packages ****" && \ - apk add --no-cache --virtual=build-dependencies \ - cargo \ - g++ \ - gcc \ - libffi-dev \ - openssl-dev \ - python3-dev && \ - echo "**** install runtime packages ****" && \ - apk add --no-cache --upgrade \ - curl \ - fail2ban \ - gnupg \ - memcached \ - nginx \ - nginx-mod-http-brotli \ - nginx-mod-http-dav-ext \ - nginx-mod-http-echo \ - nginx-mod-http-fancyindex \ - nginx-mod-http-geoip2 \ - nginx-mod-http-headers-more \ - nginx-mod-http-image-filter \ - nginx-mod-http-nchan \ - nginx-mod-http-perl \ - nginx-mod-http-redis2 \ - nginx-mod-http-set-misc \ - nginx-mod-http-upload-progress \ - nginx-mod-http-xslt-filter \ - nginx-mod-mail \ - nginx-mod-rtmp \ - nginx-mod-stream \ - nginx-mod-stream-geoip2 \ - nginx-vim \ - php7-bcmath \ - php7-bz2 \ - php7-ctype \ - php7-curl \ - php7-dom \ - php7-exif \ - php7-ftp \ - php7-gd \ - php7-iconv \ - php7-imap \ - php7-intl \ - php7-ldap \ - php7-mcrypt \ - php7-memcached \ - php7-mysqli \ - php7-mysqlnd \ - php7-opcache \ - php7-pdo_mysql \ - php7-pdo_odbc \ - php7-pdo_pgsql \ - php7-pdo_sqlite \ - php7-pear \ - php7-pecl-apcu \ - php7-pecl-redis \ - php7-pgsql \ - php7-phar \ - php7-posix \ - php7-soap \ - php7-sockets \ - php7-sodium \ - php7-sqlite3 \ - php7-tokenizer \ - php7-xml \ - php7-xmlreader \ - php7-xmlrpc \ - php7-xsl \ - 
php7-zip \ - py3-cryptography \ - py3-future \ - py3-pip \ - whois && \ - echo "**** install certbot plugins ****" && \ - if [ -z ${CERTBOT_VERSION+x} ]; then \ - CERTBOT="certbot"; \ - else \ - CERTBOT="certbot==${CERTBOT_VERSION}"; \ - fi && \ - pip3 install -U \ - pip && \ - pip3 install -U \ - ${CERTBOT} \ - certbot-dns-aliyun \ - certbot-dns-cloudflare \ - certbot-dns-cloudxns \ - certbot-dns-cpanel \ - certbot-dns-digitalocean \ - certbot-dns-dnsimple \ - certbot-dns-dnsmadeeasy \ - certbot-dns-domeneshop \ - certbot-dns-google \ - certbot-dns-hetzner \ - certbot-dns-inwx \ - certbot-dns-linode \ - certbot-dns-luadns \ - certbot-dns-netcup \ - certbot-dns-njalla \ - certbot-dns-nsone \ - certbot-dns-ovh \ - certbot-dns-rfc2136 \ - certbot-dns-route53 \ - certbot-dns-transip \ - certbot-plugin-gandi \ - cryptography \ - requests && \ - echo "**** remove unnecessary fail2ban filters ****" && \ - rm \ - /etc/fail2ban/jail.d/alpine-ssh.conf && \ - echo "**** copy fail2ban default action and filter to /default ****" && \ - mkdir -p /defaults/fail2ban && \ - mv /etc/fail2ban/action.d /defaults/fail2ban/ && \ - mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \ - echo "**** copy proxy confs to /default ****" && \ - mkdir -p /defaults/proxy-confs && \ - curl -o \ - /tmp/proxy.tar.gz -L \ - "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \ - tar xf \ - /tmp/proxy.tar.gz -C \ - /defaults/proxy-confs --strip-components=1 --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \ - echo "**** configure nginx ****" && \ - rm -f /etc/nginx/conf.d/default.conf && \ - curl -o \ - /defaults/dhparams.pem -L \ - "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" && \ - echo "**** cleanup ****" && \ - apk del --purge \ - build-dependencies && \ - for cleanfiles in *.pyc *.pyo; \ - do \ - find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \ - ; done && \ - rm -rf \ - /tmp/* \ - 
/root/.cache - -# add local files -COPY root/ / diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 2eab6f0..0000000 --- a/Jenkinsfile +++ /dev/null @@ -1,775 +0,0 @@ -pipeline { - agent { - label 'X86-64-MULTI' - } - options { - buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '60')) - parallelsAlwaysFailFast() - } - // Input to determine if this is a package check - parameters { - string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') - } - // Configuration for the variables used for this specific repo - environment { - BUILDS_DISCORD=credentials('build_webhook_url') - GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab') - GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0') - GITLAB_NAMESPACE=credentials('gitlab-namespace-id') - EXT_PIP = 'certbot' - BUILD_VERSION_ARG = 'CERTBOT_VERSION' - LS_USER = 'linuxserver' - LS_REPO = 'docker-swag' - CONTAINER_NAME = 'swag' - DOCKERHUB_IMAGE = 'linuxserver/swag' - DEV_DOCKERHUB_IMAGE = 'lsiodev/swag' - PR_DOCKERHUB_IMAGE = 'lspipepr/swag' - DIST_IMAGE = 'alpine' - MULTIARCH='true' - CI='true' - CI_WEB='false' - CI_PORT='80' - CI_SSL='false' - CI_DELAY='30' - CI_DOCKERENV='TEST_RUN=1' - CI_AUTH='' - CI_WEBPATH='' - } - stages { - // Setup all the basic environment variables needed for the build - stage("Set ENV Variables base"){ - steps{ - script{ - env.EXIT_STATUS = '' - env.LS_RELEASE = sh( - script: '''docker run --rm ghcr.io/linuxserver/alexeiled-skopeo sh -c 'skopeo inspect docker://docker.io/'${DOCKERHUB_IMAGE}':latest 2>/dev/null' | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', - returnStdout: true).trim() - env.LS_RELEASE_NOTES = sh( - script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''', - returnStdout: true).trim() - env.GITHUB_DATE = sh( - script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', - returnStdout: 
true).trim() - env.COMMIT_SHA = sh( - script: '''git rev-parse HEAD''', - returnStdout: true).trim() - env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/' - env.PULL_REQUEST = env.CHANGE_ID - env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.md ./.github/ISSUE_TEMPLATE/issue.feature.md ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/greetings.yml ./.github/workflows/stale.yml ./root/donate.txt ./.github/workflows/package_trigger.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/external_trigger.yml ./.github/workflows/external_trigger_scheduler.yml' - } - script{ - env.LS_RELEASE_NUMBER = sh( - script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''', - returnStdout: true).trim() - } - script{ - env.LS_TAG_NUMBER = sh( - script: '''#! 
/bin/bash - tagsha=$(git rev-list -n 1 ${LS_RELEASE} 2>/dev/null) - if [ "${tagsha}" == "${COMMIT_SHA}" ]; then - echo ${LS_RELEASE_NUMBER} - elif [ -z "${GIT_COMMIT}" ]; then - echo ${LS_RELEASE_NUMBER} - else - echo $((${LS_RELEASE_NUMBER} + 1)) - fi''', - returnStdout: true).trim() - } - } - } - /* ####################### - Package Version Tagging - ####################### */ - // Grab the current package versions in Git to determine package tag - stage("Set Package tag"){ - steps{ - script{ - env.PACKAGE_TAG = sh( - script: '''#!/bin/bash - if [ -e package_versions.txt ] ; then - cat package_versions.txt | md5sum | cut -c1-8 - else - echo none - fi''', - returnStdout: true).trim() - } - } - } - /* ######################## - External Release Tagging - ######################## */ - // If this is a pip release set the external tag to the pip version - stage("Set ENV pip_version"){ - steps{ - script{ - env.EXT_RELEASE = sh( - script: '''curl -sL https://pypi.python.org/pypi/${EXT_PIP}/json |jq -r '. 
| .info.version' ''', - returnStdout: true).trim() - env.RELEASE_LINK = 'https://pypi.python.org/pypi/' + env.EXT_PIP - } - } - } // Sanitize the release tag and strip illegal docker or github characters - stage("Sanitize tag"){ - steps{ - script{ - env.EXT_RELEASE_CLEAN = sh( - script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g' ''', - returnStdout: true).trim() - } - } - } - // If this is a master build use live docker endpoints - stage("Set ENV live build"){ - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - } - steps { - script{ - env.IMAGE = env.DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - } - } - } - // If this is a dev build use dev docker endpoints - stage("Set ENV dev build"){ - when { - not {branch "master"} - environment name: 'CHANGE_ID', value: '' - } - steps { - script{ - env.IMAGE = env.DEV_DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + 
env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/' - } - } - } - // If this is a pull request build use dev docker endpoints - stage("Set ENV PR build"){ - when { - not {environment name: 'CHANGE_ID', value: ''} - } - steps { - script{ - env.IMAGE = env.PR_DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST - env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/' - } - } - } - // Run ShellCheck - stage('ShellCheck') { - when { - environment name: 'CI', value: 'true' - } - steps { - withCredentials([ - string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), - 
string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET') - ]) { - script{ - env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' - } - sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-shellcheck/master/checkrun.sh | /bin/bash''' - sh '''#! /bin/bash - set -e - docker pull ghcr.io/linuxserver/lsiodev-spaces-file-upload:latest - docker run --rm \ - -e DESTINATION=\"${IMAGE}/${META_TAG}/shellcheck-result.xml\" \ - -e FILE_NAME="shellcheck-result.xml" \ - -e MIMETYPE="text/xml" \ - -v ${WORKSPACE}:/mnt \ - -e SECRET_KEY=\"${S3_SECRET}\" \ - -e ACCESS_KEY=\"${S3_KEY}\" \ - -t ghcr.io/linuxserver/lsiodev-spaces-file-upload:latest \ - python /upload.py''' - } - } - } - // Use helper containers to render templated files - stage('Update-Templates') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - expression { - env.CONTAINER_NAME != null - } - } - steps { - sh '''#! /bin/bash - set -e - TEMPDIR=$(mktemp -d) - docker pull ghcr.io/linuxserver/jenkins-builder:latest - docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins ghcr.io/linuxserver/jenkins-builder:latest - CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) - cd ${TEMPDIR}/docker-${CONTAINER_NAME} - NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) - if [[ "${CURRENTHASH}" != "${NEWHASH}" ]]; then - mkdir -p ${TEMPDIR}/repo - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - cd ${TEMPDIR}/repo/${LS_REPO} - git checkout -f master - cd ${TEMPDIR}/docker-${CONTAINER_NAME} - mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows - mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE - rm -f ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE.md - cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || : - cd ${TEMPDIR}/repo/${LS_REPO}/ - git add ${TEMPLATED_FILES} - git rm 
.github/ISSUE_TEMPLATE.md || : - git commit -m 'Bot Updating Templated Files' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all - echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - else - echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - fi - mkdir -p ${TEMPDIR}/gitbook - git clone https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/gitbook/docker-documentation - if [[ ("${BRANCH_NAME}" == "master") || ("${BRANCH_NAME}" == "main") ]] && [[ (! -f ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md ${TEMPDIR}/gitbook/docker-documentation/images/ - cd ${TEMPDIR}/gitbook/docker-documentation/ - git add images/docker-${CONTAINER_NAME}.md - git commit -m 'Bot Updating Documentation' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git --all - fi - mkdir -p ${TEMPDIR}/unraid - git clone https://github.com/linuxserver/docker-templates.git ${TEMPDIR}/unraid/docker-templates - git clone https://github.com/linuxserver/templates.git ${TEMPDIR}/unraid/templates - if [[ -f ${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-icon.png ]]; then - sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-icon.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/${CONTAINER_NAME}.xml - fi - if [[ ("${BRANCH_NAME}" == "master") || ("${BRANCH_NAME}" == "main") ]] && [[ (! 
-f ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml) || ("$(md5sum ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/${CONTAINER_NAME}.xml | awk '{ print $1 }')") ]]; then - if grep -wq "${CONTAINER_NAME}" ${TEMPDIR}/unraid/templates/unraid/ignore.list; then - echo "Image is on the ignore list, skipping Unraid template upload" - else - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/ - cd ${TEMPDIR}/unraid/templates/ - git add unraid/${CONTAINER_NAME}.xml - git commit -m 'Bot Updating Unraid Template' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git --all - fi - fi - rm -Rf ${TEMPDIR}''' - script{ - env.FILES_UPDATED = sh( - script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''', - returnStdout: true).trim() - } - } - } - // Exit the build if the Templated files were just updated - stage('Template-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'FILES_UPDATED', value: 'true' - expression { - env.CONTAINER_NAME != null - } - } - steps { - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - /* ####################### - GitLab Mirroring - ####################### */ - // Ping into Gitlab to mirror this repo and have a registry endpoint - stage("GitLab Mirror"){ - when { - environment name: 'EXIT_STATUS', value: '' - } - steps{ - sh '''curl -H "Content-Type: application/json" -H "Private-Token: ${GITLAB_TOKEN}" -X POST https://gitlab.com/api/v4/projects \ - -d '{"namespace_id":'${GITLAB_NAMESPACE}',\ - "name":"'${LS_REPO}'", - "mirror":true,\ - "import_url":"https://github.com/linuxserver/'${LS_REPO}'.git",\ - "issues_access_level":"disabled",\ - "merge_requests_access_level":"disabled",\ - "repository_access_level":"enabled",\ - "visibility":"public"}' ''' - } - } - /* ############### - Build Container - ############### */ - // Build Docker container 
for push to LS Repo - stage('Build-Single') { - when { - environment name: 'MULTIARCH', value: 'false' - environment name: 'EXIT_STATUS', value: '' - } - steps { - echo "Running on node: ${NODE_NAME}" - sh "docker build --no-cache --pull -t ${IMAGE}:${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - } - } - // Build MultiArch Docker containers for push to LS Repo - stage('Build-Multi') { - when { - environment name: 'MULTIARCH', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - parallel { - stage('Build X86') { - steps { - echo "Running on node: ${NODE_NAME}" - sh "docker build --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - } - } - stage('Build ARMHF') { - agent { - label 'ARMHF' - } - steps { - echo "Running on node: ${NODE_NAME}" - echo 'Logging into Github' - sh '''#! /bin/bash - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - ''' - sh "docker build --no-cache --pull -f Dockerfile.armhf -t ${IMAGE}:arm32v7-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh "docker tag ${IMAGE}:arm32v7-${META_TAG} ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}" - retry(5) { - sh "docker push ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}" - } - sh '''docker rmi \ - ${IMAGE}:arm32v7-${META_TAG} \ - ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} || :''' - } - } - stage('Build ARM64') { - agent { - label 'ARM64' - } - steps { - echo "Running on node: ${NODE_NAME}" - echo 'Logging into Github' - sh '''#! 
/bin/bash - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - ''' - sh "docker build --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}" - retry(5) { - sh "docker push ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}" - } - sh '''docker rmi \ - ${IMAGE}:arm64v8-${META_TAG} \ - ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :''' - } - } - } - } - // Take the image we just built and dump package versions for comparison - stage('Update-packages') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''#! /bin/bash - set -e - TEMPDIR=$(mktemp -d) - if [ "${MULTIARCH}" == "true" ]; then - LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG} - else - LOCAL_CONTAINER=${IMAGE}:${META_TAG} - fi - if [ "${DIST_IMAGE}" == "alpine" ]; then - docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\ - apk info -v > /tmp/package_versions.txt && \ - sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \ - chmod 777 /tmp/package_versions.txt' - elif [ "${DIST_IMAGE}" == "ubuntu" ]; then - docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\ - apt list -qq --installed | sed "s#/.*now ##g" | cut -d" " -f1 > /tmp/package_versions.txt && \ - sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \ - chmod 777 /tmp/package_versions.txt' - fi - NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 ) - echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github" - if [ "${NEW_PACKAGE_TAG}" != 
"${PACKAGE_TAG}" ]; then - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO} - git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f master - cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/ - cd ${TEMPDIR}/${LS_REPO}/ - wait - git add package_versions.txt - git commit -m 'Bot Updating Package Versions' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all - echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} - echo "Package tag updated, stopping build process" - else - echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} - echo "Package tag is same as previous continue with build process" - fi - rm -Rf ${TEMPDIR}''' - script{ - env.PACKAGE_UPDATED = sh( - script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''', - returnStdout: true).trim() - } - } - } - // Exit the build if the package file was just updated - stage('PACKAGE-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'PACKAGE_UPDATED', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''#! /bin/bash - echo "Packages were updated. Cleaning up the image and exiting." - if [ "${MULTIARCH}" == "true" ]; then - docker rmi ${IMAGE}:amd64-${META_TAG} - else - docker rmi ${IMAGE}:${META_TAG} - fi''' - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - // Exit the build if this is just a package check and there are no changes to push - stage('PACKAGECHECK-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'PACKAGE_UPDATED', value: 'false' - environment name: 'EXIT_STATUS', value: '' - expression { - params.PACKAGE_CHECK == 'true' - } - } - steps { - sh '''#! /bin/bash - echo "There are no package updates. Cleaning up the image and exiting." 
- if [ "${MULTIARCH}" == "true" ]; then - docker rmi ${IMAGE}:amd64-${META_TAG} - else - docker rmi ${IMAGE}:${META_TAG} - fi''' - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - /* ####### - Testing - ####### */ - // Run Container tests - stage('Test') { - when { - environment name: 'CI', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - withCredentials([ - string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), - string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET') - ]) { - script{ - env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html' - } - sh '''#! /bin/bash - set -e - docker pull ghcr.io/linuxserver/lsiodev-ci:latest - if [ "${MULTIARCH}" == "true" ]; then - docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} - docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} - docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG} - docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} - fi - docker run --rm \ - --shm-size=1gb \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -e IMAGE=\"${IMAGE}\" \ - -e DELAY_START=\"${CI_DELAY}\" \ - -e TAGS=\"${CI_TAGS}\" \ - -e META_TAG=\"${META_TAG}\" \ - -e PORT=\"${CI_PORT}\" \ - -e SSL=\"${CI_SSL}\" \ - -e BASE=\"${DIST_IMAGE}\" \ - -e SECRET_KEY=\"${S3_SECRET}\" \ - -e ACCESS_KEY=\"${S3_KEY}\" \ - -e DOCKER_ENV=\"${CI_DOCKERENV}\" \ - -e WEB_SCREENSHOT=\"${CI_WEB}\" \ - -e WEB_AUTH=\"${CI_AUTH}\" \ - -e WEB_PATH=\"${CI_WEBPATH}\" \ - -e DO_REGION="ams3" \ - -e DO_BUCKET="lsio-ci" \ - -t ghcr.io/linuxserver/lsiodev-ci:latest \ - python /ci/ci.py''' - } - } - } - /* ################## - Release Logic - ################## */ - // If this is an amd64 only image only push a single image - stage('Docker-Push-Single') { - when { - 
environment name: 'MULTIARCH', value: 'false' - environment name: 'EXIT_STATUS', value: '' - } - steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - retry(5) { - sh '''#! /bin/bash - set -e - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin - for PUSHIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${IMAGE}"; do - docker tag ${IMAGE}:${META_TAG} ${PUSHIMAGE}:${META_TAG} - docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:latest - docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:${EXT_RELEASE_TAG} - docker push ${PUSHIMAGE}:latest - docker push ${PUSHIMAGE}:${META_TAG} - docker push ${PUSHIMAGE}:${EXT_RELEASE_TAG} - done - ''' - } - sh '''#! /bin/bash - for DELETEIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${IMAGE}"; do - docker rmi \ - ${DELETEIMAGE}:${META_TAG} \ - ${DELETEIMAGE}:${EXT_RELEASE_TAG} \ - ${DELETEIMAGE}:latest || : - done - ''' - } - } - } - // If this is a multi arch release push all images and define the manifest - stage('Docker-Push-Multi') { - when { - environment name: 'MULTIARCH', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - retry(5) { - sh '''#! 
/bin/bash - set -e - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin - if [ "${CI}" == "false" ]; then - docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} - docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} - docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG} - docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} - fi - for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}"; do - docker tag ${IMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} - docker tag ${IMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG} - docker tag ${IMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} - docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-latest - docker tag ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-latest - docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-latest - docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} - docker tag ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} - docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} - docker push ${MANIFESTIMAGE}:amd64-${META_TAG} - docker push ${MANIFESTIMAGE}:arm32v7-${META_TAG} - docker push ${MANIFESTIMAGE}:arm64v8-${META_TAG} - docker push ${MANIFESTIMAGE}:amd64-latest - docker push ${MANIFESTIMAGE}:arm32v7-latest - docker push ${MANIFESTIMAGE}:arm64v8-latest - docker push ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} - docker push ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} - docker push ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} - 
docker manifest push --purge ${MANIFESTIMAGE}:latest || : - docker manifest create ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm32v7-latest ${MANIFESTIMAGE}:arm64v8-latest - docker manifest annotate ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:arm32v7-latest --os linux --arch arm - docker manifest annotate ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:arm64v8-latest --os linux --arch arm64 --variant v8 - docker manifest push --purge ${MANIFESTIMAGE}:${META_TAG} || : - docker manifest create ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} - docker manifest annotate ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG} --os linux --arch arm - docker manifest annotate ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} --os linux --arch arm64 --variant v8 - docker manifest push --purge ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} || : - docker manifest create ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} - docker manifest annotate ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} --os linux --arch arm - docker manifest annotate ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} --os linux --arch arm64 --variant v8 - docker manifest push --purge ${MANIFESTIMAGE}:latest - docker manifest push --purge ${MANIFESTIMAGE}:${META_TAG} - docker manifest push --purge ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} - done - ''' - } - sh '''#! 
/bin/bash - for DELETEIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${IMAGE}"; do - docker rmi \ - ${DELETEIMAGE}:amd64-${META_TAG} \ - ${DELETEIMAGE}:amd64-latest \ - ${DELETEIMAGE}:amd64-${EXT_RELEASE_TAG} \ - ${DELETEIMAGE}:arm32v7-${META_TAG} \ - ${DELETEIMAGE}:arm32v7-latest \ - ${DELETEIMAGE}:arm32v7-${EXT_RELEASE_TAG} \ - ${DELETEIMAGE}:arm64v8-${META_TAG} \ - ${DELETEIMAGE}:arm64v8-latest \ - ${DELETEIMAGE}:arm64v8-${EXT_RELEASE_TAG} || : - done - docker rmi \ - ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} \ - ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || : - ''' - } - } - } - // If this is a public release tag it in the LS Github - stage('Github-Tag-Push-Release') { - when { - branch "master" - expression { - env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - echo "Pushing New tag for current commit ${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}" - sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ - -d '{"tag":"'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "object": "'${COMMIT_SHA}'",\ - "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\ - "type": "commit",\ - "tagger": {"name": "LinuxServer Jenkins","email": "jenkins@linuxserver.io","date": "'${GITHUB_DATE}'"}}' ''' - echo "Pushing New release for Tag" - sh '''#! 
/bin/bash - echo "Updating PIP version of ${EXT_PIP} to ${EXT_RELEASE_CLEAN}" > releasebody.json - echo '{"tag_name":"'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "target_commitish": "master",\ - "name": "'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "body": "**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n**PIP Changes:**\\n\\n' > start - printf '","draft": false,"prerelease": false}' >> releasebody.json - paste -d'\\0' start releasebody.json > releasebody.json.done - curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done''' - } - } - // Use helper container to sync the current README on master to the dockerhub endpoint - stage('Sync-README') { - when { - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - sh '''#! 
/bin/bash - set -e - TEMPDIR=$(mktemp -d) - docker pull ghcr.io/linuxserver/jenkins-builder:latest - docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH="${BRANCH_NAME}" -v ${TEMPDIR}:/ansible/jenkins ghcr.io/linuxserver/jenkins-builder:latest - docker pull ghcr.io/linuxserver/lsiodev-readme-sync - docker run --rm=true \ - -e DOCKERHUB_USERNAME=$DOCKERUSER \ - -e DOCKERHUB_PASSWORD=$DOCKERPASS \ - -e GIT_REPOSITORY=${LS_USER}/${LS_REPO} \ - -e DOCKER_REPOSITORY=${IMAGE} \ - -e GIT_BRANCH=master \ - -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/mnt \ - ghcr.io/linuxserver/lsiodev-readme-sync bash -c 'node sync' - rm -Rf ${TEMPDIR} ''' - } - } - } - // If this is a Pull request send the CI link as a comment on it - stage('Pull Request Comment') { - when { - not {environment name: 'CHANGE_ID', value: ''} - environment name: 'CI', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/issues/${PULL_REQUEST}/comments \ - -d '{"body": "I am a bot, here are the test results for this PR: \\n'${CI_URL}' \\n'${SHELLCHECK_URL}'"}' ''' - } - } - } - /* ###################### - Send status to Discord - ###################### */ - post { - always { - script{ - if (env.EXIT_STATUS == "ABORTED"){ - sh 'echo "build aborted"' - } - else if (currentBuild.currentResult == "SUCCESS"){ - sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 1681177,\ - "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** Success\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ - "username": "Jenkins"}' ${BUILDS_DISCORD} ''' - } - else { - sh ''' curl -X POST -H 
"Content-Type: application/json" --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 16711680,\ - "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** failure\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ - "username": "Jenkins"}' ${BUILDS_DISCORD} ''' - } - } - } - cleanup { - cleanWs() - } - } -} diff --git a/README.md b/README.md index 4c084bf..560e940 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,13 @@ - - - -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) - -[![Blog](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Blog)](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!") -[![Discord](https://img.shields.io/discord/354974912613449730.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Discord&logo=discord)](https://discord.gg/YWrKVTn "realtime support / chat with the community and the team.") -[![Discourse](https://img.shields.io/discourse/https/discourse.linuxserver.io/topics.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=discourse)](https://discourse.linuxserver.io "post on our community forum.") -[![Fleet](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Fleet)](https://fleet.linuxserver.io "an online web interface which displays all of our maintained images.") 
-[![GitHub](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub&logo=github)](https://github.com/linuxserver "view the source for all of our repositories.") -[![Open Collective](https://img.shields.io/opencollective/all/linuxserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Supporters&logo=open%20collective)](https://opencollective.com/linuxserver "please consider helping us by either donating or contributing to our budget") - -The [LinuxServer.io](https://linuxserver.io) team brings you another container release featuring: - - * regular and timely application updates - * easy user mappings (PGID, PUID) - * custom base image with s6 overlay - * weekly base OS updates with common layers across the entire LinuxServer.io ecosystem to minimise space usage, down time and bandwidth - * regular security updates - -Find us at: -* [Blog](https://blog.linuxserver.io) - all the things you can do with our containers including How-To guides, opinions and much more! -* [Discord](https://discord.gg/YWrKVTn) - realtime support / chat with the community and the team. -* [Discourse](https://discourse.linuxserver.io) - post on our community forum. -* [Fleet](https://fleet.linuxserver.io) - an online web interface which displays all of our maintained images. -* [GitHub](https://github.com/linuxserver) - view the source for all of our repositories. 
-* [Open Collective](https://opencollective.com/linuxserver) - please consider helping us by either donating or contributing to our budget - -# [linuxserver/swag](https://github.com/linuxserver/docker-swag) - -[![GitHub Stars](https://img.shields.io/github/stars/linuxserver/docker-swag.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-swag) -[![GitHub Release](https://img.shields.io/github/release/linuxserver/docker-swag.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-swag/releases) -[![GitHub Package Repository](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub%20Package&logo=github)](https://github.com/linuxserver/docker-swag/packages) -[![GitLab Container Registry](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitLab%20Registry&logo=gitlab)](https://gitlab.com/linuxserver.io/docker-swag/container_registry) -[![MicroBadger Layers](https://img.shields.io/microbadger/layers/linuxserver/swag.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge)](https://microbadger.com/images/linuxserver/swag "Get your own version badge on microbadger.com") -[![Docker Pulls](https://img.shields.io/docker/pulls/linuxserver/swag.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=pulls&logo=docker)](https://hub.docker.com/r/linuxserver/swag) -[![Docker Stars](https://img.shields.io/docker/stars/linuxserver/swag.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=stars&logo=docker)](https://hub.docker.com/r/linuxserver/swag) -[![Jenkins 
Build](https://img.shields.io/jenkins/build?labelColor=555555&logoColor=ffffff&style=for-the-badge&jobUrl=https%3A%2F%2Fci.linuxserver.io%2Fjob%2FDocker-Pipeline-Builders%2Fjob%2Fdocker-swag%2Fjob%2Fmaster%2F&logo=jenkins)](https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/) -[![LSIO CI](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=CI&query=CI&url=https%3A%2F%2Fci-tests.linuxserver.io%2Flinuxserver%2Fswag%2Flatest%2Fci-status.yml)](https://ci-tests.linuxserver.io/linuxserver/swag/latest/index.html) - -SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention. - -[![swag](https://github.com/linuxserver/docker-templates/raw/master/linuxserver.io/img/swag.gif)](https://linuxserver.io) +# Certbot_Only +Certbot_Only is a docker image based off of [linuxserver's SWAG](https://linuxserver.io) with the goal to simplify the image to *only generate DNS certificates and maintain them* while leaving them accessible for other resources to utilize. +Because Certbot_Only *only runs certbot*, DNS validation is required. +Further, in order to simplify the image, only Cloudflare DNS is currently implemented. ## Supported Architectures -Our images support multiple architectures such as `x86-64`, `arm64` and `armhf`. We utilise the docker manifest for multi-platform awareness. More information is available from docker [here](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#manifest-list) and our announcement [here](https://blog.linuxserver.io/2019/02/21/the-lsio-pipeline-project/). 
+The project is built with Docker Buildx to support multiple architectures such as `amd64`, `arm64` and `arm32/v7`. -Simply pulling `ghcr.io/linuxserver/swag` should retrieve the correct image for your arch, but you can also pull specific arch images via tags. +Simply pulling `ahgraber/certbot_only` should retrieve the correct image for your arch, but you can also pull specific arch images via tags. The architectures supported by this image are: @@ -61,37 +22,30 @@ The architectures supported by this image are: Here are some example snippets to help you get started creating a container. -### docker-compose ([recommended](https://docs.linuxserver.io/general/docker-compose)) +### docker-compose (recommended) -Compatible with docker-compose v2 schemas. +Compatible with docker-compose v3 schemas. ```yaml --- -version: "2.1" +version: "3.3" services: swag: - image: ghcr.io/linuxserver/swag - container_name: swag - cap_add: - - NET_ADMIN + image: ahgraber/certbot_only + container_name: certbot environment: - PUID=1000 - PGID=1000 - TZ=Europe/London - URL=yourdomain.url - SUBDOMAINS=www, - - VALIDATION=http - - CERTPROVIDER= #optional - - DNSPLUGIN=cloudflare #optional - PROPAGATION= #optional - - DUCKDNSTOKEN= #optional - EMAIL= #optional - ONLY_SUBDOMAINS=false #optional - - EXTRA_DOMAINS= #optional - STAGING=false #optional - - MAXMINDDB_LICENSE_KEY= #optional volumes: - /path/to/appdata/config:/config + - /path/to/appdata/letsencrypt:/letsencrypt ports: - 443:443 - 80:80 #optional @@ -102,28 +56,22 @@ services: ``` docker run -d \ - --name=swag \ - --cap-add=NET_ADMIN \ + --name=certbot \ -e PUID=1000 \ -e PGID=1000 \ -e TZ=Europe/London \ -e URL=yourdomain.url \ -e SUBDOMAINS=www, \ - -e VALIDATION=http \ - -e CERTPROVIDER= `#optional` \ - -e DNSPLUGIN=cloudflare `#optional` \ -e PROPAGATION= `#optional` \ - -e DUCKDNSTOKEN= `#optional` \ -e EMAIL= `#optional` \ -e ONLY_SUBDOMAINS=false `#optional` \ - -e EXTRA_DOMAINS= `#optional` \ -e STAGING=false `#optional` \ - 
-e MAXMINDDB_LICENSE_KEY= `#optional` \ -p 443:443 \ -p 80:80 `#optional` \ -v /path/to/appdata/config:/config \ + -v /path/to/appdata/letsencrypt:/letsencrypt \ --restart unless-stopped \ - ghcr.io/linuxserver/swag + ahgraber/certbot_only ``` @@ -137,37 +85,36 @@ Container images are configured using parameters passed at runtime (such as thos | `-p 80` | Http port (required for http validation and http -> https redirect) | | `-e PUID=1000` | for UserID - see below for explanation | | `-e PGID=1000` | for GroupID - see below for explanation | -| `-e TZ=Europe/London` | Specify a timezone to use EG Europe/London. | +| `-e TZ=Europe/London` | Specify a timezone to use - e.g., Europe/London. | | `-e URL=yourdomain.url` | Top url you have control over (`customdomain.com` if you own it, or `customsubdomain.ddnsprovider.com` if dynamic dns). | | `-e SUBDOMAINS=www,` | Subdomains you'd like the cert to cover (comma separated, no spaces) ie. `www,ftp,cloud`. For a wildcard cert, set this _exactly_ to `wildcard` (wildcard cert is available via `dns` and `duckdns` validation only) | -| `-e VALIDATION=http` | Certbot validation method to use, options are `http`, `dns` or `duckdns` (`dns` method also requires `DNSPLUGIN` variable set) (`duckdns` method requires `DUCKDNSTOKEN` variable set, and the `SUBDOMAINS` variable must be either empty or set to `wildcard`). | -| `-e CERTPROVIDER=` | Optionally define the cert provider. Set to `zerossl` for ZeroSSL certs (requires existing [ZeroSSL account](https://app.zerossl.com/signup) and the e-mail address entered in `EMAIL` env var). Otherwise defaults to Let's Encrypt. | -| `-e DNSPLUGIN=cloudflare` | Required if `VALIDATION` is set to `dns`. Options are `aliyun`, `cloudflare`, `cloudxns`, `cpanel`, `digitalocean`, `dnsimple`, `dnsmadeeasy`, `domeneshop`, `gandi`, `gehirn`, `google`, `hetzner`, `inwx`, `linode`, `luadns`, `netcup`, `njalla`, `nsone`, `ovh`, `rfc2136`, `route53`, `sakuracloud` and `transip`. 
Also need to enter the credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`. | | `-e PROPAGATION=` | Optionally override (in seconds) the default propagation time for the dns plugins. | -| `-e DUCKDNSTOKEN=` | Required if `VALIDATION` is set to `duckdns`. Retrieve your token from https://www.duckdns.org | -| `-e EMAIL=` | Optional e-mail address used for cert expiration notifications (Required for ZeroSSL). | +| `-e EMAIL=` | Optional e-mail address used for cert expiration notifications. | | `-e ONLY_SUBDOMAINS=false` | If you wish to get certs only for certain subdomains, but not the main domain (main domain may be hosted on another machine and cannot be validated), set this to `true` | -| `-e EXTRA_DOMAINS=` | Additional fully qualified domain names (comma separated, no spaces) ie. `extradomain.com,subdomain.anotherdomain.org,*.anotherdomain.org` | | `-e STAGING=false` | Set to `true` to retrieve certs in staging mode. Rate limits will be much higher, but the resulting cert will not pass the browser's security test. Only to be used for testing purposes. | -| `-e MAXMINDDB_LICENSE_KEY=` | Add your MaxmindDB license key to automatically download the GeoLite2-City.mmdb database. Download location is /config/geoip2db. The database is updated weekly. | -| `-v /config` | All the config files including the webroot reside here. | +| `-v /config` | All the config files reside here. | +| `-v /letsencrypt` | All the cert files reside here. | ## Environment variables from files (Docker secrets) -You can set any environment variable from a file by using a special prepend `FILE__`. +You can set any environment variable from a file by using a special suffix `__FILE` (double-underscore FILE). As an example: ``` --e FILE__PASSWORD=/run/secrets/mysecretpassword +-e PASSWORD__FILE=/run/secrets/mysecretpassword ``` Will set the environment variable `PASSWORD` based on the contents of the `/run/secrets/mysecretpassword` file. 
-## Umask for running applications +## Volumes +The recommended configurations create local folders `/config` and `/letsencrypt`. +`config/` + ├ `credentials/` - contains `cloudflare.ini` + ├ `crontabs` - contains root crontab + └ `deploy/` - contains deploy scripts for actions following successful Let's Encrypt renewal -For all of our images we provide the ability to override the default umask settings for services started within the containers using the optional `-e UMASK=022` setting. -Keep in mind umask is not chmod it subtracts from permissions based on it's value it does not add. Please read up [here](https://en.wikipedia.org/wiki/Umask) before asking for support. +`letsencrypt/` is populated with Let's Encrypt certificates if the generation/renewal is successful. ## User / Group Identifiers @@ -186,42 +133,14 @@ In this instance `PUID=1000` and `PGID=1000`, to find yours use `id user` as bel   ## Application Setup -> ### Migrating from the old `linuxserver/letsencrypt` image -> * If using docker cli: -> * Stop and remove existing container via `docker stop letsencrypt` and `docker rm letsencrypt` -> * Create new container using the sample on this page (container name: `swag`, image name: `linuxserver/swag`) -> * If using docker compose: -> * Edit the compose yaml to change the image to `linuxserver/swag` and change the service and container names to `swag` -> * Issue `docker-compose up -d --remove-orphans` -> * If you don't want to or can't use the option `--remove-orphans`, then you can first do `docker-compose down`, then edit the compose yaml as above, and then issue `docker-compose up -d` - -> Make sure to also update any references to this container by name. For instance, Nextcloud's `config.php` references this container in its `trusted_proxies` directive, which would have to be updated to `swag`. 
### Validation and initial setup * Before running this container, make sure that the url and subdomains are properly forwarded to this container's host, and that port 443 (and/or 80) is not being used by another service on the host (NAS gui, another webserver, etc.). -* For `http` validation, port 80 on the internet side of the router should be forwarded to this container's port 80 * For `dns` validation, make sure to enter your credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf` * Cloudflare provides free accounts for managing dns and is very easy to use with this image. Make sure that it is set up for "dns only" instead of "dns + proxy" - * Google dns plugin is meant to be used with "Google Cloud DNS", a paid enterprise product, and not for "Google Domains DNS" -* For `duckdns` validation, either leave the `SUBDOMAINS` variable empty or set it to `wildcard`, and set the `DUCKDNSTOKEN` variable with your duckdns token. Due to a limitation of duckdns, the resulting cert will only cover either main subdomain (ie. `yoursubdomain.duckdns.org`), or sub-subdomains (ie. `*.yoursubdomain.duckdns.org`), but will not both at the same time. You can use our [duckdns image](https://hub.docker.com/r/linuxserver/duckdns/) to update your IP on duckdns.org. -* `--cap-add=NET_ADMIN` is required for fail2ban to modify iptables -* If you need a dynamic dns provider, you can use the free provider duckdns.org where the `URL` will be `yoursubdomain.duckdns.org` and the `SUBDOMAINS` can be `www,ftp,cloud` with http validation, or `wildcard` with dns validation. -* After setup, navigate to `https://yourdomain.url` to access the default homepage (http access through port 80 is disabled by default, you can enable it by editing the default site config at `/config/nginx/site-confs/default`). * Certs are checked nightly and if expiration is within 30 days, renewal is attempted. 
If your cert is about to expire in less than 30 days, check the logs under `/config/log/letsencrypt` to see why the renewals have been failing. It is recommended to input your e-mail in docker parameters so you receive expiration notices from Let's Encrypt in those circumstances. -### Security and password protection -* The container detects changes to url and subdomains, revokes existing certs and generates new ones during start. -* The container provides a pre-generated 4096-bit dhparams.pem (rotated weekly via [Jenkins job](https://ci.linuxserver.io/blue/organizations/jenkins/Xtras-Builders-Etc%2Fdhparams-uploader/activity)) for new instances, however you may generate your own by running `docker exec swag openssl dhparam -out /config/nginx/dhparams.pem 4096` WARNING: This takes a very long time -* If you'd like to password protect your sites, you can use htpasswd. Run the following command on your host to generate the htpasswd file `docker exec -it swag htpasswd -c /config/nginx/.htpasswd ` -* You can add multiple user:pass to `.htpasswd`. For the first user, use the above command, for others, use the above command without the `-c` flag, as it will force deletion of the existing `.htpasswd` and creation of a new one -* You can also use ldap auth for security and access control. A sample, user configurable ldap.conf is provided, and it requires the separate image [linuxserver/ldap-auth](https://hub.docker.com/r/linuxserver/ldap-auth/) to communicate with an ldap server. -### Site config and reverse proxy -* The default site config resides at `/config/nginx/site-confs/default`. Feel free to modify this file, and you can add other conf files to this directory. However, if you delete the `default` file, a new default will be created on container start. -* Preset reverse proxy config files are added for popular apps. See the `README.md` file under `/config/nginx/proxy_confs` for instructions on how to enable them. 
The preset confs reside in and get imported from [this repo](https://github.com/linuxserver/reverse-proxy-confs). -* If you wish to hide your site from search engine crawlers, you may find it useful to add this configuration line to your site config, within the server block, above the line where ssl.conf is included -`add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive";` -This will *ask* Google et al not to index and list your site. Be careful with this, as you will eventually be de-listed if you leave this line in on a site you wish to be present on search engines -* If you wish to redirect http to https, you must expose port 80 + ### Using certs in other containers -* This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc. +* This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc, and tls.crt and tls.key certs that are needed by apps like Keycloak. * To use these certs in other containers, do either of the following: 1. *(Easier)* Mount the container's config folder in other containers (ie. `-v /path-to-le-config:/le-ssl`) and in the other containers, use the cert location `/le-ssl/keys/letsencrypt/` 2. *(More secure)* Mount the SWAG folder `etc` that resides under `/config` in other containers (ie. `-v /path-to-le-config/etc:/le-ssl`) and in the other containers, use the cert location `/le-ssl/letsencrypt/live//` (This is more secure because the first method shares the entire SWAG config folder with other containers, including the www files, whereas the second method only shares the ssl certs) @@ -229,115 +148,52 @@ This will *ask* Google et al not to index and list your site. Be careful with th 1. `cert.pem`, `chain.pem`, `fullchain.pem` and `privkey.pem`, which are generated by Certbot and used by nginx and various other apps 2. 
`privkey.pfx`, a format supported by Microsoft and commonly used by dotnet apps such as Emby Server (no password) 3. `priv-fullchain-bundle.pem`, a pem cert that bundles the private key and the fullchain, used by apps like ZNC -### Using fail2ban -* This container includes fail2ban set up with 4 jails by default: - 1. nginx-http-auth - 2. nginx-badbots - 3. nginx-botsearch - 4. nginx-deny -* To enable or disable other jails, modify the file `/config/fail2ban/jail.local` -* To modify filters and actions, instead of editing the `.conf` files, create `.local` files with the same name and edit those because .conf files get overwritten when the actions and filters are updated. `.local` files will append whatever's in the `.conf` files (ie. `nginx-http-auth.conf` --> `nginx-http-auth.local`) -* You can check which jails are active via `docker exec -it swag fail2ban-client status` -* You can check the status of a specific jail via `docker exec -it swag fail2ban-client status ` -* You can unban an IP via `docker exec -it swag fail2ban-client set unbanip ` -* A list of commands can be found here: https://www.fail2ban.org/wiki/index.php/Commands -### Updating configs -* This container creates a number of configs for nginx, proxy samples, etc. -* Config updates are noted in the changelog but not automatically applied to your files. -* If you have modified a file with noted changes in the changelog: - 1. Keep your existing configs as is (not broken, don't fix) - 2. Review our repository commits and apply the new changes yourself - 3. Delete the modified config file with listed updates, restart the container, reapply your changes -* If you have NOT modified a file with noted changes in the changelog: - 1. Delete the config file with listed updates, restart the container -* Proxy sample updates are not listed in the changelog. 
See the changes here: [https://github.com/linuxserver/reverse-proxy-confs/commits/master](https://github.com/linuxserver/reverse-proxy-confs/commits/master) -* Proxy sample files WILL be updated, however your renamed (enabled) proxy files will not. -* You can check the new sample and adjust your active config as needed. - - -## Docker Mods -[![Docker Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=swag&query=%24.mods%5B%27swag%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=swag "view available mods for this container.") [![Docker Universal Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=universal&query=%24.mods%5B%27universal%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=universal "view available universal mods.") - -We publish various [Docker Mods](https://github.com/linuxserver/docker-mods) to enable additional functionality within the containers. The list of Mods available for this image (if any) as well as universal mods that can be applied to any one of our images can be accessed via the dynamic badges above. - + 4. 
`tls.crt` and `tls.key`, formats which are used by x509 apps like Keycloak ## Support Info -* Shell access whilst the container is running: `docker exec -it swag /bin/bash` -* To monitor the logs of the container in realtime: `docker logs -f swag` +* Shell access whilst the container is running: `docker exec -it certbot_only /bin/bash` +* To monitor the logs of the container in realtime: `docker logs -f certbot_only` * container version number - * `docker inspect -f '{{ index .Config.Labels "build_version" }}' swag` + * `docker inspect -f '{{ index .Config.Labels "build_version" }}' certbot_only` * image version number - * `docker inspect -f '{{ index .Config.Labels "build_version" }}' ghcr.io/linuxserver/swag` + * `docker inspect -f '{{ index .Config.Labels "build_version" }}' ahgraber/certbot_only` ## Updating Info -Most of our images are static, versioned, and require an image update and container recreation to update the app inside. With some exceptions (ie. nextcloud, plex), we do not recommend or support updating apps inside the container. Please consult the [Application Setup](#application-setup) section above to see if it is recommended for the image. 
- Below are the instructions for updating containers: ### Via Docker Compose * Update all images: `docker-compose pull` - * or update a single image: `docker-compose pull swag` + * or update a single image: `docker-compose pull certbot_only` * Let compose update all containers as necessary: `docker-compose up -d` * or update a single container: `docker-compose up -d swag` * You can also remove the old dangling images: `docker image prune` ### Via Docker Run -* Update the image: `docker pull ghcr.io/linuxserver/swag` -* Stop the running container: `docker stop swag` -* Delete the container: `docker rm swag` +* Update the image: `docker pull ahgraber/certbot_only` +* Stop the running container: `docker stop certbot_only` +* Delete the container: `docker rm certbot_only` * Recreate a new container with the same docker run parameters as instructed above (if mapped correctly to a host folder, your `/config` folder and settings will be preserved) * You can also remove the old dangling images: `docker image prune` -### Via Watchtower auto-updater (only use if you don't remember the original parameters) -* Pull the latest image at its tag and replace it with the same env variables in one run: - ``` - docker run --rm \ - -v /var/run/docker.sock:/var/run/docker.sock \ - containrrr/watchtower \ - --run-once swag - ``` -* You can also remove the old dangling images: `docker image prune` - -**Note:** We do not endorse the use of Watchtower as a solution to automated updates of existing Docker containers. In fact we generally discourage automated updates. However, this is a useful tool for one-time manual updates of containers where you have forgotten the original parameters. In the long term, we highly recommend using [Docker Compose](https://docs.linuxserver.io/general/docker-compose). - -### Image Update Notifications - Diun (Docker Image Update Notifier) -* We recommend [Diun](https://crazymax.dev/diun/) for update notifications. 
Other tools that automatically update containers unattended are not recommended or supported. - ## Building locally If you want to make local modifications to these images for development purposes or just to customize the logic: + +With Docker Compose for single testing: ``` -git clone https://github.com/linuxserver/docker-swag.git -cd docker-swag -docker build \ - --no-cache \ - --pull \ - -t ghcr.io/linuxserver/swag:latest . +git clone https://github.com/ahgraber/docker-certbot-only.git +cd docker-certbot_only +docker-compose build ``` -The ARM variants can be built on x86_64 hardware using `multiarch/qemu-user-static` +With [Docker buildx](https://docs.docker.com/buildx/working-with-buildx/) for multiarch support: ``` -docker run --rm --privileged multiarch/qemu-user-static:register --reset +git clone https://github.com/ahgraber/docker-certbot-only.git +cd docker-certbot_only/scripts +bash buildx.sh {tag} ``` -Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. - ## Versions - -* **10.02.21:** - Fix aliyun, domeneshop, inwx and transip dns confs for existing users. -* **09.02.21:** - Rebasing to alpine 3.13. Add nginx mods brotli and dav-ext. Remove nginx mods lua and lua-upstream (due to regression over the last couple of years). -* **26.01.21:** - Add support for hetzner dns validation. -* **20.01.21:** - Add check for ZeroSSL EAB retrieval. -* **08.01.21:** - Add support for getting certs from [ZeroSSL](https://zerossl.com/) via optional `CERTPROVIDER` env var. Update aliyun, domeneshop, inwx and transip dns plugins with the new plugin names. Hide `donoteditthisfile.conf` because users were editing it despite its name. Suppress harmless error when no proxy confs are enabled. 
-* **03.01.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) /config/nginx/site-confs/default - Add helper pages to aid troubleshooting -* **10.12.20:** - Add support for njalla dns validation -* **09.12.20:** - Check for template/conf updates and notify in the log. Add support for gehirn and sakuracloud dns validation. -* **01.11.20:** - Add support for netcup dns validation -* **29.10.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) ssl.conf - Add frame-ancestors to Content-Security-Policy. -* **04.10.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and ssl.conf - Minor cleanups and reordering. -* **20.09.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Added geoip2 configs. Added MAXMINDDB_LICENSE_KEY variable to readme. -* **08.09.20:** - Add php7-xsl. -* **01.09.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and various proxy samples - Global websockets across all configs. -* **03.08.20:** - Initial release. +11 Feb 2021: Cloned from linuxserver/docker-swag adfe04cedbb291f87ca2a923d21ab1c9ed4cefeb \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..d573d9b --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,12 @@ +version: '3.7' + +services: + certbot: + build: + context: . 
+ dockerfile: Dockerfile + args: + - TARGETPLATFORM='linux/amd64' + - TARGETPLATFORM='linux/arm64' + - TARGETPLATFORM='linux/arm32/v7' + image: certbot:test diff --git a/jenkins-vars.yml b/jenkins-vars.yml deleted file mode 100644 index 989b638..0000000 --- a/jenkins-vars.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- - -# jenkins variables -project_name: docker-swag -external_type: pip_version -release_type: stable -release_tag: latest -ls_branch: master -repo_vars: - - EXT_PIP = 'certbot' - - BUILD_VERSION_ARG = 'CERTBOT_VERSION' - - LS_USER = 'linuxserver' - - LS_REPO = 'docker-swag' - - CONTAINER_NAME = 'swag' - - DOCKERHUB_IMAGE = 'linuxserver/swag' - - DEV_DOCKERHUB_IMAGE = 'lsiodev/swag' - - PR_DOCKERHUB_IMAGE = 'lspipepr/swag' - - DIST_IMAGE = 'alpine' - - MULTIARCH='true' - - CI='true' - - CI_WEB='false' - - CI_PORT='80' - - CI_SSL='false' - - CI_DELAY='30' - - CI_DOCKERENV='TEST_RUN=1' - - CI_AUTH='' - - CI_WEBPATH='' -sponsor_links: - - { name: "Certbot", url: "https://supporters.eff.org/donate/support-work-on-certbot" } diff --git a/package_versions.txt b/package_versions.txt deleted file mode 100755 index a58309b..0000000 --- a/package_versions.txt +++ /dev/null @@ -1,220 +0,0 @@ -alpine-baselayout-3.2.0-r8 -alpine-keys-2.2-r0 -apache2-utils-2.4.46-r3 -apk-tools-2.12.1-r0 -apr-1.7.0-r0 -apr-util-1.6.1-r7 -argon2-libs-20190702-r1 -bash-5.1.0-r0 -brotli-libs-1.0.9-r3 -busybox-1.32.1-r2 -c-client-2007f-r11 -ca-certificates-20191127-r5 -ca-certificates-bundle-20191127-r5 -coreutils-8.32-r2 -curl-7.74.0-r0 -expat-2.2.10-r1 -fail2ban-0.11.1-r4 -freetype-2.10.4-r1 -gdbm-1.19-r0 -git-2.30.0-r0 -glib-2.66.4-r0 -gmp-6.2.1-r0 -gnupg-2.2.27-r0 -gnutls-3.7.0-r0 -icu-libs-67.1-r2 -ip6tables-1.8.6-r0 -iptables-1.8.6-r0 -libacl-2.2.53-r0 -libassuan-2.5.4-r0 -libattr-2.4.48-r0 -libblkid-2.36.1-r1 -libbsd-0.10.0-r0 -libbz2-1.0.8-r1 -libc-utils-0.7.2-r3 -libcap-2.46-r0 -libcrypto1.1-1.1.1i-r0 -libcurl-7.74.0-r0 -libedit-20191231.3.1-r1 -libevent-2.1.12-r1 -libffi-3.3-r2 
-libgcc-10.2.1_pre1-r3 -libgcrypt-1.8.7-r0 -libgd-2.3.0-r2 -libgpg-error-1.41-r0 -libice-1.0.10-r0 -libidn-1.35-r0 -libintl-0.20.2-r2 -libjpeg-turbo-2.0.6-r0 -libksba-1.5.0-r0 -libldap-2.4.56-r0 -libmagic-5.39-r0 -libmaxminddb-1.5.0-r0 -libmcrypt-2.5.8-r9 -libmemcached-libs-1.0.18-r4 -libmnl-1.0.4-r1 -libmount-2.36.1-r1 -libnftnl-libs-1.1.8-r0 -libpng-1.6.37-r1 -libpq-13.1-r2 -libproc-3.3.16-r0 -libressl3.1-libcrypto-3.1.5-r0 -libressl3.1-libssl-3.1.5-r0 -libsasl-2.1.27-r10 -libseccomp-2.5.1-r0 -libsecret-0.20.4-r0 -libsm-1.2.3-r0 -libsodium-1.0.18-r0 -libssl1.1-1.1.1i-r0 -libstdc++-10.2.1_pre1-r3 -libtasn1-4.16.0-r1 -libtls-standalone-2.9.1-r1 -libunistring-0.9.10-r0 -libuuid-2.36.1-r1 -libwebp-1.1.0-r0 -libx11-1.7.0-r0 -libxau-1.0.9-r0 -libxcb-1.14-r1 -libxdmcp-1.1.3-r0 -libxext-1.3.4-r0 -libxml2-2.9.10-r6 -libxpm-3.5.13-r0 -libxslt-1.1.34-r0 -libxt-1.2.1-r0 -libzip-1.7.3-r2 -linux-pam-1.5.1-r0 -logrotate-3.18.0-r0 -lz4-libs-1.9.2-r0 -memcached-1.6.9-r0 -musl-1.2.2-r0 -musl-utils-1.2.2-r0 -nano-5.4-r1 -ncurses-libs-6.2_p20210109-r0 -ncurses-terminfo-base-6.2_p20210109-r0 -nettle-3.7-r0 -nghttp2-libs-1.42.0-r1 -nginx-1.18.0-r13 -nginx-mod-devel-kit-1.18.0-r13 -nginx-mod-http-brotli-1.18.0-r13 -nginx-mod-http-dav-ext-1.18.0-r13 -nginx-mod-http-echo-1.18.0-r13 -nginx-mod-http-fancyindex-1.18.0-r13 -nginx-mod-http-geoip2-1.18.0-r13 -nginx-mod-http-headers-more-1.18.0-r13 -nginx-mod-http-image-filter-1.18.0-r13 -nginx-mod-http-nchan-1.18.0-r13 -nginx-mod-http-perl-1.18.0-r13 -nginx-mod-http-redis2-1.18.0-r13 -nginx-mod-http-set-misc-1.18.0-r13 -nginx-mod-http-upload-progress-1.18.0-r13 -nginx-mod-http-xslt-filter-1.18.0-r13 -nginx-mod-mail-1.18.0-r13 -nginx-mod-rtmp-1.18.0-r13 -nginx-mod-stream-1.18.0-r13 -nginx-mod-stream-geoip2-1.18.0-r13 -nginx-vim-1.18.0-r13 -npth-1.6-r0 -oniguruma-6.9.6-r0 -openssl-1.1.1i-r0 -p11-kit-0.23.22-r0 -pcre-8.44-r0 -pcre2-10.36-r0 -perl-5.32.0-r0 -php7-7.4.15-r0 -php7-bcmath-7.4.15-r0 -php7-bz2-7.4.15-r0 -php7-common-7.4.15-r0 
-php7-ctype-7.4.15-r0 -php7-curl-7.4.15-r0 -php7-dom-7.4.15-r0 -php7-exif-7.4.15-r0 -php7-fileinfo-7.4.14-r0 -php7-fpm-7.4.14-r0 -php7-ftp-7.4.15-r0 -php7-gd-7.4.15-r0 -php7-iconv-7.4.15-r0 -php7-imap-7.4.15-r0 -php7-intl-7.4.15-r0 -php7-json-7.4.15-r0 -php7-ldap-7.4.15-r0 -php7-mbstring-7.4.15-r0 -php7-mysqli-7.4.15-r0 -php7-mysqlnd-7.4.15-r0 -php7-opcache-7.4.15-r0 -php7-openssl-7.4.15-r0 -php7-pdo-7.4.15-r0 -php7-pdo_mysql-7.4.15-r0 -php7-pdo_odbc-7.4.15-r0 -php7-pdo_pgsql-7.4.15-r0 -php7-pdo_sqlite-7.4.15-r0 -php7-pear-7.4.15-r0 -php7-pecl-apcu-5.1.19-r1 -php7-pecl-igbinary-3.2.2_rc1-r0 -php7-pecl-mcrypt-1.0.4-r0 -php7-pecl-memcached-3.1.5-r2 -php7-pecl-redis-5.3.3-r0 -php7-pgsql-7.4.15-r0 -php7-phar-7.4.15-r0 -php7-posix-7.4.15-r0 -php7-session-7.4.15-r0 -php7-simplexml-7.4.14-r0 -php7-soap-7.4.15-r0 -php7-sockets-7.4.15-r0 -php7-sodium-7.4.15-r0 -php7-sqlite3-7.4.15-r0 -php7-tokenizer-7.4.15-r0 -php7-xml-7.4.15-r0 -php7-xmlreader-7.4.15-r0 -php7-xmlrpc-7.4.15-r0 -php7-xmlwriter-7.4.14-r0 -php7-xsl-7.4.15-r0 -php7-zip-7.4.15-r0 -pinentry-1.1.1-r0 -popt-1.18-r0 -procps-3.3.16-r0 -py3-appdirs-1.4.4-r1 -py3-asn1crypto-1.4.0-r0 -py3-cachecontrol-0.12.6-r0 -py3-cffi-1.14.4-r0 -py3-chardet-4.0.0-r0 -py3-colorama-0.4.4-r0 -py3-contextlib2-0.6.0-r0 -py3-cparser-2.20-r0 -py3-cryptography-3.3.2-r0 -py3-distlib-0.3.1-r1 -py3-distro-1.5.0-r1 -py3-future-0.18.2-r1 -py3-html5lib-1.1-r0 -py3-idna-3.1-r0 -py3-lockfile-0.12.2-r3 -py3-msgpack-1.0.2-r0 -py3-ordered-set-4.0.2-r0 -py3-packaging-20.9-r0 -py3-parsing-2.4.7-r1 -py3-pep517-0.9.1-r0 -py3-pip-20.3.4-r0 -py3-progress-1.5-r0 -py3-pytoml-0.1.21-r0 -py3-requests-2.25.1-r1 -py3-retrying-1.3.3-r0 -py3-setuptools-51.3.3-r0 -py3-six-1.15.0-r0 -py3-toml-0.10.2-r0 -py3-urllib3-1.26.2-r1 -py3-webencodings-0.5.1-r3 -python3-3.8.7-r0 -readline-8.1.0-r0 -s6-ipcserver-2.10.0.0-r0 -scanelf-1.2.8-r0 -shadow-4.8.1-r0 -skalibs-2.10.0.0-r0 -sqlite-libs-3.34.1-r0 -ssl_client-1.32.1-r2 -tzdata-2021a-r0 -unixodbc-2.3.9-r1 -utmps-0.1.0.0-r0 
-whois-5.5.7-r1 -xz-5.2.5-r0 -xz-libs-5.2.5-r0 -zlib-1.2.11-r3 -zstd-libs-1.4.5-r3 diff --git a/readme-vars.yml b/readme-vars.yml deleted file mode 100755 index 3466f52..0000000 --- a/readme-vars.yml +++ /dev/null @@ -1,168 +0,0 @@ ---- - -# project information -project_name: swag -project_url: "https://linuxserver.io" -project_logo: "https://github.com/linuxserver/docker-templates/raw/master/linuxserver.io/img/swag.gif" -project_blurb: "SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention." -project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}" - -project_blurb_optional_extras_enabled: false -project_blurb_optional_extras: [] - -# supported architectures -available_architectures: - - { arch: "{{ arch_x86_64 }}", tag: "amd64-latest"} - - { arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"} - - { arch: "{{ arch_armhf }}", tag: "arm32v7-latest"} - -# development version -development_versions: false -development_versions_items: - - { tag: "latest", desc: "Stable releases" } - - -# container parameters -common_param_env_vars_enabled: true #PGID, PUID, etc, you can set it to 'optional' -param_container_name: "{{ project_name }}" -param_usage_include_net: false #you can set it to 'optional' -param_net: "host" -param_net_desc: "Shares host networking with container." -param_usage_include_env: true -param_env_vars: - - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." } - - { env_var: "URL", env_value: "yourdomain.url", desc: "Top url you have control over (`customdomain.com` if you own it, or `customsubdomain.ddnsprovider.com` if dynamic dns)." 
} - - { env_var: "SUBDOMAINS", env_value: "www,", desc: "Subdomains you'd like the cert to cover (comma separated, no spaces) ie. `www,ftp,cloud`. For a wildcard cert, set this _exactly_ to `wildcard` (wildcard cert is available via `dns` and `duckdns` validation only)" } - - { env_var: "VALIDATION", env_value: "http", desc: "Certbot validation method to use, options are `http`, `dns` or `duckdns` (`dns` method also requires `DNSPLUGIN` variable set) (`duckdns` method requires `DUCKDNSTOKEN` variable set, and the `SUBDOMAINS` variable must be either empty or set to `wildcard`)." } -param_usage_include_vols: true -param_volumes: - - { vol_path: "/config", vol_host_path: "/path/to/appdata/config", desc: "All the config files including the webroot reside here." } -param_usage_include_ports: true -param_ports: - - { external_port: "443", internal_port: "443", port_desc: "Https port" } -param_device_map: false -param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -cap_add_param: true -cap_add_param_vars: - - { cap_add_var: "NET_ADMIN" } - -# optional container parameters -opt_param_usage_include_env: true -opt_param_env_vars: - - { env_var: "CERTPROVIDER", env_value: "", desc: "Optionally define the cert provider. Set to `zerossl` for ZeroSSL certs (requires existing [ZeroSSL account](https://app.zerossl.com/signup) and the e-mail address entered in `EMAIL` env var). Otherwise defaults to Let's Encrypt." } - - { env_var: "DNSPLUGIN", env_value: "cloudflare", desc: "Required if `VALIDATION` is set to `dns`. Options are `aliyun`, `cloudflare`, `cloudxns`, `cpanel`, `digitalocean`, `dnsimple`, `dnsmadeeasy`, `domeneshop`, `gandi`, `gehirn`, `google`, `hetzner`, `inwx`, `linode`, `luadns`, `netcup`, `njalla`, `nsone`, `ovh`, `rfc2136`, `route53`, `sakuracloud` and `transip`. Also need to enter the credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`." 
} - - { env_var: "PROPAGATION", env_value: "", desc: "Optionally override (in seconds) the default propagation time for the dns plugins." } - - { env_var: "DUCKDNSTOKEN", env_value: "", desc: "Required if `VALIDATION` is set to `duckdns`. Retrieve your token from https://www.duckdns.org" } - - { env_var: "EMAIL", env_value: "", desc: "Optional e-mail address used for cert expiration notifications (Required for ZeroSSL)." } - - { env_var: "ONLY_SUBDOMAINS", env_value: "false", desc: "If you wish to get certs only for certain subdomains, but not the main domain (main domain may be hosted on another machine and cannot be validated), set this to `true`" } - - { env_var: "EXTRA_DOMAINS", env_value: "", desc: "Additional fully qualified domain names (comma separated, no spaces) ie. `extradomain.com,subdomain.anotherdomain.org,*.anotherdomain.org`" } - - { env_var: "STAGING", env_value: "false", desc: "Set to `true` to retrieve certs in staging mode. Rate limits will be much higher, but the resulting cert will not pass the browser's security test. Only to be used for testing purposes." } - - { env_var: "MAXMINDDB_LICENSE_KEY", env_value: "", desc: "Add your MaxmindDB license key to automatically download the GeoLite2-City.mmdb database. Download location is /config/geoip2db. The database is updated weekly."} -opt_param_usage_include_vols: false -opt_param_volumes: - - { vol_path: "/config", vol_host_path: "/path/to/appdata/config", desc: "Configuration files." 
} -opt_param_usage_include_ports: true -opt_param_ports: - - { external_port: "80", internal_port: "80", port_desc: "Http port (required for http validation and http -> https redirect)" } -opt_param_device_map: false -opt_param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -opt_cap_add_param: false -opt_cap_add_param_vars: - - { cap_add_var: "NET_ADMIN" } - -optional_block_1: false -optional_block_1_items: "" - -# application setup block -app_setup_block_enabled: true -app_setup_block: | - > ### Migrating from the old `linuxserver/letsencrypt` image - > * If using docker cli: - > * Stop and remove existing container via `docker stop letsencrypt` and `docker rm letsencrypt` - > * Create new container using the sample on this page (container name: `swag`, image name: `linuxserver/swag`) - > * If using docker compose: - > * Edit the compose yaml to change the image to `linuxserver/swag` and change the service and container names to `swag` - > * Issue `docker-compose up -d --remove-orphans` - > * If you don't want to or can't use the option `--remove-orphans`, then you can first do `docker-compose down`, then edit the compose yaml as above, and then issue `docker-compose up -d` - - > Make sure to also update any references to this container by name. For instance, Nextcloud's `config.php` references this container in its `trusted_proxies` directive, which would have to be updated to `swag`. - ### Validation and initial setup - * Before running this container, make sure that the url and subdomains are properly forwarded to this container's host, and that port 443 (and/or 80) is not being used by another service on the host (NAS gui, another webserver, etc.). 
- * For `http` validation, port 80 on the internet side of the router should be forwarded to this container's port 80 - * For `dns` validation, make sure to enter your credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf` - * Cloudflare provides free accounts for managing dns and is very easy to use with this image. Make sure that it is set up for "dns only" instead of "dns + proxy" - * Google dns plugin is meant to be used with "Google Cloud DNS", a paid enterprise product, and not for "Google Domains DNS" - * For `duckdns` validation, either leave the `SUBDOMAINS` variable empty or set it to `wildcard`, and set the `DUCKDNSTOKEN` variable with your duckdns token. Due to a limitation of duckdns, the resulting cert will only cover either main subdomain (ie. `yoursubdomain.duckdns.org`), or sub-subdomains (ie. `*.yoursubdomain.duckdns.org`), but will not both at the same time. You can use our [duckdns image](https://hub.docker.com/r/linuxserver/duckdns/) to update your IP on duckdns.org. - * `--cap-add=NET_ADMIN` is required for fail2ban to modify iptables - * If you need a dynamic dns provider, you can use the free provider duckdns.org where the `URL` will be `yoursubdomain.duckdns.org` and the `SUBDOMAINS` can be `www,ftp,cloud` with http validation, or `wildcard` with dns validation. - * After setup, navigate to `https://yourdomain.url` to access the default homepage (http access through port 80 is disabled by default, you can enable it by editing the default site config at `/config/nginx/site-confs/default`). - * Certs are checked nightly and if expiration is within 30 days, renewal is attempted. If your cert is about to expire in less than 30 days, check the logs under `/config/log/letsencrypt` to see why the renewals have been failing. It is recommended to input your e-mail in docker parameters so you receive expiration notices from Let's Encrypt in those circumstances. 
- ### Security and password protection - * The container detects changes to url and subdomains, revokes existing certs and generates new ones during start. - * The container provides a pre-generated 4096-bit dhparams.pem (rotated weekly via [Jenkins job](https://ci.linuxserver.io/blue/organizations/jenkins/Xtras-Builders-Etc%2Fdhparams-uploader/activity)) for new instances, however you may generate your own by running `docker exec swag openssl dhparam -out /config/nginx/dhparams.pem 4096` WARNING: This takes a very long time - * If you'd like to password protect your sites, you can use htpasswd. Run the following command on your host to generate the htpasswd file `docker exec -it swag htpasswd -c /config/nginx/.htpasswd ` - * You can add multiple user:pass to `.htpasswd`. For the first user, use the above command, for others, use the above command without the `-c` flag, as it will force deletion of the existing `.htpasswd` and creation of a new one - * You can also use ldap auth for security and access control. A sample, user configurable ldap.conf is provided, and it requires the separate image [linuxserver/ldap-auth](https://hub.docker.com/r/linuxserver/ldap-auth/) to communicate with an ldap server. - ### Site config and reverse proxy - * The default site config resides at `/config/nginx/site-confs/default`. Feel free to modify this file, and you can add other conf files to this directory. However, if you delete the `default` file, a new default will be created on container start. - * Preset reverse proxy config files are added for popular apps. See the `README.md` file under `/config/nginx/proxy_confs` for instructions on how to enable them. The preset confs reside in and get imported from [this repo](https://github.com/linuxserver/reverse-proxy-confs). 
- * If you wish to hide your site from search engine crawlers, you may find it useful to add this configuration line to your site config, within the server block, above the line where ssl.conf is included - `add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive";` - This will *ask* Google et al not to index and list your site. Be careful with this, as you will eventually be de-listed if you leave this line in on a site you wish to be present on search engines - * If you wish to redirect http to https, you must expose port 80 - ### Using certs in other containers - * This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc. - * To use these certs in other containers, do either of the following: - 1. *(Easier)* Mount the container's config folder in other containers (ie. `-v /path-to-le-config:/le-ssl`) and in the other containers, use the cert location `/le-ssl/keys/letsencrypt/` - 2. *(More secure)* Mount the SWAG folder `etc` that resides under `/config` in other containers (ie. `-v /path-to-le-config/etc:/le-ssl`) and in the other containers, use the cert location `/le-ssl/letsencrypt/live//` (This is more secure because the first method shares the entire SWAG config folder with other containers, including the www files, whereas the second method only shares the ssl certs) - * These certs include: - 1. `cert.pem`, `chain.pem`, `fullchain.pem` and `privkey.pem`, which are generated by Certbot and used by nginx and various other apps - 2. `privkey.pfx`, a format supported by Microsoft and commonly used by dotnet apps such as Emby Server (no password) - 3. `priv-fullchain-bundle.pem`, a pem cert that bundles the private key and the fullchain, used by apps like ZNC - ### Using fail2ban - * This container includes fail2ban set up with 4 jails by default: - 1. nginx-http-auth - 2. nginx-badbots - 3. nginx-botsearch - 4. 
nginx-deny - * To enable or disable other jails, modify the file `/config/fail2ban/jail.local` - * To modify filters and actions, instead of editing the `.conf` files, create `.local` files with the same name and edit those because .conf files get overwritten when the actions and filters are updated. `.local` files will append whatever's in the `.conf` files (ie. `nginx-http-auth.conf` --> `nginx-http-auth.local`) - * You can check which jails are active via `docker exec -it swag fail2ban-client status` - * You can check the status of a specific jail via `docker exec -it swag fail2ban-client status ` - * You can unban an IP via `docker exec -it swag fail2ban-client set unbanip ` - * A list of commands can be found here: https://www.fail2ban.org/wiki/index.php/Commands - ### Updating configs - * This container creates a number of configs for nginx, proxy samples, etc. - * Config updates are noted in the changelog but not automatically applied to your files. - * If you have modified a file with noted changes in the changelog: - 1. Keep your existing configs as is (not broken, don't fix) - 2. Review our repository commits and apply the new changes yourself - 3. Delete the modified config file with listed updates, restart the container, reapply your changes - * If you have NOT modified a file with noted changes in the changelog: - 1. Delete the config file with listed updates, restart the container - * Proxy sample updates are not listed in the changelog. See the changes here: [https://github.com/linuxserver/reverse-proxy-confs/commits/master](https://github.com/linuxserver/reverse-proxy-confs/commits/master) - * Proxy sample files WILL be updated, however your renamed (enabled) proxy files will not. - * You can check the new sample and adjust your active config as needed. 
- -app_setup_nginx_reverse_proxy_snippet: false -app_setup_nginx_reverse_proxy_block: "" - -# changelog -changelogs: - - { date: "10.02.21:", desc: "Fix aliyun, domeneshop, inwx and transip dns confs for existing users." } - - { date: "09.02.21:", desc: "Rebasing to alpine 3.13. Add nginx mods brotli and dav-ext. Remove nginx mods lua and lua-upstream (due to regression over the last couple of years)." } - - { date: "26.01.21:", desc: "Add support for hetzner dns validation." } - - { date: "20.01.21:", desc: "Add check for ZeroSSL EAB retrieval." } - - { date: "08.01.21:", desc: "Add support for getting certs from [ZeroSSL](https://zerossl.com/) via optional `CERTPROVIDER` env var. Update aliyun, domeneshop, inwx and transip dns plugins with the new plugin names. Hide `donoteditthisfile.conf` because users were editing it despite its name. Suppress harmless error when no proxy confs are enabled." } - - { date: "03.01.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) /config/nginx/site-confs/default - Add helper pages to aid troubleshooting" } - - { date: "10.12.20:", desc: "Add support for njalla dns validation" } - - { date: "09.12.20:", desc: "Check for template/conf updates and notify in the log. Add support for gehirn and sakuracloud dns validation." } - - { date: "01.11.20:", desc: "Add support for netcup dns validation" } - - { date: "29.10.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) ssl.conf - Add frame-ancestors to Content-Security-Policy." } - - { date: "04.10.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and ssl.conf - Minor cleanups and reordering." 
} - - { date: "20.09.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Added geoip2 configs. Added MAXMINDDB_LICENSE_KEY variable to readme."} - - { date: "08.09.20:", desc: "Add php7-xsl." } - - { date: "01.09.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and various proxy samples - Global websockets across all configs." } - - { date: "03.08.20:", desc: "Initial release." } diff --git a/root/app/duckdns-txt b/root/app/duckdns-txt deleted file mode 100644 index 9630c4c..0000000 --- a/root/app/duckdns-txt +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -. /config/.donoteditthisfile.conf - -curl https://www.duckdns.org/update?domains=${CERTBOT_DOMAIN}\&token=${ORIGDUCKDNSTOKEN}\&txt=${CERTBOT_VALIDATION} - -echo "sleeping 60" -sleep 60 diff --git a/root/app/le-renew.sh b/root/app/le-renew.sh index 5c638a5..a936df7 100644 --- a/root/app/le-renew.sh +++ b/root/app/le-renew.sh @@ -7,21 +7,4 @@ echo echo "<------------------------------------------------->" echo "cronjob running on "$(date) echo "Running certbot renew" -if [ "$ORIGVALIDATION" = "dns" ] || [ "$ORIGVALIDATION" = "duckdns" ]; then - certbot -n renew \ - --post-hook "if ps aux | grep [n]ginx: > /dev/null; then s6-svc -h /var/run/s6/services/nginx; fi; \ - cd /config/keys/letsencrypt && \ - openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass: && \ - sleep 1 && \ - cat privkey.pem fullchain.pem > priv-fullchain-bundle.pem && \ - chown -R abc:abc /config/etc/letsencrypt" -else - certbot -n renew \ - --pre-hook "if ps aux | grep [n]ginx: > /dev/null; then s6-svc -d /var/run/s6/services/nginx; fi" \ - --post-hook "if ps aux | grep 's6-supervise nginx' | grep -v grep > /dev/null; then s6-svc -u /var/run/s6/services/nginx; fi; \ - cd /config/keys/letsencrypt && \ - 
openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass: && \ - sleep 1 && \ - cat privkey.pem fullchain.pem > priv-fullchain-bundle.pem && \ - chown -R abc:abc /config/etc/letsencrypt" -fi +certbot renew --noninteractive --no-self-upgrade diff --git a/root/defaults/502.html b/root/defaults/502.html deleted file mode 100644 index ff7d8fc..0000000 --- a/root/defaults/502.html +++ /dev/null @@ -1,44 +0,0 @@ - - - 502 - - - -
-

502

-

Nginx can not connect to the application

-

Some common reasons are listed here: docs.linuxserver.io

-

For help and support, please visit: linuxserver.io/support

-
- - \ No newline at end of file diff --git a/root/defaults/authelia-location.conf b/root/defaults/authelia-location.conf deleted file mode 100644 index ee7c92f..0000000 --- a/root/defaults/authelia-location.conf +++ /dev/null @@ -1,11 +0,0 @@ -## Version 2020/05/31 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/authelia-location.conf -# Make sure that your authelia container is in the same user defined bridge network and is named authelia -# Make sure that the authelia configuration.yml has 'path: "authelia"' defined - -auth_request /authelia/api/verify; -auth_request_set $target_url $scheme://$http_host$request_uri; -auth_request_set $user $upstream_http_remote_user; -auth_request_set $groups $upstream_http_remote_groups; -proxy_set_header Remote-User $user; -proxy_set_header Remote-Groups $groups; -error_page 401 =302 https://$http_host/authelia/?rd=$target_url; diff --git a/root/defaults/authelia-server.conf b/root/defaults/authelia-server.conf deleted file mode 100644 index cd6a6f6..0000000 --- a/root/defaults/authelia-server.conf +++ /dev/null @@ -1,48 +0,0 @@ -## Version 2020/05/31 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/authelia-server.conf -# Make sure that your authelia container is in the same user defined bridge network and is named authelia - -location ^~ /authelia { - include /config/nginx/proxy.conf; - resolver 127.0.0.11 valid=30s; - set $upstream_authelia authelia; - proxy_pass http://$upstream_authelia:9091; -} - -location = /authelia/api/verify { - internal; - resolver 127.0.0.11 valid=30s; - set $upstream_authelia authelia; - proxy_pass_request_body off; - proxy_pass http://$upstream_authelia:9091; - proxy_set_header Content-Length ""; - - # Timeout if the real server is dead - proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; - - # [REQUIRED] Needed by Authelia to check authorizations of the resource. 
- # Provide either X-Original-URL and X-Forwarded-Proto or - # X-Forwarded-Proto, X-Forwarded-Host and X-Forwarded-Uri or both. - # Those headers will be used by Authelia to deduce the target url of the user. - # Basic Proxy Config - client_body_buffer_size 128k; - proxy_set_header Host $host; - proxy_set_header X-Original-URL $scheme://$http_host$request_uri; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Host $http_host; - proxy_set_header X-Forwarded-Uri $request_uri; - proxy_set_header X-Forwarded-Ssl on; - proxy_redirect http:// $scheme://; - proxy_http_version 1.1; - proxy_set_header Connection ""; - proxy_cache_bypass $cookie_session; - proxy_no_cache $cookie_session; - proxy_buffers 4 32k; - - # Advanced Proxy Config - send_timeout 5m; - proxy_read_timeout 240; - proxy_send_timeout 240; - proxy_connect_timeout 240; -} diff --git a/root/defaults/dns-conf/cloudflare.ini b/root/defaults/credentials/cloudflare.ini similarity index 83% rename from root/defaults/dns-conf/cloudflare.ini rename to root/defaults/credentials/cloudflare.ini index 5cd3c0a..2bdd22f 100644 --- a/root/defaults/dns-conf/cloudflare.ini +++ b/root/defaults/credentials/cloudflare.ini @@ -6,4 +6,4 @@ dns_cloudflare_email = cloudflare@example.com dns_cloudflare_api_key = 0123456789abcdef0123456789abcdef01234567 # With token (comment out both lines above and uncomment below): -#dns_cloudflare_api_token = 0123456789abcdef0123456789abcdef01234567 \ No newline at end of file +# dns_cloudflare_api_token = 0123456789abcdef0123456789abcdef01234567 \ No newline at end of file diff --git a/root/defaults/crontabs/.gitkeep b/root/defaults/crontabs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/root/defaults/crontabs/root b/root/defaults/crontabs/root new file mode 100644 index 0000000..6e34992 --- /dev/null +++ b/root/defaults/crontabs/root @@ -0,0 +1,9 @@ +# do 
daily/weekly/monthly maintenance +# min hour day month weekday command +*/15 * * * * run-parts /etc/periodic/15min +0 * * * * run-parts /etc/periodic/hourly +0 2 * * * run-parts /etc/periodic/daily +0 3 * * 6 run-parts /etc/periodic/weekly +0 5 1 * * run-parts /etc/periodic/monthly +# renew letsencrypt certs +8 2 * * * /app/le-renew.sh >> /config/log/letsencrypt/letsencrypt.log 2>&1 \ No newline at end of file diff --git a/root/defaults/default b/root/defaults/default deleted file mode 100644 index 6d76de6..0000000 --- a/root/defaults/default +++ /dev/null @@ -1,155 +0,0 @@ -## Version 2021/01/03 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/default - -error_page 502 /502.html; - -# redirect all traffic to https -server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - return 301 https://$host$request_uri; -} - -# main server block -server { - listen 443 ssl http2 default_server; - listen [::]:443 ssl http2 default_server; - - root /config/www; - index index.html index.htm index.php; - - server_name _; - - # enable subfolder method reverse proxy confs - include /config/nginx/proxy-confs/*.subfolder.conf; - - # all ssl related config moved to ssl.conf - include /config/nginx/ssl.conf; - - # enable for ldap auth - #include /config/nginx/ldap.conf; - - # enable for Authelia - #include /config/nginx/authelia-server.conf; - - # enable for geo blocking - # See /config/nginx/geoip2.conf for more information. 
- #if ($allowed_country = no) { - #return 444; - #} - - client_max_body_size 0; - - location / { - try_files $uri $uri/ /index.html /index.php?$args =404; - } - - location ~ \.php$ { - fastcgi_split_path_info ^(.+\.php)(/.+)$; - fastcgi_pass 127.0.0.1:9000; - fastcgi_index index.php; - include /etc/nginx/fastcgi_params; - } - -# sample reverse proxy config for password protected couchpotato running at IP 192.168.1.50 port 5050 with base url "cp" -# notice this is within the same server block as the base -# don't forget to generate the .htpasswd file as described on docker hub -# location ^~ /cp { -# auth_basic "Restricted"; -# auth_basic_user_file /config/nginx/.htpasswd; -# include /config/nginx/proxy.conf; -# proxy_pass http://192.168.1.50:5050/cp; -# } - -} - -# sample reverse proxy config without url base, but as a subdomain "cp", ip and port same as above -# notice this is a new server block, you need a new server block for each subdomain -#server { -# listen 443 ssl http2; -# listen [::]:443 ssl http2; -# -# root /config/www; -# index index.html index.htm index.php; -# -# server_name cp.*; -# -# include /config/nginx/ssl.conf; -# -# client_max_body_size 0; -# -# location / { -# auth_basic "Restricted"; -# auth_basic_user_file /config/nginx/.htpasswd; -# include /config/nginx/proxy.conf; -# proxy_pass http://192.168.1.50:5050; -# } -#} - -# sample reverse proxy config for "heimdall" via subdomain, with ldap authentication -# ldap-auth container has to be running and the /config/nginx/ldap.conf file should be filled with ldap info -# notice this is a new server block, you need a new server block for each subdomain -#server { -# listen 443 ssl http2; -# listen [::]:443 ssl http2; -# -# root /config/www; -# index index.html index.htm index.php; -# -# server_name heimdall.*; -# -# include /config/nginx/ssl.conf; -# -# include /config/nginx/ldap.conf; -# -# client_max_body_size 0; -# -# location / { -# # the next two lines will enable ldap auth along with the 
included ldap.conf in the server block -# auth_request /auth; -# error_page 401 =200 /ldaplogin; -# -# include /config/nginx/proxy.conf; -# resolver 127.0.0.11 valid=30s; -# set $upstream_app heimdall; -# set $upstream_port 443; -# set $upstream_proto https; -# proxy_pass $upstream_proto://$upstream_app:$upstream_port; -# } -#} - -# sample reverse proxy config for "heimdall" via subdomain, with Authelia -# Authelia container has to be running in the same user defined bridge network, with container name "authelia", and with 'path: "authelia"' set in its configuration.yml -# notice this is a new server block, you need a new server block for each subdomain -#server { -# listen 443 ssl http2; -# listen [::]:443 ssl http2; -# -# root /config/www; -# index index.html index.htm index.php; -# -# server_name heimdall.*; -# -# include /config/nginx/ssl.conf; -# -# include /config/nginx/authelia-server.conf; -# -# client_max_body_size 0; -# -# location / { -# # the next line will enable Authelia along with the included authelia-server.conf in the server block -# include /config/nginx/authelia-location.conf; -# -# include /config/nginx/proxy.conf; -# resolver 127.0.0.11 valid=30s; -# set $upstream_app heimdall; -# set $upstream_port 443; -# set $upstream_proto https; -# proxy_pass $upstream_proto://$upstream_app:$upstream_port; -# } -#} - -# enable subdomain method reverse proxy confs -include /config/nginx/proxy-confs/*.subdomain.conf; -# enable proxy cache for auth -proxy_cache_path cache/ keys_zone=auth_cache:10m; diff --git a/root/defaults/deploy/deploy-convert-certs.sh b/root/defaults/deploy/deploy-convert-certs.sh new file mode 100644 index 0000000..cec62f2 --- /dev/null +++ b/root/defaults/deploy/deploy-convert-certs.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# convert to fullchain.pem and privkey.pem to tls.crt and tls.key + +KEYPATH="/letsencrypt/keys" + +# convert pems to cert and key +echo "Converting to tls.crt and tls.key ..." 
+openssl crl2pkcs7 -nocrl \ +-certfile "${KEYPATH}"/fullchain.pem | openssl pkcs7 -print_certs \ +-out "${KEYPATH}"/tls.crt +# openssl x509 -outform der -in fullchain.pem -out tls.crt +# openssl pkey -outform der -in privkey.pem -out tls.key +openssl rsa \ +-in "${KEYPATH}"/privkey.pem \ +-out "${KEYPATH}"/tls.key + +# converting to pfx and priv-fullchain-bundle +openssl pkcs12 -export \ +-certfile "${KEYPATH}"/chain.pem \ +-in "${KEYPATH}"/cert.pem -inkey "${KEYPATH}"/privkey.pem \ +-out "${KEYPATH}"/privkey.pfx \ + -passout pass: +sleep 1 + +cat "${KEYPATH}"/{privkey,fullchain}.pem > "${KEYPATH}"/priv-fullchain-bundle.pem + +# Allow read access to certs +chmod 644 "${KEYPATH}"/*.pem +chmod 644 "${KEYPATH}"/*.pfx +chmod 644 "${KEYPATH}"/tls.* +echo "Success." + diff --git a/root/defaults/dns-conf/aliyun.ini b/root/defaults/dns-conf/aliyun.ini deleted file mode 100644 index f1121d4..0000000 --- a/root/defaults/dns-conf/aliyun.ini +++ /dev/null @@ -1,6 +0,0 @@ -# Obtain Aliyun RAM AccessKey -# https://ram.console.aliyun.com/ -# And ensure your RAM account has AliyunDNSFullAccess permission. 
- -dns_aliyun_access_key = 12345678 -dns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef diff --git a/root/defaults/dns-conf/cloudxns.ini b/root/defaults/dns-conf/cloudxns.ini deleted file mode 100644 index a86f7d7..0000000 --- a/root/defaults/dns-conf/cloudxns.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-cloudxns/certbot_dns_cloudxns/__init__.py#L20 -# Replace with your values -dns_cloudxns_api_key = 1234567890abcdef1234567890abcdef -dns_cloudxns_secret_key = 1122334455667788 diff --git a/root/defaults/dns-conf/cpanel.ini b/root/defaults/dns-conf/cpanel.ini deleted file mode 100644 index 28b6953..0000000 --- a/root/defaults/dns-conf/cpanel.ini +++ /dev/null @@ -1,6 +0,0 @@ -# Instructions: https://github.com/badjware/certbot-dns-cpanel#credentials -# Replace with your values -# include the scheme and the port number (usually 2083 for https) -certbot_dns_cpanel:cpanel_url = https://cpanel.example.com:2083 -certbot_dns_cpanel:cpanel_username = username -certbot_dns_cpanel:cpanel_password = 1234567890abcdef \ No newline at end of file diff --git a/root/defaults/dns-conf/digitalocean.ini b/root/defaults/dns-conf/digitalocean.ini deleted file mode 100644 index eff7677..0000000 --- a/root/defaults/dns-conf/digitalocean.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-digitalocean/certbot_dns_digitalocean/__init__.py#L21 -# Replace with your value -dns_digitalocean_token = 0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff diff --git a/root/defaults/dns-conf/dnsimple.ini b/root/defaults/dns-conf/dnsimple.ini deleted file mode 100644 index 8eedb63..0000000 --- a/root/defaults/dns-conf/dnsimple.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-dnsimple/certbot_dns_dnsimple/__init__.py#L20 -# Replace with your value -dns_dnsimple_token = 
MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw diff --git a/root/defaults/dns-conf/dnsmadeeasy.ini b/root/defaults/dns-conf/dnsmadeeasy.ini deleted file mode 100644 index 942c403..0000000 --- a/root/defaults/dns-conf/dnsmadeeasy.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-dnsmadeeasy/certbot_dns_dnsmadeeasy/__init__.py#L20 -# Replace with your values -dns_dnsmadeeasy_api_key = 1c1a3c91-4770-4ce7-96f4-54c0eb0e457a -dns_dnsmadeeasy_secret_key = c9b5625f-9834-4ff8-baba-4ed5f32cae55 diff --git a/root/defaults/dns-conf/domeneshop.ini b/root/defaults/dns-conf/domeneshop.ini deleted file mode 100644 index 569c713..0000000 --- a/root/defaults/dns-conf/domeneshop.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://github.com/domeneshop/certbot-dns-domeneshop#credentials -# Replace with your values -dns_domeneshop_client_token=1234567890abcdef -dns_domeneshop_client_secret=1234567890abcdefghijklmnopqrstuvxyz1234567890abcdefghijklmnopqrs \ No newline at end of file diff --git a/root/defaults/dns-conf/gandi.ini b/root/defaults/dns-conf/gandi.ini deleted file mode 100644 index a5c04b3..0000000 --- a/root/defaults/dns-conf/gandi.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/obynio/certbot-plugin-gandi#usage -# Replace with your value -certbot_plugin_gandi:dns_api_key=APIKEY diff --git a/root/defaults/dns-conf/gehirn.ini b/root/defaults/dns-conf/gehirn.ini deleted file mode 100644 index e1ac409..0000000 --- a/root/defaults/dns-conf/gehirn.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://certbot-dns-gehirn.readthedocs.io/en/stable/ -# Replace with your values -dns_gehirn_api_token = 00000000-0000-0000-0000-000000000000 -dns_gehirn_api_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw diff --git a/root/defaults/dns-conf/google.json b/root/defaults/dns-conf/google.json deleted file mode 100644 index c5a59cf..0000000 --- a/root/defaults/dns-conf/google.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - 
"instructions": "https://github.com/certbot/certbot/blob/master/certbot-dns-google/certbot_dns_google/__init__.py", - "_comment": "Replace with your values", - "type": "service_account", - "rest": "..." -} \ No newline at end of file diff --git a/root/defaults/dns-conf/hetzner.ini b/root/defaults/dns-conf/hetzner.ini deleted file mode 100644 index f616823..0000000 --- a/root/defaults/dns-conf/hetzner.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/ctrlaltcoop/certbot-dns-hetzner -# Replace with your values -dns_hetzner_api_token = nohnah4zoo9Kiejee9aGh0thoopee2sa \ No newline at end of file diff --git a/root/defaults/dns-conf/inwx.ini b/root/defaults/dns-conf/inwx.ini deleted file mode 100644 index 086d866..0000000 --- a/root/defaults/dns-conf/inwx.ini +++ /dev/null @@ -1,6 +0,0 @@ -# Instructions: https://github.com/oGGy990/certbot-dns-inwx -# Replace with your values -dns_inwx_url = https://api.domrobot.com/xmlrpc/ -dns_inwx_username = your_username -dns_inwx_password = your_password -dns_inwx_shared_secret = your_shared_secret optional diff --git a/root/defaults/dns-conf/linode.ini b/root/defaults/dns-conf/linode.ini deleted file mode 100644 index 2d434a1..0000000 --- a/root/defaults/dns-conf/linode.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-linode/certbot_dns_linode/__init__.py#L25 -# Replace with your values -dns_linode_key = 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ64 diff --git a/root/defaults/dns-conf/luadns.ini b/root/defaults/dns-conf/luadns.ini deleted file mode 100644 index 01de1dd..0000000 --- a/root/defaults/dns-conf/luadns.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-luadns/certbot_dns_luadns/__init__.py#L20 -# Replace with your values -dns_luadns_email = user@example.com -dns_luadns_token = 0123456789abcdef0123456789abcdef diff --git a/root/defaults/dns-conf/netcup.ini 
b/root/defaults/dns-conf/netcup.ini deleted file mode 100644 index a3a1e90..0000000 --- a/root/defaults/dns-conf/netcup.ini +++ /dev/null @@ -1,3 +0,0 @@ -dns_netcup_customer_id = 123456 -dns_netcup_api_key = 0123456789abcdef0123456789abcdef01234567 -dns_netcup_api_password = abcdef0123456789abcdef01234567abcdef0123 diff --git a/root/defaults/dns-conf/njalla.ini b/root/defaults/dns-conf/njalla.ini deleted file mode 100644 index 4b2a930..0000000 --- a/root/defaults/dns-conf/njalla.ini +++ /dev/null @@ -1,2 +0,0 @@ -# Generate your API token here: https://njal.la/settings/api/ -dns_njalla_token=0000000000000000000000000000000000000000 diff --git a/root/defaults/dns-conf/nsone.ini b/root/defaults/dns-conf/nsone.ini deleted file mode 100644 index f1858ca..0000000 --- a/root/defaults/dns-conf/nsone.ini +++ /dev/null @@ -1,3 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-nsone/certbot_dns_nsone/__init__.py#L20 -# Replace with your value -dns_nsone_api_key = MDAwMDAwMDAwMDAwMDAw diff --git a/root/defaults/dns-conf/ovh.ini b/root/defaults/dns-conf/ovh.ini deleted file mode 100644 index f8fef57..0000000 --- a/root/defaults/dns-conf/ovh.ini +++ /dev/null @@ -1,6 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-ovh/certbot_dns_ovh/__init__.py#L20 -# Replace with your values -dns_ovh_endpoint = ovh-eu -dns_ovh_application_key = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw -dns_ovh_application_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw -dns_ovh_consumer_key = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw diff --git a/root/defaults/dns-conf/rfc2136.ini b/root/defaults/dns-conf/rfc2136.ini deleted file mode 100644 index 75b6c7a..0000000 --- a/root/defaults/dns-conf/rfc2136.ini +++ /dev/null @@ -1,11 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py#L20 -# Replace with your values -# Target DNS server -dns_rfc2136_server = 192.0.2.1 -# TSIG key name 
-dns_rfc2136_name = keyname. -# TSIG key secret -dns_rfc2136_secret = 4q4wM/2I180UXoMyN4INVhJNi8V9BCV+jMw2mXgZw/CSuxUT8C7NKKFs \ -AmKd7ak51vWKgSl12ib86oQRPkpDjg== -# TSIG key algorithm -dns_rfc2136_algorithm = HMAC-SHA512 diff --git a/root/defaults/dns-conf/route53.ini b/root/defaults/dns-conf/route53.ini deleted file mode 100644 index 18ce326..0000000 --- a/root/defaults/dns-conf/route53.ini +++ /dev/null @@ -1,5 +0,0 @@ -# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-route53/certbot_dns_route53/__init__.py#L18 -# Replace with your values -[default] -aws_access_key_id=AKIAIOSFODNN7EXAMPLE -aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY diff --git a/root/defaults/dns-conf/sakuracloud.ini b/root/defaults/dns-conf/sakuracloud.ini deleted file mode 100644 index 17f3ac8..0000000 --- a/root/defaults/dns-conf/sakuracloud.ini +++ /dev/null @@ -1,4 +0,0 @@ -# Instructions: https://certbot-dns-sakuracloud.readthedocs.io/en/stable/ -# Replace with your values -dns_sakuracloud_api_token = 00000000-0000-0000-0000-000000000000 -dns_sakuracloud_api_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw diff --git a/root/defaults/dns-conf/transip.ini b/root/defaults/dns-conf/transip.ini deleted file mode 100644 index 68d0b4f..0000000 --- a/root/defaults/dns-conf/transip.ini +++ /dev/null @@ -1,30 +0,0 @@ -# Instructions: https://readthedocs.org/projects/certbot-dns-transip/ -# -# This DNS plugin can be used to generate SSL wildcard certificates via TransIP DNS TXT records -# -# Login with your TransIP account and go to My Account | API: -# 1. API-settings: On -# -# 2. IP-address/ranges whitelist: Add a new authorized IP address (Swag Docker) to use the API -# -# 3. Generate a new Key Pair and copy the private key to a new transip.key file in the format: -# -----BEGIN PRIVATE KEY----- -# ... -# -----END PRIVATE KEY----- -# -# 4. 
Convert the key to an RSA key with command: -# openssl rsa -in transip.key -out /config/dns-conf/transip-rsa.key -# -# 5. Set permission -# chmod 600 /config/dns-conf/transip-rsa.key -# -# 6. Replace below with your TransIP username -# -# 7. Create wildcard certificate with Swag environment variables: -# SUBDOMAINS=wildcard -# VALIDATION=dns -# DNSPLUGIN=transip - -dns_transip_username = -dns_transip_key_file = /config/dns-conf/transip-rsa.key - diff --git a/root/defaults/fail2ban/fail2ban.local b/root/defaults/fail2ban/fail2ban.local deleted file mode 100644 index 3571c73..0000000 --- a/root/defaults/fail2ban/fail2ban.local +++ /dev/null @@ -1,4 +0,0 @@ -[Definition] - -logtarget = /config/log/fail2ban/fail2ban.log -dbfile = /config/fail2ban/fail2ban.sqlite3 diff --git a/root/defaults/fail2ban/filter.d/nginx-badbots.conf b/root/defaults/fail2ban/filter.d/nginx-badbots.conf deleted file mode 100644 index 48b3066..0000000 --- a/root/defaults/fail2ban/filter.d/nginx-badbots.conf +++ /dev/null @@ -1,21 +0,0 @@ -# Fail2Ban configuration file -# -# Regexp to catch known spambots and software alike. Please verify -# that it is your intent to block IPs which were driven by -# above mentioned bots. 
- - -[Definition] - -badbotscustom = EmailCollector|WebEMailExtrac|TrackBack/1\.02|sogou music spider -badbots = Atomic_Email_Hunter/4\.0|atSpider/1\.0|autoemailspider|bwh3_user_agent|China Local Browse 2\.6|ContactBot/0\.2|ContentSmartz|DataCha0s/2\.0|DBrowse 1\.4b|DBrowse 1\.4d|Demo Bot DOT 16b|Demo Bot Z 16b|DSurf15a 01|DSurf15a 71|DSurf15a 81|DSurf15a VA|EBrowse 1\.4b|Educate Search VxB|EmailSiphon|EmailSpider|EmailWolf 1\.00|ESurf15a 15|ExtractorPro|Franklin Locator 1\.8|FSurf15a 01|Full Web Bot 0416B|Full Web Bot 0516B|Full Web Bot 2816B|Guestbook Auto Submitter|Industry Program 1\.0\.x|ISC Systems iRc Search 2\.1|IUPUI Research Bot v 1\.9a|LARBIN-EXPERIMENTAL \(efp@gmx\.net\)|LetsCrawl\.com/1\.0 \+http\://letscrawl\.com/|Lincoln State Web Browser|LMQueueBot/0\.2|LWP\:\:Simple/5\.803|Mac Finder 1\.0\.xx|MFC Foundation Class Library 4\.0|Microsoft URL Control - 6\.00\.8xxx|Missauga Locate 1\.0\.0|Missigua Locator 1\.9|Missouri College Browse|Mizzu Labs 2\.2|Mo College 1\.9|MVAClient|Mozilla/2\.0 \(compatible; NEWT ActiveX; Win32\)|Mozilla/3\.0 \(compatible; Indy Library\)|Mozilla/3\.0 \(compatible; scan4mail \(advanced version\) http\://www\.peterspages\.net/?scan4mail\)|Mozilla/4\.0 \(compatible; Advanced Email Extractor v2\.xx\)|Mozilla/4\.0 \(compatible; Iplexx Spider/1\.0 http\://www\.iplexx\.at\)|Mozilla/4\.0 \(compatible; MSIE 5\.0; Windows NT; DigExt; DTS Agent|Mozilla/4\.0 efp@gmx\.net|Mozilla/5\.0 \(Version\: xxxx Type\:xx\)|NameOfAgent \(CMS Spider\)|NASA Search 1\.0|Nsauditor/1\.x|PBrowse 1\.4b|PEval 1\.4b|Poirot|Port Huron Labs|Production Bot 0116B|Production Bot 2016B|Production Bot DOT 3016B|Program Shareware 1\.0\.2|PSurf15a 11|PSurf15a 51|PSurf15a VA|psycheclone|RSurf15a 41|RSurf15a 51|RSurf15a 81|searchbot admin@google\.com|ShablastBot 1\.0|snap\.com beta crawler v0|Snapbot/1\.0|Snapbot/1\.0 \(Snap Shots, \+http\://www\.snap\.com\)|sogou develop spider|Sogou Orion spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sogou 
spider|Sogou web spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sohu agent|SSurf15a 11 |TSurf15a 11|Under the Rainbow 2\.2|User-Agent\: Mozilla/4\.0 \(compatible; MSIE 6\.0; Windows NT 5\.1\)|VadixBot|WebVulnCrawl\.unknown/1\.0 libwww-perl/5\.803|Wells Search II|WEP Search 00 - -failregex = ^ -.*"(GET|POST|HEAD).*HTTP.*"(?:%(badbots)s|%(badbotscustom)s)"$ - -ignoreregex = - -# DEV Notes: -# List of bad bots fetched from http://www.user-agents.org -# Generated on Thu Nov 7 14:23:35 PST 2013 by files/gen_badbots. -# -# Author: Yaroslav Halchenko diff --git a/root/defaults/fail2ban/filter.d/nginx-deny.conf b/root/defaults/fail2ban/filter.d/nginx-deny.conf deleted file mode 100644 index d9f4694..0000000 --- a/root/defaults/fail2ban/filter.d/nginx-deny.conf +++ /dev/null @@ -1,15 +0,0 @@ -# fail2ban filter configuration for nginx - - -[Definition] - - -failregex = ^ \[error\] \d+#\d+: \*\d+ (access forbidden by rule), client: , server: \S*, request: "\S+ \S+ HTTP\/\d+\.\d+", host: "\S+"(?:, referrer: "\S+")?\s*$ - -ignoreregex = - -datepattern = {^LN-BEG} - -# DEV NOTES: -# -# Author: Will L (driz@linuxserver.io) diff --git a/root/defaults/geoip2.conf b/root/defaults/geoip2.conf deleted file mode 100644 index 702c4dc..0000000 --- a/root/defaults/geoip2.conf +++ /dev/null @@ -1,123 +0,0 @@ -## Version 2020/10/27 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/geoip2.conf -# To enable, uncommment the Geoip2 config line in nginx.conf -# Add the -e MAXMINDDB_LICENSE_KEY= to automatically download the Geolite2 database. 
-# A Maxmind license key can be acquired here: https://www.maxmind.com/en/geolite2/signup - -geoip2 /config/geoip2db/GeoLite2-City.mmdb { - auto_reload 1w; - $geoip2_data_city_name city names en; - $geoip2_data_postal_code postal code; - $geoip2_data_latitude location latitude; - $geoip2_data_longitude location longitude; - $geoip2_data_state_name subdivisions 0 names en; - $geoip2_data_state_code subdivisions 0 iso_code; - $geoip2_data_continent_code continent code; - $geoip2_data_country_iso_code country iso_code; -} - -# GEOIP2 COUNTRY CONFIG -map $geoip2_data_country_iso_code $allowed_country { - # default must be yes or no - # If default is set to "no" you will need to add the local ip ranges that you want to allow access in the $allow_list variable below. - default yes; - - # Below you will setup conditions with yes or no - # ex: ; - - # allow United Kingdom. - #GB yes; -} - -# GEOIP2 CITY CONFIG -map $geoip2_data_city_name $allowed_city { - # default must be yes or no - # If default is set to "no" you will need to add the local ip ranges that you want to allow access in the $allow_list variable below. - default yes; - - # Below you will setup conditions with yes or no - # ex: ; - - # allow Inverness. - #Inverness yes; -} - -# ALLOW LOCAL ACCESS -geo $allow_list { - default yes; # Set this to no if $allowed_country or $allowed_city default is no. - # IP/CIDR yes; # e.g. 192.168.1.0/24 yes; -} - -# Server config example: -# Add the following if statements inside any server context where you want to geo block countries. - -######################################## -# if ($allow_list = yes) { -# set $allowed_country yes; -# } -# if ($allowed_country = no) { -# return 444; -# } -######################################### - -# Add the following if statements inside any server context where you want to geo block cities. 
-######################################## -# if ($allow_list = yes) { -# set $allowed_country yes; -# } -# if ($allowed_city = no) { -# return 444; -# } -######################################### - -# Example using a config from proxy-confs - -#server { -# listen 443 ssl; -# listen [::]:443 ssl; -# -# server_name unifi.*; -# -# include /config/nginx/ssl.conf; -# -# client_max_body_size 0; -# -# # enable for ldap auth, fill in ldap details in ldap.conf -# #include /config/nginx/ldap.conf; -# -# # enable for Authelia -# #include /config/nginx/authelia-server.conf; - - -# # Allow lan access if default is set to no -# if ($allow_list = yes) { -# set $allowed_country yes; -# } -# # Country geo block -# if ($allowed_country = no) { -# return 444; -# } - - -# -# location / { -# # enable the next two lines for http auth -# #auth_basic "Restricted"; -# #auth_basic_user_file /config/nginx/.htpasswd; -# -# # enable the next two lines for ldap auth -# #auth_request /auth; -# #error_page 401 =200 /ldaplogin; -# -# # enable for Authelia -# #include /config/nginx/authelia-location.conf; -# -# include /config/nginx/proxy.conf; -# resolver 127.0.0.11 valid=30s; -# set $upstream_app unifi-controller; -# set $upstream_port 8443; -# set $upstream_proto https; -# proxy_pass $upstream_proto://$upstream_app:$upstream_port; -# -# proxy_buffering off; -# } -#} diff --git a/root/defaults/index.html b/root/defaults/index.html deleted file mode 100644 index 352d1af..0000000 --- a/root/defaults/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - Welcome to your SWAG instance - - - -
-

Welcome to your SWAG instance

-

A webserver and reverse proxy solution brought to you by linuxserver.io with php support and a built-in Certbot client.

-

We have an article on how to use swag here: docs.linuxserver.io

-

For help and support, please visit: linuxserver.io/support

-
- - \ No newline at end of file diff --git a/root/defaults/jail.local b/root/defaults/jail.local deleted file mode 100644 index 9b8673c..0000000 --- a/root/defaults/jail.local +++ /dev/null @@ -1,57 +0,0 @@ -## Version 2020/05/10 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/jail.local -# This is the custom version of the jail.conf for fail2ban -# Feel free to modify this and add additional filters -# Then you can drop the new filter conf files into the fail2ban-filters -# folder and restart the container - -[DEFAULT] - -# Changes the default ban action from "iptables-multiport", which causes issues on some platforms, to "iptables-allports". -banaction = iptables-allports - -# "bantime" is the number of seconds that a host is banned. -bantime = 600 - -# A host is banned if it has generated "maxretry" during the last "findtime" -# seconds. -findtime = 600 - -# "maxretry" is the number of failures before a host get banned. -maxretry = 5 - - -[ssh] - -enabled = false - - -[nginx-http-auth] - -enabled = true -filter = nginx-http-auth -port = http,https -logpath = /config/log/nginx/error.log - - -[nginx-badbots] - -enabled = true -port = http,https -filter = nginx-badbots -logpath = /config/log/nginx/access.log -maxretry = 2 - - -[nginx-botsearch] - -enabled = true -port = http,https -filter = nginx-botsearch -logpath = /config/log/nginx/access.log - -[nginx-deny] - -enabled = true -port = http,https -filter = nginx-deny -logpath = /config/log/nginx/error.log diff --git a/root/defaults/ldap.conf b/root/defaults/ldap.conf deleted file mode 100644 index 90120c7..0000000 --- a/root/defaults/ldap.conf +++ /dev/null @@ -1,92 +0,0 @@ -## Version 2020/06/02 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/ldap.conf -## this conf is meant to be used in conjunction with our ldap-auth image: https://github.com/linuxserver/docker-ldap-auth -## see the heimdall example in the default site config for info on 
enabling ldap auth -## for further instructions on this conf, see https://github.com/nginxinc/nginx-ldap-auth - - location /ldaplogin { - resolver 127.0.0.11 valid=30s; - set $upstream_auth_app ldap-auth; - set $upstream_auth_port 9000; - set $upstream_auth_proto http; - proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port; - proxy_set_header X-Target $request_uri; - } - - location = /auth { - resolver 127.0.0.11 valid=30s; - set $upstream_auth_app ldap-auth; - set $upstream_auth_port 8888; - set $upstream_auth_proto http; - proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port; - - proxy_pass_request_body off; - proxy_set_header Content-Length ""; - - #Before enabling the below caching options, make sure you have the line "proxy_cache_path cache/ keys_zone=auth_cache:10m;" at the bottom your default site config - #proxy_cache auth_cache; - #proxy_cache_valid 200 10m; - #proxy_cache_key "$http_authorization$cookie_nginxauth"; - - # As implemented in nginx-ldap-auth-daemon.py, the ldap-auth daemon - # communicates with a LDAP server, passing in the following - # parameters to specify which user account to authenticate. To - # eliminate the need to modify the Python code, this file contains - # 'proxy_set_header' directives that set the values of the - # parameters. Set or change them as instructed in the comments. - # - # Parameter Proxy header - # ----------- ---------------- - # url X-Ldap-URL - # starttls X-Ldap-Starttls - # basedn X-Ldap-BaseDN - # binddn X-Ldap-BindDN - # bindpasswd X-Ldap-BindPass - # cookiename X-CookieName - # realm X-Ldap-Realm - # template X-Ldap-Template - - # (Required) Set the URL and port for connecting to the LDAP server, - # by replacing 'example.com'. - # Do not mix ldaps-style URL and X-Ldap-Starttls as it will not work. - proxy_set_header X-Ldap-URL "ldap://example.com"; - - # (Optional) Establish a TLS-enabled LDAP session after binding to the - # LDAP server. 
- # This is the 'proper' way to establish encrypted TLS connections, see - # http://www.openldap.org/faq/data/cache/185.html - #proxy_set_header X-Ldap-Starttls "true"; - - # (Required) Set the Base DN, by replacing the value enclosed in - # double quotes. - proxy_set_header X-Ldap-BaseDN "cn=Users,dc=test,dc=local"; - - # (Required) Set the Bind DN, by replacing the value enclosed in - # double quotes. - # If AD, use "root@test.local" - proxy_set_header X-Ldap-BindDN "cn=root,dc=test,dc=local"; - - # (Required) Set the Bind password, by replacing 'secret'. - proxy_set_header X-Ldap-BindPass "secret"; - - # (Required) The following directives set the cookie name and pass - # it, respectively. They are required for cookie-based - # authentication. Comment them out if using HTTP basic - # authentication. - proxy_set_header X-CookieName "nginxauth"; - proxy_set_header Cookie nginxauth=$cookie_nginxauth; - - # (Required if using Microsoft Active Directory as the LDAP server) - # Set the LDAP template by uncommenting the following directive. - #proxy_set_header X-Ldap-Template "(sAMAccountName=%(username)s)"; - - # (Optional if using OpenLDAP as the LDAP server) Set the LDAP - # template by uncommenting the following directive and replacing - # '(cn=%(username)s)' which is the default set in - # nginx-ldap-auth-daemon.py. - #proxy_set_header X-Ldap-Template "(cn=%(username)s)"; - - # (Optional) Set the realm name, by uncommenting the following - # directive and replacing 'Restricted' which is the default set - # in nginx-ldap-auth-daemon.py. 
- #proxy_set_header X-Ldap-Realm "Restricted"; - } diff --git a/root/defaults/nginx.conf b/root/defaults/nginx.conf deleted file mode 100644 index a47a405..0000000 --- a/root/defaults/nginx.conf +++ /dev/null @@ -1,120 +0,0 @@ -## Version 2021/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx.conf - -user abc; -worker_processes 4; -pid /run/nginx.pid; -include /etc/nginx/modules/*.conf; - -events { - worker_connections 768; - # multi_accept on; -} - -http { - - ## - # Basic Settings - ## - - client_body_buffer_size 128k; - client_max_body_size 0; - keepalive_timeout 65; - large_client_header_buffers 4 16k; - send_timeout 5m; - sendfile on; - tcp_nodelay on; - tcp_nopush on; - types_hash_max_size 2048; - variables_hash_max_size 2048; - - # server_tokens off; - # server_names_hash_bucket_size 64; - # server_name_in_redirect off; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - ## - # Logging Settings - ## - - access_log /config/log/nginx/access.log; - error_log /config/log/nginx/error.log; - - ## - # Gzip Settings - ## - - gzip on; - gzip_disable "msie6"; - - # gzip_vary on; - # gzip_proxied any; - # gzip_comp_level 6; - # gzip_buffers 16 8k; - # gzip_http_version 1.1; - # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - ## - # nginx-naxsi config - ## - # Uncomment it if you installed nginx-naxsi - ## - - #include /etc/nginx/naxsi_core.rules; - - ## - # nginx-passenger config - ## - # Uncomment it if you installed nginx-passenger - ## - - #passenger_root /usr; - #passenger_ruby /usr/bin/ruby; - - ## - # WebSocket proxying - ## - map $http_upgrade $connection_upgrade { - default upgrade; - '' close; - } - - ## - # Virtual Host Configs - ## - include /etc/nginx/conf.d/*.conf; - include /config/nginx/site-confs/*; - #Removed lua. 
Do not remove this comment - - ## - # Geoip2 config - ## - # Uncomment to add the Geoip2 configs needed to geo block countries/cities. - ## - - #include /config/nginx/geoip2.conf; -} - - -#mail { -# # See sample authentication script at: -# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript -# -# # auth_http localhost/auth.php; -# # pop3_capabilities "TOP" "USER"; -# # imap_capabilities "IMAP4rev1" "UIDPLUS"; -# -# server { -# listen localhost:110; -# protocol pop3; -# proxy on; -# } -# -# server { -# listen localhost:143; -# protocol imap; -# proxy on; -# } -#} -daemon off; diff --git a/root/defaults/proxy.conf b/root/defaults/proxy.conf deleted file mode 100644 index d1a383c..0000000 --- a/root/defaults/proxy.conf +++ /dev/null @@ -1,30 +0,0 @@ -## Version 2020/10/04 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/proxy.conf - -# Timeout if the real server is dead -proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; - -# Proxy Connection Settings -proxy_buffers 32 4k; -proxy_connect_timeout 240; -proxy_headers_hash_bucket_size 128; -proxy_headers_hash_max_size 1024; -proxy_http_version 1.1; -proxy_read_timeout 240; -proxy_redirect http:// $scheme://; -proxy_send_timeout 240; - -# Proxy Cache and Cookie Settings -proxy_cache_bypass $cookie_session; -#proxy_cookie_path / "/; Secure"; # enable at your own risk, may break certain apps -proxy_no_cache $cookie_session; - -# Proxy Header Settings -proxy_set_header Connection $connection_upgrade; -proxy_set_header Early-Data $ssl_early_data; -proxy_set_header Host $host; -proxy_set_header Upgrade $http_upgrade; -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Host $host; -proxy_set_header X-Forwarded-Proto https; -proxy_set_header X-Forwarded-Ssl on; -proxy_set_header X-Real-IP $remote_addr; diff --git a/root/defaults/ssl.conf b/root/defaults/ssl.conf deleted file mode 100644 index 654c512..0000000 --- 
a/root/defaults/ssl.conf +++ /dev/null @@ -1,48 +0,0 @@ -## Version 2020/10/29 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/ssl.conf - -### Mozilla Recommendations -# generated 2020-06-17, Mozilla Guideline v5.4, nginx 1.18.0-r0, OpenSSL 1.1.1g-r0, intermediate configuration -# https://ssl-config.mozilla.org/#server=nginx&version=1.18.0-r0&config=intermediate&openssl=1.1.1g-r0&guideline=5.4 - -ssl_session_timeout 1d; -ssl_session_cache shared:MozSSL:10m; # about 40000 sessions -ssl_session_tickets off; - -# intermediate configuration -ssl_protocols TLSv1.2 TLSv1.3; -ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; -ssl_prefer_server_ciphers off; - -# OCSP stapling -ssl_stapling on; -ssl_stapling_verify on; - - -### Linuxserver.io Defaults - -# Certificates -ssl_certificate /config/keys/letsencrypt/fullchain.pem; -ssl_certificate_key /config/keys/letsencrypt/privkey.pem; -# verify chain of trust of OCSP response using Root CA and Intermediate certs -ssl_trusted_certificate /config/keys/letsencrypt/fullchain.pem; - -# Diffie-Hellman Parameters -ssl_dhparam /config/nginx/dhparams.pem; - -# Resolver -resolver 127.0.0.11 valid=30s; # Docker DNS Server - -# Enable TLS 1.3 early data -ssl_early_data on; - -# HSTS, remove # from the line below to enable HSTS -#add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always; - -# Optional additional headers -#add_header Cache-Control "no-transform" always; -#add_header Content-Security-Policy "upgrade-insecure-requests; frame-ancestors 'self'"; -#add_header Referrer-Policy "same-origin" always; -#add_header X-Content-Type-Options "nosniff" always; -#add_header X-Frame-Options "SAMEORIGIN" always; -#add_header X-UA-Compatible "IE=Edge" always; -#add_header X-XSS-Protection 
"1; mode=block" always; diff --git a/root/donate.txt b/root/donate.txt deleted file mode 100644 index ca1e150..0000000 --- a/root/donate.txt +++ /dev/null @@ -1 +0,0 @@ -Certbot: https://supporters.eff.org/donate/support-work-on-certbot diff --git a/root/etc/cont-init.d/.gitkeep b/root/etc/cont-init.d/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/root/etc/cont-init.d/00-s6-secret-init.sh b/root/etc/cont-init.d/00-s6-secret-init.sh new file mode 100644 index 0000000..9306181 --- /dev/null +++ b/root/etc/cont-init.d/00-s6-secret-init.sh @@ -0,0 +1,29 @@ +#!/usr/bin/with-contenv bash +# ref: https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/01-envfile + +# in s6, environmental variables are written as text files for s6 to monitor +# seach through full-path filenames for files ending in "__FILE" +for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do + echo "[secret-init] Evaluating ${FILENAME##*/} ..." + + # set SECRETFILE to the contents of the full-path textfile + SECRETFILE=$(cat ${FILENAME}) + # SECRETFILE=${FILENAME} + # echo "[secret-init] Set SECRETFILE to ${SECRETFILE}" # DEBUG - rm for prod! + + # if SECRETFILE exists / is not null + if [[ -f ${SECRETFILE} ]]; then + # strip the appended "__FILE" from environmental variable name ... + STRIPFILE=$(echo ${FILENAME} | sed "s/__FILE//g") + # echo "[secret-init] Set STRIPFILE to ${STRIPFILE}" # DEBUG - rm for prod! + + # ... and set value to contents of secretfile + # since s6 uses text files, this is effectively "export ..." + printf $(cat ${SECRETFILE}) > ${STRIPFILE} + # echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})" # DEBUG - rm for prod!" + echo "[secret-init] Success! 
${STRIPFILE##*/} set from ${FILENAME##*/}" + + else + echo "[secret-init] cannot find secret in ${FILENAME}" + fi +done \ No newline at end of file diff --git a/root/etc/cont-init.d/01-add_user.sh b/root/etc/cont-init.d/01-add_user.sh new file mode 100644 index 0000000..567b070 --- /dev/null +++ b/root/etc/cont-init.d/01-add_user.sh @@ -0,0 +1,38 @@ +#!/usr/bin/with-contenv bash + +PUID=${PUID:-911} +PGID=${PGID:-911} + +groupmod -o -g "$PGID" abc +usermod -o -u "$PUID" abc + +if [ "$(date +%Y)" == "1970" ] && [ "$(uname -m)" == "armv7l" ]; then + echo ' +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + +Your DockerHost is most likely running an outdated version of libseccomp + +To fix this, please visit https://docs.linuxserver.io/faq#libseccomp + +Some apps might not behave correctly without this + +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +' +fi + +echo ' +Cribbed from https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/10-adduser +-------------------------------------' + +echo ' +------------------------------------- +GID/UID +-------------------------------------' +echo " +User uid: $(id -u abc) +User gid: $(id -g abc) +------------------------------------- +" +chown abc:abc /app +chown abc:abc /config +chown abc:abc /letsencrypt \ No newline at end of file diff --git a/root/etc/cont-init.d/02-set-timezone.sh b/root/etc/cont-init.d/02-set-timezone.sh new file mode 100644 index 0000000..3ed7fc8 --- /dev/null +++ b/root/etc/cont-init.d/02-set-timezone.sh @@ -0,0 +1,9 @@ +#!/usr/bin/with-contenv bash + +echo -e "Setting time zone:\\n\ +TIME ZONE=${TZ}\\n\ +" +TZ=${TZ:-"UTC"} +echo "${TZ}" > /etc/timezone +rm -f /etc/localtime +dpkg-reconfigure -f noninteractive tzdata \ No newline at end of file diff --git a/root/etc/cont-init.d/10-permissions.sh b/root/etc/cont-init.d/10-permissions.sh new file mode 100644 index 0000000..3920cde --- /dev/null +++ b/root/etc/cont-init.d/10-permissions.sh @@ -0,0 +1,15 @@ +#!/usr/bin/with-contenv bash + +# 
permissions +if [ -d "/letsencrypt" ]; then + chown -R abc:abc /letsencrypt +fi +if [ -d "/config/log" ]; then + chmod -R +r /config/log +fi +chmod +x /app/le-renew.sh + +# set permissions on cloudflare.ini +if [ -f "/config/credentials/cloudflare.ini" ]; then + chmod 600 /config/credentials/cloudflare.ini +fi \ No newline at end of file diff --git a/root/etc/cont-init.d/50-certbot.sh b/root/etc/cont-init.d/50-certbot.sh new file mode 100644 index 0000000..b3cd96b --- /dev/null +++ b/root/etc/cont-init.d/50-certbot.sh @@ -0,0 +1,171 @@ +#!/usr/bin/with-contenv bash +# cribbed from https://github.com/linuxserver/docker-swag/blob/master/root/etc/cont-init.d/50-config + +# Display variables for troubleshooting +echo -e "Variables set:\\n\ +PUID=${PUID}\\n\ +PGID=${PGID}\\n\ +TZ=${TZ}\\n\ +TLD=${TLD}\\n\ +SUBDOMAINS=${SUBDOMAINS}\\n\ +ONLY_SUBDOMAINS=${ONLY_SUBDOMAINS}\\n\ +EMAIL=${EMAIL}\\n\ +STAGING=${STAGING}\\n +" + +# Force cloudflare +DNSPLUGIN=${DNSPLUGIN:="cloudflare"} + +# Sanitize variables +SANED_VARS=( EMAIL ONLY_SUBDOMAINS STAGING SUBDOMAINS TLD ) +for i in "${SANED_VARS[@]}" +do + export echo "$i"="${!i//\"/}" + export echo "$i"="$(echo "${!i}" | tr '[:upper:]' '[:lower:]')" +done + +# Check to make sure that the required variables are set +[[ -z "${TLD}" ]] && \ + echo "Please pass your Top Level Domain (TLD) as an environment variable in your docker run command. See README for more details." && \ + sleep infinity + +# Make our folders and links +mkdir -p \ + /config/{log/letsencrypt,crontabs,deploy} \ + /etc/letsencrypt/live \ + /etc/letsencrypt/renewal-hooks/deploy +# rm -rf /etc/letsencrypt +# ln -s /letsencrypt /etc/letsencrypt/live +ln -s /config/log/letsencrypt /var/log/letsencrypt + +# Copy crontab defaults if needed +[[ ! 
-f /config/crontabs/root ]] && \ + cp /etc/crontabs/root /config/crontabs/ +# Import user crontabs +rm /etc/crontabs/* +cp /defaults/crontabs/* /etc/crontabs/ + +# Copy deploy hook defaults if needed +[[ -z "$(ls -A /config/deploy)" ]] && [[ -z "$(ls -A /etc/letsencrypt/renewal-hooks/deploy)" ]] && \ + cp /etc/letsencrypt/renewal-hooks/deploy/* /config/deploy/ && \ + rm /etc/letsencrypt/renewal-hooks/deploy/* +# Import deploy hooks +cp /config/deploy/* /etc/letsencrypt/renewal-hooks/deploy/ + +# chown -R $(whoami) /etc/letsencrypt +# chown -R $(whoami) /letsencrypt + +# Create original config file if it doesn't exist +if [ ! -f "/config/.donoteditthisfile.conf" ]; then + echo -e "ORIGTLD=\"${TLD}\" ORIGSUBDOMAINS=\"${SUBDOMAINS}\" ORIGONLY_SUBDOMAINS=\"${ONLY_SUBDOMAINS}\" ORIGPROPAGATION=\"${PROPAGATION}\" ORIGSTAGING=\"${STAGING}\" ORIGEMAIL=\"${EMAIL}\"" > /config/.donoteditthisfile.conf + echo "Created .donoteditthisfile.conf" +fi + +# Load original config settings +# shellcheck disable=SC1091 +. 
/config/.donoteditthisfile.conf + +# If staging is set to true, use the relevant server +if [ "${STAGING}" = "true" ]; then + echo "NOTICE: Staging is active" + echo "Using Let's Encrypt as the cert provider" + ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory" +else + echo "Using Let's Encrypt as the cert provider" + ACMESERVER="https://acme-v02.api.letsencrypt.org/directory" +fi + +# figuring out url only vs url & subdomains vs subdomains only +if [ -n "${SUBDOMAINS}" ]; then + echo "SUBDOMAINS entered, processing" + if [ "${SUBDOMAINS}" = "wildcard" ]; then + if [ "${ONLY_SUBDOMAINS}" = true ]; then + export TLD_REAL="-d *.${TLD}" + echo "Wildcard cert for only the subdomains of ${TLD} will be requested" + else + export TLD_REAL="-d *.${TLD} -d ${TLD}" + echo "Wildcard cert for ${TLD} will be requested" + fi + else + echo "SUBDOMAINS entered, processing" + for job in $(echo "${SUBDOMAINS}" | tr "," " "); do + export SUBDOMAINS_REAL="${SUBDOMAINS_REAL} -d ${job}.${TLD}" + done + if [ "${ONLY_SUBDOMAINS}" = true ]; then + TLD_REAL="${SUBDOMAINS_REAL}" + echo "Only subdomains, no URL in cert" + else + TLD_REAL="-d ${TLD}${SUBDOMAINS_REAL}" + fi + echo "Sub-domains processed are: ${SUBDOMAINS_REAL}" + fi +else + echo "No subdomains defined" + TLD_REAL="-d ${TLD}" +fi + +# figuring out whether to use e-mail and which +if [[ $EMAIL == *@* ]]; then + echo "E-mail address entered: ${EMAIL}" + EMAILPARAM="-m ${EMAIL} --no-eff-email" +else + echo "No e-mail address entered or address invalid" + EMAILPARAM="--register-unsafely-without-email" +fi + +# Set up validation method to use +PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION:-60}" +PREFCHAL="--dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/credentials/${DNSPLUGIN}.ini ${PROPAGATIONPARAM}" +echo "${VALIDATION:="DNS"} validation via ${DNSPLUGIN} plugin is selected" + +# Set the symlink for key location +rm -rf /letsencrypt/* +if [ "${ONLY_SUBDOMAINS}" = "true" ] && [ ! 
"${SUBDOMAINS}" = "wildcard" ] ; then + DOMAIN="$(echo "${SUBDOMAINS}" | tr ',' ' ' | awk '{print $1}').${TLD}" + LE_LOC="../etc/letsencrypt/live/${DOMAIN}" + # ln -s ../etc/letsencrypt/live/"${DOMAIN}" /letsencrypt +else + LE_LOC="../etc/letsencrypt/live/${TLD}" + # ln -s ../etc/letsencrypt/live/"${TLD}" /letsencrypt +fi +[[ ! -d "${LE_LOC}" ]] && \ + mkdir -p ${LE_LOC} +ln -s ${LE_LOC} /letsencrypt + +# Check for changes in cert variables; revoke certs if necessary +if [ ! "${TLD}" = "${ORIGTLD}" ] || [ ! "${SUBDOMAINS}" = "${ORIGSUBDOMAINS}" ] || [ ! "${ONLY_SUBDOMAINS}" = "${ORIGONLY_SUBDOMAINS}" ] || [ ! "${STAGING}" = "${ORIGSTAGING}" ]; then + echo "Different validation parameters entered than what was used before. Revoking and deleting existing certificate, and an updated one will be created" + if [ "${ORIGONLY_SUBDOMAINS}" = "true" ] && [ ! "${ORIGSUBDOMAINS}" = "wildcard" ]; then + ORIGDOMAIN="$(echo "${ORIGSUBDOMAINS}" | tr ',' ' ' | awk '{print $1}').${ORIGTLD}" + else + ORIGDOMAIN="${ORIGTLD}" + fi +if [ "${ORIGSTAGING}" = "true" ]; then + REV_ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory" + else + REV_ACMESERVER="https://acme-v02.api.letsencrypt.org/directory" + fi + [[ -f /etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem ]] && certbot revoke --non-interactive --cert-path /etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem --server ${REV_ACMESERVER} + rm -rf /letsencrypt/* + mkdir -p /letsencrypt +fi + +# Save new variables +echo -e "ORIGTLD=\"${TLD}\" ORIGSUBDOMAINS=\"${SUBDOMAINS}\" ORIGONLY_SUBDOMAINS=\"${ONLY_SUBDOMAINS}\" ORIGPROPAGATION=\"${PROPAGATION}\" ORIGSTAGING=\"${STAGING}\" ORIGEMAIL=\"${EMAIL}\"" > /config/.donoteditthisfile.conf + +# generating certs if necessary +if [ ! 
-f "/letsencrypt/fullchain.pem" ]; then + echo "Generating new certificate" + # shellcheck disable=SC2086 + certbot certonly --renew-by-default --server ${ACMESERVER} ${PREFCHAL} --rsa-key-size 4096 ${EMAILPARAM} --agree-tos ${TLD_REAL} + if [ -d /letsencrypt ]; then + cd /letsencrypt || exit + else + echo "ERROR: Cert does not exist! Please see the validation error above. Make sure you entered correct credentials into the /config/dns-conf/${FILENAME} file." + sleep infinity + fi + echo "New certificate generated" +else + echo "Certificate exists; parameters unchanged" +fi + diff --git a/root/etc/cont-init.d/50-config b/root/etc/cont-init.d/50-config deleted file mode 100644 index bb78177..0000000 --- a/root/etc/cont-init.d/50-config +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/with-contenv bash - -# Display variables for troubleshooting -echo -e "Variables set:\\n\ -PUID=${PUID}\\n\ -PGID=${PGID}\\n\ -TZ=${TZ}\\n\ -URL=${URL}\\n\ -SUBDOMAINS=${SUBDOMAINS}\\n\ -EXTRA_DOMAINS=${EXTRA_DOMAINS}\\n\ -ONLY_SUBDOMAINS=${ONLY_SUBDOMAINS}\\n\ -VALIDATION=${VALIDATION}\\n\ -CERTPROVIDER=${CERTPROVIDER}\\n\ -DNSPLUGIN=${DNSPLUGIN}\\n\ -EMAIL=${EMAIL}\\n\ -STAGING=${STAGING}\\n" - -# Echo init finish for test runs -if [ -n "${TEST_RUN}" ]; then - echo '[services.d] done.' -fi - -# Sanitize variables -SANED_VARS=( DNSPLUGIN EMAIL EXTRA_DOMAINS ONLY_SUBDOMAINS STAGING SUBDOMAINS URL VALIDATION CERTPROVIDER ) -for i in "${SANED_VARS[@]}" -do - export echo "$i"="${!i//\"/}" - export echo "$i"="$(echo "${!i}" | tr '[:upper:]' '[:lower:]')" -done - -# check to make sure that the required variables are set -[[ -z "$URL" ]] && \ - echo "Please pass your URL as an environment variable in your docker run command. See docker info for more details." 
&& \ - sleep infinity - -# make our folders and links -mkdir -p \ - /config/{log/letsencrypt,log/fail2ban,etc/letsencrypt,fail2ban,crontabs,dns-conf,geoip2db} \ - /var/run/fail2ban -rm -rf /etc/letsencrypt -ln -s /config/etc/letsencrypt /etc/letsencrypt - -# copy dns default configs -cp -n /defaults/dns-conf/* /config/dns-conf/ -chown -R abc:abc /config/dns-conf - -# copy reverse proxy configs -cp -R /defaults/proxy-confs /config/nginx/ - -# copy/update the fail2ban config defaults to/in /config -cp -R /defaults/fail2ban/filter.d /config/fail2ban/ -cp -R /defaults/fail2ban/action.d /config/fail2ban/ -# if jail.local is missing in /config, copy default -[[ ! -f /config/fail2ban/jail.local ]] && \ - cp /defaults/jail.local /config/fail2ban/jail.local -# Replace fail2ban config with user config -[[ -d /etc/fail2ban/filter.d ]] && \ - rm -rf /etc/fail2ban/filter.d -[[ -d /etc/fail2ban/action.d ]] && \ - rm -rf /etc/fail2ban/action.d -cp -R /config/fail2ban/filter.d /etc/fail2ban/ -cp -R /config/fail2ban/action.d /etc/fail2ban/ -cp /defaults/fail2ban/fail2ban.local /etc/fail2ban/ -cp /config/fail2ban/jail.local /etc/fail2ban/jail.local - -# copy crontab and proxy defaults if needed -[[ ! -f /config/crontabs/root ]] && \ - cp /etc/crontabs/root /config/crontabs/ -[[ ! -f /config/nginx/proxy.conf ]] && \ - cp /defaults/proxy.conf /config/nginx/proxy.conf -[[ ! -f /config/nginx/ssl.conf ]] && \ - cp /defaults/ssl.conf /config/nginx/ssl.conf -[[ ! -f /config/nginx/ldap.conf ]] && \ - cp /defaults/ldap.conf /config/nginx/ldap.conf -[[ ! -f /config/nginx/authelia-server.conf ]] && \ - cp /defaults/authelia-server.conf /config/nginx/authelia-server.conf -[[ ! -f /config/nginx/authelia-location.conf ]] && \ - cp /defaults/authelia-location.conf /config/nginx/authelia-location.conf -[[ ! -f /config/nginx/geoip2.conf ]] && \ - cp /defaults/geoip2.conf /config/nginx/geoip2.conf -[[ ! 
-f /config/www/502.html ]] && - cp /defaults/502.html /config/www/502.html - -# remove lua bits from nginx.conf if not done before -if ! grep -q '#Removed lua' /config/nginx/nginx.conf; then - echo "Removing lua specific info from nginx.conf" - sed -i 's|\tlua_load_resty_core off;|\t#Removed lua. Do not remove this comment|g' /config/nginx/nginx.conf -fi - -# copy pre-generated dhparams or generate if needed -[[ ! -f /config/nginx/dhparams.pem ]] && \ - cp /defaults/dhparams.pem /config/nginx/dhparams.pem -if ! grep -q 'PARAMETERS' "/config/nginx/dhparams.pem"; then - curl -o /config/nginx/dhparams.pem -L "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" -fi -if ! grep -q 'PARAMETERS' "/config/nginx/dhparams.pem"; then - echo "Generating dhparams.pem. This will take a long time. Do not stop the container until this process is completed." - openssl dhparam -out /config/nginx/dhparams.pem 4096 -fi - -# check to make sure DNSPLUGIN is selected if dns validation is used -[[ "$VALIDATION" = "dns" ]] && [[ ! "$DNSPLUGIN" =~ ^(aliyun|cloudflare|cloudxns|cpanel|digitalocean|dnsimple|dnsmadeeasy|domeneshop|gandi|gehirn|google|hetzner|inwx|linode|luadns|netcup|njalla|nsone|ovh|rfc2136|route53|sakuracloud|transip)$ ]] && \ - echo "Please set the DNSPLUGIN variable to a valid plugin name. See docker info for more details." && \ - sleep infinity - -# import user crontabs -rm /etc/crontabs/* -cp /config/crontabs/* /etc/crontabs/ - -# create original config file if it doesn't exist, move non-hidden legacy file to hidden -if [ -f "/config/donoteditthisfile.conf" ]; then - mv /config/donoteditthisfile.conf /config/.donoteditthisfile.conf -fi -if [ ! 
-f "/config/.donoteditthisfile.conf" ]; then - echo -e "ORIGURL=\"$URL\" ORIGSUBDOMAINS=\"$SUBDOMAINS\" ORIGONLY_SUBDOMAINS=\"$ONLY_SUBDOMAINS\" ORIGEXTRA_DOMAINS=\"$EXTRA_DOMAINS\" ORIGVALIDATION=\"$VALIDATION\" ORIGDNSPLUGIN=\"$DNSPLUGIN\" ORIGPROPAGATION=\"$PROPAGATION\" ORIGSTAGING=\"$STAGING\" ORIGDUCKDNSTOKEN=\"$DUCKDNSTOKEN\" ORIGCERTPROVIDER=\"$CERTPROVIDER\" ORIGEMAIL=\"$EMAIL\"" > /config/.donoteditthisfile.conf - echo "Created .donoteditthisfile.conf" -fi - -# load original config settings -# shellcheck disable=SC1091 -. /config/.donoteditthisfile.conf - -# set default validation to http -if [ -z "$VALIDATION" ]; then - VALIDATION="http" - echo "VALIDATION parameter not set; setting it to http" -fi - -# if zerossl is selected or staging is set to true, use the relevant server -if [ "$CERTPROVIDER" = "zerossl" ] && [ "$STAGING" = "true" ]; then - echo "ZeroSSL does not support staging mode, ignoring STAGING variable" -fi -if [ "$CERTPROVIDER" = "zerossl" ] && [ -n "$EMAIL" ]; then - echo "ZeroSSL is selected as the cert provider, registering cert with $EMAIL" - ACMESERVER="https://acme.zerossl.com/v2/DV90" -elif [ "$CERTPROVIDER" = "zerossl" ] && [ -z "$EMAIL" ]; then - echo "ZeroSSL is selected as the cert provider, but the e-mail address has not been entered. 
Please visit https://zerossl.com, register a new account and set the account e-mail address in the EMAIL environment variable" - sleep infinity -elif [ "$STAGING" = "true" ]; then - echo "NOTICE: Staging is active" - echo "Using Let's Encrypt as the cert provider" - ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory" -else - echo "Using Let's Encrypt as the cert provider" - ACMESERVER="https://acme-v02.api.letsencrypt.org/directory" -fi - -# figuring out url only vs url & subdomains vs subdomains only -if [ -n "$SUBDOMAINS" ]; then - echo "SUBDOMAINS entered, processing" - if [ "$SUBDOMAINS" = "wildcard" ]; then - if [ "$ONLY_SUBDOMAINS" = true ]; then - export URL_REAL="-d *.${URL}" - echo "Wildcard cert for only the subdomains of $URL will be requested" - else - export URL_REAL="-d *.${URL} -d ${URL}" - echo "Wildcard cert for $URL will be requested" - fi - else - echo "SUBDOMAINS entered, processing" - for job in $(echo "$SUBDOMAINS" | tr "," " "); do - export SUBDOMAINS_REAL="$SUBDOMAINS_REAL -d ${job}.${URL}" - done - if [ "$ONLY_SUBDOMAINS" = true ]; then - URL_REAL="$SUBDOMAINS_REAL" - echo "Only subdomains, no URL in cert" - else - URL_REAL="-d ${URL}${SUBDOMAINS_REAL}" - fi - echo "Sub-domains processed are: $SUBDOMAINS_REAL" - fi -else - echo "No subdomains defined" - URL_REAL="-d $URL" -fi - -# add extra domains -if [ -n "$EXTRA_DOMAINS" ]; then - echo "EXTRA_DOMAINS entered, processing" - for job in $(echo "$EXTRA_DOMAINS" | tr "," " "); do - export EXTRA_DOMAINS_REAL="$EXTRA_DOMAINS_REAL -d ${job}" - done - echo "Extra domains processed are: $EXTRA_DOMAINS_REAL" - URL_REAL="$URL_REAL $EXTRA_DOMAINS_REAL" -fi - -# figuring out whether to use e-mail and which -if [[ $EMAIL == *@* ]]; then - echo "E-mail address entered: ${EMAIL}" - EMAILPARAM="-m ${EMAIL} --no-eff-email" -else - echo "No e-mail address entered or address invalid" - EMAILPARAM="--register-unsafely-without-email" -fi - -# update plugin names in dns conf inis -sed -i 
's|^certbot_dns_aliyun:||g' /config/dns-conf/aliyun.ini -sed -i 's|^certbot_dns_domeneshop:||g' /config/dns-conf/domeneshop.ini -sed -i 's|^certbot_dns_inwx:||g' /config/dns-conf/inwx.ini -sed -i 's|^certbot_dns_transip:||g' /config/dns-conf/transip.ini - -# setting the validation method to use -if [ "$VALIDATION" = "dns" ]; then - if [ "$DNSPLUGIN" = "route53" ]; then - if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi - PREFCHAL="--dns-${DNSPLUGIN} ${PROPAGATIONPARAM}" - elif [[ "$DNSPLUGIN" =~ ^(cpanel)$ ]]; then - if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--certbot-dns-${DNSPLUGIN}:${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi - PREFCHAL="-a certbot-dns-${DNSPLUGIN}:${DNSPLUGIN} --certbot-dns-${DNSPLUGIN}:${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM}" - elif [[ "$DNSPLUGIN" =~ ^(gandi)$ ]]; then - if [ -n "$PROPAGATION" ];then echo "Gandi dns plugin does not support setting propagation time"; fi - PREFCHAL="-a certbot-plugin-${DNSPLUGIN}:dns --certbot-plugin-${DNSPLUGIN}:dns-credentials /config/dns-conf/${DNSPLUGIN}.ini" - elif [[ "$DNSPLUGIN" =~ ^(google)$ ]]; then - if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi - PREFCHAL="--dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.json ${PROPAGATIONPARAM}" - elif [[ "$DNSPLUGIN" =~ ^(aliyun|domeneshop|hetzner|inwx|netcup|njalla|transip)$ ]]; then - if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi - PREFCHAL="-a dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM}" - else - if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi - PREFCHAL="--dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM}" - fi - echo "${VALIDATION} 
validation via ${DNSPLUGIN} plugin is selected" -elif [ "$VALIDATION" = "tls-sni" ]; then - PREFCHAL="--non-interactive --standalone --preferred-challenges http" - echo "*****tls-sni validation has been deprecated, attempting http validation instead" -elif [ "$VALIDATION" = "duckdns" ]; then - PREFCHAL="--non-interactive --manual --preferred-challenges dns --manual-auth-hook /app/duckdns-txt" - chmod +x /app/duckdns-txt - echo "duckdns validation is selected" - if [ "$SUBDOMAINS" = "wildcard" ]; then - echo "the resulting certificate will only cover the subdomains due to a limitation of duckdns, so it is advised to set the root location to use www.subdomain.duckdns.org" - export URL_REAL="-d *.${URL}" - else - echo "the resulting certificate will only cover the main domain due to a limitation of duckdns, ie. subdomain.duckdns.org" - export URL_REAL="-d ${URL}" - fi -else - PREFCHAL="--non-interactive --standalone --preferred-challenges http" - echo "http validation is selected" -fi - -# setting the symlink for key location -rm -rf /config/keys/letsencrypt -if [ "$ONLY_SUBDOMAINS" = "true" ] && [ ! "$SUBDOMAINS" = "wildcard" ] ; then - DOMAIN="$(echo "$SUBDOMAINS" | tr ',' ' ' | awk '{print $1}').${URL}" - ln -s ../etc/letsencrypt/live/"$DOMAIN" /config/keys/letsencrypt -else - ln -s ../etc/letsencrypt/live/"$URL" /config/keys/letsencrypt -fi - -# checking for changes in cert variables, revoking certs if necessary -if [ ! "$URL" = "$ORIGURL" ] || [ ! "$SUBDOMAINS" = "$ORIGSUBDOMAINS" ] || [ ! "$ONLY_SUBDOMAINS" = "$ORIGONLY_SUBDOMAINS" ] || [ ! "$EXTRA_DOMAINS" = "$ORIGEXTRA_DOMAINS" ] || [ ! "$VALIDATION" = "$ORIGVALIDATION" ] || [ ! "$DNSPLUGIN" = "$ORIGDNSPLUGIN" ] || [ ! "$PROPAGATION" = "$ORIGPROPAGATION" ] || [ ! "$STAGING" = "$ORIGSTAGING" ] || [ ! "$DUCKDNSTOKEN" = "$ORIGDUCKDNSTOKEN" ] || [ ! "$CERTPROVIDER" = "$ORIGCERTPROVIDER" ]; then - echo "Different validation parameters entered than what was used before. 
Revoking and deleting existing certificate, and an updated one will be created" - if [ "$ORIGONLY_SUBDOMAINS" = "true" ] && [ ! "$ORIGSUBDOMAINS" = "wildcard" ]; then - ORIGDOMAIN="$(echo "$ORIGSUBDOMAINS" | tr ',' ' ' | awk '{print $1}').${ORIGURL}" - else - ORIGDOMAIN="$ORIGURL" - fi - if [ "$ORIGCERTPROVIDER" = "zerossl" ] && [ -n "$ORIGEMAIL" ]; then - REV_EAB_CREDS=$(curl -s https://api.zerossl.com/acme/eab-credentials-email --data "email=$ORIGEMAIL") - REV_ZEROSSL_EAB_KID=$(echo "$REV_EAB_CREDS" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_kid'])") - REV_ZEROSSL_EAB_HMAC_KEY=$(echo "$REV_EAB_CREDS" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_hmac_key'])") - if [ -z "$REV_ZEROSSL_EAB_KID" ] || [ -z "$REV_ZEROSSL_EAB_HMAC_KEY" ]; then - echo "Unable to retrieve EAB credentials from ZeroSSL. Check the outgoing connections to api.zerossl.com and dns. Sleeping." - sleep infinity - fi - REV_ACMESERVER="https://acme.zerossl.com/v2/DV90 --eab-kid ${REV_ZEROSSL_EAB_KID} --eab-hmac-key ${REV_ZEROSSL_EAB_HMAC_KEY}" - elif [ "$ORIGSTAGING" = "true" ]; then - REV_ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory" - else - REV_ACMESERVER="https://acme-v02.api.letsencrypt.org/directory" - fi - [[ -f /config/etc/letsencrypt/live/"$ORIGDOMAIN"/fullchain.pem ]] && certbot revoke --non-interactive --cert-path /config/etc/letsencrypt/live/"$ORIGDOMAIN"/fullchain.pem --server $REV_ACMESERVER - rm -rf /config/etc/letsencrypt - mkdir -p /config/etc/letsencrypt -fi - -# saving new variables -echo -e "ORIGURL=\"$URL\" ORIGSUBDOMAINS=\"$SUBDOMAINS\" ORIGONLY_SUBDOMAINS=\"$ONLY_SUBDOMAINS\" ORIGEXTRA_DOMAINS=\"$EXTRA_DOMAINS\" ORIGVALIDATION=\"$VALIDATION\" ORIGDNSPLUGIN=\"$DNSPLUGIN\" ORIGPROPAGATION=\"$PROPAGATION\" ORIGSTAGING=\"$STAGING\" ORIGDUCKDNSTOKEN=\"$DUCKDNSTOKEN\" ORIGCERTPROVIDER=\"$CERTPROVIDER\" ORIGEMAIL=\"$EMAIL\"" > /config/.donoteditthisfile.conf - -# alter extension for error message -if [ "$DNSPLUGIN" = 
"google" ]; then - FILENAME="$DNSPLUGIN.json" -else - FILENAME="$DNSPLUGIN.ini" -fi - -# generating certs if necessary -if [ ! -f "/config/keys/letsencrypt/fullchain.pem" ]; then - if [ "$CERTPROVIDER" = "zerossl" ] && [ -n "$EMAIL" ]; then - echo "Retrieving EAB from ZeroSSL" - EAB_CREDS=$(curl -s https://api.zerossl.com/acme/eab-credentials-email --data "email=$EMAIL") - ZEROSSL_EAB_KID=$(echo "$EAB_CREDS" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_kid'])") - ZEROSSL_EAB_HMAC_KEY=$(echo "$EAB_CREDS" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_hmac_key'])") - if [ -z "$ZEROSSL_EAB_KID" ] || [ -z "$ZEROSSL_EAB_HMAC_KEY" ]; then - echo "Unable to retrieve EAB credentials from ZeroSSL. Check the outgoing connections to api.zerossl.com and dns. Sleeping." - sleep infinity - fi - ZEROSSL_EAB="--eab-kid ${ZEROSSL_EAB_KID} --eab-hmac-key ${ZEROSSL_EAB_HMAC_KEY}" - fi - echo "Generating new certificate" - # shellcheck disable=SC2086 - certbot certonly --renew-by-default --server $ACMESERVER $ZEROSSL_EAB $PREFCHAL --rsa-key-size 4096 $EMAILPARAM --agree-tos $URL_REAL - if [ -d /config/keys/letsencrypt ]; then - cd /config/keys/letsencrypt || exit - else - if [ "$VALIDATION" = "dns" ]; then - echo "ERROR: Cert does not exist! Please see the validation error above. Make sure you entered correct credentials into the /config/dns-conf/${FILENAME} file." - elif [ "$VALIDATION" = "duckdns" ]; then - echo "ERROR: Cert does not exist! Please see the validation error above. Make sure your DUCKDNSTOKEN is correct." - else - echo "ERROR: Cert does not exist! Please see the validation error above. The issue may be due to incorrect dns or port forwarding settings. 
Please fix your settings and recreate the container" - fi - sleep infinity - fi - openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass: - sleep 1 - cat {privkey,fullchain}.pem > priv-fullchain-bundle.pem - echo "New certificate generated; starting nginx" -else - echo "Certificate exists; parameters unchanged; starting nginx" -fi - -# create GeoIP2 folder symlink -[[ -d /var/lib/libmaxminddb ]] && [[ ! -L /var/lib/libmaxminddb ]] && \ - rm -rf /var/lib/libmaxminddb -[[ ! -d /var/lib/libmaxminddb ]] && \ - ln -s /config/geoip2db /var/lib/libmaxminddb -# check GeoIP2 database -if [ -n "$MAXMINDDB_LICENSE_KEY" ]; then - sed -i "s|.*MAXMINDDB_LICENSE_KEY.*|MAXMINDDB_LICENSE_KEY=\"${MAXMINDDB_LICENSE_KEY}\"|g" /etc/conf.d/libmaxminddb - if [ ! -f /var/lib/libmaxminddb/GeoLite2-City.mmdb ]; then - echo "Downloading GeoIP2 City database." - /etc/periodic/weekly/libmaxminddb - fi -elif [ -f /var/lib/libmaxminddb/GeoLite2-City.mmdb ]; then - echo -e "Currently using the user provided GeoLite2-City.mmdb.\nIf you want to enable weekly auto-updates of the database, retrieve a free license key from MaxMind,\nand add a new env variable \"MAXMINDDB_LICENSE_KEY\", set to your license key." -else - echo -e "Starting 2019/12/30, GeoIP2 databases require personal license key to download. Please retrieve a free license key from MaxMind,\nand add a new env variable \"MAXMINDDB_LICENSE_KEY\", set to your license key." -fi - -# logfiles needed by fail2ban -[[ ! -f /config/log/nginx/error.log ]] && \ - touch /config/log/nginx/error.log -[[ ! 
-f /config/log/nginx/access.log ]] && \ - touch /config/log/nginx/access.log - -# permissions -chown -R abc:abc \ - /config -chmod -R 0644 /etc/logrotate.d -chmod -R +r /config/log -chmod +x /app/le-renew.sh -chmod 700 /defaults/dns-conf -chmod 600 /defaults/dns-conf/* diff --git a/root/etc/cont-init.d/60-renew b/root/etc/cont-init.d/60-renew deleted file mode 100644 index 975c73a..0000000 --- a/root/etc/cont-init.d/60-renew +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/with-contenv bash - -# Check if the cert is expired or expires within a day, if so, renew -if openssl x509 -in /config/keys/letsencrypt/fullchain.pem -noout -checkend 86400 >/dev/null; then - echo "The cert does not expire within the next day. Letting the cron script handle the renewal attempts overnight (2:08am)." -else - echo "The cert is either expired or it expires within the next day. Attempting to renew. This could take up to 10 minutes." - /app/le-renew.sh - sleep 1 -fi \ No newline at end of file diff --git a/root/etc/cont-init.d/60_renewal-init.sh b/root/etc/cont-init.d/60_renewal-init.sh new file mode 100644 index 0000000..45cf951 --- /dev/null +++ b/root/etc/cont-init.d/60_renewal-init.sh @@ -0,0 +1,15 @@ +#!/usr/bin/with-contenv bash + +# Check if the cert is expired or expires within a day, if so, renew +if openssl x509 -in /letsencrypt/fullchain.pem -noout -checkend 86400 >/dev/null; then + echo "The cert does not expire within the next day." + if [ ! "${STAGING}" = "true" ]; then + echo "Testing renewal..." + certbot renew --dry-run + fi + echo "Letting the cron script handle the renewal attempts overnight (2:08am)." +else + echo "The cert is either expired or it expires within the next day. Attempting to renew. This could take up to 10 minutes." 
+ /app/le-renew.sh + sleep 1 +fi \ No newline at end of file diff --git a/root/etc/cont-init.d/70-templates b/root/etc/cont-init.d/70-templates deleted file mode 100644 index 6b60ed1..0000000 --- a/root/etc/cont-init.d/70-templates +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/with-contenv bash - -nginx_confs=( \ - authelia-location.conf \ - authelia-server.conf \ - geoip2.conf \ - ldap.conf \ - nginx.conf \ - proxy.conf \ - site-confs/default \ - ssl.conf ) - -for i in ${nginx_confs[@]}; do - if [ "$(sed -nE 's|^## Version ([0-9]{4}\/[0-9]{2}\/[0-9]{2}).*|\1|p' /config/nginx/${i})" != "$(sed -nE 's|^## Version ([0-9]{4}\/[0-9]{2}\/[0-9]{2}).*|\1|p' /defaults/$(basename ${i}))" ]; then - nginx_confs_changed="/config/nginx/${i}\n${nginx_confs_changed}" - fi -done - -if [ -n "$nginx_confs_changed" ]; then - echo "**** The following nginx confs have different version dates than the defaults that are shipped. ****" - echo "**** This may be due to user customization or an update to the defaults. ****" - echo "**** To update them to the latest defaults shipped within the image, delete these files and restart the container. ****" - echo "**** If they are user customized, check the date version at the top and compare to the upstream changelog via the link. ****" - echo -e "${nginx_confs_changed}" -fi - -proxy_confs=$(ls /config/nginx/proxy-confs/*.conf 2>/dev/null) - -for i in $proxy_confs; do - if [ -f "${i}.sample" ]; then - if [ "$(sed -nE 's|^## Version ([0-9]{4}\/[0-9]{2}\/[0-9]{2}).*|\1|p' ${i})" != "$(sed -nE 's|^## Version ([0-9]{4}\/[0-9]{2}\/[0-9]{2}).*|\1|p' ${i}.sample)" ]; then - proxy_confs_changed="${i}\n${proxy_confs_changed}" - fi - fi -done - -if [ -n "$proxy_confs_changed" ]; then - echo "**** The following reverse proxy confs have different version dates than the samples that are shipped. ****" - echo "**** This may be due to user customization or an update to the samples. 
****" - echo "**** You should compare them to the samples in the same folder to make sure you have the latest updates. ****" - echo -e "${proxy_confs_changed}" -fi diff --git a/root/etc/crontabs/.gitkeep b/root/etc/crontabs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/root/etc/logrotate.d/fail2ban b/root/etc/logrotate.d/fail2ban deleted file mode 100644 index 7f3e6ce..0000000 --- a/root/etc/logrotate.d/fail2ban +++ /dev/null @@ -1,12 +0,0 @@ -/config/log/fail2ban/fail2ban.log { - weekly - rotate 7 - missingok - compress - delaycompress - nodateext - postrotate - /usr/bin/fail2ban-client flushlogs 1>/dev/null || true - endscript - su abc abc -} diff --git a/root/etc/logrotate.d/lerotate b/root/etc/logrotate.d/lerotate deleted file mode 100644 index 28a38a7..0000000 --- a/root/etc/logrotate.d/lerotate +++ /dev/null @@ -1,11 +0,0 @@ -/config/log/letsencrypt/*.log { - weekly - rotate 52 - compress - delaycompress - nodateext - missingok - notifempty - sharedscripts - su abc abc -} diff --git a/root/etc/services.d/.gitkeep b/root/etc/services.d/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/root/etc/services.d/cron/finish b/root/etc/services.d/cron/finish new file mode 100644 index 0000000..8caa421 --- /dev/null +++ b/root/etc/services.d/cron/finish @@ -0,0 +1,3 @@ +#!/usr/bin/execlineb -S0 + +s6-svscanctl -t /var/run/s6/services \ No newline at end of file diff --git a/root/etc/services.d/cron/run b/root/etc/services.d/cron/run new file mode 100644 index 0000000..a158f64 --- /dev/null +++ b/root/etc/services.d/cron/run @@ -0,0 +1,8 @@ +#!/usr/bin/with-contenv bash + +# # this line here is what we should have to get rid of the hard link error +# touch /etc/crontab /etc/cron.*/* + +exec /usr/sbin/cron -f + +# exec /usr/sbin/cron -f -S -l 5 -c /etc/crontabs \ No newline at end of file diff --git a/root/etc/services.d/fail2ban/run b/root/etc/services.d/fail2ban/run deleted file mode 100644 index c023109..0000000 --- 
a/root/etc/services.d/fail2ban/run +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/with-contenv bash - - exec \ - fail2ban-client -x -f start diff --git a/scripts/archive/00_secret-init.sh b/scripts/archive/00_secret-init.sh new file mode 100644 index 0000000..1109fcc --- /dev/null +++ b/scripts/archive/00_secret-init.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# logic cribbed from linuxserver.io: +# https://github.com/linuxserver/docker-baseimage-ubuntu/blob/bionic/root/etc/cont-init.d/01-envfile + +# iterate over environmental variables +# if variable ends in "__FILE" +for FULLVAR in $(env | grep "^.*__FILE="); do + # trim "=..." from variable name + VARNAME=$(echo $FULLVAR | sed "s/=.*//g") + echo "[secret-init] Evaluating ${VARNAME}" + + # set SECRETFILE to the contents of the variable + # Use 'eval hack' for indirect expansion in sh: https://unix.stackexchange.com/questions/111618/indirect-variable-expansion-in-posix-as-done-in-bash + # WARNING: It's not foolproof is an arbitrary command injection vulnerability + eval SECRETFILE="\$${VARNAME}" + + # echo "[secret-init] Setting SECRETFILE to ${SECRETFILE} ..." # DEBUG - rm for prod! + + # if SECRETFILE exists + if [[ -f ${SECRETFILE} ]]; then + # strip the appended "__FILE" from environmental variable name + STRIPVAR=$(echo $VARNAME | sed "s/__FILE//g") + # echo "[secret-init] Set STRIPVAR to ${STRIPVAR}" # DEBUG - rm for prod! + + # set value to contents of secretfile + eval ${STRIPVAR}=$(cat "${SECRETFILE}") + # echo "[secret_init] Set ${STRIPVAR} to $(eval echo \$${STRIPVAR})" # DEBUG - rm for prod! + + export "${STRIPVAR}" + echo "[secret-init] Success! 
${STRIPVAR} set from ${VARNAME}" + + else + echo "[secret-init] ERROR: Cannot find secret in ${VARNAME}" + fi +done diff --git a/scripts/archive/01-add_user.sh b/scripts/archive/01-add_user.sh new file mode 100644 index 0000000..567b070 --- /dev/null +++ b/scripts/archive/01-add_user.sh @@ -0,0 +1,38 @@ +#!/usr/bin/with-contenv bash + +PUID=${PUID:-911} +PGID=${PGID:-911} + +groupmod -o -g "$PGID" abc +usermod -o -u "$PUID" abc + +if [ "$(date +%Y)" == "1970" ] && [ "$(uname -m)" == "armv7l" ]; then + echo ' +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + +Your DockerHost is most likely running an outdated version of libseccomp + +To fix this, please visit https://docs.linuxserver.io/faq#libseccomp + +Some apps might not behave correctly without this + +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +' +fi + +echo ' +Cribbed from https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/10-adduser +-------------------------------------' + +echo ' +------------------------------------- +GID/UID +-------------------------------------' +echo " +User uid: $(id -u abc) +User gid: $(id -g abc) +------------------------------------- +" +chown abc:abc /app +chown abc:abc /config +chown abc:abc /letsencrypt \ No newline at end of file diff --git a/scripts/archive/init.sh b/scripts/archive/init.sh new file mode 100644 index 0000000..121572c --- /dev/null +++ b/scripts/archive/init.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# run scripts in /app/cont-init.d +# execute any cont-init scripts +for i in /app/cont-init.d/*sh +do + if [ -e "${i}" ]; then + echo "[i] cont-init.d - processing $i" + . 
"${i}" + fi +done + +# run cron in foreground for monitoring certs +service cron start +cron -f \ No newline at end of file diff --git a/scripts/backups/deploy-convert-certs.sh b/scripts/backups/deploy-convert-certs.sh new file mode 100644 index 0000000..f68059d --- /dev/null +++ b/scripts/backups/deploy-convert-certs.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# convert to fullchain.pem and privkey.pem to tls.crt and tls.key + +# Identify key path +. /config/.donoteditthisfile.conf +if [ "${ORIGONLY_SUBDOMAINS}" = "true" ] && [ ! "${ORIGSUBDOMAINS}" = "wildcard" ]; then + ORIGDOMAIN="$(echo "${ORIGSUBDOMAINS}" | tr ',' ' ' | awk '{print $1}').${ORIGTLD}" + ORIGKEYPATH="/etc/letsencrypt/live/"${ORIGDOMAIN}"" +else + ORIGKEYPATH="/etc/letsencrypt/live/"${ORIGTLD}"" +fi + +# convert pems to cert and key +echo "Converting to tls.crt and tls.key ..." +if [ ! -f "${ORIGKEYPATH}/fullchain.pem" ] || [ ! -f "${ORIGKEYPATH}/privkey.pem" ]; then + echo "Error: fullchain.pem or privkey.pem not found in ${ORIGKEYPATH}" + sleep infinity +else + openssl crl2pkcs7 -nocrl \ + -certfile "${ORIGKEYPATH}"/fullchain.pem | openssl pkcs7 -print_certs \ + -out "${ORIGKEYPATH}"/tls.crt + # openssl x509 -outform der -in fullchain.pem -out tls.crt + # openssl pkey -outform der -in privkey.pem -out tls.key + openssl rsa \ + -in "${ORIGKEYPATH}"/privkey.pem \ + -out "${ORIGKEYPATH}"/tls.key + + # allow read all for tls.crt and tls.key + chmod 644 "${ORIGKEYPATH}"/tls.* + fi diff --git a/scripts/backups/root b/scripts/backups/root new file mode 100644 index 0000000..6e34992 --- /dev/null +++ b/scripts/backups/root @@ -0,0 +1,9 @@ +# do daily/weekly/monthly maintenance +# min hour day month weekday command +*/15 * * * * run-parts /etc/periodic/15min +0 * * * * run-parts /etc/periodic/hourly +0 2 * * * run-parts /etc/periodic/daily +0 3 * * 6 run-parts /etc/periodic/weekly +0 5 1 * * run-parts /etc/periodic/monthly +# renew letsencrypt certs +8 2 * * * /app/le-renew.sh >> 
/config/log/letsencrypt/letsencrypt.log 2>&1 \ No newline at end of file diff --git a/scripts/buildx.sh b/scripts/buildx.sh new file mode 100755 index 0000000..9987549 --- /dev/null +++ b/scripts/buildx.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +REGISTRY="ahgraber" +TAG=${1:-"test"} + +# clone/update keycloak container instructions +DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + +# buildx +docker buildx create --name "${BUILDX_NAME:-certbot}" || echo +docker buildx use "${BUILDX_NAME:-certbot}" + +docker buildx build \ + -f Dockerfile \ + -t ${REGISTRY}/certbot:${TAG} \ + --platform linux/amd64,linux/arm64 \ + --push \ + . + +# cleanup +docker buildx rm "${BUILDX_NAME:-certbot}" +cd ${DIR} \ + && rm -rf ./tmp \ No newline at end of file diff --git a/scripts/install-s6.sh b/scripts/install-s6.sh new file mode 100755 index 0000000..a75388e --- /dev/null +++ b/scripts/install-s6.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Note: This script is designed to be run inside a Docker Build for a container + +S6_OVERLAY_VERSION=1.22.1.0 +TARGETPLATFORM=$1 + +# Determine the correct binary file for the architecture given +case $TARGETPLATFORM in + linux/arm64) + S6_ARCH=aarch64 + ;; + + linux/arm/v7) + S6_ARCH=armhf + ;; + + *) + S6_ARCH=amd64 + ;; +esac + +echo -e "Installing S6-overlay v${S6_OVERLAY_VERSION} for ${TARGETPLATFORM} (${S6_ARCH})" + +curl -L -o "/tmp/s6-overlay-${S6_ARCH}.tar.gz" "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_ARCH}.tar.gz" \ + && tar xzf /tmp/s6-overlay-amd64.tar.gz -C / --exclude="./bin" && \ + tar xzf /tmp/s6-overlay-amd64.tar.gz -C /usr ./bin + +echo -e "S6-overlay install complete." \ No newline at end of file