Compare commits
No commits in common. "master" and "1.6.0-ls1" have entirely different histories.
@@ -1,20 +0,0 @@
# This file is globally distributed to all container image projects from
# https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
# trim_trailing_whitespace may cause unintended issues and should not be globally set true
trim_trailing_whitespace = false

[{Dockerfile*,**.yml}]
indent_style = space
indent_size = 2

[{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}]
indent_style = space
indent_size = 4
.github/CONTRIBUTING.md (123 deletions, vendored)
@@ -1,123 +0,0 @@
# Contributing to swag

## Gotchas

* While contributing, make sure to make all your changes before creating a Pull Request, as our pipeline builds each commit after the PR is opened.
* Read, and fill in, the Pull Request template
* If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR
* If the PR is addressing an existing issue, include `closes #<issue number>` in the body of the PR commit message
* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://discord.gg/YWrKVTn)

## Common files

| File | Use case |
| :----: | --- |
| `Dockerfile` | Dockerfile used to build amd64 images |
| `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures |
| `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures |
| `Jenkinsfile` | This file is a product of our builder and should not be edited directly. This is used to build the image |
| `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above; it only affects the build process |
| `package_versions.txt` | This file is generated as part of the build process and should not be edited directly. It lists all the installed packages and their versions |
| `README.md` | This file is a product of our builder and should not be edited directly. It displays the readme for the repository and image registries |
| `readme-vars.yml` | This file is used to generate the `README.md` |

## Readme

If you would like to change our readme, please __**do not**__ directly edit the readme, as it is auto-generated on each commit.
Instead edit the [readme-vars.yml](https://github.com/linuxserver/docker-swag/edit/master/readme-vars.yml).

These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an ansible play.
Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io/images/docker-swag).

### Fixing typos or clarifying the text in the readme

There are variables for multiple parts of the readme; the most common ones are:

| Variable | Description |
| :----: | --- |
| `project_blurb` | This is the short excerpt shown above the project logo. |
| `app_setup_block` | This is the text that shows up under "Application Setup" if enabled |

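For illustration only (this snippet is not taken from the repo's actual `readme-vars.yml`), a hypothetical fragment touching those two variables might look like the following; `app_setup_block_enabled` is an assumed toggle based on the "if enabled" wording above, so check the real file for the exact keys:

```yml
# Hypothetical readme-vars.yml excerpt -- keys mirror the table above, values are placeholders
project_blurb: |
  One-line description shown above the project logo in the generated README.
app_setup_block_enabled: true  # assumed toggle; verify against the real readme-vars.yml
app_setup_block: |
  Free-form setup notes rendered under the "Application Setup" heading.
```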
### Parameters

The compose and run examples are also generated from these variables.

We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder.

These are prefixed with `param_` for required parameters, or `opt_param` for optional parameters, except for `cap_add`.
Remember to enable the param if it is currently disabled. This differs between parameters, and can be seen in the reference file.

Devices, environment variables, ports and volumes expect their variables in a certain way.

### Devices

```yml
param_devices:
  - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" }
opt_param_devices:
  - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" }
```

### Environment variables

```yml
param_env_vars:
  - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." }
opt_param_env_vars:
  - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." }
```

### Ports

```yml
param_ports:
  - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" }
opt_param_ports:
  - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" }
```

### Volumes

```yml
param_volumes:
  - { vol_path: "/config", vol_host_path: "</path/to/appdata/config>", desc: "Configuration files." }
opt_param_volumes:
  - { vol_path: "/config", vol_host_path: "</path/to/appdata/config>", desc: "Configuration files." }
```

### Testing template changes

After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR.

## Dockerfiles

We use multiple Dockerfiles in our repos because some CPU architectures need different packages to work.
If you are proposing additional packages to be added, ensure that you add the packages to all the Dockerfiles in alphabetical order.

### Testing your changes

```bash
git clone https://github.com/linuxserver/docker-swag.git
cd docker-swag
docker build \
  --no-cache \
  --pull \
  -t linuxserver/swag:latest .
```

The ARM variants can be built on x86_64 hardware using `multiarch/qemu-user-static`:

```bash
docker run --rm --privileged multiarch/qemu-user-static:register --reset
```

Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`, as shown in the sketch below.
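A minimal sketch of such a build, reusing the flags from the amd64 example above (the image tag here is arbitrary and only for illustration):

```bash
# Build the aarch64 variant from the repo root on a qemu-enabled x86_64 host
docker build \
  --no-cache \
  --pull \
  -f Dockerfile.aarch64 \
  -t linuxserver/swag:arm64v8-latest .
```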

## Update the changelog

If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-swag/tree/master/root), add an entry to the changelog:

```yml
changelogs:
  - { date: "DD.MM.YY:", desc: "Added some love to templates" }
```
.github/ISSUE_TEMPLATE.md (34 additions, vendored, new file)
@@ -0,0 +1,34 @@
[linuxserverurl]: https://linuxserver.io
[][linuxserverurl]

If you are new to Docker or this application our issue tracker is **ONLY** used for reporting bugs or requesting features. Please use [our discord server](https://discord.gg/YWrKVTn) for general support.

<!--- Provide a general summary of the issue in the Title above -->

------------------------------

## Expected Behavior
<!--- Tell us what should happen -->

## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->

## Steps to Reproduce
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.

## Environment
**OS:**
**CPU architecture:** x86_64/arm32/arm64
**How docker service was installed:**
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Command used to create docker container (run/create/compose/screenshot)
<!--- Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container -->

## Docker logs
<!--- Provide a full docker log, output of "docker logs swag" -->
.github/ISSUE_TEMPLATE/config.yml (13 deletions, vendored)
@@ -1,13 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Discord chat support
    url: https://discord.gg/YWrKVTn
    about: Realtime support / chat with the community and the team.

  - name: Discourse discussion forum
    url: https://discourse.linuxserver.io
    about: Post on our community forum.

  - name: Documentation
    url: https://docs.linuxserver.io/images/docker-swag
    about: Documentation - information about all of our containers.
.github/ISSUE_TEMPLATE/issue.bug.yml (77 deletions, vendored)
@@ -1,77 +0,0 @@
# Based on the issue template
name: Bug report
description: Create a report to help us improve
title: "[BUG] <title>"
labels: [Bug]
body:
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search to see if an issue already exists for the bug you encountered.
      options:
        - label: I have searched the existing issues
          required: true
  - type: textarea
    attributes:
      label: Current Behavior
      description: Tell us what happens instead of the expected behavior.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behavior
      description: Tell us what should happen.
    validations:
      required: false
  - type: textarea
    attributes:
      label: Steps To Reproduce
      description: Steps to reproduce the behavior.
      placeholder: |
        1. In this environment...
        2. With this config...
        3. Run '...'
        4. See error...
    validations:
      required: true
  - type: textarea
    attributes:
      label: Environment
      description: |
        examples:
          - **OS**: Ubuntu 20.04
          - **How docker service was installed**: distro's packagemanager
      value: |
        - OS:
        - How docker service was installed:
      render: markdown
    validations:
      required: false
  - type: dropdown
    attributes:
      label: CPU architecture
      options:
        - x86-64
        - arm64
        - armhf
    validations:
      required: true
  - type: textarea
    attributes:
      label: Docker creation
      description: |
        Command used to create docker container
        Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container
      render: bash
    validations:
      required: true
  - type: textarea
    attributes:
      description: |
        Provide a full docker log, output of "docker logs linuxserver.io"
      label: Container logs
      placeholder: |
        Output of `docker logs linuxserver.io`
      render: bash
    validations:
      required: true
.github/ISSUE_TEMPLATE/issue.feature.yml (31 deletions, vendored)
@@ -1,31 +0,0 @@
# Based on the issue template
name: Feature request
description: Suggest an idea for this project
title: "[FEAT] <title>"
labels: [enhancement]
body:
  - type: checkboxes
    attributes:
      label: Is this a new feature request?
      description: Please search to see if a feature request already exists.
      options:
        - label: I have searched the existing issues
          required: true
  - type: textarea
    attributes:
      label: Wanted change
      description: Tell us what you want to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Reason for change
      description: Justify your request, why do you want it, what is the benefit.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Proposed code change
      description: Do you have a potential code change in mind?
    validations:
      required: false
.github/PULL_REQUEST_TEMPLATE.md (10 changes, vendored)
--- master:.github/PULL_REQUEST_TEMPLATE.md
+++ 1.6.0-ls1:.github/PULL_REQUEST_TEMPLATE.md
@@ -2,11 +2,11 @@
[linuxserverurl]: https://linuxserver.io
[][linuxserverurl]


<!--- Before submitting a pull request please check the following -->

-<!--- If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR -->
+<!--- If this is a fix for a typo in code or documentation in the README please file an issue and let us sort it out we do not need a PR -->
<!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ -->
<!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message -->
<!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message -->
@@ -21,11 +21,7 @@
------------------------------

-- [ ] I have read the [contributing](https://github.com/linuxserver/docker-swag/blob/master/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications
+We welcome all PR’s though this doesn’t guarantee it will be accepted.

-------------------------------
-
-<!--- We welcome all PR’s though this doesn’t guarantee it will be accepted. -->
-
## Description:
<!--- Describe your changes in detail -->
.github/workflows/call_issue_pr_tracker.yml (14 deletions, vendored)
@@ -1,14 +0,0 @@
name: Issue & PR Tracker

on:
  issues:
    types: [opened,reopened,labeled,unlabeled]
  pull_request_target:
    types: [opened,reopened,review_requested,review_request_removed,labeled,unlabeled]

jobs:
  manage-project:
    permissions:
      issues: write
    uses: linuxserver/github-workflows/.github/workflows/issue-pr-tracker.yml@v1
    secrets: inherit
.github/workflows/call_issues_cron.yml (13 deletions, vendored)
@@ -1,13 +0,0 @@
name: Mark stale issues and pull requests
on:
  schedule:
    - cron: '35 15 * * *'
  workflow_dispatch:

jobs:
  stale:
    permissions:
      issues: write
      pull-requests: write
    uses: linuxserver/github-workflows/.github/workflows/issues-cron.yml@v1
    secrets: inherit
.github/workflows/external_trigger.yml (96 deletions, vendored)
@@ -1,96 +0,0 @@
name: External Trigger Main

on:
  workflow_dispatch:

jobs:
  external-trigger-master:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3.1.0

      - name: External Trigger
        if: github.ref == 'refs/heads/master'
        run: |
          if [ -n "${{ secrets.PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER }}" ]; then
            echo "**** Github secret PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER is set; skipping trigger. ****"
            exit 0
          fi
          echo "**** External trigger running off of master branch. To disable this trigger, set a Github secret named \"PAUSE_EXTERNAL_TRIGGER_SWAG_MASTER\". ****"
          echo "**** Retrieving external version ****"
          EXT_RELEASE=$(curl -sL "https://pypi.python.org/pypi/certbot/json" |jq -r '. | .info.version')
          if [ -z "${EXT_RELEASE}" ] || [ "${EXT_RELEASE}" == "null" ]; then
            echo "**** Can't retrieve external version, exiting ****"
            FAILURE_REASON="Can't retrieve external version for swag branch master"
            GHA_TRIGGER_URL="https://github.com/linuxserver/docker-swag/actions/runs/${{ github.run_id }}"
            curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680,
              "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n**Trigger URL:** '"${GHA_TRIGGER_URL}"' \n"}],
              "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
            exit 1
          fi
          EXT_RELEASE=$(echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g')
          echo "**** External version: ${EXT_RELEASE} ****"
          echo "**** Retrieving last pushed version ****"
          image="linuxserver/swag"
          tag="latest"
          token=$(curl -sX GET \
            "https://ghcr.io/token?scope=repository%3Alinuxserver%2Fswag%3Apull" \
            | jq -r '.token')
          multidigest=$(curl -s \
            --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
            --header "Authorization: Bearer ${token}" \
            "https://ghcr.io/v2/${image}/manifests/${tag}" \
            | jq -r 'first(.manifests[].digest)')
          digest=$(curl -s \
            --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
            --header "Authorization: Bearer ${token}" \
            "https://ghcr.io/v2/${image}/manifests/${multidigest}" \
            | jq -r '.config.digest')
          image_info=$(curl -sL \
            --header "Authorization: Bearer ${token}" \
            "https://ghcr.io/v2/${image}/blobs/${digest}")
          if [[ $(echo $image_info | jq -r '.container_config') == "null" ]]; then
            image_info=$(echo $image_info | jq -r '.config')
          else
            image_info=$(echo $image_info | jq -r '.container_config')
          fi
          IMAGE_RELEASE=$(echo ${image_info} | jq -r '.Labels.build_version' | awk '{print $3}')
          IMAGE_VERSION=$(echo ${IMAGE_RELEASE} | awk -F'-ls' '{print $1}')
          if [ -z "${IMAGE_VERSION}" ]; then
            echo "**** Can't retrieve last pushed version, exiting ****"
            FAILURE_REASON="Can't retrieve last pushed version for swag tag latest"
            curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680,
              "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n"}],
              "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
            exit 1
          fi
          echo "**** Last pushed version: ${IMAGE_VERSION} ****"
          if [ "${EXT_RELEASE}" == "${IMAGE_VERSION}" ]; then
            echo "**** Version ${EXT_RELEASE} already pushed, exiting ****"
            exit 0
          elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then
            echo "**** New version ${EXT_RELEASE} found; but there already seems to be an active build on Jenkins; exiting ****"
            exit 0
          else
            echo "**** New version ${EXT_RELEASE} found; old version was ${IMAGE_VERSION}. Triggering new build ****"
            response=$(curl -iX POST \
              https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/buildWithParameters?PACKAGE_CHECK=false \
              --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|")
            echo "**** Jenkins job queue url: ${response%$'\r'} ****"
            echo "**** Sleeping 10 seconds until job starts ****"
            sleep 10
            buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url')
            buildurl="${buildurl%$'\r'}"
            echo "**** Jenkins job build url: ${buildurl} ****"
            echo "**** Attempting to change the Jenkins job description ****"
            curl -iX POST \
              "${buildurl}submitDescription" \
              --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \
              --data-urlencode "description=GHA external trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
              --data-urlencode "Submit=Submit"
            echo "**** Notifying Discord ****"
            TRIGGER_REASON="A version change was detected for swag tag latest. Old version:${IMAGE_VERSION} New version:${EXT_RELEASE}"
            curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903,
              "description": "**Build Triggered** \n**Reason:** '"${TRIGGER_REASON}"' \n**Build URL:** '"${buildurl}display/redirect"' \n"}],
              "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
          fi
.github/workflows/external_trigger_scheduler.yml (43 deletions, vendored)
@@ -1,43 +0,0 @@
name: External Trigger Scheduler

on:
  schedule:
    - cron: '50 * * * *'
  workflow_dispatch:

jobs:
  external-trigger-scheduler:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3.1.0
        with:
          fetch-depth: '0'

      - name: External Trigger Scheduler
        run: |
          echo "**** Branches found: ****"
          git for-each-ref --format='%(refname:short)' refs/remotes
          echo "**** Pulling the yq docker image ****"
          docker pull ghcr.io/linuxserver/yq
          for br in $(git for-each-ref --format='%(refname:short)' refs/remotes)
          do
            br=$(echo "$br" | sed 's|origin/||g')
            echo "**** Evaluating branch ${br} ****"
            ls_branch=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/jenkins-vars.yml \
              | docker run --rm -i --entrypoint yq ghcr.io/linuxserver/yq -r .ls_branch)
            if [ "$br" == "$ls_branch" ]; then
              echo "**** Branch ${br} appears to be live; checking workflow. ****"
              if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then
                echo "**** Workflow exists. Triggering external trigger workflow for branch ${br} ****."
                curl -iX POST \
                  -H "Authorization: token ${{ secrets.CR_PAT }}" \
                  -H "Accept: application/vnd.github.v3+json" \
                  -d "{\"ref\":\"refs/heads/${br}\"}" \
                  https://api.github.com/repos/linuxserver/docker-swag/actions/workflows/external_trigger.yml/dispatches
              else
                echo "**** Workflow doesn't exist; skipping trigger. ****"
              fi
            else
              echo "**** ${br} appears to be a dev branch; skipping trigger. ****"
            fi
          done
.github/workflows/greetings.yml (6 changes, vendored)
--- master:.github/workflows/greetings.yml
+++ 1.6.0-ls1:.github/workflows/greetings.yml
@@ -1,6 +1,6 @@
name: Greetings

-on: [pull_request_target, issues]
+on: [pull_request, issues]

jobs:
  greeting:
@@ -8,6 +8,6 @@ jobs:
    steps:
      - uses: actions/first-interaction@v1
        with:
-          issue-message: 'Thanks for opening your first issue here! Be sure to follow the relevant issue templates, or risk having this issue marked as invalid.'
+          issue-message: 'Thanks for opening your first issue here! Be sure to follow the [issue template](https://github.com/linuxserver/docker-swag/.github/ISSUE_TEMPLATE.md)!'
-          pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-swag/blob/master/.github/PULL_REQUEST_TEMPLATE.md)!'
+          pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-swag/.github/PULL_REQUEST_TEMPLATE.md)!'
          repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/package_trigger.yml (38 deletions, vendored)
@@ -1,38 +0,0 @@
name: Package Trigger Main

on:
  workflow_dispatch:

jobs:
  package-trigger-master:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3.1.0

      - name: Package Trigger
        if: github.ref == 'refs/heads/master'
        run: |
          if [ -n "${{ secrets.PAUSE_PACKAGE_TRIGGER_SWAG_MASTER }}" ]; then
            echo "**** Github secret PAUSE_PACKAGE_TRIGGER_SWAG_MASTER is set; skipping trigger. ****"
            exit 0
          fi
          if [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then
            echo "**** There already seems to be an active build on Jenkins; skipping package trigger ****"
            exit 0
          fi
          echo "**** Package trigger running off of master branch. To disable, set a Github secret named \"PAUSE_PACKAGE_TRIGGER_SWAG_MASTER\". ****"
          response=$(curl -iX POST \
            https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/buildWithParameters?PACKAGE_CHECK=true \
            --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|")
          echo "**** Jenkins job queue url: ${response%$'\r'} ****"
          echo "**** Sleeping 10 seconds until job starts ****"
          sleep 10
          buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url')
          buildurl="${buildurl%$'\r'}"
          echo "**** Jenkins job build url: ${buildurl} ****"
          echo "**** Attempting to change the Jenkins job description ****"
          curl -iX POST \
            "${buildurl}submitDescription" \
            --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \
            --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
            --data-urlencode "Submit=Submit"
.github/workflows/package_trigger_scheduler.yml (50 deletions, vendored)
@@ -1,50 +0,0 @@
name: Package Trigger Scheduler

on:
  schedule:
    - cron: '1 3 * * 6'
  workflow_dispatch:

jobs:
  package-trigger-scheduler:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3.1.0
        with:
          fetch-depth: '0'

      - name: Package Trigger Scheduler
        run: |
          echo "**** Branches found: ****"
          git for-each-ref --format='%(refname:short)' refs/remotes
          echo "**** Pulling the yq docker image ****"
          docker pull ghcr.io/linuxserver/yq
          for br in $(git for-each-ref --format='%(refname:short)' refs/remotes)
          do
            br=$(echo "$br" | sed 's|origin/||g')
            echo "**** Evaluating branch ${br} ****"
            ls_branch=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/jenkins-vars.yml \
              | docker run --rm -i --entrypoint yq ghcr.io/linuxserver/yq -r .ls_branch)
            if [ "${br}" == "${ls_branch}" ]; then
              echo "**** Branch ${br} appears to be live; checking workflow. ****"
              if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-swag/${br}/.github/workflows/package_trigger.yml > /dev/null 2>&1; then
                echo "**** Workflow exists. Triggering package trigger workflow for branch ${br}. ****"
                triggered_branches="${triggered_branches}${br} "
                curl -iX POST \
                  -H "Authorization: token ${{ secrets.CR_PAT }}" \
                  -H "Accept: application/vnd.github.v3+json" \
                  -d "{\"ref\":\"refs/heads/${br}\"}" \
                  https://api.github.com/repos/linuxserver/docker-swag/actions/workflows/package_trigger.yml/dispatches
                sleep 30
              else
                echo "**** Workflow doesn't exist; skipping trigger. ****"
              fi
            else
              echo "**** ${br} appears to be a dev branch; skipping trigger. ****"
            fi
          done
          echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****"
          echo "**** Notifying Discord ****"
          curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903,
            "description": "**Package Check Build(s) Triggered for swag** \n**Branch(es):** '"${triggered_branches}"' \n**Build URL:** '"https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-swag/activity/"' \n"}],
            "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
.github/workflows/permissions.yml (10 deletions, vendored)
@@ -1,10 +0,0 @@
name: Permission check
on:
  pull_request_target:
    paths:
      - '**/run'
      - '**/finish'
      - '**/check'
jobs:
  permission_check:
    uses: linuxserver/github-workflows/.github/workflows/init-svc-executable-permissions.yml@v1
.github/workflows/stale.yml (23 additions, vendored, new file)
@@ -0,0 +1,23 @@
name: Mark stale issues and pull requests

on:
  schedule:
    - cron: "30 1 * * *"

jobs:
  stale:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/stale@v1
        with:
          stale-issue-message: "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions."
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions."
          stale-issue-label: 'no-issue-activity'
          stale-pr-label: 'no-pr-activity'
          days-before-stale: 30
          days-before-close: 365
          exempt-issue-labels: 'awaiting-approval,work-in-progress'
          exempt-pr-labels: 'awaiting-approval,work-in-progress'
          repo-token: ${{ secrets.GITHUB_TOKEN }}
.gitignore (1 deletion, vendored)
@@ -1 +0,0 @@
.jenkins-external
Dockerfile (307 changes)
--- master:Dockerfile
+++ 1.6.0-ls1:Dockerfile
@@ -1,186 +1,149 @@
-# syntax=docker/dockerfile:1
+FROM lsiobase/nginx:3.12

-FROM ghcr.io/linuxserver/baseimage-alpine-nginx:3.17

# set version label
ARG BUILD_DATE
ARG VERSION
ARG CERTBOT_VERSION
LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
-LABEL maintainer="nemchik"
+LABEL maintainer="aptalca"

# environment settings
ENV DHLEVEL=2048 ONLY_SUBDOMAINS=false AWS_CONFIG_FILE=/config/dns-conf/route53.ini
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS=2

RUN \
  echo "**** install build packages ****" && \
  apk add --no-cache --virtual=build-dependencies \
-    build-base \
+    g++ \
-    cargo \
+    gcc \
    libffi-dev \
-    libxml2-dev \
+    openssl-dev \
-    libxslt-dev \
+    python3-dev && \
-    openssl-dev \
+  echo "**** install runtime packages ****" && \
-    python3-dev && \
+  apk add --no-cache --upgrade \
-  echo "**** install runtime packages ****" && \
+    curl \
-  apk add --no-cache --upgrade \
+    fail2ban \
-    fail2ban \
+    gnupg \
-    gnupg \
+    memcached \
-    memcached \
+    nginx \
-    nginx-mod-http-brotli \
+    nginx-mod-http-echo \
-    nginx-mod-http-dav-ext \
+    nginx-mod-http-fancyindex \
-    nginx-mod-http-echo \
+    nginx-mod-http-geoip2 \
-    nginx-mod-http-fancyindex \
+    nginx-mod-http-headers-more \
-    nginx-mod-http-geoip2 \
+    nginx-mod-http-image-filter \
-    nginx-mod-http-headers-more \
+    nginx-mod-http-lua \
-    nginx-mod-http-image-filter \
+    nginx-mod-http-lua-upstream \
-    nginx-mod-http-perl \
+    nginx-mod-http-nchan \
-    nginx-mod-http-redis2 \
+    nginx-mod-http-perl \
-    nginx-mod-http-set-misc \
+    nginx-mod-http-redis2 \
-    nginx-mod-http-upload-progress \
+    nginx-mod-http-set-misc \
-    nginx-mod-http-xslt-filter \
+    nginx-mod-http-upload-progress \
-    nginx-mod-mail \
+    nginx-mod-http-xslt-filter \
-    nginx-mod-rtmp \
+    nginx-mod-mail \
-    nginx-mod-stream \
+    nginx-mod-rtmp \
-    nginx-mod-stream-geoip2 \
+    nginx-mod-stream \
-    nginx-vim \
+    nginx-mod-stream-geoip2 \
-    php81-bcmath \
+    nginx-vim \
-    php81-bz2 \
+    php7-bcmath \
-    php81-ctype \
+    php7-bz2 \
-    php81-curl \
+    php7-ctype \
-    php81-dom \
+    php7-curl \
-    php81-exif \
+    php7-dom \
-    php81-ftp \
+    php7-exif \
-    php81-gd \
+    php7-ftp \
-    php81-gmp \
+    php7-gd \
-    php81-iconv \
+    php7-iconv \
-    php81-imap \
+    php7-imap \
-    php81-intl \
+    php7-intl \
-    php81-ldap \
+    php7-ldap \
-    php81-mysqli \
+    php7-mcrypt \
-    php81-mysqlnd \
+    php7-memcached \
-    php81-opcache \
+    php7-mysqli \
-    php81-pdo_mysql \
+    php7-mysqlnd \
-    php81-pdo_odbc \
+    php7-opcache \
-    php81-pdo_pgsql \
+    php7-pdo_mysql \
-    php81-pdo_sqlite \
+    php7-pdo_odbc \
-    php81-pear \
+    php7-pdo_pgsql \
-    php81-pecl-apcu \
+    php7-pdo_sqlite \
-    php81-pecl-mailparse \
+    php7-pear \
-    php81-pecl-memcached \
+    php7-pecl-apcu \
-    php81-pecl-redis \
+    php7-pecl-redis \
-    php81-pgsql \
+    php7-pgsql \
-    php81-phar \
+    php7-phar \
-    php81-posix \
+    php7-posix \
-    php81-soap \
+    php7-soap \
-    php81-sockets \
+    php7-sockets \
-    php81-sodium \
+    php7-sodium \
-    php81-sqlite3 \
+    php7-sqlite3 \
-    php81-tokenizer \
+    php7-tokenizer \
-    php81-xmlreader \
+    php7-xml \
-    php81-xsl \
+    php7-xmlreader \
-    php81-zip \
+    php7-xmlrpc \
-    whois && \
+    php7-zip \
-  apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing \
+    py3-cryptography \
-    php81-pecl-mcrypt \
+    py3-future \
-    php81-pecl-xmlrpc && \
+    py3-pip \
-  echo "**** install certbot plugins ****" && \
+    whois && \
-  if [ -z ${CERTBOT_VERSION+x} ]; then \
+  echo "**** install certbot plugins ****" && \
-    CERTBOT_VERSION=$(curl -sL https://pypi.python.org/pypi/certbot/json |jq -r '. | .info.version'); \
+  if [ -z ${CERTBOT_VERSION+x} ]; then \
-  fi && \
+    CERTBOT="certbot"; \
-  python3 -m ensurepip && \
+  else \
-  pip3 install -U --no-cache-dir \
+    CERTBOT="certbot==${CERTBOT_VERSION}"; \
-    pip \
+  fi && \
-    wheel && \
+  pip3 install -U \
-  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.17/ \
+    pip && \
-    certbot==${CERTBOT_VERSION} \
+  pip3 install -U \
-    certbot-dns-acmedns \
+    ${CERTBOT} \
    certbot-dns-aliyun \
-    certbot-dns-azure \
+    certbot-dns-cloudflare \
-    certbot-dns-cloudflare \
+    certbot-dns-cloudxns \
    certbot-dns-cpanel \
-    certbot-dns-desec \
+    certbot-dns-digitalocean \
-    certbot-dns-digitalocean \
+    certbot-dns-dnsimple \
-    certbot-dns-directadmin \
+    certbot-dns-dnsmadeeasy \
-    certbot-dns-dnsimple \
+    certbot-dns-domeneshop \
-    certbot-dns-dnsmadeeasy \
+    certbot-dns-google \
-    certbot-dns-dnspod \
+    certbot-dns-inwx \
-    certbot-dns-do \
+    certbot-dns-linode \
-    certbot-dns-domeneshop \
+    certbot-dns-luadns \
-    certbot-dns-duckdns \
+    certbot-dns-nsone \
-    certbot-dns-dynu \
+    certbot-dns-ovh \
-    certbot-dns-gehirn \
+    certbot-dns-rfc2136 \
-    certbot-dns-godaddy \
+    certbot-dns-route53 \
-    certbot-dns-google \
+    certbot-dns-transip \
-    certbot-dns-google-domains \
+    certbot-plugin-gandi \
-    certbot-dns-he \
+    cryptography \
-    certbot-dns-hetzner \
+    requests && \
-    certbot-dns-infomaniak \
+  echo "**** remove unnecessary fail2ban filters ****" && \
-    certbot-dns-inwx \
+  rm \
-    certbot-dns-ionos \
+    /etc/fail2ban/jail.d/alpine-ssh.conf && \
-    certbot-dns-linode \
+  echo "**** copy fail2ban default action and filter to /default ****" && \
-    certbot-dns-loopia \
+  mkdir -p /defaults/fail2ban && \
-    certbot-dns-luadns \
+  mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
-    certbot-dns-netcup \
+  mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
-    certbot-dns-njalla \
+  echo "**** copy proxy confs to /default ****" && \
-    certbot-dns-nsone \
+  mkdir -p /defaults/proxy-confs && \
-    certbot-dns-ovh \
+  curl -o \
-    certbot-dns-porkbun \
+    /tmp/proxy.tar.gz -L \
-    certbot-dns-rfc2136 \
+    "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
-    certbot-dns-route53 \
+  tar xf \
-    certbot-dns-sakuracloud \
+    /tmp/proxy.tar.gz -C \
-    certbot-dns-standalone \
+    /defaults/proxy-confs --strip-components=1 --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
-    certbot-dns-transip \
+  echo "**** configure nginx ****" && \
-    certbot-dns-vultr \
+  rm -f /etc/nginx/conf.d/default.conf && \
-    certbot-plugin-gandi \
+  curl -o \
-    cryptography \
+    /defaults/dhparams.pem -L \
-    future \
+    "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" && \
-    requests && \
+  echo "**** cleanup ****" && \
-  echo "**** enable OCSP stapling from base ****" && \
+  apk del --purge \
-  sed -i \
+    build-dependencies && \
-    's|#ssl_stapling on;|ssl_stapling on;|' \
+  for cleanfiles in *.pyc *.pyo; \
-    /defaults/nginx/ssl.conf.sample && \
+    do \
-  sed -i \
+    find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \
-    's|#ssl_stapling_verify on;|ssl_stapling_verify on;|' \
+    ; done && \
-    /defaults/nginx/ssl.conf.sample && \
+  rm -rf \
-  sed -i \
+    /tmp/* \
-    's|#ssl_trusted_certificate /config/keys/cert.crt;|ssl_trusted_certificate /config/keys/cert.crt;|' \
+    /root/.cache
-    /defaults/nginx/ssl.conf.sample && \
-  echo "**** correct ip6tables legacy issue ****" && \
-  rm \
-    /sbin/ip6tables && \
-  ln -s \
-    /sbin/ip6tables-nft /sbin/ip6tables && \
-  echo "**** remove unnecessary fail2ban filters ****" && \
-  rm \
-    /etc/fail2ban/jail.d/alpine-ssh.conf && \
-  echo "**** copy fail2ban default action and filter to /defaults ****" && \
-  mkdir -p /defaults/fail2ban && \
-  mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
-  mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
-  echo "**** define allowipv6 to silence warning ****" && \
-  sed -i 's/#allowipv6 = auto/allowipv6 = auto/g' /etc/fail2ban/fail2ban.conf && \
-  echo "**** copy proxy confs to /defaults ****" && \
-  mkdir -p \
-    /defaults/nginx/proxy-confs && \
-  curl -o \
-    /tmp/proxy-confs.tar.gz -L \
-    "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
-  tar xf \
-    /tmp/proxy-confs.tar.gz -C \
-    /defaults/nginx/proxy-confs --strip-components=1 --exclude=linux*/.editorconfig --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
-  echo "**** cleanup ****" && \
-  apk del --purge \
-    build-dependencies && \
-  rm -rf \
-    /tmp/* \
-    $HOME/.cache \
-    $HOME/.cargo

-# copy local files
+# add local files
COPY root/ /

-# ports and volumes
-EXPOSE 80 443
-VOLUME /config
Dockerfile.aarch64 (307 changes)
--- master:Dockerfile.aarch64
+++ 1.6.0-ls1:Dockerfile.aarch64
@@ -1,186 +1,149 @@
-# syntax=docker/dockerfile:1
+FROM lsiobase/nginx:arm64v8-3.12

-FROM ghcr.io/linuxserver/baseimage-alpine-nginx:arm64v8-3.17

# set version label
ARG BUILD_DATE
ARG VERSION
ARG CERTBOT_VERSION
LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
-LABEL maintainer="nemchik"
+LABEL maintainer="aptalca"

# environment settings
ENV DHLEVEL=2048 ONLY_SUBDOMAINS=false AWS_CONFIG_FILE=/config/dns-conf/route53.ini
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS=2

RUN \
  echo "**** install build packages ****" && \
  apk add --no-cache --virtual=build-dependencies \
-    build-base \
+    g++ \
-    cargo \
+    gcc \
    libffi-dev \
-    libxml2-dev \
+    openssl-dev \
-    libxslt-dev \
+    python3-dev && \
-    openssl-dev \
+  echo "**** install runtime packages ****" && \
-    python3-dev && \
+  apk add --no-cache --upgrade \
-  echo "**** install runtime packages ****" && \
+    curl \
-  apk add --no-cache --upgrade \
+    fail2ban \
-    fail2ban \
+    gnupg \
-    gnupg \
+    memcached \
-    memcached \
+    nginx \
-    nginx-mod-http-brotli \
+    nginx-mod-http-echo \
-    nginx-mod-http-dav-ext \
+    nginx-mod-http-fancyindex \
-    nginx-mod-http-echo \
+    nginx-mod-http-geoip2 \
-    nginx-mod-http-fancyindex \
+    nginx-mod-http-headers-more \
-    nginx-mod-http-geoip2 \
+    nginx-mod-http-image-filter \
-    nginx-mod-http-headers-more \
+    nginx-mod-http-lua \
-    nginx-mod-http-image-filter \
+    nginx-mod-http-lua-upstream \
-    nginx-mod-http-perl \
+    nginx-mod-http-nchan \
-    nginx-mod-http-redis2 \
+    nginx-mod-http-perl \
-    nginx-mod-http-set-misc \
+    nginx-mod-http-redis2 \
-    nginx-mod-http-upload-progress \
+    nginx-mod-http-set-misc \
-    nginx-mod-http-xslt-filter \
+    nginx-mod-http-upload-progress \
-    nginx-mod-mail \
+    nginx-mod-http-xslt-filter \
-    nginx-mod-rtmp \
+    nginx-mod-mail \
-    nginx-mod-stream \
+    nginx-mod-rtmp \
-    nginx-mod-stream-geoip2 \
+    nginx-mod-stream \
-    nginx-vim \
+    nginx-mod-stream-geoip2 \
-    php81-bcmath \
+    nginx-vim \
-    php81-bz2 \
+    php7-bcmath \
-    php81-ctype \
+    php7-bz2 \
-    php81-curl \
+    php7-ctype \
-    php81-dom \
+    php7-curl \
-    php81-exif \
+    php7-dom \
-    php81-ftp \
+    php7-exif \
-    php81-gd \
+    php7-ftp \
-    php81-gmp \
+    php7-gd \
-    php81-iconv \
+    php7-iconv \
-    php81-imap \
+    php7-imap \
-    php81-intl \
+    php7-intl \
-    php81-ldap \
+    php7-ldap \
-    php81-mysqli \
+    php7-mcrypt \
-    php81-mysqlnd \
+    php7-memcached \
-    php81-opcache \
+    php7-mysqli \
-    php81-pdo_mysql \
+    php7-mysqlnd \
-    php81-pdo_odbc \
+    php7-opcache \
-    php81-pdo_pgsql \
+    php7-pdo_mysql \
-    php81-pdo_sqlite \
+    php7-pdo_odbc \
-    php81-pear \
+    php7-pdo_pgsql \
-    php81-pecl-apcu \
+    php7-pdo_sqlite \
-    php81-pecl-mailparse \
+    php7-pear \
-    php81-pecl-memcached \
+    php7-pecl-apcu \
-    php81-pecl-redis \
+    php7-pecl-redis \
-    php81-pgsql \
+    php7-pgsql \
-    php81-phar \
+    php7-phar \
-    php81-posix \
+    php7-posix \
-    php81-soap \
+    php7-soap \
-    php81-sockets \
+    php7-sockets \
-    php81-sodium \
+    php7-sodium \
-    php81-sqlite3 \
+    php7-sqlite3 \
-    php81-tokenizer \
+    php7-tokenizer \
-    php81-xmlreader \
+    php7-xml \
-    php81-xsl \
+    php7-xmlreader \
-    php81-zip \
+    php7-xmlrpc \
-    whois && \
+    php7-zip \
-  apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing \
+    py3-cryptography \
-    php81-pecl-mcrypt \
+    py3-future \
-    php81-pecl-xmlrpc && \
+    py3-pip \
-  echo "**** install certbot plugins ****" && \
+    whois && \
-  if [ -z ${CERTBOT_VERSION+x} ]; then \
+  echo "**** install certbot plugins ****" && \
-    CERTBOT_VERSION=$(curl -sL https://pypi.python.org/pypi/certbot/json |jq -r '. | .info.version'); \
+  if [ -z ${CERTBOT_VERSION+x} ]; then \
-  fi && \
+    CERTBOT="certbot"; \
-  python3 -m ensurepip && \
+  else \
-  pip3 install -U --no-cache-dir \
+    CERTBOT="certbot==${CERTBOT_VERSION}"; \
-    pip \
+  fi && \
-    wheel && \
+  pip3 install -U \
-  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.17/ \
+    pip && \
-    certbot==${CERTBOT_VERSION} \
+  pip3 install -U \
-    certbot-dns-acmedns \
+    ${CERTBOT} \
    certbot-dns-aliyun \
-    certbot-dns-azure \
+    certbot-dns-cloudflare \
-    certbot-dns-cloudflare \
+    certbot-dns-cloudxns \
    certbot-dns-cpanel \
-    certbot-dns-desec \
+    certbot-dns-digitalocean \
-    certbot-dns-digitalocean \
+    certbot-dns-dnsimple \
-    certbot-dns-directadmin \
+    certbot-dns-dnsmadeeasy \
-    certbot-dns-dnsimple \
+    certbot-dns-domeneshop \
-    certbot-dns-dnsmadeeasy \
+    certbot-dns-google \
-    certbot-dns-dnspod \
+    certbot-dns-inwx \
-    certbot-dns-do \
+    certbot-dns-linode \
-    certbot-dns-domeneshop \
+    certbot-dns-luadns \
-    certbot-dns-duckdns \
+    certbot-dns-nsone \
-    certbot-dns-dynu \
+    certbot-dns-ovh \
-    certbot-dns-gehirn \
+    certbot-dns-rfc2136 \
-    certbot-dns-godaddy \
+    certbot-dns-route53 \
-    certbot-dns-google \
+    certbot-dns-transip \
-    certbot-dns-google-domains \
+    certbot-plugin-gandi \
-    certbot-dns-he \
+    cryptography \
-    certbot-dns-hetzner \
+    requests && \
-    certbot-dns-infomaniak \
+  echo "**** remove unnecessary fail2ban filters ****" && \
-    certbot-dns-inwx \
+  rm \
-    certbot-dns-ionos \
+    /etc/fail2ban/jail.d/alpine-ssh.conf && \
-    certbot-dns-linode \
+  echo "**** copy fail2ban default action and filter to /default ****" && \
-    certbot-dns-loopia \
+  mkdir -p /defaults/fail2ban && \
-    certbot-dns-luadns \
+  mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
-    certbot-dns-netcup \
+  mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
-    certbot-dns-njalla \
+  echo "**** copy proxy confs to /default ****" && \
-    certbot-dns-nsone \
+  mkdir -p /defaults/proxy-confs && \
-    certbot-dns-ovh \
+  curl -o \
-    certbot-dns-porkbun \
+    /tmp/proxy.tar.gz -L \
-    certbot-dns-rfc2136 \
+    "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
-    certbot-dns-route53 \
+  tar xf \
-    certbot-dns-sakuracloud \
+    /tmp/proxy.tar.gz -C \
-    certbot-dns-standalone \
+    /defaults/proxy-confs --strip-components=1 --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
-    certbot-dns-transip \
+  echo "**** configure nginx ****" && \
-    certbot-dns-vultr \
+  rm -f /etc/nginx/conf.d/default.conf && \
-    certbot-plugin-gandi \
+  curl -o \
-    cryptography \
+    /defaults/dhparams.pem -L \
-    future \
+    "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" && \
-    requests && \
+  echo "**** cleanup ****" && \
-  echo "**** enable OCSP stapling from base ****" && \
+  apk del --purge \
-  sed -i \
+    build-dependencies && \
-    's|#ssl_stapling on;|ssl_stapling on;|' \
+  for cleanfiles in *.pyc *.pyo; \
-    /defaults/nginx/ssl.conf.sample && \
+    do \
-  sed -i \
+    find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \
-    's|#ssl_stapling_verify on;|ssl_stapling_verify on;|' \
+    ; done && \
-    /defaults/nginx/ssl.conf.sample && \
+  rm -rf \
-  sed -i \
+    /tmp/* \
-    's|#ssl_trusted_certificate /config/keys/cert.crt;|ssl_trusted_certificate /config/keys/cert.crt;|' \
+    /root/.cache
-    /defaults/nginx/ssl.conf.sample && \
-  echo "**** correct ip6tables legacy issue ****" && \
-  rm \
-    /sbin/ip6tables && \
-  ln -s \
-    /sbin/ip6tables-nft /sbin/ip6tables && \
-  echo "**** remove unnecessary fail2ban filters ****" && \
-  rm \
-    /etc/fail2ban/jail.d/alpine-ssh.conf && \
-  echo "**** copy fail2ban default action and filter to /defaults ****" && \
-  mkdir -p /defaults/fail2ban && \
-  mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
-  mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
-  echo "**** define allowipv6 to silence warning ****" && \
-  sed -i 's/#allowipv6 = auto/allowipv6 = auto/g' /etc/fail2ban/fail2ban.conf && \
-  echo "**** copy proxy confs to /defaults ****" && \
-  mkdir -p \
-    /defaults/nginx/proxy-confs && \
-  curl -o \
-    /tmp/proxy-confs.tar.gz -L \
-    "https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
-  tar xf \
-    /tmp/proxy-confs.tar.gz -C \
-    /defaults/nginx/proxy-confs --strip-components=1 --exclude=linux*/.editorconfig --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
-  echo "**** cleanup ****" && \
-  apk del --purge \
-    build-dependencies && \
-  rm -rf \
-    /tmp/* \
-    $HOME/.cache \
-    $HOME/.cargo

-# copy local files
+# add local files
COPY root/ /

-# ports and volumes
-EXPOSE 80 443
-VOLUME /config
307
Dockerfile.armhf
307
Dockerfile.armhf
@ -1,186 +1,149 @@
|
|||||||
-# syntax=docker/dockerfile:1
-
-FROM ghcr.io/linuxserver/baseimage-alpine-nginx:arm32v7-3.17
+FROM lsiobase/nginx:arm32v7-3.12

 # set version label
 ARG BUILD_DATE
 ARG VERSION
 ARG CERTBOT_VERSION
 LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
-LABEL maintainer="nemchik"
+LABEL maintainer="aptalca"

 # environment settings
 ENV DHLEVEL=2048 ONLY_SUBDOMAINS=false AWS_CONFIG_FILE=/config/dns-conf/route53.ini
 ENV S6_BEHAVIOUR_IF_STAGE2_FAILS=2

 RUN \
 echo "**** install build packages ****" && \
 apk add --no-cache --virtual=build-dependencies \
-build-base \
-cargo \
-libffi-dev \
-libxml2-dev \
-libxslt-dev \
-openssl-dev \
-python3-dev && \
-echo "**** install runtime packages ****" && \
-apk add --no-cache --upgrade \
-fail2ban \
-gnupg \
-memcached \
-nginx-mod-http-brotli \
-nginx-mod-http-dav-ext \
-nginx-mod-http-echo \
-nginx-mod-http-fancyindex \
-nginx-mod-http-geoip2 \
-nginx-mod-http-headers-more \
-nginx-mod-http-image-filter \
-nginx-mod-http-perl \
-nginx-mod-http-redis2 \
-nginx-mod-http-set-misc \
-nginx-mod-http-upload-progress \
-nginx-mod-http-xslt-filter \
-nginx-mod-mail \
-nginx-mod-rtmp \
-nginx-mod-stream \
-nginx-mod-stream-geoip2 \
-nginx-vim \
-php81-bcmath \
-php81-bz2 \
-php81-ctype \
-php81-curl \
-php81-dom \
-php81-exif \
-php81-ftp \
-php81-gd \
-php81-gmp \
-php81-iconv \
-php81-imap \
-php81-intl \
-php81-ldap \
-php81-mysqli \
-php81-mysqlnd \
-php81-opcache \
-php81-pdo_mysql \
-php81-pdo_odbc \
-php81-pdo_pgsql \
-php81-pdo_sqlite \
-php81-pear \
-php81-pecl-apcu \
-php81-pecl-mailparse \
-php81-pecl-memcached \
-php81-pecl-redis \
-php81-pgsql \
-php81-phar \
-php81-posix \
-php81-soap \
-php81-sockets \
-php81-sodium \
-php81-sqlite3 \
-php81-tokenizer \
-php81-xmlreader \
-php81-xsl \
-php81-zip \
-whois && \
-apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing \
-php81-pecl-mcrypt \
-php81-pecl-xmlrpc && \
-echo "**** install certbot plugins ****" && \
-if [ -z ${CERTBOT_VERSION+x} ]; then \
-CERTBOT_VERSION=$(curl -sL https://pypi.python.org/pypi/certbot/json |jq -r '. | .info.version'); \
-fi && \
-python3 -m ensurepip && \
-pip3 install -U --no-cache-dir \
-pip \
-wheel && \
-pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.17/ \
-certbot==${CERTBOT_VERSION} \
-certbot-dns-acmedns \
-certbot-dns-aliyun \
-certbot-dns-azure \
-certbot-dns-cloudflare \
-certbot-dns-cpanel \
-certbot-dns-desec \
-certbot-dns-digitalocean \
-certbot-dns-directadmin \
-certbot-dns-dnsimple \
-certbot-dns-dnsmadeeasy \
-certbot-dns-dnspod \
-certbot-dns-do \
-certbot-dns-domeneshop \
-certbot-dns-duckdns \
-certbot-dns-dynu \
-certbot-dns-gehirn \
-certbot-dns-godaddy \
-certbot-dns-google \
-certbot-dns-google-domains \
-certbot-dns-he \
-certbot-dns-hetzner \
-certbot-dns-infomaniak \
-certbot-dns-inwx \
-certbot-dns-ionos \
-certbot-dns-linode \
-certbot-dns-loopia \
-certbot-dns-luadns \
-certbot-dns-netcup \
-certbot-dns-njalla \
-certbot-dns-nsone \
-certbot-dns-ovh \
-certbot-dns-porkbun \
-certbot-dns-rfc2136 \
-certbot-dns-route53 \
-certbot-dns-sakuracloud \
-certbot-dns-standalone \
-certbot-dns-transip \
-certbot-dns-vultr \
-certbot-plugin-gandi \
-cryptography \
-future \
-requests && \
-echo "**** enable OCSP stapling from base ****" && \
-sed -i \
-'s|#ssl_stapling on;|ssl_stapling on;|' \
-/defaults/nginx/ssl.conf.sample && \
-sed -i \
-'s|#ssl_stapling_verify on;|ssl_stapling_verify on;|' \
-/defaults/nginx/ssl.conf.sample && \
-sed -i \
-'s|#ssl_trusted_certificate /config/keys/cert.crt;|ssl_trusted_certificate /config/keys/cert.crt;|' \
-/defaults/nginx/ssl.conf.sample && \
-echo "**** correct ip6tables legacy issue ****" && \
-rm \
-/sbin/ip6tables && \
-ln -s \
-/sbin/ip6tables-nft /sbin/ip6tables && \
-echo "**** remove unnecessary fail2ban filters ****" && \
-rm \
-/etc/fail2ban/jail.d/alpine-ssh.conf && \
-echo "**** copy fail2ban default action and filter to /defaults ****" && \
-mkdir -p /defaults/fail2ban && \
-mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
-mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
-echo "**** define allowipv6 to silence warning ****" && \
-sed -i 's/#allowipv6 = auto/allowipv6 = auto/g' /etc/fail2ban/fail2ban.conf && \
-echo "**** copy proxy confs to /defaults ****" && \
-mkdir -p \
-/defaults/nginx/proxy-confs && \
-curl -o \
-/tmp/proxy-confs.tar.gz -L \
-"https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
-tar xf \
-/tmp/proxy-confs.tar.gz -C \
-/defaults/nginx/proxy-confs --strip-components=1 --exclude=linux*/.editorconfig --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
-echo "**** cleanup ****" && \
-apk del --purge \
-build-dependencies && \
-rm -rf \
-/tmp/* \
-$HOME/.cache \
-$HOME/.cargo
+g++ \
+gcc \
+libffi-dev \
+openssl-dev \
+python3-dev && \
+echo "**** install runtime packages ****" && \
+apk add --no-cache --upgrade \
+curl \
+fail2ban \
+gnupg \
+memcached \
+nginx \
+nginx-mod-http-echo \
+nginx-mod-http-fancyindex \
+nginx-mod-http-geoip2 \
+nginx-mod-http-headers-more \
+nginx-mod-http-image-filter \
+nginx-mod-http-lua \
+nginx-mod-http-lua-upstream \
+nginx-mod-http-nchan \
+nginx-mod-http-perl \
+nginx-mod-http-redis2 \
+nginx-mod-http-set-misc \
+nginx-mod-http-upload-progress \
+nginx-mod-http-xslt-filter \
+nginx-mod-mail \
+nginx-mod-rtmp \
+nginx-mod-stream \
+nginx-mod-stream-geoip2 \
+nginx-vim \
+php7-bcmath \
+php7-bz2 \
+php7-ctype \
+php7-curl \
+php7-dom \
+php7-exif \
+php7-ftp \
+php7-gd \
+php7-iconv \
+php7-imap \
+php7-intl \
+php7-ldap \
+php7-mcrypt \
+php7-memcached \
+php7-mysqli \
+php7-mysqlnd \
+php7-opcache \
+php7-pdo_mysql \
+php7-pdo_odbc \
+php7-pdo_pgsql \
+php7-pdo_sqlite \
+php7-pear \
+php7-pecl-apcu \
+php7-pecl-redis \
+php7-pgsql \
+php7-phar \
+php7-posix \
+php7-soap \
+php7-sockets \
+php7-sodium \
+php7-sqlite3 \
+php7-tokenizer \
+php7-xml \
+php7-xmlreader \
+php7-xmlrpc \
+php7-zip \
+py3-cryptography \
+py3-future \
+py3-pip \
+whois && \
+echo "**** install certbot plugins ****" && \
+if [ -z ${CERTBOT_VERSION+x} ]; then \
+CERTBOT="certbot"; \
+else \
+CERTBOT="certbot==${CERTBOT_VERSION}"; \
+fi && \
+pip3 install -U \
+pip && \
+pip3 install -U \
+${CERTBOT} \
+certbot-dns-aliyun \
+certbot-dns-cloudflare \
+certbot-dns-cloudxns \
+certbot-dns-cpanel \
+certbot-dns-digitalocean \
+certbot-dns-dnsimple \
+certbot-dns-dnsmadeeasy \
+certbot-dns-domeneshop \
+certbot-dns-google \
+certbot-dns-inwx \
+certbot-dns-linode \
+certbot-dns-luadns \
+certbot-dns-nsone \
+certbot-dns-ovh \
+certbot-dns-rfc2136 \
+certbot-dns-route53 \
+certbot-dns-transip \
+certbot-plugin-gandi \
+cryptography \
+requests && \
+echo "**** remove unnecessary fail2ban filters ****" && \
+rm \
+/etc/fail2ban/jail.d/alpine-ssh.conf && \
+echo "**** copy fail2ban default action and filter to /default ****" && \
+mkdir -p /defaults/fail2ban && \
+mv /etc/fail2ban/action.d /defaults/fail2ban/ && \
+mv /etc/fail2ban/filter.d /defaults/fail2ban/ && \
+echo "**** copy proxy confs to /default ****" && \
+mkdir -p /defaults/proxy-confs && \
+curl -o \
+/tmp/proxy.tar.gz -L \
+"https://github.com/linuxserver/reverse-proxy-confs/tarball/master" && \
+tar xf \
+/tmp/proxy.tar.gz -C \
+/defaults/proxy-confs --strip-components=1 --exclude=linux*/.gitattributes --exclude=linux*/.github --exclude=linux*/.gitignore --exclude=linux*/LICENSE && \
+echo "**** configure nginx ****" && \
+rm -f /etc/nginx/conf.d/default.conf && \
+curl -o \
+/defaults/dhparams.pem -L \
+"https://lsio.ams3.digitaloceanspaces.com/dhparams.pem" && \
+echo "**** cleanup ****" && \
+apk del --purge \
+build-dependencies && \
+for cleanfiles in *.pyc *.pyo; \
+do \
+find /usr/lib/python3.* -iname "${cleanfiles}" -exec rm -f '{}' + \
+; done && \
+rm -rf \
+/tmp/* \
+/root/.cache

-# copy local files
+# add local files
 COPY root/ /

-# ports and volumes
-EXPOSE 80 443
-VOLUME /config
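A note on the certbot pin in the master column above: when the CERTBOT_VERSION build arg is not supplied, the RUN step resolves it from the PyPI JSON API before pinning the pip install. A minimal standalone sketch of that fallback, assuming curl and jq are available (the final echo is illustrative only and not part of the image build):

    if [ -z "${CERTBOT_VERSION+x}" ]; then
        # same curl/jq calls as the Dockerfile uses to look up the latest release
        CERTBOT_VERSION=$(curl -sL https://pypi.python.org/pypi/certbot/json | jq -r '. | .info.version')
    fi
    echo "would pin: certbot==${CERTBOT_VERSION}"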
512 Jenkinsfile (vendored)
@@ -16,7 +16,6 @@ pipeline {
 GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab')
 GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0')
 GITLAB_NAMESPACE=credentials('gitlab-namespace-id')
-SCARF_TOKEN=credentials('scarf_api_key')
 EXT_PIP = 'certbot'
 BUILD_VERSION_ARG = 'CERTBOT_VERSION'
 LS_USER = 'linuxserver'
@@ -43,7 +42,7 @@ pipeline {
 script{
 env.EXIT_STATUS = ''
 env.LS_RELEASE = sh(
-script: '''docker run --rm ghcr.io/linuxserver/alexeiled-skopeo sh -c 'skopeo inspect docker://docker.io/'${DOCKERHUB_IMAGE}':latest 2>/dev/null' | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''',
+script: '''docker run --rm alexeiled/skopeo sh -c 'skopeo inspect docker://docker.io/'${DOCKERHUB_IMAGE}':latest 2>/dev/null' | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''',
 returnStdout: true).trim()
 env.LS_RELEASE_NOTES = sh(
 script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''',
@ -57,7 +56,7 @@ pipeline {
|
|||||||
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT
|
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT
|
||||||
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/'
|
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/'
|
||||||
env.PULL_REQUEST = env.CHANGE_ID
|
env.PULL_REQUEST = env.CHANGE_ID
|
||||||
env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE .editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml ./.github/workflows/package_trigger.yml ./root/donate.txt'
|
env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE.md ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/greetings.yml ./.github/workflows/stale.yml ./root/donate.txt'
|
||||||
}
|
}
|
||||||
script{
|
script{
|
||||||
env.LS_RELEASE_NUMBER = sh(
|
env.LS_RELEASE_NUMBER = sh(
|
||||||
@@ -117,30 +116,6 @@ pipeline {
 env.EXT_RELEASE_CLEAN = sh(
 script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g' ''',
 returnStdout: true).trim()
-
-def semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)\.(\d+)/
-if (semver.find()) {
-env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}"
-} else {
-semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)(?:\.(\d+))?(.*)/
-if (semver.find()) {
-if (semver[0][3]) {
-env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}"
-} else if (!semver[0][3] && !semver[0][4]) {
-env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${(new Date()).format('YYYYMMdd')}"
-}
-}
-}
-
-if (env.SEMVER != null) {
-if (BRANCH_NAME != "master" && BRANCH_NAME != "main") {
-env.SEMVER = "${env.SEMVER}-${BRANCH_NAME}"
-}
-println("SEMVER: ${env.SEMVER}")
-} else {
-println("No SEMVER detected")
-}
-
 }
 }
 }
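The block removed above (master side) derives a SEMVER tag from EXT_RELEASE_CLEAN: a full major.minor.patch release is used as-is, while a bare major.minor falls back to a date-stamped patch, and non-master branches get the branch name appended. A rough shell rendering of the happy path, illustrative only (the pipeline does this in Groovy with the =~ matcher shown above):

    EXT_RELEASE_CLEAN="2.7.4"   # example value, normally set by the pipeline
    SEMVER=$(echo "${EXT_RELEASE_CLEAN}" | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1)
    if [ -n "${SEMVER}" ]; then
        echo "SEMVER: ${SEMVER}"
    else
        echo "No SEMVER detected"
    fi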
@@ -153,17 +128,14 @@ pipeline {
 steps {
 script{
 env.IMAGE = env.DOCKERHUB_IMAGE
-env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME
+env.GITHUBIMAGE = 'docker.pkg.github.com/' + env.LS_USER + '/' + env.LS_REPO + '/' + env.CONTAINER_NAME
 env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME
-env.QUAYIMAGE = 'quay.io/linuxserver.io/' + env.CONTAINER_NAME
 if (env.MULTIARCH == 'true') {
 env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
 } else {
 env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
 }
-env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
 env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
-env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
 }
 }
 }
@@ -176,17 +148,14 @@ pipeline {
 steps {
 script{
 env.IMAGE = env.DEV_DOCKERHUB_IMAGE
-env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME
+env.GITHUBIMAGE = 'docker.pkg.github.com/' + env.LS_USER + '/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME
 env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME
-env.QUAYIMAGE = 'quay.io/linuxserver.io/lsiodev-' + env.CONTAINER_NAME
 if (env.MULTIARCH == 'true') {
 env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
 } else {
 env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
 }
-env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
 env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
-env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
 env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/'
 }
 }
@@ -199,17 +168,14 @@ pipeline {
 steps {
 script{
 env.IMAGE = env.PR_DOCKERHUB_IMAGE
-env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME
+env.GITHUBIMAGE = 'docker.pkg.github.com/' + env.LS_USER + '/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME
 env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME
-env.QUAYIMAGE = 'quay.io/linuxserver.io/lspipepr-' + env.CONTAINER_NAME
 if (env.MULTIARCH == 'true') {
 env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
 } else {
 env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
 }
-env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
 env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
-env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
 env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST
 env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/'
 }
@@ -222,22 +188,25 @@ pipeline {
|
|||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
withCredentials([
|
withCredentials([
|
||||||
string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'),
|
string(credentialsId: 'spaces-key', variable: 'DO_KEY'),
|
||||||
string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET')
|
string(credentialsId: 'spaces-secret', variable: 'DO_SECRET')
|
||||||
]) {
|
]) {
|
||||||
script{
|
script{
|
||||||
env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml'
|
env.SHELLCHECK_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml'
|
||||||
}
|
}
|
||||||
sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-shellcheck/master/checkrun.sh | /bin/bash'''
|
sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-shellcheck/master/checkrun.sh | /bin/bash'''
|
||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
|
set -e
|
||||||
|
docker pull lsiodev/spaces-file-upload:latest
|
||||||
docker run --rm \
|
docker run --rm \
|
||||||
-v ${WORKSPACE}:/mnt \
|
-e DESTINATION=\"${IMAGE}/${META_TAG}/shellcheck-result.xml\" \
|
||||||
-e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \
|
-e FILE_NAME="shellcheck-result.xml" \
|
||||||
-e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \
|
-e MIMETYPE="text/xml" \
|
||||||
ghcr.io/linuxserver/baseimage-alpine:3.17 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\
|
-v ${WORKSPACE}:/mnt \
|
||||||
apk add --no-cache py3-pip && \
|
-e SECRET_KEY=\"${DO_SECRET}\" \
|
||||||
pip install s3cmd && \
|
-e ACCESS_KEY=\"${DO_KEY}\" \
|
||||||
s3cmd put --no-preserve --acl-public -m text/xml /mnt/shellcheck-result.xml s3://ci-tests.linuxserver.io/${IMAGE}/${META_TAG}/shellcheck-result.xml" || :'''
|
-t lsiodev/spaces-file-upload:latest \
|
||||||
|
python /upload.py'''
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -254,67 +223,20 @@ pipeline {
|
|||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
set -e
|
set -e
|
||||||
TEMPDIR=$(mktemp -d)
|
TEMPDIR=$(mktemp -d)
|
||||||
docker pull ghcr.io/linuxserver/jenkins-builder:latest
|
docker pull linuxserver/jenkins-builder:latest
|
||||||
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins ghcr.io/linuxserver/jenkins-builder:latest
|
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins linuxserver/jenkins-builder:latest
|
||||||
# Stage 1 - Jenkinsfile update
|
|
||||||
if [[ "$(md5sum Jenkinsfile | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile | awk '{ print $1 }')" ]]; then
|
|
||||||
mkdir -p ${TEMPDIR}/repo
|
|
||||||
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
|
|
||||||
cd ${TEMPDIR}/repo/${LS_REPO}
|
|
||||||
git checkout -f master
|
|
||||||
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/
|
|
||||||
git add Jenkinsfile
|
|
||||||
git commit -m 'Bot Updating Templated Files'
|
|
||||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
|
||||||
echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
|
|
||||||
echo "Updating Jenkinsfile"
|
|
||||||
rm -Rf ${TEMPDIR}
|
|
||||||
exit 0
|
|
||||||
else
|
|
||||||
echo "Jenkinsfile is up to date."
|
|
||||||
fi
|
|
||||||
# Stage 2 - Delete old templates
|
|
||||||
OLD_TEMPLATES=".github/ISSUE_TEMPLATE.md .github/ISSUE_TEMPLATE/issue.bug.md .github/ISSUE_TEMPLATE/issue.feature.md .github/workflows/call_invalid_helper.yml .github/workflows/stale.yml"
|
|
||||||
for i in ${OLD_TEMPLATES}; do
|
|
||||||
if [[ -f "${i}" ]]; then
|
|
||||||
TEMPLATES_TO_DELETE="${i} ${TEMPLATES_TO_DELETE}"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
if [[ -n "${TEMPLATES_TO_DELETE}" ]]; then
|
|
||||||
mkdir -p ${TEMPDIR}/repo
|
|
||||||
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
|
|
||||||
cd ${TEMPDIR}/repo/${LS_REPO}
|
|
||||||
git checkout -f master
|
|
||||||
for i in ${TEMPLATES_TO_DELETE}; do
|
|
||||||
git rm "${i}"
|
|
||||||
done
|
|
||||||
git commit -m 'Bot Updating Templated Files'
|
|
||||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
|
||||||
echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
|
|
||||||
echo "Deleting old and deprecated templates"
|
|
||||||
rm -Rf ${TEMPDIR}
|
|
||||||
exit 0
|
|
||||||
else
|
|
||||||
echo "No templates to delete"
|
|
||||||
fi
|
|
||||||
# Stage 3 - Update templates
|
|
||||||
CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8)
|
CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8)
|
||||||
cd ${TEMPDIR}/docker-${CONTAINER_NAME}
|
cd ${TEMPDIR}/docker-${CONTAINER_NAME}
|
||||||
NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8)
|
NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8)
|
||||||
if [[ "${CURRENTHASH}" != "${NEWHASH}" ]] || ! grep -q '.jenkins-external' "${WORKSPACE}/.gitignore" 2>/dev/null; then
|
if [[ "${CURRENTHASH}" != "${NEWHASH}" ]]; then
|
||||||
mkdir -p ${TEMPDIR}/repo
|
mkdir -p ${TEMPDIR}/repo
|
||||||
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
|
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
|
||||||
cd ${TEMPDIR}/repo/${LS_REPO}
|
cd ${TEMPDIR}/repo/${LS_REPO}
|
||||||
git checkout -f master
|
git checkout -f master
|
||||||
cd ${TEMPDIR}/docker-${CONTAINER_NAME}
|
cd ${TEMPDIR}/docker-${CONTAINER_NAME}
|
||||||
mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows
|
mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows
|
||||||
mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE
|
cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/
|
||||||
cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || :
|
|
||||||
cd ${TEMPDIR}/repo/${LS_REPO}/
|
cd ${TEMPDIR}/repo/${LS_REPO}/
|
||||||
if ! grep -q '.jenkins-external' .gitignore 2>/dev/null; then
|
|
||||||
echo ".jenkins-external" >> .gitignore
|
|
||||||
git add .gitignore
|
|
||||||
fi
|
|
||||||
git add ${TEMPLATED_FILES}
|
git add ${TEMPLATED_FILES}
|
||||||
git commit -m 'Bot Updating Templated Files'
|
git commit -m 'Bot Updating Templated Files'
|
||||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
||||||
@@ -324,34 +246,13 @@ pipeline {
|
|||||||
fi
|
fi
|
||||||
mkdir -p ${TEMPDIR}/gitbook
|
mkdir -p ${TEMPDIR}/gitbook
|
||||||
git clone https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/gitbook/docker-documentation
|
git clone https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/gitbook/docker-documentation
|
||||||
if [[ ("${BRANCH_NAME}" == "master") || ("${BRANCH_NAME}" == "main") ]] && [[ (! -f ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then
|
if [[ "${BRANCH_NAME}" == "master" ]] && [[ (! -f ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then
|
||||||
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md ${TEMPDIR}/gitbook/docker-documentation/images/
|
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md ${TEMPDIR}/gitbook/docker-documentation/images/
|
||||||
cd ${TEMPDIR}/gitbook/docker-documentation/
|
cd ${TEMPDIR}/gitbook/docker-documentation/
|
||||||
git add images/docker-${CONTAINER_NAME}.md
|
git add images/docker-${CONTAINER_NAME}.md
|
||||||
git commit -m 'Bot Updating Documentation'
|
git commit -m 'Bot Updating Documentation'
|
||||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git --all
|
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git --all
|
||||||
fi
|
fi
|
||||||
mkdir -p ${TEMPDIR}/unraid
|
|
||||||
git clone https://github.com/linuxserver/docker-templates.git ${TEMPDIR}/unraid/docker-templates
|
|
||||||
git clone https://github.com/linuxserver/templates.git ${TEMPDIR}/unraid/templates
|
|
||||||
if [[ -f ${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-logo.png ]]; then
|
|
||||||
sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-logo.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml
|
|
||||||
fi
|
|
||||||
if [[ ("${BRANCH_NAME}" == "master") || ("${BRANCH_NAME}" == "main") ]] && [[ (! -f ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml) || ("$(md5sum ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml | awk '{ print $1 }')") ]]; then
|
|
||||||
cd ${TEMPDIR}/unraid/templates/
|
|
||||||
if grep -wq "${CONTAINER_NAME}" ${TEMPDIR}/unraid/templates/unraid/ignore.list; then
|
|
||||||
echo "Image is on the ignore list, marking Unraid template as deprecated"
|
|
||||||
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/
|
|
||||||
git add -u unraid/${CONTAINER_NAME}.xml
|
|
||||||
git mv unraid/${CONTAINER_NAME}.xml unraid/deprecated/${CONTAINER_NAME}.xml || :
|
|
||||||
git commit -m 'Bot Moving Deprecated Unraid Template' || :
|
|
||||||
else
|
|
||||||
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/
|
|
||||||
git add unraid/${CONTAINER_NAME}.xml
|
|
||||||
git commit -m 'Bot Updating Unraid Template'
|
|
||||||
fi
|
|
||||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git --all
|
|
||||||
fi
|
|
||||||
rm -Rf ${TEMPDIR}'''
|
rm -Rf ${TEMPDIR}'''
|
||||||
script{
|
script{
|
||||||
env.FILES_UPDATED = sh(
|
env.FILES_UPDATED = sh(
|
||||||
@@ -396,96 +297,31 @@ pipeline {
|
|||||||
"visibility":"public"}' '''
|
"visibility":"public"}' '''
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
/* #######################
|
|
||||||
Scarf.sh package registry
|
|
||||||
####################### */
|
|
||||||
// Add package to Scarf.sh and set permissions
|
|
||||||
stage("Scarf.sh package registry"){
|
|
||||||
when {
|
|
||||||
branch "master"
|
|
||||||
environment name: 'EXIT_STATUS', value: ''
|
|
||||||
}
|
|
||||||
steps{
|
|
||||||
sh '''#! /bin/bash
|
|
||||||
set -e
|
|
||||||
PACKAGE_UUID=$(curl -X GET -H "Authorization: Bearer ${SCARF_TOKEN}" https://scarf.sh/api/v1/organizations/linuxserver-ci/packages | jq -r '.[] | select(.name=="linuxserver/swag") | .uuid')
|
|
||||||
if [ -z "${PACKAGE_UUID}" ]; then
|
|
||||||
echo "Adding package to Scarf.sh"
|
|
||||||
curl -sX POST https://scarf.sh/api/v1/organizations/linuxserver-ci/packages \
|
|
||||||
-H "Authorization: Bearer ${SCARF_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d '{"name":"linuxserver/swag",\
|
|
||||||
"shortDescription":"example description",\
|
|
||||||
"libraryType":"docker",\
|
|
||||||
"website":"https://github.com/linuxserver/docker-swag",\
|
|
||||||
"backendUrl":"https://ghcr.io/linuxserver/swag",\
|
|
||||||
"publicUrl":"https://lscr.io/linuxserver/swag"}' || :
|
|
||||||
else
|
|
||||||
echo "Package already exists on Scarf.sh"
|
|
||||||
fi
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/* ###############
|
/* ###############
|
||||||
Build Container
|
Build Container
|
||||||
############### */
|
############### */
|
||||||
// Build Docker container for push to LS Repo
|
// Build Docker container for push to LS Repo
|
||||||
stage('Build-Single') {
|
stage('Build-Single') {
|
||||||
when {
|
when {
|
||||||
expression {
|
environment name: 'MULTIARCH', value: 'false'
|
||||||
env.MULTIARCH == 'false' || params.PACKAGE_CHECK == 'true'
|
|
||||||
}
|
|
||||||
environment name: 'EXIT_STATUS', value: ''
|
environment name: 'EXIT_STATUS', value: ''
|
||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
echo "Running on node: ${NODE_NAME}"
|
sh "docker build --no-cache --pull -t ${IMAGE}:${META_TAG} \
|
||||||
sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile"
|
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||||
sh "docker buildx build \
|
|
||||||
--label \"org.opencontainers.image.created=${GITHUB_DATE}\" \
|
|
||||||
--label \"org.opencontainers.image.authors=linuxserver.io\" \
|
|
||||||
--label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-swag/packages\" \
|
|
||||||
--label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-swag\" \
|
|
||||||
--label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-swag\" \
|
|
||||||
--label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \
|
|
||||||
--label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \
|
|
||||||
--label \"org.opencontainers.image.vendor=linuxserver.io\" \
|
|
||||||
--label \"org.opencontainers.image.licenses=GPL-3.0-only\" \
|
|
||||||
--label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \
|
|
||||||
--label \"org.opencontainers.image.title=Swag\" \
|
|
||||||
--label \"org.opencontainers.image.description=SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention.\" \
|
|
||||||
--no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \
|
|
||||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Build MultiArch Docker containers for push to LS Repo
|
// Build MultiArch Docker containers for push to LS Repo
|
||||||
stage('Build-Multi') {
|
stage('Build-Multi') {
|
||||||
when {
|
when {
|
||||||
allOf {
|
environment name: 'MULTIARCH', value: 'true'
|
||||||
environment name: 'MULTIARCH', value: 'true'
|
|
||||||
expression { params.PACKAGE_CHECK == 'false' }
|
|
||||||
}
|
|
||||||
environment name: 'EXIT_STATUS', value: ''
|
environment name: 'EXIT_STATUS', value: ''
|
||||||
}
|
}
|
||||||
parallel {
|
parallel {
|
||||||
stage('Build X86') {
|
stage('Build X86') {
|
||||||
steps {
|
steps {
|
||||||
echo "Running on node: ${NODE_NAME}"
|
sh "docker build --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} \
|
||||||
sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile"
|
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||||
sh "docker buildx build \
|
|
||||||
--label \"org.opencontainers.image.created=${GITHUB_DATE}\" \
|
|
||||||
--label \"org.opencontainers.image.authors=linuxserver.io\" \
|
|
||||||
--label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-swag/packages\" \
|
|
||||||
--label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-swag\" \
|
|
||||||
--label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-swag\" \
|
|
||||||
--label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \
|
|
||||||
--label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \
|
|
||||||
--label \"org.opencontainers.image.vendor=linuxserver.io\" \
|
|
||||||
--label \"org.opencontainers.image.licenses=GPL-3.0-only\" \
|
|
||||||
--label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \
|
|
||||||
--label \"org.opencontainers.image.title=Swag\" \
|
|
||||||
--label \"org.opencontainers.image.description=SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention.\" \
|
|
||||||
--no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \
|
|
||||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('Build ARMHF') {
|
stage('Build ARMHF') {
|
||||||
@@ -493,34 +329,28 @@ pipeline {
|
|||||||
label 'ARMHF'
|
label 'ARMHF'
|
||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
echo "Running on node: ${NODE_NAME}"
|
withCredentials([
|
||||||
echo 'Logging into Github'
|
[
|
||||||
sh '''#! /bin/bash
|
$class: 'UsernamePasswordMultiBinding',
|
||||||
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
|
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||||
'''
|
usernameVariable: 'DOCKERUSER',
|
||||||
sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile.armhf"
|
passwordVariable: 'DOCKERPASS'
|
||||||
sh "docker buildx build \
|
]
|
||||||
--label \"org.opencontainers.image.created=${GITHUB_DATE}\" \
|
]) {
|
||||||
--label \"org.opencontainers.image.authors=linuxserver.io\" \
|
echo 'Logging into DockerHub'
|
||||||
--label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-swag/packages\" \
|
sh '''#! /bin/bash
|
||||||
--label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-swag\" \
|
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||||
--label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-swag\" \
|
'''
|
||||||
--label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \
|
sh "docker build --no-cache --pull -f Dockerfile.armhf -t ${IMAGE}:arm32v7-${META_TAG} \
|
||||||
--label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \
|
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||||
--label \"org.opencontainers.image.vendor=linuxserver.io\" \
|
sh "docker tag ${IMAGE}:arm32v7-${META_TAG} lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||||
--label \"org.opencontainers.image.licenses=GPL-3.0-only\" \
|
retry(5) {
|
||||||
--label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \
|
sh "docker push lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||||
--label \"org.opencontainers.image.title=Swag\" \
|
}
|
||||||
--label \"org.opencontainers.image.description=SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention.\" \
|
sh '''docker rmi \
|
||||||
--no-cache --pull -f Dockerfile.armhf -t ${IMAGE}:arm32v7-${META_TAG} --platform=linux/arm/v7 \
|
${IMAGE}:arm32v7-${META_TAG} \
|
||||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} || :'''
|
||||||
sh "docker tag ${IMAGE}:arm32v7-${META_TAG} ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}"
|
|
||||||
retry(5) {
|
|
||||||
sh "docker push ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}"
|
|
||||||
}
|
}
|
||||||
sh '''docker rmi \
|
|
||||||
${IMAGE}:arm32v7-${META_TAG} \
|
|
||||||
ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} || :'''
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('Build ARM64') {
|
stage('Build ARM64') {
|
||||||
@@ -528,34 +358,28 @@ pipeline {
|
|||||||
label 'ARM64'
|
label 'ARM64'
|
||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
echo "Running on node: ${NODE_NAME}"
|
withCredentials([
|
||||||
echo 'Logging into Github'
|
[
|
||||||
sh '''#! /bin/bash
|
$class: 'UsernamePasswordMultiBinding',
|
||||||
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
|
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||||
'''
|
usernameVariable: 'DOCKERUSER',
|
||||||
sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile.aarch64"
|
passwordVariable: 'DOCKERPASS'
|
||||||
sh "docker buildx build \
|
]
|
||||||
--label \"org.opencontainers.image.created=${GITHUB_DATE}\" \
|
]) {
|
||||||
--label \"org.opencontainers.image.authors=linuxserver.io\" \
|
echo 'Logging into DockerHub'
|
||||||
--label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-swag/packages\" \
|
sh '''#! /bin/bash
|
||||||
--label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-swag\" \
|
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||||
--label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-swag\" \
|
'''
|
||||||
--label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \
|
sh "docker build --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} \
|
||||||
--label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \
|
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||||
--label \"org.opencontainers.image.vendor=linuxserver.io\" \
|
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||||
--label \"org.opencontainers.image.licenses=GPL-3.0-only\" \
|
retry(5) {
|
||||||
--label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \
|
sh "docker push lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||||
--label \"org.opencontainers.image.title=Swag\" \
|
}
|
||||||
--label \"org.opencontainers.image.description=SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention.\" \
|
sh '''docker rmi \
|
||||||
--no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \
|
${IMAGE}:arm64v8-${META_TAG} \
|
||||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :'''
|
||||||
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
|
||||||
retry(5) {
|
|
||||||
sh "docker push ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
|
||||||
}
|
}
|
||||||
sh '''docker rmi \
|
|
||||||
${IMAGE}:arm64v8-${META_TAG} \
|
|
||||||
ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :'''
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -571,17 +395,22 @@ pipeline {
|
|||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
set -e
|
set -e
|
||||||
TEMPDIR=$(mktemp -d)
|
TEMPDIR=$(mktemp -d)
|
||||||
if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" == "false" ]; then
|
if [ "${MULTIARCH}" == "true" ]; then
|
||||||
LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG}
|
LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG}
|
||||||
else
|
else
|
||||||
LOCAL_CONTAINER=${IMAGE}:${META_TAG}
|
LOCAL_CONTAINER=${IMAGE}:${META_TAG}
|
||||||
fi
|
fi
|
||||||
touch ${TEMPDIR}/package_versions.txt
|
if [ "${DIST_IMAGE}" == "alpine" ]; then
|
||||||
docker run --rm \
|
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock:ro \
|
apk info -v > /tmp/package_versions.txt && \
|
||||||
-v ${TEMPDIR}:/tmp \
|
sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \
|
||||||
ghcr.io/anchore/syft:latest \
|
chmod 777 /tmp/package_versions.txt'
|
||||||
${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt
|
elif [ "${DIST_IMAGE}" == "ubuntu" ]; then
|
||||||
|
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
|
||||||
|
apt list -qq --installed | sed "s#/.*now ##g" | cut -d" " -f1 > /tmp/package_versions.txt && \
|
||||||
|
sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \
|
||||||
|
chmod 777 /tmp/package_versions.txt'
|
||||||
|
fi
|
||||||
NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 )
|
NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 )
|
||||||
echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github"
|
echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github"
|
||||||
if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then
|
if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then
|
||||||
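For context on the comparison just above: the package tag is simply the first eight hex characters of an md5 over the sorted package list, so any change in an installed package version produces a new tag. A minimal sketch of the Alpine path from the old pipeline (the new pipeline builds the same list with syft instead of apk):

    # inside the built container: list packages, sort, hash, truncate to 8 chars
    apk info -v | sort > /tmp/package_versions.txt
    NEW_PACKAGE_TAG=$(md5sum /tmp/package_versions.txt | cut -c1-8)
    echo "Package tag: ${NEW_PACKAGE_TAG}"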
@@ -616,13 +445,6 @@ pipeline {
|
|||||||
environment name: 'EXIT_STATUS', value: ''
|
environment name: 'EXIT_STATUS', value: ''
|
||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
sh '''#! /bin/bash
|
|
||||||
echo "Packages were updated. Cleaning up the image and exiting."
|
|
||||||
if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" == "false" ]; then
|
|
||||||
docker rmi ${IMAGE}:amd64-${META_TAG}
|
|
||||||
else
|
|
||||||
docker rmi ${IMAGE}:${META_TAG}
|
|
||||||
fi'''
|
|
||||||
script{
|
script{
|
||||||
env.EXIT_STATUS = 'ABORTED'
|
env.EXIT_STATUS = 'ABORTED'
|
||||||
}
|
}
|
||||||
@@ -640,13 +462,6 @@ pipeline {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
sh '''#! /bin/bash
|
|
||||||
echo "There are no package updates. Cleaning up the image and exiting."
|
|
||||||
if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" == "false" ]; then
|
|
||||||
docker rmi ${IMAGE}:amd64-${META_TAG}
|
|
||||||
else
|
|
||||||
docker rmi ${IMAGE}:${META_TAG}
|
|
||||||
fi'''
|
|
||||||
script{
|
script{
|
||||||
env.EXIT_STATUS = 'ABORTED'
|
env.EXIT_STATUS = 'ABORTED'
|
||||||
}
|
}
|
||||||
@@ -663,20 +478,20 @@ pipeline {
|
|||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
withCredentials([
|
withCredentials([
|
||||||
string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'),
|
string(credentialsId: 'spaces-key', variable: 'DO_KEY'),
|
||||||
string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET')
|
string(credentialsId: 'spaces-secret', variable: 'DO_SECRET')
|
||||||
]) {
|
]) {
|
||||||
script{
|
script{
|
||||||
env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html'
|
env.CI_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/index.html'
|
||||||
}
|
}
|
||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
set -e
|
set -e
|
||||||
docker pull ghcr.io/linuxserver/ci:latest
|
docker pull lsiodev/ci:latest
|
||||||
if [ "${MULTIARCH}" == "true" ]; then
|
if [ "${MULTIARCH}" == "true" ]; then
|
||||||
docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}
|
docker pull lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||||
docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||||
docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG}
|
docker tag lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG}
|
||||||
docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
|
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
|
||||||
fi
|
fi
|
||||||
docker run --rm \
|
docker run --rm \
|
||||||
--shm-size=1gb \
|
--shm-size=1gb \
|
||||||
@@ -688,16 +503,16 @@ pipeline {
|
|||||||
-e PORT=\"${CI_PORT}\" \
|
-e PORT=\"${CI_PORT}\" \
|
||||||
-e SSL=\"${CI_SSL}\" \
|
-e SSL=\"${CI_SSL}\" \
|
||||||
-e BASE=\"${DIST_IMAGE}\" \
|
-e BASE=\"${DIST_IMAGE}\" \
|
||||||
-e SECRET_KEY=\"${S3_SECRET}\" \
|
-e SECRET_KEY=\"${DO_SECRET}\" \
|
||||||
-e ACCESS_KEY=\"${S3_KEY}\" \
|
-e ACCESS_KEY=\"${DO_KEY}\" \
|
||||||
-e DOCKER_ENV=\"${CI_DOCKERENV}\" \
|
-e DOCKER_ENV=\"${CI_DOCKERENV}\" \
|
||||||
-e WEB_SCREENSHOT=\"${CI_WEB}\" \
|
-e WEB_SCREENSHOT=\"${CI_WEB}\" \
|
||||||
-e WEB_AUTH=\"${CI_AUTH}\" \
|
-e WEB_AUTH=\"${CI_AUTH}\" \
|
||||||
-e WEB_PATH=\"${CI_WEBPATH}\" \
|
-e WEB_PATH=\"${CI_WEBPATH}\" \
|
||||||
-e DO_REGION="ams3" \
|
-e DO_REGION="ams3" \
|
||||||
-e DO_BUCKET="lsio-ci" \
|
-e DO_BUCKET="lsio-ci" \
|
||||||
-t ghcr.io/linuxserver/ci:latest \
|
-t lsiodev/ci:latest \
|
||||||
python3 test_build.py'''
|
python /ci/ci.py'''
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -717,46 +532,27 @@ pipeline {
|
|||||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||||
usernameVariable: 'DOCKERUSER',
|
usernameVariable: 'DOCKERUSER',
|
||||||
passwordVariable: 'DOCKERPASS'
|
passwordVariable: 'DOCKERPASS'
|
||||||
],
|
|
||||||
[
|
|
||||||
$class: 'UsernamePasswordMultiBinding',
|
|
||||||
credentialsId: 'Quay.io-Robot',
|
|
||||||
usernameVariable: 'QUAYUSER',
|
|
||||||
passwordVariable: 'QUAYPASS'
|
|
||||||
]
|
]
|
||||||
]) {
|
]) {
|
||||||
retry(5) {
|
retry(5) {
|
||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
set -e
|
set -e
|
||||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||||
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
|
echo $GITHUB_TOKEN | docker login docker.pkg.github.com -u LinuxServer-CI --password-stdin
|
||||||
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
|
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
|
||||||
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
|
for PUSHIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${IMAGE}"; do
|
||||||
for PUSHIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${QUAYIMAGE}" "${IMAGE}"; do
|
|
||||||
docker tag ${IMAGE}:${META_TAG} ${PUSHIMAGE}:${META_TAG}
|
docker tag ${IMAGE}:${META_TAG} ${PUSHIMAGE}:${META_TAG}
|
||||||
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:latest
|
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:latest
|
||||||
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:${EXT_RELEASE_TAG}
|
|
||||||
if [ -n "${SEMVER}" ]; then
|
|
||||||
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:${SEMVER}
|
|
||||||
fi
|
|
||||||
docker push ${PUSHIMAGE}:latest
|
docker push ${PUSHIMAGE}:latest
|
||||||
docker push ${PUSHIMAGE}:${META_TAG}
|
docker push ${PUSHIMAGE}:${META_TAG}
|
||||||
docker push ${PUSHIMAGE}:${EXT_RELEASE_TAG}
|
|
||||||
if [ -n "${SEMVER}" ]; then
|
|
||||||
docker push ${PUSHIMAGE}:${SEMVER}
|
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
for DELETEIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${QUAYIMAGE}" "${IMAGE}"; do
|
for DELETEIMAGE in "${GITHUBIMAGE}" "{GITLABIMAGE}" "${IMAGE}"; do
|
||||||
docker rmi \
|
docker rmi \
|
||||||
${DELETEIMAGE}:${META_TAG} \
|
${DELETEIMAGE}:${META_TAG} \
|
||||||
${DELETEIMAGE}:${EXT_RELEASE_TAG} \
|
|
||||||
${DELETEIMAGE}:latest || :
|
${DELETEIMAGE}:latest || :
|
||||||
if [ -n "${SEMVER}" ]; then
|
|
||||||
docker rmi ${DELETEIMAGE}:${SEMVER} || :
|
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
@@ -775,56 +571,33 @@ pipeline {
|
|||||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||||
usernameVariable: 'DOCKERUSER',
|
usernameVariable: 'DOCKERUSER',
|
||||||
passwordVariable: 'DOCKERPASS'
|
passwordVariable: 'DOCKERPASS'
|
||||||
],
|
|
||||||
[
|
|
||||||
$class: 'UsernamePasswordMultiBinding',
|
|
||||||
credentialsId: 'Quay.io-Robot',
|
|
||||||
usernameVariable: 'QUAYUSER',
|
|
||||||
passwordVariable: 'QUAYPASS'
|
|
||||||
]
|
]
|
||||||
]) {
|
]) {
|
||||||
retry(5) {
|
retry(5) {
|
||||||
sh '''#! /bin/bash
|
sh '''#! /bin/bash
|
||||||
set -e
|
set -e
|
||||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||||
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
|
echo $GITHUB_TOKEN | docker login docker.pkg.github.com -u LinuxServer-CI --password-stdin
|
||||||
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
|
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
|
||||||
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
|
|
||||||
if [ "${CI}" == "false" ]; then
|
if [ "${CI}" == "false" ]; then
|
||||||
docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}
|
docker pull lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||||
docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG}
|
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||||
docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
docker tag lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG}
|
||||||
docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
|
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
fi
for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
    docker tag ${IMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG}
    docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-latest
    docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG}
    docker tag ${IMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG}
    docker tag ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-latest
    docker tag ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG}
    docker tag ${IMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
    docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-latest
    docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
    if [ -n "${SEMVER}" ]; then
        docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${SEMVER}
        docker tag ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${SEMVER}
        docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
    fi
    docker push ${MANIFESTIMAGE}:amd64-${META_TAG}
    docker push ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG}
    docker push ${MANIFESTIMAGE}:amd64-latest
    docker push ${MANIFESTIMAGE}:arm32v7-${META_TAG}
    docker push ${MANIFESTIMAGE}:arm32v7-latest
    docker push ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG}
    docker push ${MANIFESTIMAGE}:arm64v8-${META_TAG}
    docker push ${MANIFESTIMAGE}:arm64v8-latest
    docker push ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
    if [ -n "${SEMVER}" ]; then
        docker push ${MANIFESTIMAGE}:amd64-${SEMVER}
        docker push ${MANIFESTIMAGE}:arm32v7-${SEMVER}
        docker push ${MANIFESTIMAGE}:arm64v8-${SEMVER}
    fi
    docker manifest push --purge ${MANIFESTIMAGE}:latest || :
    docker manifest create ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm32v7-latest ${MANIFESTIMAGE}:arm64v8-latest
    docker manifest annotate ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:arm32v7-latest --os linux --arch arm
@@ -833,47 +606,38 @@ pipeline {
    docker manifest create ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
    docker manifest annotate ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:arm32v7-${META_TAG} --os linux --arch arm
    docker manifest annotate ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} --os linux --arch arm64 --variant v8
    docker manifest push --purge ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} || :
    docker manifest create ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
    docker manifest annotate ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm32v7-${EXT_RELEASE_TAG} --os linux --arch arm
    docker manifest annotate ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} --os linux --arch arm64 --variant v8
    if [ -n "${SEMVER}" ]; then
        docker manifest push --purge ${MANIFESTIMAGE}:${SEMVER} || :
        docker manifest create ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm32v7-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
        docker manifest annotate ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:arm32v7-${SEMVER} --os linux --arch arm
        docker manifest annotate ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} --os linux --arch arm64 --variant v8
    fi
    docker manifest push --purge ${MANIFESTIMAGE}:latest
    docker manifest push --purge ${MANIFESTIMAGE}:${META_TAG}
    docker manifest push --purge ${MANIFESTIMAGE}:${EXT_RELEASE_TAG}
    if [ -n "${SEMVER}" ]; then
        docker manifest push --purge ${MANIFESTIMAGE}:${SEMVER}
    fi
done
'''
}
sh '''#! /bin/bash
for DELETEIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${QUAYIMAGE}" "${IMAGE}"; do
    docker rmi \
        ${DELETEIMAGE}:amd64-${META_TAG} \
        ${DELETEIMAGE}:amd64-latest \
        ${DELETEIMAGE}:amd64-${EXT_RELEASE_TAG} \
        ${DELETEIMAGE}:arm32v7-${META_TAG} \
        ${DELETEIMAGE}:arm32v7-latest \
        ${DELETEIMAGE}:arm32v7-${EXT_RELEASE_TAG} \
        ${DELETEIMAGE}:arm64v8-${META_TAG} \
        ${DELETEIMAGE}:arm64v8-latest \
        ${DELETEIMAGE}:arm64v8-${EXT_RELEASE_TAG} || :
    if [ -n "${SEMVER}" ]; then
        docker rmi \
            ${DELETEIMAGE}:amd64-${SEMVER} \
            ${DELETEIMAGE}:arm32v7-${SEMVER} \
            ${DELETEIMAGE}:arm64v8-${SEMVER} || :
    fi
done
docker rmi \
    ghcr.io/linuxserver/lsiodev-buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} \
    ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :
'''
}
}
@@ -889,9 +653,9 @@ pipeline {
    environment name: 'EXIT_STATUS', value: ''
}
steps {
    echo "Pushing New tag for current commit ${META_TAG}"
    sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \
    -d '{"tag":"'${META_TAG}'",\
    "object": "'${COMMIT_SHA}'",\
    "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\
    "type": "commit",\
@@ -899,10 +663,10 @@ pipeline {
    echo "Pushing New release for Tag"
    sh '''#! /bin/bash
    echo "Updating PIP version of ${EXT_PIP} to ${EXT_RELEASE_CLEAN}" > releasebody.json
    echo '{"tag_name":"'${META_TAG}'",\
    "target_commitish": "master",\
    "name": "'${META_TAG}'",\
    "body": "**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n\\n**PIP Changes:**\\n\\n' > start
    printf '","draft": false,"prerelease": false}' >> releasebody.json
    paste -d'\\0' start releasebody.json > releasebody.json.done
    curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done'''
@@ -926,9 +690,9 @@ pipeline {
    sh '''#! /bin/bash
    set -e
    TEMPDIR=$(mktemp -d)
    docker pull ghcr.io/linuxserver/jenkins-builder:latest
    docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH="${BRANCH_NAME}" -v ${TEMPDIR}:/ansible/jenkins ghcr.io/linuxserver/jenkins-builder:latest
    docker pull ghcr.io/linuxserver/readme-sync
    docker run --rm=true \
        -e DOCKERHUB_USERNAME=$DOCKERUSER \
        -e DOCKERHUB_PASSWORD=$DOCKERPASS \
@@ -936,7 +700,7 @@ pipeline {
        -e DOCKER_REPOSITORY=${IMAGE} \
        -e GIT_BRANCH=master \
        -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/mnt \
        ghcr.io/linuxserver/readme-sync bash -c 'node sync'
    rm -Rf ${TEMPDIR} '''
}
}
@@ -964,12 +728,12 @@ pipeline {
    sh 'echo "build aborted"'
}
else if (currentBuild.currentResult == "SUCCESS"){
    sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"color": 1681177,\
    "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** Success\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
    "username": "Jenkins"}' ${BUILDS_DISCORD} '''
}
else {
    sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"color": 16711680,\
    "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** failure\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
    "username": "Jenkins"}' ${BUILDS_DISCORD} '''
}
README.md (369 lines)
@@ -1,6 +1,3 @@
<!-- DO NOT EDIT THIS FILE MANUALLY -->
<!-- Please read the https://github.com/linuxserver/docker-swag/blob/master/.github/CONTRIBUTING.md -->

[](https://linuxserver.io)

[](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!")
@@ -12,14 +9,13 @@

The [LinuxServer.io](https://linuxserver.io) team brings you another container release featuring:

* regular and timely application updates
* easy user mappings (PGID, PUID)
* custom base image with s6 overlay
* weekly base OS updates with common layers across the entire LinuxServer.io ecosystem to minimise space usage, down time and bandwidth
* regular security updates

Find us at:

* [Blog](https://blog.linuxserver.io) - all the things you can do with our containers including How-To guides, opinions and much more!
* [Discord](https://discord.gg/YWrKVTn) - realtime support / chat with the community and the team.
* [Discourse](https://discourse.linuxserver.io) - post on our community forum.
@@ -29,138 +25,89 @@ Find us at:

# [linuxserver/swag](https://github.com/linuxserver/docker-swag)

[](https://scarf.sh/gateway/linuxserver-ci/docker/linuxserver%2Fswag)
[](https://github.com/linuxserver/docker-swag)
[](https://github.com/linuxserver/docker-swag/releases)
[](https://github.com/linuxserver/docker-swag/packages)
[](https://gitlab.com/linuxserver.io/docker-swag/container_registry)
[](https://quay.io/repository/linuxserver.io/swag)
[](https://hub.docker.com/r/linuxserver/swag)
[](https://hub.docker.com/r/linuxserver/swag)
[](https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-swag/job/master/)
[](https://ci-tests.linuxserver.io/linuxserver/swag/latest/index.html)

SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention.

[](https://linuxserver.io)

## Supported Architectures

We utilise the docker manifest for multi-platform awareness. More information is available from docker [here](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#manifest-list) and our announcement [here](https://blog.linuxserver.io/2019/02/21/the-lsio-pipeline-project/).

Simply pulling `lscr.io/linuxserver/swag:latest` should retrieve the correct image for your arch, but you can also pull specific arch images via tags (an example follows the table below).

The architectures supported by this image are:

| Architecture | Available | Tag |
| :----: | :----: | ---- |
| x86-64 | ✅ | amd64-\<version tag\> |
| arm64 | ✅ | arm64v8-\<version tag\> |
| armhf | ✅ | arm32v7-\<version tag\> |
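For instance, to pin a single-architecture image instead of relying on the multi-arch manifest, pull an arch-prefixed tag directly (tag names follow the table above; `latest` is used here as the version tag):

```bash
# Pull the arm64 variant of the image explicitly
docker pull lscr.io/linuxserver/swag:arm64v8-latest
```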
## Application Setup

### Validation and initial setup

* Before running this container, make sure that the url and subdomains are properly forwarded to this container's host, and that port 443 (and/or 80) is not being used by another service on the host (NAS gui, another webserver, etc.).
* If you need a dynamic dns provider, you can use the free provider duckdns.org where the `URL` will be `yoursubdomain.duckdns.org` and the `SUBDOMAINS` can be `www,ftp,cloud` with http validation, or `wildcard` with dns validation. You can use our [duckdns image](https://hub.docker.com/r/linuxserver/duckdns/) to update your IP on duckdns.org.
* For `http` validation, port 80 on the internet side of the router should be forwarded to this container's port 80.
* For `dns` validation, make sure to enter your credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf` (see the sketch after this list).
* Cloudflare provides free accounts for managing dns and is very easy to use with this image. Make sure that it is set up for "dns only" instead of "dns + proxy".
* The Google dns plugin is meant to be used with "Google Cloud DNS", a paid enterprise product, and not with "Google Domains DNS".
* DuckDNS only supports two types of DNS validated certificates (not both at the same time):
  1. Certs that only cover your main subdomain (ie. `yoursubdomain.duckdns.org`, leave the `SUBDOMAINS` variable empty)
  2. Certs that cover sub-subdomains of your main subdomain (ie. `*.yoursubdomain.duckdns.org`, set the `SUBDOMAINS` variable to `wildcard`)
* `--cap-add=NET_ADMIN` is required for fail2ban to modify iptables.
* After setup, navigate to `https://yourdomain.url` to access the default homepage (http access through port 80 is disabled by default, you can enable it by editing the default site config at `/config/nginx/site-confs/default.conf`).
* Certs are checked nightly and if expiration is within 30 days, renewal is attempted. If your cert is about to expire in less than 30 days, check the logs under `/config/log/letsencrypt` to see why the renewals have been failing. It is recommended to input your e-mail in docker parameters so you receive expiration notices from Let's Encrypt in those circumstances.
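As an illustration only, here is a minimal sketch of a run command for a wildcard cert via `dns` validation with the cloudflare plugin (domain and paths are placeholders; the plugin credentials still need to be entered into the matching ini file under `/config/dns-conf` as described above):

```bash
# Sketch: wildcard cert via dns validation; adjust URL, paths and plugin to your setup
docker run -d \
  --name=swag \
  --cap-add=NET_ADMIN \
  -e PUID=1000 \
  -e PGID=1000 \
  -e TZ=Etc/UTC \
  -e URL=yourdomain.url \
  -e SUBDOMAINS=wildcard \
  -e VALIDATION=dns \
  -e DNSPLUGIN=cloudflare \
  -p 443:443 \
  -v /path/to/appdata/config:/config \
  --restart unless-stopped \
  lscr.io/linuxserver/swag:latest
```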
### Security and password protection

* The container detects changes to url and subdomains, revokes existing certs and generates new ones during start.
* Per [RFC7919](https://datatracker.ietf.org/doc/html/rfc7919), the container is shipping [ffdhe4096](https://ssl-config.mozilla.org/ffdhe4096.txt) as the `dhparams.pem`.
* If you'd like to password protect your sites, you can use htpasswd. Run the following command on your host to generate the htpasswd file: `docker exec -it swag htpasswd -c /config/nginx/.htpasswd <username>`
* You can add multiple user:pass entries to `.htpasswd`. For the first user, use the above command; for the others, run it without the `-c` flag, since `-c` forces deletion of the existing `.htpasswd` and creation of a new one (see the example after this list).
* You can also use ldap auth for security and access control. A sample, user configurable ldap.conf is provided, and it requires the separate image [linuxserver/ldap-auth](https://hub.docker.com/r/linuxserver/ldap-auth/) to communicate with an ldap server.
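A short sketch of that workflow (usernames are placeholders):

```bash
# First user: -c creates a fresh /config/nginx/.htpasswd
docker exec -it swag htpasswd -c /config/nginx/.htpasswd alice
# Additional users: omit -c so the existing entries are kept
docker exec -it swag htpasswd /config/nginx/.htpasswd bob
```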
### Site config and reverse proxy

* The default site config resides at `/config/nginx/site-confs/default.conf`. Feel free to modify this file, and you can add other conf files to this directory. However, if you delete the `default` file, a new default will be created on container start.
* Preset reverse proxy config files are added for popular apps. See the `README.md` file under `/config/nginx/proxy_confs` for instructions on how to enable them (a sketch follows this list). The preset confs reside in and get imported from [this repo](https://github.com/linuxserver/reverse-proxy-confs).
* If you wish to hide your site from search engine crawlers, you may find it useful to add this configuration line to your site config, within the server block, above the line where ssl.conf is included:

  `add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive";`

  This will *ask* Google et al not to index and list your site. Be careful with this, as you will eventually be de-listed if you leave this line in on a site you wish to be present on search engines.
* If you wish to redirect http to https, you must expose port 80.
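For illustration only (the exact file names vary per app and are documented in that `README.md`), enabling one of the preset subdomain proxy confs generally amounts to activating the corresponding sample file and reloading nginx; `yourapp` below is a hypothetical name:

```bash
# Activate a hypothetical preset proxy conf, then reload nginx (or simply restart the container)
cp /path/to/appdata/config/nginx/proxy_confs/yourapp.subdomain.conf.sample \
   /path/to/appdata/config/nginx/proxy_confs/yourapp.subdomain.conf
docker exec swag nginx -s reload
```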
### Using certs in other containers

* This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc.
* To use these certs in other containers, do either of the following (see the mount sketch after this list):
  1. *(Easier)* Mount the container's config folder in other containers (ie. `-v /path-to-swag-config:/swag-ssl`) and in the other containers, use the cert location `/swag-ssl/keys/letsencrypt/`
  2. *(More secure)* Mount the SWAG folder `etc` that resides under `/config` in other containers (ie. `-v /path-to-swag-config/etc:/swag-ssl`) and in the other containers, use the cert location `/swag-ssl/letsencrypt/live/<your.domain.url>/` (This is more secure because the first method shares the entire SWAG config folder with other containers, including the www files, whereas the second method only shares the ssl certs)
* These certs include:
  1. `cert.pem`, `chain.pem`, `fullchain.pem` and `privkey.pem`, which are generated by Certbot and used by nginx and various other apps
  2. `privkey.pfx`, a format supported by Microsoft and commonly used by dotnet apps such as Emby Server (no password)
  3. `priv-fullchain-bundle.pem`, a pem cert that bundles the private key and the fullchain, used by apps like ZNC
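A minimal sketch of the second (more secure) option; the host path and the consuming image name are placeholders:

```bash
# Share only the certs, read-only, with another container
docker run -d \
  --name=your-app \
  -v /path-to-swag-config/etc:/swag-ssl:ro \
  your-app-image
# Inside that container, point the app at:
#   /swag-ssl/letsencrypt/live/<your.domain.url>/fullchain.pem
#   /swag-ssl/letsencrypt/live/<your.domain.url>/privkey.pem
```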
### Using fail2ban

* This container includes fail2ban set up with 5 jails by default:
  1. nginx-http-auth
  2. nginx-badbots
  3. nginx-botsearch
  4. nginx-deny
  5. nginx-unauthorized
* To enable or disable other jails, modify the file `/config/fail2ban/jail.local`
* To modify filters and actions, instead of editing the `.conf` files, create `.local` files with the same name and edit those, because `.conf` files get overwritten when the actions and filters are updated. `.local` files will append whatever's in the `.conf` files (ie. `nginx-http-auth.conf` --> `nginx-http-auth.local`)
* You can check which jails are active via `docker exec -it swag fail2ban-client status`
* You can check the status of a specific jail via `docker exec -it swag fail2ban-client status <jail name>`
* You can unban an IP via `docker exec -it swag fail2ban-client set <jail name> unbanip <IP>` (a combined example follows this list)
* A list of commands can be found here: <https://www.fail2ban.org/wiki/index.php/Commands>
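Putting those commands together, a typical check-and-unban sequence looks like this (the jail name and IP are examples):

```bash
docker exec -it swag fail2ban-client status                     # list active jails
docker exec -it swag fail2ban-client status nginx-http-auth     # inspect one jail
docker exec -it swag fail2ban-client set nginx-http-auth unbanip 203.0.113.42
```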
### Updating configs

* This container creates a number of configs for nginx, proxy samples, etc.
* Config updates are noted in the changelog but not automatically applied to your files.
* If you have modified a file with noted changes in the changelog:
  1. Keep your existing configs as is (not broken, don't fix)
  2. Review our repository commits and apply the new changes yourself
  3. Delete the modified config file with listed updates, restart the container, reapply your changes
* If you have NOT modified a file with noted changes in the changelog:
  1. Delete the config file with listed updates, restart the container
* Proxy sample updates are not listed in the changelog. See the changes here: [https://github.com/linuxserver/reverse-proxy-confs/commits/master](https://github.com/linuxserver/reverse-proxy-confs/commits/master)
* Proxy sample files WILL be updated, however your renamed (enabled) proxy files will not.
* You can check the new sample and adjust your active config as needed.
### Migration from the old `linuxserver/letsencrypt` image

Please follow the instructions [on this blog post](https://www.linuxserver.io/blog/2020-08-21-introducing-swag#migrate).
## Usage

Here are some example snippets to help you get started creating a container.

### docker-compose (recommended, [click here for more info](https://docs.linuxserver.io/general/docker-compose))

```yaml
---
version: "2.1"
services:
  swag:
    image: lscr.io/linuxserver/swag:latest
    container_name: swag
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Etc/UTC
      - URL=yourdomain.url
      - VALIDATION=http
      - SUBDOMAINS=www, #optional
      - CERTPROVIDER= #optional
      - DNSPLUGIN=cloudflare #optional
      - PROPAGATION= #optional
      - EMAIL= #optional
      - ONLY_SUBDOMAINS=false #optional
      - EXTRA_DOMAINS= #optional
@@ -173,33 +120,6 @@ services:
    restart: unless-stopped
```
### docker cli ([click here for more info](https://docs.docker.com/engine/reference/commandline/cli/))

```bash
docker run -d \
  --name=swag \
  --cap-add=NET_ADMIN \
  -e PUID=1000 \
  -e PGID=1000 \
  -e TZ=Etc/UTC \
  -e URL=yourdomain.url \
  -e VALIDATION=http \
  -e SUBDOMAINS=www, `#optional` \
  -e CERTPROVIDER= `#optional` \
  -e DNSPLUGIN=cloudflare `#optional` \
  -e PROPAGATION= `#optional` \
  -e EMAIL= `#optional` \
  -e ONLY_SUBDOMAINS=false `#optional` \
  -e EXTRA_DOMAINS= `#optional` \
  -e STAGING=false `#optional` \
  -p 443:443 \
  -p 80:80 `#optional` \
  -v /path/to/appdata/config:/config \
  --restart unless-stopped \
  lscr.io/linuxserver/swag:latest
```
## Parameters

Container images are configured using parameters passed at runtime (such as those above). These parameters are separated by a colon and indicate `<external>:<internal>` respectively. For example, `-p 8080:80` would expose port `80` from inside the container to be accessible from the host's IP on port `8080` outside the container.
@@ -210,30 +130,26 @@ Container images are configured using parameters passed at runtime (such as those above)

| `-p 80` | Http port (required for http validation and http -> https redirect) |
| `-e PUID=1000` | for UserID - see below for explanation |
| `-e PGID=1000` | for GroupID - see below for explanation |
| `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). |
| `-e URL=yourdomain.url` | Top url you have control over (`customdomain.com` if you own it, or `customsubdomain.ddnsprovider.com` if dynamic dns). |
| `-e VALIDATION=http` | Certbot validation method to use, options are `http` or `dns` (`dns` method also requires `DNSPLUGIN` variable set). |
| `-e SUBDOMAINS=www,` | Subdomains you'd like the cert to cover (comma separated, no spaces) ie. `www,ftp,cloud`. For a wildcard cert, set this _exactly_ to `wildcard` (wildcard cert is available via `dns` and `duckdns` validation only) |
| `-e CERTPROVIDER=` | Optionally define the cert provider. Set to `zerossl` for ZeroSSL certs (requires existing [ZeroSSL account](https://app.zerossl.com/signup) and the e-mail address entered in `EMAIL` env var). Otherwise defaults to Let's Encrypt. |
| `-e DNSPLUGIN=cloudflare` | Required if `VALIDATION` is set to `dns`. Options are `acmedns`, `aliyun`, `azure`, `cloudflare`, `cpanel`, `desec`, `digitalocean`, `directadmin`, `dnsimple`, `dnsmadeeasy`, `dnspod`, `do`, `domeneshop`, `duckdns`, `dynu`, `gandi`, `gehirn`, `godaddy`, `google`, `google-domains`, `he`, `hetzner`, `infomaniak`, `inwx`, `ionos`, `linode`, `loopia`, `luadns`, `netcup`, `njalla`, `nsone`, `ovh`, `porkbun`, `rfc2136`, `route53`, `sakuracloud`, `standalone`, `transip`, and `vultr`. Also need to enter the credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`. |
| `-e PROPAGATION=` | Optionally override (in seconds) the default propagation time for the dns plugins. |
| `-e EMAIL=` | Optional e-mail address used for cert expiration notifications (Required for ZeroSSL). |
| `-e ONLY_SUBDOMAINS=false` | If you wish to get certs only for certain subdomains, but not the main domain (main domain may be hosted on another machine and cannot be validated), set this to `true` |
| `-e EXTRA_DOMAINS=` | Additional fully qualified domain names (comma separated, no spaces) ie. `extradomain.com,subdomain.anotherdomain.org,*.anotherdomain.org` |
| `-e STAGING=false` | Set to `true` to retrieve certs in staging mode. Rate limits will be much higher, but the resulting cert will not pass the browser's security test. Only to be used for testing purposes. |
| `-v /config` | All the config files including the webroot reside here. |

### Portainer notice

This image utilises `cap_add` or `sysctl` to work properly. This is not implemented properly in some versions of Portainer, thus this image may not work if deployed through Portainer.
## Environment variables from files (Docker secrets)

You can set any environment variable from a file by using a special prepend `FILE__`.

As an example:

```bash
-e FILE__PASSWORD=/run/secrets/mysecretpassword
```
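As a rough sketch of how that can be wired up without Docker's secrets machinery (paths and the secret value are placeholders; the variable name follows the generic example above):

```bash
# Put the secret value in a file on the host, bind-mount it read-only,
# then point the FILE__ variable at the in-container path
echo 'supersecretpassword' > /path/to/appdata/secrets/mypassword
docker run -d --name=swag \
  -v /path/to/appdata/secrets/mypassword:/run/secrets/mysecretpassword:ro \
  -e FILE__PASSWORD=/run/secrets/mysecretpassword \
  lscr.io/linuxserver/swag:latest   # plus the usual swag parameters shown above
```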
@@ -252,16 +168,73 @@ Ensure any volume directories on the host are owned by the same user you specify

In this instance `PUID=1000` and `PGID=1000`, to find yours use `id user` as below:

```bash
$ id username
uid=1000(dockeruser) gid=1000(dockergroup) groups=1000(dockergroup)
```
## Docker Mods

[](https://mods.linuxserver.io/?mod=swag "view available mods for this container.") [](https://mods.linuxserver.io/?mod=universal "view available universal mods.")

We publish various [Docker Mods](https://github.com/linuxserver/docker-mods) to enable additional functionality within the containers. The list of Mods available for this image (if any) as well as universal mods that can be applied to any one of our images can be accessed via the dynamic badges above.
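Mods are applied through the `DOCKER_MODS` environment variable. As a sketch, assuming the usual `linuxserver/mods:<name>` tag convention and using the swag-maxmind mod referenced in the changelog below:

```bash
# Enable a mod by adding DOCKER_MODS to the container's environment
docker run -d --name=swag \
  -e DOCKER_MODS=linuxserver/mods:swag-maxmind \
  lscr.io/linuxserver/swag:latest   # plus the usual swag parameters
```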
## Support Info

@@ -270,7 +243,7 @@ We publish various [Docker Mods](https://github.com/linuxserver/docker-mods) to

* container version number
  * `docker inspect -f '{{ index .Config.Labels "build_version" }}' swag`
* image version number
  * `docker inspect -f '{{ index .Config.Labels "build_version" }}' lscr.io/linuxserver/swag:latest`

## Updating Info
@@ -278,57 +251,48 @@ Most of our images are static, versioned, and require an image update and contai

Below are the instructions for updating containers:

### Via Docker Compose

* Update all images: `docker-compose pull`
  * or update a single image: `docker-compose pull swag`
* Let compose update all containers as necessary: `docker-compose up -d`
  * or update a single container: `docker-compose up -d swag`
* You can also remove the old dangling images: `docker image prune`

### Via Docker Run

* Update the image: `docker pull lscr.io/linuxserver/swag:latest`
* Stop the running container: `docker stop swag`
* Delete the container: `docker rm swag`
* Recreate a new container with the same docker run parameters as instructed above (if mapped correctly to a host folder, your `/config` folder and settings will be preserved)
* You can also remove the old dangling images: `docker image prune`

### Via Watchtower auto-updater (only use if you don't remember the original parameters)

* Pull the latest image at its tag and replace it with the same env variables in one run:

```bash
docker run --rm \
  -v /var/run/docker.sock:/var/run/docker.sock \
  containrrr/watchtower \
  --run-once swag
```

* You can also remove the old dangling images: `docker image prune`

**Note:** We do not endorse the use of Watchtower as a solution to automated updates of existing Docker containers. In fact we generally discourage automated updates. However, this is a useful tool for one-time manual updates of containers where you have forgotten the original parameters. In the long term, we highly recommend using [Docker Compose](https://docs.linuxserver.io/general/docker-compose).

### Image Update Notifications - Diun (Docker Image Update Notifier)

* We recommend [Diun](https://crazymax.dev/diun/) for update notifications. Other tools that automatically update containers unattended are not recommended or supported.
## Building locally

If you want to make local modifications to these images for development purposes or just to customize the logic:

```bash
git clone https://github.com/linuxserver/docker-swag.git
cd docker-swag
docker build \
  --no-cache \
  --pull \
  -t lscr.io/linuxserver/swag:latest .
```

The ARM variants can be built on x86_64 hardware using `multiarch/qemu-user-static`

```bash
docker run --rm --privileged multiarch/qemu-user-static:register --reset
```
@@ -336,69 +300,4 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64

## Versions

* **25.03.23:** - Fix renewal post hook.
* **10.03.23:** - Cleanup unused csr and keys folders. See [certbot 2.3.0 release notes](https://github.com/certbot/certbot/releases/tag/v2.3.0).
* **09.03.23:** - Add Google Domains DNS support, `google-domains`.
* **02.03.23:** - Set permissions on crontabs during init.
* **09.02.23:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) proxy.conf, authelia-location.conf and authelia-server.conf - Add Authentik configs, update Authelia configs.
* **06.02.23:** - Add porkbun support back in.
* **21.01.23:** - Unpin certbot version (allow certbot 2.x). !!BREAKING CHANGE!! We are temporarily removing the certbot porkbun plugin until a new version is released that is compatible with certbot 2.x.
* **20.01.23:** - Rebase to alpine 3.17 with php8.1.
* **16.01.23:** - Remove nchan module because it keeps causing crashes.
* **08.12.22:** - Revamp certbot init.
* **03.12.22:** - Remove defunct cloudxns plugin.
* **22.11.22:** - Pin acme to the same version as certbot.
* **22.11.22:** - Pin certbot to 1.32.0 until plugin compatibility improves.
* **05.11.22:** - Update acmedns plugin handling.
* **06.10.22:** - Switch to certbot-dns-duckdns. Update cpanel and gandi dns plugin handling. Minor adjustments to init logic.
* **05.10.22:** - Use certbot file hooks instead of command line hooks
* **04.10.22:** - Add godaddy and porkbun dns plugins.
* **03.10.22:** - Add default_server back to default site conf's https listen.
* **22.09.22:** - Added support for DO DNS validation.
* **22.09.22:** - Added certbot-dns-acmedns for DNS01 validation.
* **20.08.22:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Rebasing to alpine 3.15 with php8. Restructure nginx configs ([see changes announcement](https://info.linuxserver.io/issues/2022-08-20-nginx-base)).
* **10.08.22:** - Added support for Dynu DNS validation.
* **18.05.22:** - Added support for Azure DNS validation.
* **09.04.22:** - Added certbot-dns-loopia for DNS01 validation.
* **05.04.22:** - Added support for standalone DNS validation.
* **28.03.22:** - created a logfile for fail2ban nginx-unauthorized in /etc/cont-init.d/50-config
* **09.01.22:** - Added a fail2ban jail for nginx unauthorized
* **21.12.21:** - Fixed issue with iptables not working as expected
* **30.11.21:** - Move maxmind to a [new mod](https://github.com/linuxserver/docker-mods/tree/swag-maxmind)
* **22.11.21:** - Added support for Infomaniak DNS for certificate generation.
* **20.11.21:** - Added support for dnspod validation.
* **15.11.21:** - Added support for deSEC DNS for wildcard certificate generation.
* **26.10.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) proxy.conf - Mitigate <https://httpoxy.org/> vulnerabilities. Ref: <https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx#Defeating-the-Attack-using-NGINX-and-NGINX-Plus>
* **23.10.21:** - Fix Hurricane Electric (HE) DNS validation.
* **12.10.21:** - Fix deprecated LE root cert check to fix failures when using `STAGING=true`, and failures in revoking.
* **06.10.21:** - Added support for Hurricane Electric (HE) DNS validation. Added lxml build deps.
* **01.10.21:** - Check if the cert uses the old LE root cert, revoke and regenerate if necessary. [Here's more info](https://twitter.com/letsencrypt/status/1443621997288767491) on LE root cert expiration
* **19.09.21:** - Add an optional header to opt out of Google FLoC in `ssl.conf`.
* **17.09.21:** - Mark `SUBDOMAINS` var as optional.
* **01.08.21:** - Add support for ionos dns validation.
* **15.07.21:** - Fix libmaxminddb issue due to upstream change.
* **07.07.21:** - Rebase to alpine 3.14.
* **24.06.21:** - Update default nginx conf folder.
* **28.05.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) authelia-server.conf - Use `resolver.conf` and patch for `CVE-2021-32637`.
* **20.05.21:** - Modify resolver.conf generation to detect and ignore ipv6.
* **14.05.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, ssl.conf, proxy.conf, and the default site-conf - Rework nginx.conf to be inline with alpine upstream and relocate lines from other files. Use linuxserver.io wheel index for pip packages. Switch to using [ffdhe4096](https://ssl-config.mozilla.org/ffdhe4096.txt) for `dhparams.pem` per [RFC7919](https://datatracker.ietf.org/doc/html/rfc7919). Added `worker_processes.conf`, which sets the number of nginx workers, and `resolver.conf`, which sets the dns resolver. Both conf files are auto-generated only on first start and can be user modified later.
* **21.04.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) authelia-server.conf and authelia-location.conf - Add remote name/email headers and pass http method.
* **12.04.21:** - Add php7-gmp and php7-pecl-mailparse.
* **12.04.21:** - Add support for vultr dns validation.
* **14.03.21:** - Add support for directadmin dns validation.
* **12.02.21:** - Clean up rust/cargo cache, which ballooned the image size in the last couple of builds.
* **10.02.21:** - Fix aliyun, domeneshop, inwx and transip dns confs for existing users.
* **09.02.21:** - Rebasing to alpine 3.13. Add nginx mods brotli and dav-ext. Remove nginx mods lua and lua-upstream (due to regression over the last couple of years).
* **26.01.21:** - Add support for hetzner dns validation.
* **20.01.21:** - Add check for ZeroSSL EAB retrieval.
* **08.01.21:** - Add support for getting certs from [ZeroSSL](https://zerossl.com/) via optional `CERTPROVIDER` env var. Update aliyun, domeneshop, inwx and transip dns plugins with the new plugin names. Hide `donoteditthisfile.conf` because users were editing it despite its name. Suppress harmless error when no proxy confs are enabled.
* **03.01.21:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) /config/nginx/site-confs/default.conf - Add helper pages to aid troubleshooting
* **10.12.20:** - Add support for njalla dns validation
* **09.12.20:** - Check for template/conf updates and notify in the log. Add support for gehirn and sakuracloud dns validation.
* **01.11.20:** - Add support for netcup dns validation
* **29.10.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) ssl.conf - Add frame-ancestors to Content-Security-Policy.
* **04.10.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and ssl.conf - Minor cleanups and reordering.
* **20.09.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Added geoip2 configs. Added MAXMINDDB_LICENSE_KEY variable to readme.
* **08.09.20:** - Add php7-xsl.
* **01.09.20:** - [Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and various proxy samples - Global websockets across all configs.
* **03.08.20:** - Initial release.
|||||||
@ -1,340 +1,219 @@
|
|||||||
NAME VERSION TYPE
|
alpine-baselayout-3.2.0-r7
|
||||||
ConfigArgParse 1.5.3 python
|
alpine-keys-2.2-r0
|
||||||
PyJWT 2.6.0 python
|
apache2-utils-2.4.43-r0
|
||||||
PyYAML 6.0 python
|
apk-tools-2.10.5-r1
|
||||||
acme 2.5.0 python
|
apr-1.7.0-r0
|
||||||
alpine-baselayout 3.4.0-r0 apk
|
apr-util-1.6.1-r6
|
||||||
alpine-baselayout-data 3.4.0-r0 apk
|
argon2-libs-20190702-r1
|
||||||
alpine-keys 2.4-r1 apk
|
bash-5.0.17-r0
|
||||||
alpine-release 3.17.3-r0 apk
|
brotli-libs-1.0.7-r5
|
||||||
aom-libs 3.5.0-r0 apk
|
busybox-1.31.1-r19
|
||||||
apache2-utils 2.4.56-r0 apk
|
c-client-2007f-r11
|
||||||
apk-tools 2.12.10-r1 apk
|
ca-certificates-20191127-r4
|
||||||
apr 1.7.2-r0 apk
|
ca-certificates-bundle-20191127-r4
|
||||||
apr-util 1.6.3-r0 apk
|
coreutils-8.32-r0
|
||||||
argon2-libs 20190702-r2 apk
|
curl-7.69.1-r0
|
||||||
attrs 22.2.0 python
|
db-5.3.28-r1
|
||||||
azure-common 1.1.28 python
|
expat-2.2.9-r1
|
||||||
azure-core 1.26.4 python
|
fail2ban-0.11.1-r2
|
||||||
azure-identity 1.12.0 python
|
freetype-2.10.2-r0
|
||||||
azure-mgmt-core 1.4.0 python
|
gdbm-1.13-r1
|
||||||
azure-mgmt-dns 8.0.0 python
|
git-2.26.2-r0
|
||||||
bash 5.2.15-r0 apk
|
git-perl-2.26.2-r0
|
||||||
beautifulsoup4 4.12.2 python
|
glib-2.64.4-r0
|
||||||
boto3 1.26.109 python
|
gmp-6.2.0-r0
|
||||||
botocore 1.29.109 python
|
gnupg-2.2.20-r0
|
||||||
brotli-libs 1.0.9-r9 apk
|
gnutls-3.6.14-r0
|
||||||
bs4 0.0.1 python
|
icu-libs-67.1-r0
|
||||||
busybox 1.35.0 binary
|
ip6tables-1.8.4-r1
|
||||||
busybox 1.35.0-r29 apk
|
iptables-1.8.4-r1
|
||||||
busybox-binsh 1.35.0-r29 apk
|
libacl-2.2.53-r0
|
||||||
c-client 2007f-r14 apk
|
libassuan-2.5.3-r0
|
||||||
ca-certificates 20220614-r4 apk
|
libattr-2.4.48-r0
|
||||||
ca-certificates-bundle 20220614-r4 apk
|
libblkid-2.35.2-r0
|
||||||
cachetools 5.3.0 python
|
libbsd-0.10.0-r0
|
||||||
certbot 2.5.0 python
|
libbz2-1.0.8-r1
|
||||||
certbot-dns-acmedns 0.1.0 python
|
libc-utils-0.7.2-r3
|
||||||
certbot-dns-aliyun 2.0.0 python
|
libcap-2.27-r0
|
||||||
certbot-dns-azure 2.1.0 python
|
libcrypto1.1-1.1.1g-r0
|
||||||
certbot-dns-cloudflare 2.5.0 python
|
libcurl-7.69.1-r0
|
||||||
certbot-dns-cpanel 0.4.0 python
|
libedit-20191231.3.1-r0
|
||||||
certbot-dns-desec 1.2.1 python
|
libevent-2.1.11-r1
|
||||||
certbot-dns-digitalocean 2.5.0 python
|
libffi-3.3-r2
|
||||||
certbot-dns-directadmin 1.0.3 python
|
libgcc-9.3.0-r2
|
||||||
certbot-dns-dnsimple 2.5.0 python
|
libgcrypt-1.8.5-r0
|
||||||
certbot-dns-dnsmadeeasy 2.5.0 python
|
libgd-2.3.0-r1
|
||||||
certbot-dns-dnspod 0.1.0 python
|
libgpg-error-1.37-r0
|
||||||
certbot-dns-do 0.31.0 python
|
libice-1.0.10-r0
|
||||||
certbot-dns-domeneshop 0.2.9 python
|
libidn-1.35-r0
|
||||||
certbot-dns-duckdns 1.3 python
|
libintl-0.20.2-r0
|
||||||
certbot-dns-dynu 0.0.4 python
|
libjpeg-turbo-2.0.5-r0
|
||||||
certbot-dns-gehirn 2.5.0 python
|
libksba-1.4.0-r0
|
||||||
certbot-dns-godaddy 0.2.2 python
|
libldap-2.4.50-r0
|
||||||
certbot-dns-google 2.5.0 python
|
libmagic-5.38-r0
|
||||||
certbot-dns-google-domains 0.1.9 python
|
libmaxminddb-1.4.2-r1
|
||||||
certbot-dns-he 1.0.0 python
|
libmcrypt-2.5.8-r8
|
||||||
certbot-dns-hetzner 2.0.0 python
|
libmemcached-libs-1.0.18-r4
|
||||||
certbot-dns-infomaniak 0.2.1 python
|
libmnl-1.0.4-r0
|
||||||
certbot-dns-inwx 2.2.0 python
|
libmount-2.35.2-r0
|
||||||
certbot-dns-ionos 2022.11.24 python
|
libnftnl-libs-1.1.6-r0
|
||||||
certbot-dns-linode 2.5.0 python
|
libpng-1.6.37-r1
|
||||||
certbot-dns-loopia 1.0.1 python
|
libpq-12.3-r2
|
||||||
certbot-dns-luadns 2.5.0 python
|
libproc-3.3.16-r0
|
||||||
certbot-dns-netcup 1.2.0 python
|
libressl3.1-libcrypto-3.1.2-r0
|
||||||
certbot-dns-njalla 1.0.0 python
|
libressl3.1-libssl-3.1.2-r0
|
||||||
certbot-dns-nsone 2.5.0 python
|
libsasl-2.1.27-r6
|
||||||
certbot-dns-ovh 2.5.0 python
|
libseccomp-2.4.3-r0
|
||||||
certbot-dns-porkbun 0.8 python
|
libsecret-0.20.3-r0
|
||||||
certbot-dns-rfc2136 2.5.0 python
|
libsm-1.2.3-r0
|
||||||
certbot-dns-route53 2.5.0 python
|
libsodium-1.0.18-r0
|
||||||
certbot-dns-sakuracloud 2.5.0 python
|
libssl1.1-1.1.1g-r0
|
||||||
certbot-dns-standalone 1.1 python
|
libstdc++-9.3.0-r2
|
||||||
certbot-dns-transip 0.5.2 python
|
libtasn1-4.16.0-r1
|
||||||
certbot-dns-vultr 1.0.3 python
|
libtls-standalone-2.9.1-r1
|
||||||
certbot-plugin-gandi 1.4.3 python
|
libunistring-0.9.10-r0
|
||||||
certifi 2022.12.7 python
|
libuuid-2.35.2-r0
|
||||||
cffi 1.15.1 python
|
libwebp-1.1.0-r0
|
||||||
charset-normalizer 3.1.0 python
|
libx11-1.6.10-r0
|
||||||
cloudflare 2.11.1 python
|
libxau-1.0.9-r0
|
||||||
configobj 5.0.8 python
|
libxcb-1.14-r1
|
||||||
coreutils 9.1-r0 apk
|
libxdmcp-1.1.3-r0
|
||||||
cryptography 40.0.1 python
|
libxext-1.3.4-r0
|
||||||
curl 7.88.1-r1 apk
|
libxml2-2.9.10-r4
|
||||||
dataclasses-json 0.5.7 python
|
libxpm-3.5.13-r0
|
||||||
distro 1.8.0 python
|
libxslt-1.1.34-r0
|
||||||
dns-lexicon 3.11.7 python
|
libxt-1.2.0-r0
|
||||||
dnslib 0.9.23 python
|
libzip-1.6.1-r1
|
||||||
dnspython 2.3.0 python
|
linux-pam-1.3.1-r4
|
||||||
domeneshop 0.4.3 python
|
logrotate-3.16.0-r0
|
||||||
fail2ban 1.0.2 python
|
luajit-5.1.20190925-r0
|
||||||
fail2ban 1.0.2-r0 apk
|
memcached-1.6.6-r0
|
||||||
filelock 3.11.0 python
|
musl-1.1.24-r9
|
||||||
fontconfig 2.14.1-r0 apk
|
musl-utils-1.1.24-r9
|
||||||
freetype 2.12.1-r0 apk
|
nano-4.9.3-r0
|
||||||
future 0.18.3 python
|
ncurses-libs-6.2_p20200523-r0
|
||||||
gdbm 1.23-r0 apk
|
ncurses-terminfo-base-6.2_p20200523-r0
|
||||||
git 2.38.4-r1 apk
|
nettle-3.5.1-r1
|
||||||
git-perl 2.38.4-r1 apk
|
nghttp2-libs-1.41.0-r0
|
||||||
gmp 6.2.1-r2 apk
|
nginx-1.18.0-r0
|
||||||
gnupg 2.2.40-r0 apk
|
nginx-mod-devel-kit-1.18.0-r0
|
||||||
gnupg-dirmngr 2.2.40-r0 apk
|
nginx-mod-http-echo-1.18.0-r0
|
||||||
gnupg-gpgconf 2.2.40-r0 apk
|
nginx-mod-http-fancyindex-1.18.0-r0
|
||||||
gnupg-utils 2.2.40-r0 apk
|
nginx-mod-http-geoip2-1.18.0-r0
|
||||||
gnupg-wks-client 2.2.40-r0 apk
|
nginx-mod-http-headers-more-1.18.0-r0
|
||||||
gnutls 3.7.8-r3 apk
|
nginx-mod-http-image-filter-1.18.0-r0
|
||||||
google-api-core 2.11.0 python
|
nginx-mod-http-lua-1.18.0-r0
|
||||||
google-api-python-client 2.84.0 python
|
nginx-mod-http-lua-upstream-1.18.0-r0
|
||||||
google-auth 2.17.2 python
|
nginx-mod-http-nchan-1.18.0-r0
|
||||||
google-auth-httplib2 0.1.0 python
|
nginx-mod-http-perl-1.18.0-r0
|
||||||
googleapis-common-protos 1.59.0 python
|
nginx-mod-http-redis2-1.18.0-r0
|
||||||
gpg 2.2.40-r0 apk
|
nginx-mod-http-set-misc-1.18.0-r0
|
||||||
gpg-agent 2.2.40-r0 apk
|
nginx-mod-http-upload-progress-1.18.0-r0
|
||||||
gpg-wks-server 2.2.40-r0 apk
|
nginx-mod-http-xslt-filter-1.18.0-r0
|
||||||
gpgsm 2.2.40-r0 apk
|
nginx-mod-mail-1.18.0-r0
|
||||||
gpgv 2.2.40-r0 apk
|
nginx-mod-rtmp-1.18.0-r0
|
||||||
httplib2 0.22.0 python
|
nginx-mod-stream-1.18.0-r0
|
||||||
icu-data-en 72.1-r1 apk
|
nginx-mod-stream-geoip2-1.18.0-r0
|
||||||
icu-libs 72.1-r1 apk
|
nginx-vim-1.18.0-r0
|
||||||
idna 3.4 python
|
npth-1.6-r0
|
||||||
importlib-metadata 6.2.0 python
|
openssl-1.1.1g-r0
|
||||||
ip6tables 1.8.8-r2 apk
|
p11-kit-0.23.20-r5
|
||||||
iptables 1.8.8-r2 apk
|
pcre-8.44-r0
|
||||||
isodate 0.6.1 python
|
pcre2-10.35-r0
|
||||||
jmespath 1.0.1 python
|
perl-5.30.3-r0
|
||||||
josepy 1.13.0 python
|
perl-error-0.17029-r0
|
||||||
jq 1.6-r2 apk
|
perl-git-2.26.2-r0
|
||||||
jsonlines 3.1.0 python
|
php7-7.3.20-r0
|
||||||
jsonpickle 3.0.1 python
|
php7-bcmath-7.3.20-r0
|
||||||
libacl 2.3.1-r1 apk
|
php7-bz2-7.3.20-r0
|
||||||
libassuan 2.5.5-r1 apk
|
php7-common-7.3.20-r0
|
||||||
libattr 2.5.1-r2 apk
|
php7-ctype-7.3.20-r0
|
||||||
libavif 0.11.1-r0 apk
|
php7-curl-7.3.20-r0
|
||||||
libbsd 0.11.7-r0 apk
|
php7-dom-7.3.20-r0
|
||||||
libbz2 1.0.8-r4 apk
|
php7-exif-7.3.20-r0
|
||||||
libc-utils 0.7.2-r3 apk
|
php7-fileinfo-7.3.20-r0
|
||||||
libcrypto3 3.0.8-r3 apk
|
php7-fpm-7.3.20-r0
|
||||||
libcurl 7.88.1-r1 apk
|
php7-ftp-7.3.20-r0
|
||||||
libdav1d 1.0.0-r2 apk
|
php7-gd-7.3.20-r0
|
||||||
libedit 20221030.3.1-r0 apk
|
php7-iconv-7.3.20-r0
|
||||||
libevent 2.1.12-r5 apk
|
php7-imap-7.3.20-r0
|
||||||
libexpat 2.5.0-r0 apk
|
php7-intl-7.3.20-r0
|
||||||
libffi 3.4.4-r0 apk
|
php7-json-7.3.20-r0
|
||||||
libgcc 12.2.1_git20220924-r4 apk
|
php7-ldap-7.3.20-r0
|
||||||
libgcrypt 1.10.1-r0 apk
|
php7-mbstring-7.3.20-r0
|
||||||
libgd 2.3.3-r3 apk
|
php7-mysqli-7.3.20-r0
|
||||||
libgpg-error 1.46-r1 apk
|
php7-mysqlnd-7.3.20-r0
|
||||||
libice 1.0.10-r1 apk
|
php7-opcache-7.3.20-r0
|
||||||
libidn 1.41-r0 apk
|
php7-openssl-7.3.20-r0
|
||||||
libintl 0.21.1-r1 apk
|
php7-pdo-7.3.20-r0
|
||||||
libjpeg-turbo 2.1.4-r0 apk
|
php7-pdo_mysql-7.3.20-r0
|
||||||
libksba 1.6.3-r0 apk
|
php7-pdo_odbc-7.3.20-r0
|
||||||
libldap 2.6.3-r6 apk
|
php7-pdo_pgsql-7.3.20-r0
|
||||||
libmaxminddb-libs 1.7.1-r0 apk
|
php7-pdo_sqlite-7.3.20-r0
|
||||||
libmcrypt 2.5.8-r10 apk
|
php7-pear-7.3.20-r0
|
||||||
libmd 1.0.4-r0 apk
|
php7-pecl-apcu-5.1.18-r0
|
||||||
libmemcached-libs 1.0.18-r5 apk
|
php7-pecl-igbinary-3.1.2-r0
|
||||||
libmnl 1.0.5-r0 apk
|
php7-pecl-mcrypt-1.0.3-r0
|
||||||
libnftnl 1.2.4-r0 apk
|
php7-pecl-memcached-3.1.5-r0
|
||||||
libpng 1.6.38-r0 apk
|
php7-pecl-redis-5.2.2-r1
|
||||||
libpq 15.2-r0 apk
|
php7-pgsql-7.3.20-r0
|
||||||
libproc 3.3.17-r2 apk
|
php7-phar-7.3.20-r0
|
||||||
libsasl 2.1.28-r3 apk
|
php7-posix-7.3.20-r0
|
||||||
libseccomp 2.5.4-r0 apk
|
php7-session-7.3.20-r0
|
||||||
libsm 1.2.3-r1 apk
|
php7-simplexml-7.3.20-r0
|
||||||
libsodium 1.0.18-r2 apk
|
php7-soap-7.3.20-r0
|
||||||
libssl3 3.0.8-r3 apk
|
php7-sockets-7.3.20-r0
|
||||||
libstdc++ 12.2.1_git20220924-r4 apk
|
php7-sodium-7.3.20-r0
|
||||||
libtasn1 4.19.0-r0 apk
|
php7-sqlite3-7.3.20-r0
|
||||||
libunistring 1.1-r0 apk
|
php7-tokenizer-7.3.20-r0
|
||||||
libuuid 2.38.1-r1 apk
|
php7-xml-7.3.20-r0
|
||||||
libwebp 1.2.4-r1 apk
|
php7-xmlreader-7.3.20-r0
|
||||||
libx11 1.8.4-r0 apk
|
php7-xmlrpc-7.3.20-r0
|
||||||
libxau 1.0.10-r0 apk
|
php7-xmlwriter-7.3.20-r0
|
||||||
libxcb 1.15-r0 apk
|
php7-zip-7.3.20-r0
|
||||||
libxdmcp 1.1.4-r0 apk
|
pinentry-1.1.0-r2
|
||||||
libxext 1.3.5-r0 apk
|
popt-1.16-r7
|
||||||
libxml2 2.10.3-r1 apk
|
procps-3.3.16-r0
|
||||||
libxpm 3.5.15-r0 apk
|
py3-appdirs-1.4.4-r1
|
||||||
libxslt 1.1.37-r1 apk
|
py3-asn1crypto-1.3.0-r0
|
||||||
libxt 1.2.1-r0 apk
|
py3-cachecontrol-0.12.6-r0
|
||||||
libzip 1.9.2-r2 apk
|
py3-certifi-2020.4.5.1-r0
|
||||||
linux-pam 1.5.2-r1 apk
|
py3-cffi-1.14.0-r2
|
||||||
logrotate 3.20.1-r3 apk
|
py3-chardet-3.0.4-r4
|
||||||
loopialib 0.2.0 python
|
py3-colorama-0.4.3-r0
|
||||||
lxml 4.9.2 python
|
py3-contextlib2-0.6.0-r0
|
||||||
lz4-libs 1.9.4-r1 apk
|
py3-cparser-2.20-r0
|
||||||
marshmallow 3.19.0 python
|
py3-cryptography-2.9.2-r0
|
||||||
marshmallow-enum 1.5.1 python
|
py3-distlib-0.3.0-r0
|
||||||
memcached 1.6.17 binary
|
py3-distro-1.5.0-r1
|
||||||
memcached 1.6.17-r0 apk
|
py3-future-0.18.2-r1
|
||||||
mock 5.0.1 python
|
py3-html5lib-1.0.1-r4
|
||||||
mpdecimal 2.5.1-r1 apk
|
py3-idna-2.9-r0
|
||||||
msal 1.21.0 python
|
py3-lockfile-0.12.2-r3
|
||||||
msal-extensions 1.0.0 python
|
py3-msgpack-1.0.0-r0
|
||||||
msrest 0.7.1 python
|
py3-ordered-set-4.0.1-r0
|
||||||
musl 1.2.3-r4 apk
|
py3-packaging-20.4-r0
|
||||||
musl-utils 1.2.3-r4 apk
|
py3-parsing-2.4.7-r0
|
||||||
mypy-extensions 1.0.0 python
|
py3-pep517-0.8.2-r0
|
||||||
nano 7.0-r0 apk
|
py3-pip-20.1.1-r0
|
||||||
ncurses-libs 6.3_p20221119-r0 apk
|
py3-progress-1.5-r0
|
||||||
ncurses-terminfo-base 6.3_p20221119-r0 apk
|
py3-pytoml-0.1.21-r0
|
||||||
netcat-openbsd 1.130-r4 apk
|
py3-requests-2.23.0-r0
|
||||||
nettle 3.8.1-r0 apk
|
py3-retrying-1.3.3-r0
|
||||||
nghttp2-libs 1.51.0-r0 apk
|
py3-setuptools-47.0.0-r0
|
||||||
nginx 1.22.1-r0 apk
|
py3-six-1.15.0-r0
|
||||||
nginx-mod-devel-kit 1.22.1-r0 apk
|
py3-toml-0.10.1-r0
|
||||||
nginx-mod-http-brotli 1.22.1-r0 apk
|
py3-urllib3-1.25.9-r0
|
||||||
nginx-mod-http-dav-ext 1.22.1-r0 apk
|
py3-webencodings-0.5.1-r3
|
||||||
nginx-mod-http-echo 1.22.1-r0 apk
|
python3-3.8.5-r0
|
||||||
nginx-mod-http-fancyindex 1.22.1-r0 apk
|
readline-8.0.4-r0
|
||||||
nginx-mod-http-geoip2 1.22.1-r0 apk
|
scanelf-1.2.6-r0
|
||||||
nginx-mod-http-headers-more 1.22.1-r0 apk
|
shadow-4.8.1-r0
|
||||||
nginx-mod-http-image-filter 1.22.1-r0 apk
|
sqlite-libs-3.32.1-r0
|
||||||
nginx-mod-http-perl 1.22.1-r0 apk
|
ssl_client-1.31.1-r19
|
||||||
nginx-mod-http-redis2 1.22.1-r0 apk
|
tzdata-2020a-r0
|
||||||
nginx-mod-http-set-misc 1.22.1-r0 apk
|
unixodbc-2.3.7-r2
|
||||||
nginx-mod-http-upload-progress 1.22.1-r0 apk
|
whois-5.5.6-r0
|
||||||
nginx-mod-http-xslt-filter 1.22.1-r0 apk
|
xz-5.2.5-r0
|
||||||
nginx-mod-mail 1.22.1-r0 apk
|
xz-libs-5.2.5-r0
|
||||||
nginx-mod-rtmp 1.22.1-r0 apk
|
zlib-1.2.11-r3
|
||||||
nginx-mod-stream 1.22.1-r0 apk
|
|
||||||
nginx-mod-stream-geoip2 1.22.1-r0 apk
|
|
||||||
nginx-vim 1.22.1-r0 apk
|
|
||||||
npth 1.6-r2 apk
|
|
||||||
oauth2client 4.1.3 python
|
|
||||||
oauthlib 3.2.2 python
|
|
||||||
oniguruma 6.9.8-r0 apk
|
|
||||||
openssl 3.0.8-r3 apk
|
|
||||||
p11-kit 0.24.1-r1 apk
|
|
||||||
packaging 23.0 python
|
|
||||||
parsedatetime 2.6 python
|
|
||||||
pcre 8.45-r2 apk
|
|
||||||
pcre2 10.42-r0 apk
|
|
||||||
perl 5.36.0-r0 apk
|
|
||||||
perl-error 0.17029-r1 apk
|
|
||||||
perl-git 2.38.4-r1 apk
|
|
||||||
php-cli 8.1.17 binary
|
|
||||||
php-fpm 8.1.17 binary
|
|
||||||
php81 8.1.17-r0 apk
|
|
||||||
php81-bcmath 8.1.17-r0 apk
|
|
||||||
php81-bz2 8.1.17-r0 apk
|
|
||||||
php81-common 8.1.17-r0 apk
|
|
||||||
php81-ctype 8.1.17-r0 apk
|
|
||||||
php81-curl 8.1.17-r0 apk
|
|
||||||
php81-dom 8.1.17-r0 apk
|
|
||||||
php81-exif 8.1.17-r0 apk
|
|
||||||
php81-fileinfo 8.1.17-r0 apk
|
|
||||||
php81-fpm 8.1.17-r0 apk
|
|
||||||
php81-ftp 8.1.17-r0 apk
|
|
||||||
php81-gd 8.1.17-r0 apk
|
|
||||||
php81-gmp 8.1.17-r0 apk
|
|
||||||
php81-iconv 8.1.17-r0 apk
|
|
||||||
php81-imap 8.1.17-r0 apk
|
|
||||||
php81-intl 8.1.17-r0 apk
|
|
||||||
php81-ldap 8.1.17-r0 apk
|
|
||||||
php81-mbstring 8.1.17-r0 apk
|
|
||||||
php81-mysqli 8.1.17-r0 apk
|
|
||||||
php81-mysqlnd 8.1.17-r0 apk
|
|
||||||
php81-opcache 8.1.17-r0 apk
|
|
||||||
php81-openssl 8.1.17-r0 apk
|
|
||||||
php81-pdo 8.1.17-r0 apk
|
|
||||||
php81-pdo_mysql 8.1.17-r0 apk
|
|
||||||
php81-pdo_odbc 8.1.17-r0 apk
|
|
||||||
php81-pdo_pgsql 8.1.17-r0 apk
|
|
||||||
php81-pdo_sqlite 8.1.17-r0 apk
|
|
||||||
php81-pear 8.1.17-r0 apk
|
|
||||||
php81-pecl-apcu 5.1.22-r0 apk
|
|
||||||
php81-pecl-igbinary 3.2.12-r0 apk
|
|
||||||
php81-pecl-mailparse 3.1.4-r0 apk
|
|
||||||
php81-pecl-mcrypt 1.0.6-r0 apk
|
|
||||||
php81-pecl-memcached 3.2.0-r0 apk
|
|
||||||
php81-pecl-redis 5.3.7-r0 apk
|
|
||||||
php81-pecl-xmlrpc 1.0.0_rc3-r0 apk
|
|
||||||
php81-pgsql 8.1.17-r0 apk
|
|
||||||
php81-phar 8.1.17-r0 apk
|
|
||||||
php81-posix 8.1.17-r0 apk
|
|
||||||
php81-session 8.1.17-r0 apk
|
|
||||||
php81-simplexml 8.1.17-r0 apk
|
|
||||||
php81-soap 8.1.17-r0 apk
|
|
||||||
php81-sockets 8.1.17-r0 apk
|
|
||||||
php81-sodium 8.1.17-r0 apk
|
|
||||||
php81-sqlite3 8.1.17-r0 apk
|
|
||||||
php81-tokenizer 8.1.17-r0 apk
|
|
||||||
php81-xml 8.1.17-r0 apk
|
|
||||||
php81-xmlreader 8.1.17-r0 apk
|
|
||||||
php81-xmlwriter 8.1.17-r0 apk
|
|
||||||
php81-xsl 8.1.17-r0 apk
|
|
||||||
php81-zip 8.1.17-r0 apk
|
|
||||||
pinentry 1.2.1-r0 apk
|
|
||||||
pip 23.0.1 python
|
|
||||||
pkb-client 1.2 python
|
|
||||||
popt 1.19-r0 apk
|
|
||||||
portalocker 2.7.0 python
|
|
||||||
procps 3.3.17-r2 apk
|
|
||||||
protobuf 4.22.1 python
|
|
||||||
publicsuffixlist 0.9.3 python
|
|
||||||
pyOpenSSL 23.1.1 python
|
|
||||||
pyRFC3339 1.1 python
|
|
||||||
pyacmedns 0.4 python
|
|
||||||
pyasn1 0.4.8 python
|
|
||||||
pyasn1-modules 0.2.8 python
|
|
||||||
pycparser 2.21 python
|
|
||||||
pyparsing 3.0.9 python
|
|
||||||
python 3.10.11 binary
|
|
||||||
python-dateutil 2.8.2 python
|
|
||||||
python-digitalocean 1.17.0 python
|
|
||||||
python-transip 0.6.0 python
|
|
||||||
python3 3.10.11-r0 apk
|
|
||||||
pytz 2023.3 python
|
|
||||||
readline 8.2.0-r0 apk
|
|
||||||
requests 2.28.2 python
|
|
||||||
requests-file 1.5.1 python
|
|
||||||
requests-mock 1.10.0 python
|
|
||||||
requests-oauthlib 1.3.1 python
|
|
||||||
rsa 4.9 python
|
|
||||||
s3transfer 0.6.0 python
|
|
||||||
scanelf 1.3.5-r1 apk
|
|
||||||
setuptools 65.5.0 python
|
|
||||||
shadow 4.13-r0 apk
|
|
||||||
six 1.16.0 python
|
|
||||||
skalibs 2.12.0.1-r0 apk
|
|
||||||
soupsieve 2.4 python
|
|
||||||
sqlite-libs 3.40.1-r0 apk
|
|
||||||
ssl_client 1.35.0-r29 apk
|
|
||||||
tiff 4.4.0-r3 apk
|
|
||||||
tldextract 3.4.0 python
|
|
||||||
typing-inspect 0.8.0 python
|
|
||||||
typing_extensions 4.5.0 python
|
|
||||||
tzdata 2023c-r0 apk
|
|
||||||
unixodbc 2.3.11-r0 apk
|
|
||||||
uritemplate 4.1.1 python
|
|
||||||
urllib3 1.26.15 python
|
|
||||||
utmps-libs 0.1.2.0-r1 apk
|
|
||||||
wheel 0.40.0 python
|
|
||||||
whois 5.5.14-r0 apk
|
|
||||||
xz 5.2.9-r0 apk
|
|
||||||
xz-libs 5.2.9-r0 apk
|
|
||||||
zipp 3.15.0 python
|
|
||||||
zlib 1.2.13-r0 apk
|
|
||||||
zope.interface 6.0 python
|
|
||||||
zstd-libs 1.5.5-r0 apk
|
|
||||||
|
|||||||
138
readme-vars.yml
Normal file → Executable file
@ -4,7 +4,7 @@
|
|||||||
project_name: swag
|
project_name: swag
|
||||||
project_url: "https://linuxserver.io"
|
project_url: "https://linuxserver.io"
|
||||||
project_logo: "https://github.com/linuxserver/docker-templates/raw/master/linuxserver.io/img/swag.gif"
|
project_logo: "https://github.com/linuxserver/docker-templates/raw/master/linuxserver.io/img/swag.gif"
|
||||||
project_blurb: "SWAG - Secure Web Application Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes (Let's Encrypt and ZeroSSL). It also contains fail2ban for intrusion prevention."
|
project_blurb: "SWAG - Secure Web-server And Gateway (formerly known as letsencrypt, no relation to Let's Encrypt™) sets up an Nginx webserver and reverse proxy with php support and a built-in certbot client that automates free SSL server certificate generation and renewal processes. It also contains fail2ban for intrusion prevention."
|
||||||
project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}"
|
project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}"
|
||||||
|
|
||||||
project_blurb_optional_extras_enabled: false
|
project_blurb_optional_extras_enabled: false
|
||||||
@ -32,7 +32,8 @@ param_usage_include_env: true
|
|||||||
param_env_vars:
|
param_env_vars:
|
||||||
- { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." }
|
- { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." }
|
||||||
- { env_var: "URL", env_value: "yourdomain.url", desc: "Top url you have control over (`customdomain.com` if you own it, or `customsubdomain.ddnsprovider.com` if dynamic dns)." }
|
- { env_var: "URL", env_value: "yourdomain.url", desc: "Top url you have control over (`customdomain.com` if you own it, or `customsubdomain.ddnsprovider.com` if dynamic dns)." }
|
||||||
- { env_var: "VALIDATION", env_value: "http", desc: "Certbot validation method to use, options are `http` or `dns` (`dns` method also requires `DNSPLUGIN` variable set)." }
|
- { env_var: "SUBDOMAINS", env_value: "www,", desc: "Subdomains you'd like the cert to cover (comma separated, no spaces) ie. `www,ftp,cloud`. For a wildcard cert, set this _exactly_ to `wildcard` (wildcard cert is available via `dns` and `duckdns` validation only)" }
|
||||||
|
- { env_var: "VALIDATION", env_value: "http", desc: "Certbot validation method to use, options are `http`, `dns` or `duckdns` (`dns` method also requires `DNSPLUGIN` variable set) (`duckdns` method requires `DUCKDNSTOKEN` variable set, and the `SUBDOMAINS` variable must be either empty or set to `wildcard`)." }
|
||||||
param_usage_include_vols: true
|
param_usage_include_vols: true
|
||||||
param_volumes:
|
param_volumes:
|
||||||
- { vol_path: "/config", vol_host_path: "/path/to/appdata/config", desc: "All the config files including the webroot reside here." }
|
- { vol_path: "/config", vol_host_path: "/path/to/appdata/config", desc: "All the config files including the webroot reside here." }
|
||||||
@ -49,11 +50,10 @@ cap_add_param_vars:
|
|||||||
# optional container parameters
|
# optional container parameters
|
||||||
opt_param_usage_include_env: true
|
opt_param_usage_include_env: true
|
||||||
opt_param_env_vars:
|
opt_param_env_vars:
|
||||||
- { env_var: "SUBDOMAINS", env_value: "www,", desc: "Subdomains you'd like the cert to cover (comma separated, no spaces) ie. `www,ftp,cloud`. For a wildcard cert, set this *exactly* to `wildcard` (wildcard cert is available via `dns` validation only)" }
|
- { env_var: "DNSPLUGIN", env_value: "cloudflare", desc: "Required if `VALIDATION` is set to `dns`. Options are `aliyun`, `cloudflare`, `cloudxns`, `cpanel`, `digitalocean`, `dnsimple`, `dnsmadeeasy`, `domeneshop`, `gandi`, `google`, `inwx`, `linode`, `luadns`, `nsone`, `ovh`, `rfc2136`, `route53` and `transip`. Also need to enter the credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`." }
|
||||||
- { env_var: "CERTPROVIDER", env_value: "", desc: "Optionally define the cert provider. Set to `zerossl` for ZeroSSL certs (requires existing [ZeroSSL account](https://app.zerossl.com/signup) and the e-mail address entered in `EMAIL` env var). Otherwise defaults to Let's Encrypt." }
|
|
||||||
- { env_var: "DNSPLUGIN", env_value: "cloudflare", desc: "Required if `VALIDATION` is set to `dns`. Options are `acmedns`, `aliyun`, `azure`, `cloudflare`, `cpanel`, `desec`, `digitalocean`, `directadmin`, `dnsimple`, `dnsmadeeasy`, `dnspod`, `do`, `domeneshop`, `duckdns`, `dynu`, `gandi`, `gehirn`, `godaddy`, `google`, `google-domains`, `he`, `hetzner`, `infomaniak`, `inwx`, `ionos`, `linode`, `loopia`, `luadns`, `netcup`, `njalla`, `nsone`, `ovh`, `porkbun`, `rfc2136`, `route53`, `sakuracloud`, `standalone`, `transip`, and `vultr`. Also need to enter the credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`." }
|
|
||||||
- { env_var: "PROPAGATION", env_value: "", desc: "Optionally override (in seconds) the default propagation time for the dns plugins." }
|
- { env_var: "PROPAGATION", env_value: "", desc: "Optionally override (in seconds) the default propagation time for the dns plugins." }
|
||||||
- { env_var: "EMAIL", env_value: "", desc: "Optional e-mail address used for cert expiration notifications (Required for ZeroSSL)." }
|
- { env_var: "DUCKDNSTOKEN", env_value: "", desc: "Required if `VALIDATION` is set to `duckdns`. Retrieve your token from https://www.duckdns.org" }
|
||||||
|
- { env_var: "EMAIL", env_value: "", desc: "Optional e-mail address used for cert expiration notifications." }
|
||||||
- { env_var: "ONLY_SUBDOMAINS", env_value: "false", desc: "If you wish to get certs only for certain subdomains, but not the main domain (main domain may be hosted on another machine and cannot be validated), set this to `true`" }
|
- { env_var: "ONLY_SUBDOMAINS", env_value: "false", desc: "If you wish to get certs only for certain subdomains, but not the main domain (main domain may be hosted on another machine and cannot be validated), set this to `true`" }
|
||||||
- { env_var: "EXTRA_DOMAINS", env_value: "", desc: "Additional fully qualified domain names (comma separated, no spaces) ie. `extradomain.com,subdomain.anotherdomain.org,*.anotherdomain.org`" }
|
- { env_var: "EXTRA_DOMAINS", env_value: "", desc: "Additional fully qualified domain names (comma separated, no spaces) ie. `extradomain.com,subdomain.anotherdomain.org,*.anotherdomain.org`" }
|
||||||
- { env_var: "STAGING", env_value: "false", desc: "Set to `true` to retrieve certs in staging mode. Rate limits will be much higher, but the resulting cert will not pass the browser's security test. Only to be used for testing purposes." }
|
- { env_var: "STAGING", env_value: "false", desc: "Set to `true` to retrieve certs in staging mode. Rate limits will be much higher, but the resulting cert will not pass the browser's security test. Only to be used for testing purposes." }
|
||||||
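For reference, the parameters above combine into a `docker run` command along these lines. This is only a minimal sketch assuming the `linuxserver/swag` image name, http validation, and the usual linuxserver `PUID`/`PGID` convention; adjust paths, ports and values for your own setup.

```bash
docker run -d \
  --name=swag \
  --cap-add=NET_ADMIN \
  -e PUID=1000 -e PGID=1000 \
  -e TZ=Europe/London \
  -e URL=yourdomain.url \
  -e SUBDOMAINS=www, \
  -e VALIDATION=http \
  -p 443:443 -p 80:80 \
  -v /path/to/appdata/config:/config \
  --restart unless-stopped \
  linuxserver/swag
```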
@ -76,147 +76,61 @@ optional_block_1_items: ""
|
|||||||
# application setup block
|
# application setup block
|
||||||
app_setup_block_enabled: true
|
app_setup_block_enabled: true
|
||||||
app_setup_block: |
|
app_setup_block: |
|
||||||
|
### Migrating from the old `linuxserver/letsencrypt` image
|
||||||
|
* If using docker cli:
|
||||||
|
* Stop and remove existing container via `docker stop letsencrypt` and `docker rm letsencrypt`
|
||||||
|
* Create new container using the sample on this page (container name: `swag`, image name: `linuxserver/swag`)
|
||||||
|
* If using docker compose:
|
||||||
|
* Edit the compose yaml to change the image to `linuxserver/swag` and change the service and container names to `swag`
|
||||||
|
* Issue `docker-compose up -d`
|
||||||
### Validation and initial setup
|
### Validation and initial setup
|
||||||
|
|
||||||
* Before running this container, make sure that the url and subdomains are properly forwarded to this container's host, and that port 443 (and/or 80) is not being used by another service on the host (NAS gui, another webserver, etc.).
|
* Before running this container, make sure that the url and subdomains are properly forwarded to this container's host, and that port 443 (and/or 80) is not being used by another service on the host (NAS gui, another webserver, etc.).
|
||||||
* If you need a dynamic dns provider, you can use the free provider duckdns.org where the `URL` will be `yoursubdomain.duckdns.org` and the `SUBDOMAINS` can be `www,ftp,cloud` with http validation, or `wildcard` with dns validation. You can use our [duckdns image](https://hub.docker.com/r/linuxserver/duckdns/) to update your IP on duckdns.org.
|
|
||||||
* For `http` validation, port 80 on the internet side of the router should be forwarded to this container's port 80
|
* For `http` validation, port 80 on the internet side of the router should be forwarded to this container's port 80
|
||||||
* For `dns` validation, make sure to enter your credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`
|
* For `dns` validation, make sure to enter your credentials into the corresponding ini (or json for some plugins) file under `/config/dns-conf`
|
||||||
* Cloudflare provides free accounts for managing dns and is very easy to use with this image. Make sure that it is set up for "dns only" instead of "dns + proxy"
|
* Cloudflare provides free accounts for managing dns and is very easy to use with this image. Make sure that it is set up for "dns only" instead of "dns + proxy"
|
||||||
* Google dns plugin is meant to be used with "Google Cloud DNS", a paid enterprise product, and not for "Google Domains DNS"
|
* Google dns plugin is meant to be used with "Google Cloud DNS", a paid enterprise product, and not for "Google Domains DNS"
|
||||||
* DuckDNS only supports two types of DNS validated certificates (not both at the same time):
|
* For `duckdns` validation, either leave the `SUBDOMAINS` variable empty or set it to `wildcard`, and set the `DUCKDNSTOKEN` variable with your duckdns token. Due to a limitation of duckdns, the resulting cert will only cover either main subdomain (ie. `yoursubdomain.duckdns.org`), or sub-subdomains (ie. `*.yoursubdomain.duckdns.org`), but will not both at the same time. You can use our [duckdns image](https://hub.docker.com/r/linuxserver/duckdns/) to update your IP on duckdns.org.
|
||||||
1. Certs that only cover your main subdomain (ie. `yoursubdomain.duckdns.org`, leave the `SUBDOMAINS` variable empty)
|
|
||||||
2. Certs that cover sub-subdomains of your main subdomain (ie. `*.yoursubdomain.duckdns.org`, set the `SUBDOMAINS` variable to `wildcard`)
|
|
||||||
* `--cap-add=NET_ADMIN` is required for fail2ban to modify iptables
|
* `--cap-add=NET_ADMIN` is required for fail2ban to modify iptables
|
||||||
* After setup, navigate to `https://yourdomain.url` to access the default homepage (http access through port 80 is disabled by default, you can enable it by editing the default site config at `/config/nginx/site-confs/default.conf`).
|
* If you need a dynamic dns provider, you can use the free provider duckdns.org where the `URL` will be `yoursubdomain.duckdns.org` and the `SUBDOMAINS` can be `www,ftp,cloud` with http validation, or `wildcard` with dns validation.
|
||||||
|
* After setup, navigate to `https://yourdomain.url` to access the default homepage (http access through port 80 is disabled by default, you can enable it by editing the default site config at `/config/nginx/site-confs/default`).
|
||||||
* Certs are checked nightly and if expiration is within 30 days, renewal is attempted. If your cert is about to expire in less than 30 days, check the logs under `/config/log/letsencrypt` to see why the renewals have been failing. It is recommended to input your e-mail in docker parameters so you receive expiration notices from Let's Encrypt in those circumstances.
|
* Certs are checked nightly and if expiration is within 30 days, renewal is attempted. If your cert is about to expire in less than 30 days, check the logs under `/config/log/letsencrypt` to see why the renewals have been failing. It is recommended to input your e-mail in docker parameters so you receive expiration notices from Let's Encrypt in those circumstances.
|
||||||
|
|
||||||
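To make the dns credential step above concrete, here is a hedged sketch for the cloudflare plugin. The key name `dns_cloudflare_api_token` comes from the certbot-dns-cloudflare plugin and the token value is a placeholder; other plugins use their own field names in their respective ini (or json) files under `/config/dns-conf`.

```bash
# Host-side path shown; inside the container this is /config/dns-conf/cloudflare.ini.
# Pair this with -e VALIDATION=dns -e DNSPLUGIN=cloudflare on the container.
cat > /path/to/appdata/config/dns-conf/cloudflare.ini <<'EOF'
dns_cloudflare_api_token = 0123456789abcdef0123456789abcdef01234567
EOF
chmod 600 /path/to/appdata/config/dns-conf/cloudflare.ini
```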
### Security and password protection
|
### Security and password protection
|
||||||
|
|
||||||
* The container detects changes to url and subdomains, revokes existing certs and generates new ones during start.
|
* The container detects changes to url and subdomains, revokes existing certs and generates new ones during start.
|
||||||
* Per [RFC7919](https://datatracker.ietf.org/doc/html/rfc7919), the container is shipping [ffdhe4096](https://ssl-config.mozilla.org/ffdhe4096.txt) as the `dhparams.pem`.
|
* The container provides a pre-generated 4096-bit dhparams.pem (rotated weekly via [Jenkins job](https://ci.linuxserver.io/blue/organizations/jenkins/Xtras-Builders-Etc%2Fdhparams-uploader/activity)) for new instances, however you may generate your own by running `docker exec swag openssl dhparam -out /config/nginx/dhparams.pem 4096` WARNING: This takes a very long time
|
||||||
* If you'd like to password protect your sites, you can use htpasswd. Run the following command on your host to generate the htpasswd file `docker exec -it swag htpasswd -c /config/nginx/.htpasswd <username>`
|
* If you'd like to password protect your sites, you can use htpasswd. Run the following command on your host to generate the htpasswd file `docker exec -it swag htpasswd -c /config/nginx/.htpasswd <username>`
|
||||||
* You can add multiple user:pass to `.htpasswd`. For the first user, use the above command, for others, use the above command without the `-c` flag, as it will force deletion of the existing `.htpasswd` and creation of a new one
|
* You can add multiple user:pass to `.htpasswd`. For the first user, use the above command, for others, use the above command without the `-c` flag, as it will force deletion of the existing `.htpasswd` and creation of a new one
|
||||||
* You can also use ldap auth for security and access control. A sample, user configurable ldap.conf is provided, and it requires the separate image [linuxserver/ldap-auth](https://hub.docker.com/r/linuxserver/ldap-auth/) to communicate with an ldap server.
|
* You can also use ldap auth for security and access control. A sample, user configurable ldap.conf is provided, and it requires the separate image [linuxserver/ldap-auth](https://hub.docker.com/r/linuxserver/ldap-auth/) to communicate with an ldap server.
|
||||||
|
|
||||||
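As a quick sketch of the htpasswd workflow above (the usernames are hypothetical):

```bash
# The first user creates the file with -c; subsequent users are added without -c,
# because -c would delete the existing .htpasswd and start a new one.
docker exec -it swag htpasswd -c /config/nginx/.htpasswd alice
docker exec -it swag htpasswd /config/nginx/.htpasswd bob
```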
### Site config and reverse proxy
|
### Site config and reverse proxy
|
||||||
|
* The default site config resides at `/config/nginx/site-confs/default`. Feel free to modify this file, and you can add other conf files to this directory. However, if you delete the `default` file, a new default will be created on container start.
|
||||||
* The default site config resides at `/config/nginx/site-confs/default.conf`. Feel free to modify this file, and you can add other conf files to this directory. However, if you delete the `default` file, a new default will be created on container start.
|
|
||||||
* Preset reverse proxy config files are added for popular apps. See the `README.md` file under `/config/nginx/proxy_confs` for instructions on how to enable them. The preset confs reside in and get imported from [this repo](https://github.com/linuxserver/reverse-proxy-confs).
|
* Preset reverse proxy config files are added for popular apps. See the `README.md` file under `/config/nginx/proxy_confs` for instructions on how to enable them. The preset confs reside in and get imported from [this repo](https://github.com/linuxserver/reverse-proxy-confs).
|
||||||
* If you wish to hide your site from search engine crawlers, you may find it useful to add this configuration line to your site config, within the server block, above the line where ssl.conf is included
|
* If you wish to hide your site from search engine crawlers, you may find it useful to add this configuration line to your site config, within the server block, above the line where ssl.conf is included
|
||||||
`add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive";`
|
`add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive";`
|
||||||
This will *ask* Google et al not to index and list your site. Be careful with this, as you will eventually be de-listed if you leave this line in on a site you wish to be present on search engines
|
This will *ask* Google et al not to index and list your site. Be careful with this, as you will eventually be de-listed if you leave this line in on a site you wish to be present on search engines
|
||||||
* If you wish to redirect http to https, you must expose port 80
|
* If you wish to redirect http to https, you must expose port 80
|
||||||
|
|
||||||
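As an illustration of enabling one of the preset proxy confs mentioned above, the samples are typically activated by copying them without the `.sample` suffix and restarting the container. The app name below is hypothetical, and the folder may appear as `proxy-confs` or `proxy_confs` depending on the image version:

```bash
cp /path/to/appdata/config/nginx/proxy-confs/nextcloud.subdomain.conf.sample \
   /path/to/appdata/config/nginx/proxy-confs/nextcloud.subdomain.conf
docker restart swag
```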
### Using certs in other containers
|
### Using certs in other containers
|
||||||
|
|
||||||
* This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc.
|
* This container includes auto-generated pfx and private-fullchain-bundle pem certs that are needed by other apps like Emby and Znc.
|
||||||
* To use these certs in other containers, do either of the following:
|
* To use these certs in other containers, do either of the following:
|
||||||
1. *(Easier)* Mount the container's config folder in other containers (ie. `-v /path-to-swag-config:/swag-ssl`) and in the other containers, use the cert location `/swag-ssl/keys/letsencrypt/`
|
1. *(Easier)* Mount the container's config folder in other containers (ie. `-v /path-to-le-config:/le-ssl`) and in the other containers, use the cert location `/le-ssl/keys/letsencrypt/`
|
||||||
2. *(More secure)* Mount the SWAG folder `etc` that resides under `/config` in other containers (ie. `-v /path-to-swag-config/etc:/swag-ssl`) and in the other containers, use the cert location `/swag-ssl/letsencrypt/live/<your.domain.url>/` (This is more secure because the first method shares the entire SWAG config folder with other containers, including the www files, whereas the second method only shares the ssl certs)
|
2. *(More secure)* Mount the SWAG folder `etc` that resides under `/config` in other containers (ie. `-v /path-to-le-config/etc:/le-ssl`) and in the other containers, use the cert location `/le-ssl/letsencrypt/live/<your.domain.url>/` (This is more secure because the first method shares the entire SWAG config folder with other containers, including the www files, whereas the second method only shares the ssl certs)
|
||||||
* These certs include:
|
* These certs include:
|
||||||
1. `cert.pem`, `chain.pem`, `fullchain.pem` and `privkey.pem`, which are generated by Certbot and used by nginx and various other apps
|
1. `cert.pem`, `chain.pem`, `fullchain.pem` and `privkey.pem`, which are generated by Let's Encrypt and used by nginx and various other apps
|
||||||
2. `privkey.pfx`, a format supported by Microsoft and commonly used by dotnet apps such as Emby Server (no password)
|
2. `privkey.pfx`, a format supported by Microsoft and commonly used by dotnet apps such as Emby Server (no password)
|
||||||
3. `priv-fullchain-bundle.pem`, a pem cert that bundles the private key and the fullchain, used by apps like ZNC
|
3. `priv-fullchain-bundle.pem`, a pem cert that bundles the private key and the fullchain, used by apps like ZNC
|
||||||
|
|
||||||
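A hedged sketch of option 2 above, mounting only the `etc` folder read-only into another container (the ZNC container name and image are just examples):

```bash
docker run -d \
  --name=znc \
  -v /path-to-swag-config/etc:/swag-ssl:ro \
  linuxserver/znc
# Inside that container the certs are then available under
# /swag-ssl/letsencrypt/live/<your.domain.url>/
```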
### Using fail2ban
|
### Using fail2ban
|
||||||
|
* This container includes fail2ban set up with 3 jails by default:
|
||||||
* This container includes fail2ban set up with 5 jails by default:
|
|
||||||
1. nginx-http-auth
|
1. nginx-http-auth
|
||||||
2. nginx-badbots
|
2. nginx-badbots
|
||||||
3. nginx-botsearch
|
3. nginx-botsearch
|
||||||
4. nginx-deny
|
|
||||||
5. nginx-unauthorized
|
|
||||||
* To enable or disable other jails, modify the file `/config/fail2ban/jail.local`
|
* To enable or disable other jails, modify the file `/config/fail2ban/jail.local`
|
||||||
* To modify filters and actions, instead of editing the `.conf` files, create `.local` files with the same name and edit those because .conf files get overwritten when the actions and filters are updated. `.local` files will append whatever's in the `.conf` files (ie. `nginx-http-auth.conf` --> `nginx-http-auth.local`)
|
* To modify filters and actions, instead of editing the `.conf` files, create `.local` files with the same name and edit those because .conf files get overwritten when the actions and filters are updated. `.local` files will append whatever's in the `.conf` files (ie. `nginx-http-auth.conf` --> `nginx-http-auth.local`)
|
||||||
* You can check which jails are active via `docker exec -it swag fail2ban-client status`
|
* You can check which jails are active via `docker exec -it swag fail2ban-client status`
|
||||||
* You can check the status of a specific jail via `docker exec -it swag fail2ban-client status <jail name>`
|
* You can check the status of a specific jail via `docker exec -it swag fail2ban-client status <jail name>`
|
||||||
* You can unban an IP via `docker exec -it swag fail2ban-client set <jail name> unbanip <IP>`
|
* You can unban an IP via `docker exec -it swag fail2ban-client set <jail name> unbanip <IP>`
|
||||||
* A list of commands can be found here: <https://www.fail2ban.org/wiki/index.php/Commands>
|
* A list of commands can be found here: https://www.fail2ban.org/wiki/index.php/Commands
|
||||||
|
|
||||||
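To make the `.local` override pattern above concrete, here is a hedged example that tightens one of the default jails; `bantime`, `findtime` and `maxretry` are standard fail2ban options and the values are placeholders:

```bash
cat >> /path/to/appdata/config/fail2ban/jail.local <<'EOF'

[nginx-badbots]
enabled  = true
bantime  = 86400
findtime = 600
maxretry = 2
EOF
docker restart swag
```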
### Updating configs
|
app_setup_nginx_reverse_proxy_snippet: false
|
||||||
|
app_setup_nginx_reverse_proxy_block: ""
|
||||||
* This container creates a number of configs for nginx, proxy samples, etc.
|
|
||||||
* Config updates are noted in the changelog but not automatically applied to your files.
|
|
||||||
* If you have modified a file with noted changes in the changelog:
|
|
||||||
1. Keep your existing configs as is (not broken, don't fix)
|
|
||||||
2. Review our repository commits and apply the new changes yourself
|
|
||||||
3. Delete the modified config file with listed updates, restart the container, reapply your changes
|
|
||||||
* If you have NOT modified a file with noted changes in the changelog:
|
|
||||||
1. Delete the config file with listed updates, restart the container
|
|
||||||
* Proxy sample updates are not listed in the changelog. See the changes here: [https://github.com/linuxserver/reverse-proxy-confs/commits/master](https://github.com/linuxserver/reverse-proxy-confs/commits/master)
|
|
||||||
* Proxy sample files WILL be updated, however your renamed (enabled) proxy files will not.
|
|
||||||
* You can check the new sample and adjust your active config as needed.
|
|
||||||
|
|
||||||
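For the unmodified-file case above, the simplest route is to move the listed config aside and restart, letting the container write a fresh copy on start. A sketch, assuming `ssl.conf` was the file listed in the changelog:

```bash
mv /path/to/appdata/config/nginx/ssl.conf /path/to/appdata/config/nginx/ssl.conf.bak
docker restart swag
```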
### Migration from the old `linuxserver/letsencrypt` image
|
|
||||||
|
|
||||||
Please follow the instructions [on this blog post](https://www.linuxserver.io/blog/2020-08-21-introducing-swag#migrate).
|
|
||||||
|
|
||||||
# changelog
|
# changelog
|
||||||
changelogs:
|
changelogs:
|
||||||
- { date: "25.03.23:", desc: "Fix renewal post hook." }
|
- { date: "03.08.20:", desc: "Initial release." }
|
||||||
- { date: "10.03.23:", desc: "Cleanup unused csr and keys folders. See [certbot 2.3.0 release notes](https://github.com/certbot/certbot/releases/tag/v2.3.0)." }
|
|
||||||
- { date: "09.03.23:", desc: "Add Google Domains DNS support, `google-domains`." }
|
|
||||||
- { date: "02.03.23:", desc: "Set permissions on crontabs during init." }
|
|
||||||
- { date: "09.02.23:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) proxy.conf, authelia-location.conf and authelia-server.conf - Add Authentik configs, update Authelia configs." }
|
|
||||||
- { date: "06.02.23:", desc: "Add porkbun support back in." }
|
|
||||||
- { date: "21.01.23:", desc: "Unpin certbot version (allow certbot 2.x). !!BREAKING CHANGE!! We are temporarily removing the certbot porkbun plugin until a new version is released that is compatible with certbot 2.x." }
|
|
||||||
- { date: "20.01.23:", desc: "Rebase to alpine 3.17 with php8.1." }
|
|
||||||
- { date: "16.01.23:", desc: "Remove nchan module because it keeps causing crashes." }
|
|
||||||
- { date: "08.12.22:", desc: "Revamp certbot init."}
|
|
||||||
- { date: "03.12.22:", desc: "Remove defunct cloudxns plugin."}
|
|
||||||
- { date: "22.11.22:", desc: "Pin acme to the same version as certbot."}
|
|
||||||
- { date: "22.11.22:", desc: "Pin certbot to 1.32.0 until plugin compatibility improves."}
|
|
||||||
- { date: "05.11.22:", desc: "Update acmedns plugin handling."}
|
|
||||||
- { date: "06.10.22:", desc: "Switch to certbot-dns-duckdns. Update cpanel and gandi dns plugin handling. Minor adjustments to init logic." }
|
|
||||||
- { date: "05.10.22:", desc: "Use certbot file hooks instead of command line hooks" }
|
|
||||||
- { date: "04.10.22:", desc: "Add godaddy and porkbun dns plugins." }
|
|
||||||
- { date: "03.10.22:", desc: "Add default_server back to default site conf's https listen." }
|
|
||||||
- { date: "22.09.22:", desc: "Added support for DO DNS validation." }
|
|
||||||
- { date: "22.09.22:", desc: "Added certbot-dns-acmedns for DNS01 validation." }
|
|
||||||
- { date: "20.08.22:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Rebasing to alpine 3.15 with php8. Restructure nginx configs ([see changes announcement](https://info.linuxserver.io/issues/2022-08-20-nginx-base))." }
|
|
||||||
- { date: "10.08.22:", desc: "Added support for Dynu DNS validation." }
|
|
||||||
- { date: "18.05.22:", desc: "Added support for Azure DNS validation." }
|
|
||||||
- { date: "09.04.22:", desc: "Added certbot-dns-loopia for DNS01 validation." }
|
|
||||||
- { date: "05.04.22:", desc: "Added support for standalone DNS validation." }
|
|
||||||
- { date: "28.03.22:", desc: "created a logfile for fail2ban nginx-unauthorized in /etc/cont-init.d/50-config" }
|
|
||||||
- { date: "09.01.22:", desc: "Added a fail2ban jail for nginx unauthorized" }
|
|
||||||
- { date: "21.12.21:", desc: "Fixed issue with iptables not working as expected" }
|
|
||||||
- { date: "30.11.21:", desc: "Move maxmind to a [new mod](https://github.com/linuxserver/docker-mods/tree/swag-maxmind)" }
|
|
||||||
- { date: "22.11.21:", desc: "Added support for Infomaniak DNS for certificate generation." }
|
|
||||||
- { date: "20.11.21:", desc: "Added support for dnspod validation." }
|
|
||||||
- { date: "15.11.21:", desc: "Added support for deSEC DNS for wildcard certificate generation." }
|
|
||||||
- { date: "26.10.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) proxy.conf - Mitigate <https://httpoxy.org/> vulnerabilities. Ref: <https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx#Defeating-the-Attack-using-NGINX-and-NGINX-Plus>" }
|
|
||||||
- { date: "23.10.21:", desc: "Fix Hurricane Electric (HE) DNS validation." }
|
|
||||||
- { date: "12.10.21:", desc: "Fix deprecated LE root cert check to fix failures when using `STAGING=true`, and failures in revoking." }
|
|
||||||
- { date: "06.10.21:", desc: "Added support for Hurricane Electric (HE) DNS validation. Added lxml build deps." }
|
|
||||||
- { date: "01.10.21:", desc: "Check if the cert uses the old LE root cert, revoke and regenerate if necessary. [Here's more info](https://twitter.com/letsencrypt/status/1443621997288767491) on LE root cert expiration" }
|
|
||||||
- { date: "19.09.21:", desc: "Add an optional header to opt out of Google FLoC in `ssl.conf`." }
|
|
||||||
- { date: "17.09.21:", desc: "Mark `SUBDOMAINS` var as optional." }
|
|
||||||
- { date: "01.08.21:", desc: "Add support for ionos dns validation." }
|
|
||||||
- { date: "15.07.21:", desc: "Fix libmaxminddb issue due to upstream change." }
|
|
||||||
- { date: "07.07.21:", desc: "Rebase to alpine 3.14." }
|
|
||||||
- { date: "24.06.21:", desc: "Update default nginx conf folder." }
|
|
||||||
- { date: "28.05.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) authelia-server.conf - Use `resolver.conf` and patch for `CVE-2021-32637`." }
|
|
||||||
- { date: "20.05.21:", desc: "Modify resolver.conf generation to detect and ignore ipv6." }
|
|
||||||
- { date: "14.05.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, ssl.conf, proxy.conf, and the default site-conf - Rework nginx.conf to be inline with alpine upstream and relocate lines from other files. Use linuxserver.io wheel index for pip packages. Switch to using [ffdhe4096](https://ssl-config.mozilla.org/ffdhe4096.txt) for `dhparams.pem` per [RFC7919](https://datatracker.ietf.org/doc/html/rfc7919). Added `worker_processes.conf`, which sets the number of nginx workers, and `resolver.conf`, which sets the dns resolver. Both conf files are auto-generated only on first start and can be user modified later." }
|
|
||||||
- { date: "21.04.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) authelia-server.conf and authelia-location.conf - Add remote name/email headers and pass http method." }
|
|
||||||
- { date: "12.04.21:", desc: "Add php7-gmp and php7-pecl-mailparse." }
|
|
||||||
- { date: "12.04.21:", desc: "Add support for vultr dns validation." }
|
|
||||||
- { date: "14.03.21:", desc: "Add support for directadmin dns validation." }
|
|
||||||
- { date: "12.02.21:", desc: "Clean up rust/cargo cache, which ballooned the image size in the last couple of builds." }
|
|
||||||
- { date: "10.02.21:", desc: "Fix aliyun, domeneshop, inwx and transip dns confs for existing users." }
|
|
||||||
- { date: "09.02.21:", desc: "Rebasing to alpine 3.13. Add nginx mods brotli and dav-ext. Remove nginx mods lua and lua-upstream (due to regression over the last couple of years)." }
|
|
||||||
- { date: "26.01.21:", desc: "Add support for hetzner dns validation." }
|
|
||||||
- { date: "20.01.21:", desc: "Add check for ZeroSSL EAB retrieval." }
|
|
||||||
- { date: "08.01.21:", desc: "Add support for getting certs from [ZeroSSL](https://zerossl.com/) via optional `CERTPROVIDER` env var. Update aliyun, domeneshop, inwx and transip dns plugins with the new plugin names. Hide `donoteditthisfile.conf` because users were editing it despite its name. Suppress harmless error when no proxy confs are enabled." }
|
|
||||||
- { date: "03.01.21:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) /config/nginx/site-confs/default.conf - Add helper pages to aid troubleshooting" }
|
|
||||||
- { date: "10.12.20:", desc: "Add support for njalla dns validation" }
|
|
||||||
- { date: "09.12.20:", desc: "Check for template/conf updates and notify in the log. Add support for gehirn and sakuracloud dns validation." }
|
|
||||||
- { date: "01.11.20:", desc: "Add support for netcup dns validation" }
|
|
||||||
- { date: "29.10.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) ssl.conf - Add frame-ancestors to Content-Security-Policy." }
|
|
||||||
- { date: "04.10.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and ssl.conf - Minor cleanups and reordering." }
|
|
||||||
- { date: "20.09.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf - Added geoip2 configs. Added MAXMINDDB_LICENSE_KEY variable to readme."}
|
|
||||||
- { date: "08.09.20:", desc: "Add php7-xsl." }
|
|
||||||
- { date: "01.09.20:", desc: "[Existing users should update:](https://github.com/linuxserver/docker-swag/blob/master/README.md#updating-configs) nginx.conf, proxy.conf, and various proxy samples - Global websockets across all configs." }
|
|
||||||
- { date: "03.08.20:", desc: "Initial release." }
|
|
||||||
8
root/app/duckdns-txt
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
. /config/donoteditthisfile.conf
|
||||||
|
|
||||||
|
curl https://www.duckdns.org/update?domains=${CERTBOT_DOMAIN}\&token=${ORIGDUCKDNSTOKEN}\&txt=${CERTBOT_VALIDATION}
|
||||||
|
|
||||||
|
echo "sleeping 60"
|
||||||
|
sleep 60
|
||||||
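This script reads like a certbot manual auth hook for duckdns: certbot exports `CERTBOT_DOMAIN` and `CERTBOT_VALIDATION` to the hook, which publishes the TXT record through the duckdns API and then waits for propagation. Purely as an illustration (the exact wiring inside the image is not shown in this diff), such a hook would be passed to certbot roughly like this:

```bash
certbot certonly --manual --preferred-challenges dns \
  --manual-auth-hook /app/duckdns-txt \
  -d yoursubdomain.duckdns.org -d '*.yoursubdomain.duckdns.org'
```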
@ -1,9 +1,27 @@
|
|||||||
#!/usr/bin/with-contenv bash
|
#!/usr/bin/with-contenv bash
|
||||||
# shellcheck shell=bash
|
|
||||||
|
. /config/donoteditthisfile.conf
|
||||||
|
|
||||||
echo "<------------------------------------------------->"
|
echo "<------------------------------------------------->"
|
||||||
echo
|
echo
|
||||||
echo "<------------------------------------------------->"
|
echo "<------------------------------------------------->"
|
||||||
echo "cronjob running on $(date)"
|
echo "cronjob running on "$(date)
|
||||||
echo "Running certbot renew"
|
echo "Running certbot renew"
|
||||||
certbot renew --non-interactive
|
if [ "$ORIGVALIDATION" = "dns" ] || [ "$ORIGVALIDATION" = "duckdns" ]; then
|
||||||
|
certbot -n renew \
|
||||||
|
--post-hook "if ps aux | grep [n]ginx: > /dev/null; then s6-svc -h /var/run/s6/services/nginx; fi; \
|
||||||
|
cd /config/keys/letsencrypt && \
|
||||||
|
openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass: && \
|
||||||
|
sleep 1 && \
|
||||||
|
cat privkey.pem fullchain.pem > priv-fullchain-bundle.pem && \
|
||||||
|
chown -R abc:abc /config/etc/letsencrypt"
|
||||||
|
else
|
||||||
|
certbot -n renew \
|
||||||
|
--pre-hook "if ps aux | grep [n]ginx: > /dev/null; then s6-svc -d /var/run/s6/services/nginx; fi" \
|
||||||
|
--post-hook "if ps aux | grep 's6-supervise nginx' | grep -v grep > /dev/null; then s6-svc -u /var/run/s6/services/nginx; fi; \
|
||||||
|
cd /config/keys/letsencrypt && \
|
||||||
|
openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass: && \
|
||||||
|
sleep 1 && \
|
||||||
|
cat privkey.pem fullchain.pem > priv-fullchain-bundle.pem && \
|
||||||
|
chown -R abc:abc /config/etc/letsencrypt"
|
||||||
|
fi
|
||||||
|
|||||||
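Renewal normally happens through this nightly cron job; if you want to exercise the renewal path by hand without touching the real certificate, certbot's standard dry-run flag can be used against the running container (container name `swag` assumed):

```bash
docker exec swag certbot renew --dry-run
```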
11
root/defaults/authelia-location.conf
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
## Version 2020/05/31 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/authelia-location.conf
|
||||||
|
# Make sure that your authelia container is in the same user defined bridge network and is named authelia
|
||||||
|
# Make sure that the authelia configuration.yml has 'path: "authelia"' defined
|
||||||
|
|
||||||
|
auth_request /authelia/api/verify;
|
||||||
|
auth_request_set $target_url $scheme://$http_host$request_uri;
|
||||||
|
auth_request_set $user $upstream_http_remote_user;
|
||||||
|
auth_request_set $groups $upstream_http_remote_groups;
|
||||||
|
proxy_set_header Remote-User $user;
|
||||||
|
proxy_set_header Remote-Groups $groups;
|
||||||
|
error_page 401 =302 https://$http_host/authelia/?rd=$target_url;
|
||||||
48
root/defaults/authelia-server.conf
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
## Version 2020/05/31 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/authelia-server.conf
|
||||||
|
# Make sure that your authelia container is in the same user defined bridge network and is named authelia
|
||||||
|
|
||||||
|
location ^~ /authelia {
|
||||||
|
include /config/nginx/proxy.conf;
|
||||||
|
resolver 127.0.0.11 valid=30s;
|
||||||
|
set $upstream_authelia authelia;
|
||||||
|
proxy_pass http://$upstream_authelia:9091;
|
||||||
|
}
|
||||||
|
|
||||||
|
location = /authelia/api/verify {
|
||||||
|
internal;
|
||||||
|
resolver 127.0.0.11 valid=30s;
|
||||||
|
set $upstream_authelia authelia;
|
||||||
|
proxy_pass_request_body off;
|
||||||
|
proxy_pass http://$upstream_authelia:9091;
|
||||||
|
proxy_set_header Content-Length "";
|
||||||
|
|
||||||
|
# Timeout if the real server is dead
|
||||||
|
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
|
||||||
|
|
||||||
|
# [REQUIRED] Needed by Authelia to check authorizations of the resource.
|
||||||
|
# Provide either X-Original-URL and X-Forwarded-Proto or
|
||||||
|
# X-Forwarded-Proto, X-Forwarded-Host and X-Forwarded-Uri or both.
|
||||||
|
# Those headers will be used by Authelia to deduce the target url of the user.
|
||||||
|
# Basic Proxy Config
|
||||||
|
client_body_buffer_size 128k;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Forwarded-Host $http_host;
|
||||||
|
proxy_set_header X-Forwarded-Uri $request_uri;
|
||||||
|
proxy_set_header X-Forwarded-Ssl on;
|
||||||
|
proxy_redirect http:// $scheme://;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Connection "";
|
||||||
|
proxy_cache_bypass $cookie_session;
|
||||||
|
proxy_no_cache $cookie_session;
|
||||||
|
proxy_buffers 4 32k;
|
||||||
|
|
||||||
|
# Advanced Proxy Config
|
||||||
|
send_timeout 5m;
|
||||||
|
proxy_read_timeout 240;
|
||||||
|
proxy_send_timeout 240;
|
||||||
|
proxy_connect_timeout 240;
|
||||||
|
}
|
||||||
147 root/defaults/default Normal file
@ -0,0 +1,147 @@
|
|||||||
|
## Version 2020/05/23 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/default
|
||||||
|
|
||||||
|
# redirect all traffic to https
|
||||||
|
server {
|
||||||
|
listen 80 default_server;
|
||||||
|
listen [::]:80 default_server;
|
||||||
|
server_name _;
|
||||||
|
return 301 https://$host$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
# main server block
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2 default_server;
|
||||||
|
listen [::]:443 ssl http2 default_server;
|
||||||
|
|
||||||
|
root /config/www;
|
||||||
|
index index.html index.htm index.php;
|
||||||
|
|
||||||
|
server_name _;
|
||||||
|
|
||||||
|
# enable subfolder method reverse proxy confs
|
||||||
|
include /config/nginx/proxy-confs/*.subfolder.conf;
|
||||||
|
|
||||||
|
# all ssl related config moved to ssl.conf
|
||||||
|
include /config/nginx/ssl.conf;
|
||||||
|
|
||||||
|
# enable for ldap auth
|
||||||
|
#include /config/nginx/ldap.conf;
|
||||||
|
|
||||||
|
# enable for Authelia
|
||||||
|
#include /config/nginx/authelia-server.conf;
|
||||||
|
|
||||||
|
client_max_body_size 0;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
try_files $uri $uri/ /index.html /index.php?$args =404;
|
||||||
|
}
|
||||||
|
|
||||||
|
location ~ \.php$ {
|
||||||
|
fastcgi_split_path_info ^(.+\.php)(/.+)$;
|
||||||
|
fastcgi_pass 127.0.0.1:9000;
|
||||||
|
fastcgi_index index.php;
|
||||||
|
include /etc/nginx/fastcgi_params;
|
||||||
|
}
|
||||||
|
|
||||||
|
# sample reverse proxy config for password protected couchpotato running at IP 192.168.1.50 port 5050 with base url "cp"
|
||||||
|
# notice this is within the same server block as the base
|
||||||
|
# don't forget to generate the .htpasswd file as described on docker hub
|
||||||
|
# location ^~ /cp {
|
||||||
|
# auth_basic "Restricted";
|
||||||
|
# auth_basic_user_file /config/nginx/.htpasswd;
|
||||||
|
# include /config/nginx/proxy.conf;
|
||||||
|
# proxy_pass http://192.168.1.50:5050/cp;
|
||||||
|
# }
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
# sample reverse proxy config without url base, but as a subdomain "cp", ip and port same as above
|
||||||
|
# notice this is a new server block, you need a new server block for each subdomain
|
||||||
|
#server {
|
||||||
|
# listen 443 ssl http2;
|
||||||
|
# listen [::]:443 ssl http2;
|
||||||
|
#
|
||||||
|
# root /config/www;
|
||||||
|
# index index.html index.htm index.php;
|
||||||
|
#
|
||||||
|
# server_name cp.*;
|
||||||
|
#
|
||||||
|
# include /config/nginx/ssl.conf;
|
||||||
|
#
|
||||||
|
# client_max_body_size 0;
|
||||||
|
#
|
||||||
|
# location / {
|
||||||
|
# auth_basic "Restricted";
|
||||||
|
# auth_basic_user_file /config/nginx/.htpasswd;
|
||||||
|
# include /config/nginx/proxy.conf;
|
||||||
|
# proxy_pass http://192.168.1.50:5050;
|
||||||
|
# }
|
||||||
|
#}
|
||||||
|
|
||||||
|
# sample reverse proxy config for "heimdall" via subdomain, with ldap authentication
|
||||||
|
# ldap-auth container has to be running and the /config/nginx/ldap.conf file should be filled with ldap info
|
||||||
|
# notice this is a new server block, you need a new server block for each subdomain
|
||||||
|
#server {
|
||||||
|
# listen 443 ssl http2;
|
||||||
|
# listen [::]:443 ssl http2;
|
||||||
|
#
|
||||||
|
# root /config/www;
|
||||||
|
# index index.html index.htm index.php;
|
||||||
|
#
|
||||||
|
# server_name heimdall.*;
|
||||||
|
#
|
||||||
|
# include /config/nginx/ssl.conf;
|
||||||
|
#
|
||||||
|
# include /config/nginx/ldap.conf;
|
||||||
|
#
|
||||||
|
# client_max_body_size 0;
|
||||||
|
#
|
||||||
|
# location / {
|
||||||
|
# # the next two lines will enable ldap auth along with the included ldap.conf in the server block
|
||||||
|
# auth_request /auth;
|
||||||
|
# error_page 401 =200 /ldaplogin;
|
||||||
|
#
|
||||||
|
# include /config/nginx/proxy.conf;
|
||||||
|
# resolver 127.0.0.11 valid=30s;
|
||||||
|
# set $upstream_app heimdall;
|
||||||
|
# set $upstream_port 443;
|
||||||
|
# set $upstream_proto https;
|
||||||
|
# proxy_pass $upstream_proto://$upstream_app:$upstream_port;
|
||||||
|
# }
|
||||||
|
#}
|
||||||
|
|
||||||
|
# sample reverse proxy config for "heimdall" via subdomain, with Authelia
|
||||||
|
# Authelia container has to be running in the same user defined bridge network, with container name "authelia", and with 'path: "authelia"' set in its configuration.yml
|
||||||
|
# notice this is a new server block, you need a new server block for each subdomain
|
||||||
|
#server {
|
||||||
|
# listen 443 ssl http2;
|
||||||
|
# listen [::]:443 ssl http2;
|
||||||
|
#
|
||||||
|
# root /config/www;
|
||||||
|
# index index.html index.htm index.php;
|
||||||
|
#
|
||||||
|
# server_name heimdall.*;
|
||||||
|
#
|
||||||
|
# include /config/nginx/ssl.conf;
|
||||||
|
#
|
||||||
|
# include /config/nginx/authelia-server.conf;
|
||||||
|
#
|
||||||
|
# client_max_body_size 0;
|
||||||
|
#
|
||||||
|
# location / {
|
||||||
|
# # the next line will enable Authelia along with the included authelia-server.conf in the server block
|
||||||
|
# include /config/nginx/authelia-location.conf;
|
||||||
|
#
|
||||||
|
# include /config/nginx/proxy.conf;
|
||||||
|
# resolver 127.0.0.11 valid=30s;
|
||||||
|
# set $upstream_app heimdall;
|
||||||
|
# set $upstream_port 443;
|
||||||
|
# set $upstream_proto https;
|
||||||
|
# proxy_pass $upstream_proto://$upstream_app:$upstream_port;
|
||||||
|
# }
|
||||||
|
#}
|
||||||
|
|
||||||
|
# enable subdomain method reverse proxy confs
|
||||||
|
include /config/nginx/proxy-confs/*.subdomain.conf;
|
||||||
|
# enable proxy cache for auth
|
||||||
|
proxy_cache_path cache/ keys_zone=auth_cache:10m;
|
||||||
@ -1,9 +0,0 @@
{
    "yourdomain.com": {
        "username":"yourusername",
        "password":"yourpassword",
        "fulldomain":"<guid>.acme.yourdomain.com",
        "subdomain":"<guid>",
        "allowfrom":[]
    }
}
@ -1,5 +0,0 @@
# See https://pypi.org/project/certbot-dns-acmedns/
# https://github.com/joohoi/acme-dns
#
dns_acmedns_api_url = http://your-acme-dns-server.example.com/
dns_acmedns_registration_file = /config/dns-conf/acmedns-registration.json
@ -2,5 +2,5 @@
# https://ram.console.aliyun.com/
# And ensure your RAM account has AliyunDNSFullAccess permission.

-dns_aliyun_access_key = 12345678
+certbot_dns_aliyun:dns_aliyun_access_key = 12345678
-dns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef
+certbot_dns_aliyun:dns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef
@ -1,26 +0,0 @@
|
|||||||
# Instructions: https://certbot-dns-azure.readthedocs.io/en/latest/
|
|
||||||
# Replace with your values
|
|
||||||
# dns_azure_environment can be one of the following: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud
|
|
||||||
# Service Principal with Client Secret
|
|
||||||
dns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5
|
|
||||||
dns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9
|
|
||||||
dns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7
|
|
||||||
dns_azure_environment = "AzurePublicCloud"
|
|
||||||
dns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1
|
|
||||||
|
|
||||||
|
|
||||||
# Service Principal with Certificate
|
|
||||||
#dns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5
|
|
||||||
#dns_azure_sp_certificate_path = /path/to/certificate.pem
|
|
||||||
#dns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7
|
|
||||||
#dns_azure_environment = "AzurePublicCloud"
|
|
||||||
#dns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1
|
|
||||||
|
|
||||||
# Azure Managed Identity
|
|
||||||
#dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5
|
|
||||||
#dns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1
|
|
||||||
|
|
||||||
# System Assigned Azure Managed Identity
|
|
||||||
#dns_azure_msi_system_assigned = true
|
|
||||||
#dns_azure_environment = "AzurePublicCloud"
|
|
||||||
#dns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1
|
|
||||||
@ -6,4 +6,4 @@ dns_cloudflare_email = cloudflare@example.com
dns_cloudflare_api_key = 0123456789abcdef0123456789abcdef01234567

# With token (comment out both lines above and uncomment below):
#dns_cloudflare_api_token = 0123456789abcdef0123456789abcdef01234567
4 root/defaults/dns-conf/cloudxns.ini Normal file
@ -0,0 +1,4 @@
# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-cloudxns/certbot_dns_cloudxns/__init__.py#L20
# Replace with your values
dns_cloudxns_api_key = 1234567890abcdef
dns_cloudxns_secret_key = 1122334455667788
@ -1,15 +1,6 @@
# Instructions: https://github.com/badjware/certbot-dns-cpanel#credentials
-# The url cPanel url
+# Replace with your values
# include the scheme and the port number (usually 2083 for https)
-cpanel_url = https://cpanel.exemple.com:2083
-
-# The cPanel username
-cpanel_username = user
-
-# The cPanel password
-cpanel_password = hunter2
-
-# The cPanel API Token
-cpanel_token = EUTQ793EY7MIRX4EMXXXXXXXXXXOX4JF
-
-# You only need to configure API Token or Password. If you supply both, the API Token will be used
+certbot_dns_cpanel:cpanel_url = https://cpanel.example.com:2083
+certbot_dns_cpanel:cpanel_username = username
+certbot_dns_cpanel:cpanel_password = 1234567890abcdef
@ -1,4 +0,0 @@
# Instructions: https://pypi.org/project/certbot-dns-desec/
# Replace with your Desec V1 API Token
dns_desec_token=YOUR_TOKEN_HERE
dns_desec_endpoint=https://desec.io/api/v1/
@ -1,21 +0,0 @@
|
|||||||
# Instructions: https://github.com/cybercinch/certbot-dns-directadmin/blob/master/certbot_dns_directadmin/__init__.py
|
|
||||||
|
|
||||||
# It is recommended to create a login key in the DirectAdmin control panel to be used as value for directadmin_password.
|
|
||||||
# Instructions on how to create such key can be found at https://help.directadmin.com/item.php?id=523.
|
|
||||||
#
|
|
||||||
# Make sure to grant the following permissions:
|
|
||||||
# - CMD_API_LOGIN_TEST
|
|
||||||
# - CMD_API_DNS_CONTROL
|
|
||||||
# - CMD_API_SHOW_DOMAINS
|
|
||||||
#
|
|
||||||
# Username and password can also be used in case your DirectAdmin instance has no support for login keys.
|
|
||||||
|
|
||||||
# The DirectAdmin Server url
|
|
||||||
# include the scheme and the port number (Normally 2222)
|
|
||||||
dns_directadmin_url = https://my.directadminserver.com:2222
|
|
||||||
|
|
||||||
# The DirectAdmin username
|
|
||||||
dns_directadmin_username = username
|
|
||||||
|
|
||||||
# The DirectAdmin password
|
|
||||||
dns_directadmin_password = aSuperStrongPassword
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
# Instructions: https://github.com/SkyLothar/certbot-dns-dnspod#create-a-credentials-file
|
|
||||||
# Obtain your own DNSPod API token at DNSPod console: https://console.dnspod.cn/account/token/token
|
|
||||||
# Replace with your own email, id and token
|
|
||||||
dns_dnspod_email = "me@example.com"
|
|
||||||
dns_dnspod_api_token = "12345,1234567890abcdef1234567890abcdef"
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Instructions: https://github.com/georgeto/certbot-dns-do/blob/master/certbot_dns_do/__init__.py#L32
|
|
||||||
# Replace with your values
|
|
||||||
dns_do_api_token = YOUR_DO_LETSENCRYPT_API_KEY
|
|
||||||
@ -1,4 +1,4 @@
|
|||||||
# Instructions: https://github.com/domeneshop/certbot-dns-domeneshop#credentials
|
# Instructions: https://github.com/domeneshop/certbot-dns-domeneshop#credentials
|
||||||
# Replace with your values
|
# Replace with your values
|
||||||
dns_domeneshop_client_token=1234567890abcdef
|
certbot_dns_domeneshop:dns_domeneshop_client_token=1234567890abcdef
|
||||||
dns_domeneshop_client_secret=1234567890abcdefghijklmnopqrstuvxyz1234567890abcdefghijklmnopqrs
|
certbot_dns_domeneshop:dns_domeneshop_client_secret=1234567890abcdefghijklmnopqrstuvxyz1234567890abcdefghijklmnopqrs
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Instructions: https://github.com/infinityofspace/certbot_dns_duckdns#credentials-file-or-cli-parameters
|
|
||||||
# Replace with your API token from your duckdns account.
|
|
||||||
dns_duckdns_token=<your-duckdns-token>
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Instructions: https://github.com/bikram990/certbot-dns-dynu#configuration
|
|
||||||
# Replace with your API token from your dynu account.
|
|
||||||
dns_dynu_auth_token = AbCbASsd!@34
|
|
||||||
@ -1,7 +1,3 @@
|
|||||||
# Instructions: https://github.com/obynio/certbot-plugin-gandi#usage
|
# Instructions: https://github.com/obynio/certbot-plugin-gandi#usage
|
||||||
# Replace with your value
|
# Replace with your value
|
||||||
# live dns v5 api key
|
certbot_plugin_gandi:dns_api_key=APIKEY
|
||||||
dns_gandi_api_key=APIKEY
|
|
||||||
|
|
||||||
# optional organization id, remove it if not used
|
|
||||||
#dns_gandi_sharing_id=SHARINGID
|
|
||||||
|
|||||||
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://certbot-dns-gehirn.readthedocs.io/en/stable/
|
|
||||||
# Replace with your values
|
|
||||||
dns_gehirn_api_token = 00000000-0000-0000-0000-000000000000
|
|
||||||
dns_gehirn_api_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw
|
|
||||||
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://github.com/miigotu/certbot-dns-godaddy
|
|
||||||
# Replace with your values
|
|
||||||
dns_godaddy_secret = 0123456789abcdef0123456789abcdef01234567
|
|
||||||
dns_godaddy_key = abcdef0123456789abcdef01234567abcdef0123
|
|
||||||
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://github.com/aaomidi/certbot-dns-google-domains#credentials
|
|
||||||
# Replace with your value
|
|
||||||
dns_google_domains_access_token = abcdef
|
|
||||||
dns_google_domains_zone = example.com
|
|
||||||
@ -3,4 +3,4 @@
|
|||||||
"_comment": "Replace with your values",
|
"_comment": "Replace with your values",
|
||||||
"type": "service_account",
|
"type": "service_account",
|
||||||
"rest": "..."
|
"rest": "..."
|
||||||
}
|
}
|
||||||
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://github.com/TSaaristo/certbot-dns-he#example-usage
|
|
||||||
# Replace with your values
|
|
||||||
dns_he_user = Me
|
|
||||||
dns_he_pass = my HE password
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Instructions: https://github.com/ctrlaltcoop/certbot-dns-hetzner
|
|
||||||
# Replace with your values
|
|
||||||
dns_hetzner_api_token = nohnah4zoo9Kiejee9aGh0thoopee2sa
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Instructions: https://github.com/Infomaniak/certbot-dns-infomaniak#via-ini-file
|
|
||||||
# Replace with your values
|
|
||||||
dns_infomaniak_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
|
||||||
@ -1,6 +1,6 @@
|
|||||||
# Instructions: https://github.com/oGGy990/certbot-dns-inwx
|
# Instructions: https://github.com/oGGy990/certbot-dns-inwx
|
||||||
# Replace with your values
|
# Replace with your values
|
||||||
dns_inwx_url = https://api.domrobot.com/xmlrpc/
|
certbot_dns_inwx:dns_inwx_url = https://api.domrobot.com/xmlrpc/
|
||||||
dns_inwx_username = your_username
|
certbot_dns_inwx:dns_inwx_username = your_username
|
||||||
dns_inwx_password = your_password
|
certbot_dns_inwx:dns_inwx_password = your_password
|
||||||
dns_inwx_shared_secret = your_shared_secret optional
|
certbot_dns_inwx:dns_inwx_shared_secret = your_shared_secret optional
|
||||||
|
|||||||
@ -1,5 +0,0 @@
|
|||||||
# Instructions: https://github.com/helgeerbe/certbot-dns-ionos
|
|
||||||
# Replace with your values
|
|
||||||
dns_ionos_prefix = myapikeyprefix
|
|
||||||
dns_ionos_secret = verysecureapikeysecret
|
|
||||||
dns_ionos_endpoint = https://api.hosting.ionos.com
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
# Replace with your values
|
|
||||||
dns_loopia_user = user@loopiaapi
|
|
||||||
dns_loopia_password = passwordgoeshere
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
# Recommended PROPAGATION value in environment for netcup is 900
|
|
||||||
|
|
||||||
dns_netcup_customer_id = 123456
|
|
||||||
dns_netcup_api_key = 0123456789abcdef0123456789abcdef01234567
|
|
||||||
dns_netcup_api_password = abcdef0123456789abcdef01234567abcdef0123
|
|
||||||
@ -1,2 +0,0 @@
|
|||||||
# Generate your API token here: https://njal.la/settings/api/
|
|
||||||
dns_njalla_token=0000000000000000000000000000000000000000
|
|
||||||
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://github.com/infinityofspace/certbot_dns_porkbun
|
|
||||||
# Replace with your values
|
|
||||||
dns_porkbun_key=<your-porkbun-api-key>
|
|
||||||
dns_porkbun_secret=<your-porkbun-api-secret>
|
|
||||||
@ -1,5 +1,5 @@
# Instructions: https://github.com/certbot/certbot/blob/master/certbot-dns-route53/certbot_dns_route53/__init__.py#L18
# Replace with your values
[default]
-; aws_access_key_id=AKIAIOSFODNN7EXAMPLE
+aws_access_key_id=AKIAIOSFODNN7EXAMPLE
-; aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
@ -1,4 +0,0 @@
|
|||||||
# Instructions: https://certbot-dns-sakuracloud.readthedocs.io/en/stable/
|
|
||||||
# Replace with your values
|
|
||||||
dns_sakuracloud_api_token = 00000000-0000-0000-0000-000000000000
|
|
||||||
dns_sakuracloud_api_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw
|
|
||||||
@ -1,8 +0,0 @@
# Instructions: https://github.com/siilike/certbot-dns-standalone/blob/master/README.rst
# Make sure to expose UDP port 53 from your swag container:
# - for docker cli, add argument: `-p 53:53/udp`
# - for docker-compose, add the following line under ports: `- 53:53/udp`
# This file does not need to be changed:
# - no credentials are required
# - it's not used and only for informational purpose
# - prepare the correct DNS records as described in the plugin instructions instead
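As a rough illustration only (not part of this diff), publishing UDP 53 alongside the usual ports lets the standalone plugin answer the DNS-01 challenges itself; the container name, image tag and DNSPLUGIN value below are assumptions based on the comments above:

# Illustrative sketch -- expose UDP 53 for the standalone DNS plugin
# (container name, image tag, domain and DNSPLUGIN value are assumptions):
docker run -d \
  --name=swag \
  --cap-add=NET_ADMIN \
  -p 443:443 -p 80:80 -p 53:53/udp \
  -e URL=example.com \
  -e VALIDATION=dns \
  -e DNSPLUGIN=standalone \
  -v /path/to/appdata/config:/config \
  lscr.io/linuxserver/swag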
@ -1,30 +1,6 @@
# Instructions: https://readthedocs.org/projects/certbot-dns-transip/
-#
-# This DNS plugin can be used to generate SSL wildcard certificates via TransIP DNS TXT records
-#
-# Login with your TransIP account and go to My Account | API:
-# 1. API-settings: On
-#
-# 2. IP-address/ranges whitelist: Add a new authorized IP address (Swag Docker) to use the API
-#
-# 3. Generate a new Key Pair and copy the private key to a new transip.key file in the format:
-# -----BEGIN PRIVATE KEY-----
-# ...
-# -----END PRIVATE KEY-----
-#
-# 4. Convert the key to an RSA key with command:
-# openssl rsa -in transip.key -out /config/dns-conf/transip-rsa.key
-#
-# 5. Set permission
-# chmod 600 /config/dns-conf/transip-rsa.key
-#
-# 6. Replace <transip_username> below with your TransIP username
-#
-# 7. Create wildcard certificate with Swag environment variables:
-# SUBDOMAINS=wildcard
-# VALIDATION=dns
-# DNSPLUGIN=transip
-
-dns_transip_username = <transip_username>
-dns_transip_key_file = /config/dns-conf/transip-rsa.key
+# Convert the key to an RSA key (openssl rsa -in transip.key -out transip-rsa.key)
+# Place .key-file in the same directory as this file. Location "/config/dns-conf" is from within the container
+
+certbot_dns_transip:dns_transip_username = <transip_username>
+certbot_dns_transip:dns_transip_key_file = /config/dns-conf/transip-rsa.key
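An illustrative recap of steps 4-7 above as shell commands (not part of this diff; the host appdata path and domain are placeholders, the container paths follow the comments in the file):

# Illustrative sketch -- convert the TransIP key and request a wildcard cert:
openssl rsa -in transip.key -out /path/to/appdata/config/dns-conf/transip-rsa.key
chmod 600 /path/to/appdata/config/dns-conf/transip-rsa.key
docker run -d --name=swag \
  -e URL=example.com \
  -e SUBDOMAINS=wildcard \
  -e VALIDATION=dns \
  -e DNSPLUGIN=transip \
  -v /path/to/appdata/config:/config \
  lscr.io/linuxserver/swag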
@ -1,3 +0,0 @@
# Instructions: https://github.com/lezgomatt/certbot-dns-vultr
# Replace with your vultr Personal Access Token (see https://www.vultr.com/docs/how-to-setup-dynamic-dns).
dns_vultr_key = YOUR_VULTR_API_KEY
@ -1,8 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

cd /config/keys/letsencrypt || exit 1
openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass:
sleep 1
cat {privkey,fullchain}.pem >priv-fullchain-bundle.pem
chown -R abc:abc /config/etc/letsencrypt
@ -1,15 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# shellcheck source=/dev/null
. /config/.donoteditthisfile.conf

if [[ ! "${ORIGVALIDATION}" = "dns" ]] && [[ ! "${ORIGVALIDATION}" = "duckdns" ]]; then
    if pgrep -f "s6-supervise svc-nginx" >/dev/null; then
        s6-svc -u /run/service/svc-nginx
    fi
else
    if pgrep -f "nginx:" >/dev/null; then
        s6-svc -h /run/service/svc-nginx
    fi
fi
@ -1,11 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# shellcheck source=/dev/null
. /config/.donoteditthisfile.conf

if [[ ! "${ORIGVALIDATION}" = "dns" ]] && [[ ! "${ORIGVALIDATION}" = "duckdns" ]]; then
    if pgrep -f "nginx:" >/dev/null; then
        s6-svc -d /run/service/svc-nginx
    fi
fi
@ -1,7 +0,0 @@
# A fail2ban filter for unauthorized log messages

[Definition]

failregex = ^<HOST>.*"(GET|POST|HEAD).*" (401) .*$

ignoreregex = .*(?i)plex.*
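As a hedged aside (not part of the diff), a pattern like the failregex above can be checked against a live access log with fail2ban's own test tool, passing the regex directly so no filter path needs to be assumed:

# Illustrative only -- test the 401 pattern against the nginx access log
# from the running container (container name "swag" is an assumption):
docker exec swag fail2ban-regex \
  /config/log/nginx/access.log \
  '^<HOST>.*"(GET|POST|HEAD).*" (401) .*$'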
@ -1,14 +1,10 @@
|
|||||||
## Version 2022/08/20 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/fail2ban/jail.local
|
## Version 2020/05/10 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/jail.local
|
||||||
# This is the custom version of the jail.conf for fail2ban
|
# This is the custom version of the jail.conf for fail2ban
|
||||||
# Feel free to modify this and add additional filters
|
# Feel free to modify this and add additional filters
|
||||||
# Then you can drop the new filter conf files into the fail2ban-filters
|
# Then you can drop the new filter conf files into the fail2ban-filters
|
||||||
# folder and restart the container
|
# folder and restart the container
|
||||||
|
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
# Prevents banning LAN subnets
|
|
||||||
ignoreip = 10.0.0.0/8
|
|
||||||
192.168.0.0/16
|
|
||||||
172.16.0.0/12
|
|
||||||
|
|
||||||
# Changes the default ban action from "iptables-multiport", which causes issues on some platforms, to "iptables-allports".
|
# Changes the default ban action from "iptables-multiport", which causes issues on some platforms, to "iptables-allports".
|
||||||
banaction = iptables-allports
|
banaction = iptables-allports
|
||||||
@ -25,35 +21,37 @@ maxretry = 5
|
|||||||
|
|
||||||
|
|
||||||
[ssh]
|
[ssh]
|
||||||
|
|
||||||
enabled = false
|
enabled = false
|
||||||
|
|
||||||
|
|
||||||
[nginx-http-auth]
|
[nginx-http-auth]
|
||||||
|
|
||||||
enabled = true
|
enabled = true
|
||||||
filter = nginx-http-auth
|
filter = nginx-http-auth
|
||||||
port = http,https
|
port = http,https
|
||||||
logpath = /config/log/nginx/error.log
|
logpath = /config/log/nginx/error.log
|
||||||
|
|
||||||
|
|
||||||
[nginx-badbots]
|
[nginx-badbots]
|
||||||
|
|
||||||
enabled = true
|
enabled = true
|
||||||
port = http,https
|
port = http,https
|
||||||
filter = nginx-badbots
|
filter = nginx-badbots
|
||||||
logpath = /config/log/nginx/access.log
|
logpath = /config/log/nginx/access.log
|
||||||
maxretry = 2
|
maxretry = 2
|
||||||
|
|
||||||
|
|
||||||
[nginx-botsearch]
|
[nginx-botsearch]
|
||||||
|
|
||||||
enabled = true
|
enabled = true
|
||||||
port = http,https
|
port = http,https
|
||||||
filter = nginx-botsearch
|
filter = nginx-botsearch
|
||||||
logpath = /config/log/nginx/access.log
|
logpath = /config/log/nginx/access.log
|
||||||
|
|
||||||
[nginx-deny]
|
[nginx-deny]
|
||||||
|
|
||||||
enabled = true
|
enabled = true
|
||||||
port = http,https
|
port = http,https
|
||||||
filter = nginx-deny
|
filter = nginx-deny
|
||||||
logpath = /config/log/nginx/error.log
|
logpath = /config/log/nginx/error.log
|
||||||
|
|
||||||
[nginx-unauthorized]
|
|
||||||
enabled = true
|
|
||||||
port = http,https
|
|
||||||
filter = nginx-unauthorized
|
|
||||||
logpath = /config/log/nginx/access.log
|
|
||||||
92 root/defaults/ldap.conf Normal file
@ -0,0 +1,92 @@
|
|||||||
|
## Version 2020/06/02 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/ldap.conf
|
||||||
|
## this conf is meant to be used in conjunction with our ldap-auth image: https://github.com/linuxserver/docker-ldap-auth
|
||||||
|
## see the heimdall example in the default site config for info on enabling ldap auth
|
||||||
|
## for further instructions on this conf, see https://github.com/nginxinc/nginx-ldap-auth
|
||||||
|
|
||||||
|
location /ldaplogin {
|
||||||
|
resolver 127.0.0.11 valid=30s;
|
||||||
|
set $upstream_auth_app ldap-auth;
|
||||||
|
set $upstream_auth_port 9000;
|
||||||
|
set $upstream_auth_proto http;
|
||||||
|
proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port;
|
||||||
|
proxy_set_header X-Target $request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
location = /auth {
|
||||||
|
resolver 127.0.0.11 valid=30s;
|
||||||
|
set $upstream_auth_app ldap-auth;
|
||||||
|
set $upstream_auth_port 8888;
|
||||||
|
set $upstream_auth_proto http;
|
||||||
|
proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port;
|
||||||
|
|
||||||
|
proxy_pass_request_body off;
|
||||||
|
proxy_set_header Content-Length "";
|
||||||
|
|
||||||
|
#Before enabling the below caching options, make sure you have the line "proxy_cache_path cache/ keys_zone=auth_cache:10m;" at the bottom of your default site config
|
||||||
|
#proxy_cache auth_cache;
|
||||||
|
#proxy_cache_valid 200 10m;
|
||||||
|
#proxy_cache_key "$http_authorization$cookie_nginxauth";
|
||||||
|
|
||||||
|
# As implemented in nginx-ldap-auth-daemon.py, the ldap-auth daemon
|
||||||
|
# communicates with a LDAP server, passing in the following
|
||||||
|
# parameters to specify which user account to authenticate. To
|
||||||
|
# eliminate the need to modify the Python code, this file contains
|
||||||
|
# 'proxy_set_header' directives that set the values of the
|
||||||
|
# parameters. Set or change them as instructed in the comments.
|
||||||
|
#
|
||||||
|
# Parameter Proxy header
|
||||||
|
# ----------- ----------------
|
||||||
|
# url X-Ldap-URL
|
||||||
|
# starttls X-Ldap-Starttls
|
||||||
|
# basedn X-Ldap-BaseDN
|
||||||
|
# binddn X-Ldap-BindDN
|
||||||
|
# bindpasswd X-Ldap-BindPass
|
||||||
|
# cookiename X-CookieName
|
||||||
|
# realm X-Ldap-Realm
|
||||||
|
# template X-Ldap-Template
|
||||||
|
|
||||||
|
# (Required) Set the URL and port for connecting to the LDAP server,
|
||||||
|
# by replacing 'example.com'.
|
||||||
|
# Do not mix ldaps-style URL and X-Ldap-Starttls as it will not work.
|
||||||
|
proxy_set_header X-Ldap-URL "ldap://example.com";
|
||||||
|
|
||||||
|
# (Optional) Establish a TLS-enabled LDAP session after binding to the
|
||||||
|
# LDAP server.
|
||||||
|
# This is the 'proper' way to establish encrypted TLS connections, see
|
||||||
|
# http://www.openldap.org/faq/data/cache/185.html
|
||||||
|
#proxy_set_header X-Ldap-Starttls "true";
|
||||||
|
|
||||||
|
# (Required) Set the Base DN, by replacing the value enclosed in
|
||||||
|
# double quotes.
|
||||||
|
proxy_set_header X-Ldap-BaseDN "cn=Users,dc=test,dc=local";
|
||||||
|
|
||||||
|
# (Required) Set the Bind DN, by replacing the value enclosed in
|
||||||
|
# double quotes.
|
||||||
|
# If AD, use "root@test.local"
|
||||||
|
proxy_set_header X-Ldap-BindDN "cn=root,dc=test,dc=local";
|
||||||
|
|
||||||
|
# (Required) Set the Bind password, by replacing 'secret'.
|
||||||
|
proxy_set_header X-Ldap-BindPass "secret";
|
||||||
|
|
||||||
|
# (Required) The following directives set the cookie name and pass
|
||||||
|
# it, respectively. They are required for cookie-based
|
||||||
|
# authentication. Comment them out if using HTTP basic
|
||||||
|
# authentication.
|
||||||
|
proxy_set_header X-CookieName "nginxauth";
|
||||||
|
proxy_set_header Cookie nginxauth=$cookie_nginxauth;
|
||||||
|
|
||||||
|
# (Required if using Microsoft Active Directory as the LDAP server)
|
||||||
|
# Set the LDAP template by uncommenting the following directive.
|
||||||
|
#proxy_set_header X-Ldap-Template "(sAMAccountName=%(username)s)";
|
||||||
|
|
||||||
|
# (Optional if using OpenLDAP as the LDAP server) Set the LDAP
|
||||||
|
# template by uncommenting the following directive and replacing
|
||||||
|
# '(cn=%(username)s)' which is the default set in
|
||||||
|
# nginx-ldap-auth-daemon.py.
|
||||||
|
#proxy_set_header X-Ldap-Template "(cn=%(username)s)";
|
||||||
|
|
||||||
|
# (Optional) Set the realm name, by uncommenting the following
|
||||||
|
# directive and replacing 'Restricted' which is the default set
|
||||||
|
# in nginx-ldap-auth-daemon.py.
|
||||||
|
#proxy_set_header X-Ldap-Realm "Restricted";
|
||||||
|
}
|
||||||
105 root/defaults/nginx.conf Normal file
@ -0,0 +1,105 @@
|
|||||||
|
## Version 2019/12/19 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx.conf
|
||||||
|
|
||||||
|
user abc;
|
||||||
|
worker_processes 4;
|
||||||
|
pid /run/nginx.pid;
|
||||||
|
include /etc/nginx/modules/*.conf;
|
||||||
|
|
||||||
|
events {
|
||||||
|
worker_connections 768;
|
||||||
|
# multi_accept on;
|
||||||
|
}
|
||||||
|
|
||||||
|
http {
|
||||||
|
|
||||||
|
##
|
||||||
|
# Basic Settings
|
||||||
|
##
|
||||||
|
|
||||||
|
sendfile on;
|
||||||
|
tcp_nopush on;
|
||||||
|
tcp_nodelay on;
|
||||||
|
keepalive_timeout 65;
|
||||||
|
types_hash_max_size 2048;
|
||||||
|
variables_hash_max_size 2048;
|
||||||
|
large_client_header_buffers 4 16k;
|
||||||
|
|
||||||
|
# server_tokens off;
|
||||||
|
|
||||||
|
# server_names_hash_bucket_size 64;
|
||||||
|
# server_name_in_redirect off;
|
||||||
|
|
||||||
|
client_max_body_size 0;
|
||||||
|
|
||||||
|
include /etc/nginx/mime.types;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
|
||||||
|
##
|
||||||
|
# Logging Settings
|
||||||
|
##
|
||||||
|
|
||||||
|
access_log /config/log/nginx/access.log;
|
||||||
|
error_log /config/log/nginx/error.log;
|
||||||
|
|
||||||
|
##
|
||||||
|
# Gzip Settings
|
||||||
|
##
|
||||||
|
|
||||||
|
gzip on;
|
||||||
|
gzip_disable "msie6";
|
||||||
|
|
||||||
|
# gzip_vary on;
|
||||||
|
# gzip_proxied any;
|
||||||
|
# gzip_comp_level 6;
|
||||||
|
# gzip_buffers 16 8k;
|
||||||
|
# gzip_http_version 1.1;
|
||||||
|
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
|
||||||
|
|
||||||
|
##
|
||||||
|
# nginx-naxsi config
|
||||||
|
##
|
||||||
|
# Uncomment it if you installed nginx-naxsi
|
||||||
|
##
|
||||||
|
|
||||||
|
#include /etc/nginx/naxsi_core.rules;
|
||||||
|
|
||||||
|
##
|
||||||
|
# nginx-passenger config
|
||||||
|
##
|
||||||
|
# Uncomment it if you installed nginx-passenger
|
||||||
|
##
|
||||||
|
|
||||||
|
#passenger_root /usr;
|
||||||
|
#passenger_ruby /usr/bin/ruby;
|
||||||
|
|
||||||
|
##
|
||||||
|
# Virtual Host Configs
|
||||||
|
##
|
||||||
|
include /etc/nginx/conf.d/*.conf;
|
||||||
|
include /config/nginx/site-confs/*;
|
||||||
|
lua_load_resty_core off;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#mail {
|
||||||
|
# # See sample authentication script at:
|
||||||
|
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
|
||||||
|
#
|
||||||
|
# # auth_http localhost/auth.php;
|
||||||
|
# # pop3_capabilities "TOP" "USER";
|
||||||
|
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
|
||||||
|
#
|
||||||
|
# server {
|
||||||
|
# listen localhost:110;
|
||||||
|
# protocol pop3;
|
||||||
|
# proxy on;
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# server {
|
||||||
|
# listen localhost:143;
|
||||||
|
# protocol imap;
|
||||||
|
# proxy on;
|
||||||
|
# }
|
||||||
|
#}
|
||||||
|
daemon off;
|
||||||
@ -1,29 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/authelia-location.conf.sample
|
|
||||||
# Make sure that your authelia container is in the same user defined bridge network and is named authelia
|
|
||||||
# Rename /config/nginx/proxy-confs/authelia.subdomain.conf.sample to /config/nginx/proxy-confs/authelia.subdomain.conf
|
|
||||||
# Make sure that the authelia configuration.yml has 'path: "authelia"' defined
|
|
||||||
|
|
||||||
## Send a subrequest to Authelia to verify if the user is authenticated and has permission to access the resource.
|
|
||||||
auth_request /authelia/api/verify;
|
|
||||||
## If the subrequest returns 200 pass to the backend, if the subrequest returns 401 redirect to the portal.
|
|
||||||
error_page 401 = @authelia_proxy_signin;
|
|
||||||
|
|
||||||
## Translate response headers from Authelia into variables
|
|
||||||
auth_request_set $user $upstream_http_remote_user;
|
|
||||||
auth_request_set $groups $upstream_http_remote_groups;
|
|
||||||
auth_request_set $name $upstream_http_remote_name;
|
|
||||||
auth_request_set $email $upstream_http_remote_email;
|
|
||||||
auth_request_set $authorization $upstream_http_authorization;
|
|
||||||
auth_request_set $proxy_authorization $upstream_http_proxy_authorization;
|
|
||||||
|
|
||||||
## Inject the response header variables into the request made to the actual upstream
|
|
||||||
proxy_set_header Remote-User $user;
|
|
||||||
proxy_set_header Remote-Groups $groups;
|
|
||||||
proxy_set_header Remote-Name $name;
|
|
||||||
proxy_set_header Remote-Email $email;
|
|
||||||
proxy_set_header Authorization $authorization;
|
|
||||||
proxy_set_header Proxy-Authorization $proxy_authorization;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
@ -1,55 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/authelia-server.conf.sample
|
|
||||||
# Make sure that your authelia container is in the same user defined bridge network and is named authelia
|
|
||||||
# Rename /config/nginx/proxy-confs/authelia.subdomain.conf.sample to /config/nginx/proxy-confs/authelia.subdomain.conf
|
|
||||||
# Make sure that the authelia configuration.yml has 'path: "authelia"' defined
|
|
||||||
|
|
||||||
# location for authelia subfolder requests
|
|
||||||
location ^~ /authelia {
|
|
||||||
auth_request off; # requests to this subfolder must be accessible without authentication
|
|
||||||
include /config/nginx/proxy.conf;
|
|
||||||
include /config/nginx/resolver.conf;
|
|
||||||
set $upstream_authelia authelia;
|
|
||||||
proxy_pass http://$upstream_authelia:9091;
|
|
||||||
}
|
|
||||||
|
|
||||||
# location for authelia auth requests
|
|
||||||
location = /authelia/api/verify {
|
|
||||||
internal;
|
|
||||||
|
|
||||||
include /config/nginx/proxy.conf;
|
|
||||||
include /config/nginx/resolver.conf;
|
|
||||||
set $upstream_authelia authelia;
|
|
||||||
proxy_pass http://$upstream_authelia:9091/authelia/api/verify;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
|
|
||||||
proxy_pass_request_body off;
|
|
||||||
proxy_set_header Content-Length "";
|
|
||||||
}
|
|
||||||
|
|
||||||
# Virtual location for authelia 401 redirects
|
|
||||||
location @authelia_proxy_signin {
|
|
||||||
internal;
|
|
||||||
|
|
||||||
## Set the $target_url variable based on the original request.
|
|
||||||
set_escape_uri $target_url $scheme://$http_host$request_uri;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
|
|
||||||
## Set $authelia_backend to route requests to the current domain by default
|
|
||||||
set $authelia_backend $http_host;
|
|
||||||
## In order for Webauthn to work with multiple domains authelia must operate on a separate subdomain
|
|
||||||
## To use authelia on a separate subdomain:
|
|
||||||
## * comment the $authelia_backend line above
|
|
||||||
## * rename /config/nginx/proxy-confs/authelia.conf.sample to /config/nginx/proxy-confs/authelia.conf
|
|
||||||
## * make sure that your dns has a cname set for authelia
|
|
||||||
## * uncomment the $authelia_backend line below and change example.com to your domain
|
|
||||||
## * restart the swag container
|
|
||||||
#set $authelia_backend authelia.example.com;
|
|
||||||
|
|
||||||
return 302 https://$authelia_backend/authelia/?rd=$target_url;
|
|
||||||
}
|
|
||||||
@ -1,26 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/authentik-location.conf.sample
|
|
||||||
# Make sure that your authentik container is in the same user defined bridge network and is named authentik-server
|
|
||||||
# Rename /config/nginx/proxy-confs/authentik.subdomain.conf.sample to /config/nginx/proxy-confs/authentik.subdomain.conf
|
|
||||||
|
|
||||||
## Send a subrequest to Authentik to verify if the user is authenticated and has permission to access the resource.
|
|
||||||
auth_request /outpost.goauthentik.io/auth/nginx;
|
|
||||||
## If the subrequest returns 200 pass to the backend, if the subrequest returns 401 redirect to the portal.
|
|
||||||
error_page 401 = @goauthentik_proxy_signin;
|
|
||||||
|
|
||||||
## Translate response headers from Authentik into variables
|
|
||||||
auth_request_set $authentik_username $upstream_http_x_authentik_username;
|
|
||||||
auth_request_set $authentik_groups $upstream_http_x_authentik_groups;
|
|
||||||
auth_request_set $authentik_email $upstream_http_x_authentik_email;
|
|
||||||
auth_request_set $authentik_name $upstream_http_x_authentik_name;
|
|
||||||
auth_request_set $authentik_uid $upstream_http_x_authentik_uid;
|
|
||||||
|
|
||||||
## Inject the response header variables into the request made to the actual upstream
|
|
||||||
proxy_set_header X-authentik-username $authentik_username;
|
|
||||||
proxy_set_header X-authentik-groups $authentik_groups;
|
|
||||||
proxy_set_header X-authentik-email $authentik_email;
|
|
||||||
proxy_set_header X-authentik-name $authentik_name;
|
|
||||||
proxy_set_header X-authentik-uid $authentik_uid;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
@ -1,45 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/authentik-server.conf.sample
|
|
||||||
# Make sure that your authentik container is in the same user defined bridge network and is named authentik-server
|
|
||||||
# Rename /config/nginx/proxy-confs/authentik.subdomain.conf.sample to /config/nginx/proxy-confs/authentik.subdomain.conf
|
|
||||||
|
|
||||||
# location for authentik subfolder requests
|
|
||||||
location ^~ /outpost.goauthentik.io {
|
|
||||||
auth_request off; # requests to this subfolder must be accessible without authentication
|
|
||||||
include /config/nginx/proxy.conf;
|
|
||||||
include /config/nginx/resolver.conf;
|
|
||||||
set $upstream_authentik authentik-server;
|
|
||||||
proxy_pass http://$upstream_authentik:9000;
|
|
||||||
}
|
|
||||||
|
|
||||||
# location for authentik auth requests
|
|
||||||
location = /outpost.goauthentik.io/auth/nginx {
|
|
||||||
internal;
|
|
||||||
|
|
||||||
include /config/nginx/proxy.conf;
|
|
||||||
include /config/nginx/resolver.conf;
|
|
||||||
set $upstream_authentik authentik-server;
|
|
||||||
proxy_pass http://$upstream_authentik:9000/outpost.goauthentik.io/auth/nginx;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
|
|
||||||
proxy_pass_request_body off;
|
|
||||||
proxy_set_header Content-Length "";
|
|
||||||
}
|
|
||||||
|
|
||||||
# Virtual location for authentik 401 redirects
|
|
||||||
location @goauthentik_proxy_signin {
|
|
||||||
internal;
|
|
||||||
|
|
||||||
## Set the $target_url variable based on the original request.
|
|
||||||
set_escape_uri $target_url $scheme://$http_host$request_uri;
|
|
||||||
|
|
||||||
## Include the Set-Cookie header if present.
|
|
||||||
auth_request_set $set_cookie $upstream_http_set_cookie;
|
|
||||||
add_header Set-Cookie $set_cookie;
|
|
||||||
|
|
||||||
## Set $authentik_backend to route requests to the current domain by default
|
|
||||||
set $authentik_backend $http_host;
|
|
||||||
return 302 https://$authentik_backend/outpost.goauthentik.io/start?rd=$target_url;
|
|
||||||
}
|
|
||||||
@ -1,4 +0,0 @@
## Version 2022/08/20 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/ldap-location.conf.sample

auth_request /auth;
error_page 401 =200 /ldaplogin;
@ -1,90 +0,0 @@
|
|||||||
## Version 2022/08/20 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/ldap-server.conf.sample
|
|
||||||
## this conf is meant to be used in conjunction with our ldap-auth image: https://github.com/linuxserver/docker-ldap-auth
|
|
||||||
## see the heimdall example in the default site config for info on enabling ldap auth
|
|
||||||
## for further instructions on this conf, see https://github.com/nginxinc/nginx-ldap-auth
|
|
||||||
|
|
||||||
location /ldaplogin {
|
|
||||||
|
|
||||||
set $upstream_auth_app ldap-auth;
|
|
||||||
set $upstream_auth_port 9000;
|
|
||||||
set $upstream_auth_proto http;
|
|
||||||
proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port;
|
|
||||||
proxy_set_header X-Target $request_uri;
|
|
||||||
}
|
|
||||||
|
|
||||||
location = /auth {
|
|
||||||
|
|
||||||
set $upstream_auth_app ldap-auth;
|
|
||||||
set $upstream_auth_port 8888;
|
|
||||||
set $upstream_auth_proto http;
|
|
||||||
proxy_pass $upstream_auth_proto://$upstream_auth_app:$upstream_auth_port;
|
|
||||||
|
|
||||||
proxy_pass_request_body off;
|
|
||||||
proxy_set_header Content-Length "";
|
|
||||||
|
|
||||||
#Before enabling the below caching options, make sure you have the line "proxy_cache_path cache/ keys_zone=auth_cache:10m;" at the bottom of your default site config
|
|
||||||
#proxy_cache auth_cache;
|
|
||||||
#proxy_cache_valid 200 10m;
|
|
||||||
#proxy_cache_key "$http_authorization$cookie_nginxauth";
|
|
||||||
|
|
||||||
# As implemented in nginx-ldap-auth-daemon.py, the ldap-auth daemon
|
|
||||||
# communicates with a LDAP server, passing in the following
|
|
||||||
# parameters to specify which user account to authenticate. To
|
|
||||||
# eliminate the need to modify the Python code, this file contains
|
|
||||||
# 'proxy_set_header' directives that set the values of the
|
|
||||||
# parameters. Set or change them as instructed in the comments.
|
|
||||||
#
|
|
||||||
# Parameter Proxy header
|
|
||||||
# ----------- ----------------
|
|
||||||
# url X-Ldap-URL
|
|
||||||
# starttls X-Ldap-Starttls
|
|
||||||
# basedn X-Ldap-BaseDN
|
|
||||||
# binddn X-Ldap-BindDN
|
|
||||||
# bindpasswd X-Ldap-BindPass
|
|
||||||
# cookiename X-CookieName
|
|
||||||
# realm X-Ldap-Realm
|
|
||||||
# template X-Ldap-Template
|
|
||||||
# (Required) Set the URL and port for connecting to the LDAP server,
|
|
||||||
# by replacing 'example.com'.
|
|
||||||
# Do not mix ldaps-style URL and X-Ldap-Starttls as it will not work.
|
|
||||||
proxy_set_header X-Ldap-URL "ldap://example.com";
|
|
||||||
|
|
||||||
# (Optional) Establish a TLS-enabled LDAP session after binding to the
|
|
||||||
# LDAP server.
|
|
||||||
# This is the 'proper' way to establish encrypted TLS connections, see
|
|
||||||
# http://www.openldap.org/faq/data/cache/185.html
|
|
||||||
#proxy_set_header X-Ldap-Starttls "true";
|
|
||||||
|
|
||||||
# (Required) Set the Base DN, by replacing the value enclosed in
|
|
||||||
# double quotes.
|
|
||||||
proxy_set_header X-Ldap-BaseDN "cn=Users,dc=test,dc=local";
|
|
||||||
|
|
||||||
# (Required) Set the Bind DN, by replacing the value enclosed in
|
|
||||||
# double quotes.
|
|
||||||
# If AD, use "root@test.local"
|
|
||||||
proxy_set_header X-Ldap-BindDN "cn=root,dc=test,dc=local";
|
|
||||||
|
|
||||||
# (Required) Set the Bind password, by replacing 'secret'.
|
|
||||||
proxy_set_header X-Ldap-BindPass "secret";
|
|
||||||
|
|
||||||
# (Required) The following directives set the cookie name and pass
|
|
||||||
# it, respectively. They are required for cookie-based
|
|
||||||
# authentication. Comment them out if using HTTP basic
|
|
||||||
# authentication.
|
|
||||||
proxy_set_header X-CookieName "nginxauth";
|
|
||||||
proxy_set_header Cookie nginxauth=$cookie_nginxauth;
|
|
||||||
|
|
||||||
# (Required if using Microsoft Active Directory as the LDAP server)
|
|
||||||
# Set the LDAP template by uncommenting the following directive.
|
|
||||||
#proxy_set_header X-Ldap-Template "(sAMAccountName=%(username)s)";
|
|
||||||
|
|
||||||
# (Optional if using OpenLDAP as the LDAP server) Set the LDAP
|
|
||||||
# template by uncommenting the following directive and replacing
|
|
||||||
# '(cn=%(username)s)' which is the default set in
|
|
||||||
# nginx-ldap-auth-daemon.py.
|
|
||||||
#proxy_set_header X-Ldap-Template "(cn=%(username)s)";
|
|
||||||
# (Optional) Set the realm name, by uncommenting the following
|
|
||||||
# directive and replacing 'Restricted' which is the default set
|
|
||||||
# in nginx-ldap-auth-daemon.py.
|
|
||||||
#proxy_set_header X-Ldap-Realm "Restricted";
|
|
||||||
}
|
|
||||||
@ -1,37 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/proxy.conf.sample
|
|
||||||
|
|
||||||
# Timeout if the real server is dead
|
|
||||||
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
|
|
||||||
|
|
||||||
# Proxy Connection Settings
|
|
||||||
proxy_buffers 32 4k;
|
|
||||||
proxy_connect_timeout 240;
|
|
||||||
proxy_headers_hash_bucket_size 128;
|
|
||||||
proxy_headers_hash_max_size 1024;
|
|
||||||
proxy_http_version 1.1;
|
|
||||||
proxy_read_timeout 240;
|
|
||||||
proxy_redirect http:// $scheme://;
|
|
||||||
proxy_send_timeout 240;
|
|
||||||
|
|
||||||
# Proxy Cache and Cookie Settings
|
|
||||||
proxy_cache_bypass $cookie_session;
|
|
||||||
#proxy_cookie_path / "/; Secure"; # enable at your own risk, may break certain apps
|
|
||||||
proxy_no_cache $cookie_session;
|
|
||||||
|
|
||||||
# Proxy Header Settings
|
|
||||||
proxy_set_header Connection $connection_upgrade;
|
|
||||||
proxy_set_header Early-Data $ssl_early_data;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header Proxy "";
|
|
||||||
proxy_set_header Upgrade $http_upgrade;
|
|
||||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
proxy_set_header X-Forwarded-Host $host;
|
|
||||||
proxy_set_header X-Forwarded-Method $request_method;
|
|
||||||
proxy_set_header X-Forwarded-Port $server_port;
|
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
|
||||||
proxy_set_header X-Forwarded-Server $host;
|
|
||||||
proxy_set_header X-Forwarded-Ssl on;
|
|
||||||
proxy_set_header X-Forwarded-Uri $request_uri;
|
|
||||||
proxy_set_header X-Original-Method $request_method;
|
|
||||||
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
@ -1,68 +0,0 @@
|
|||||||
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/site-confs/default.conf.sample
|
|
||||||
|
|
||||||
# redirect all traffic to https
|
|
||||||
server {
|
|
||||||
listen 80 default_server;
|
|
||||||
listen [::]:80 default_server;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
return 301 https://$host$request_uri;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# main server block
|
|
||||||
server {
|
|
||||||
listen 443 ssl http2 default_server;
|
|
||||||
listen [::]:443 ssl http2 default_server;
|
|
||||||
|
|
||||||
server_name _;
|
|
||||||
|
|
||||||
root /config/www;
|
|
||||||
index index.html index.htm index.php;
|
|
||||||
|
|
||||||
# enable subfolder method reverse proxy confs
|
|
||||||
include /config/nginx/proxy-confs/*.subfolder.conf;
|
|
||||||
|
|
||||||
# enable for ldap auth (requires ldap-location.conf in the location block)
|
|
||||||
#include /config/nginx/ldap-server.conf;
|
|
||||||
|
|
||||||
# enable for Authelia (requires authelia-location.conf in the location block)
|
|
||||||
#include /config/nginx/authelia-server.conf;
|
|
||||||
|
|
||||||
# enable for Authentik (requires authentik-location.conf in the location block)
|
|
||||||
#include /config/nginx/authentik-server.conf;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
# enable for basic auth
|
|
||||||
#auth_basic "Restricted";
|
|
||||||
#auth_basic_user_file /config/nginx/.htpasswd;
|
|
||||||
|
|
||||||
# enable for ldap auth (requires ldap-server.conf in the server block)
|
|
||||||
#include /config/nginx/ldap-location.conf;
|
|
||||||
|
|
||||||
# enable for Authelia (requires authelia-server.conf in the server block)
|
|
||||||
#include /config/nginx/authelia-location.conf;
|
|
||||||
|
|
||||||
# enable for Authentik (requires authentik-server.conf in the server block)
|
|
||||||
#include /config/nginx/authentik-location.conf;
|
|
||||||
|
|
||||||
try_files $uri $uri/ /index.html /index.php$is_args$args =404;
|
|
||||||
}
|
|
||||||
|
|
||||||
location ~ ^(.+\.php)(.*)$ {
|
|
||||||
fastcgi_split_path_info ^(.+\.php)(.*)$;
|
|
||||||
fastcgi_pass 127.0.0.1:9000;
|
|
||||||
fastcgi_index index.php;
|
|
||||||
include /etc/nginx/fastcgi_params;
|
|
||||||
}
|
|
||||||
|
|
||||||
# deny access to .htaccess/.htpasswd files
|
|
||||||
location ~ /\.ht {
|
|
||||||
deny all;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# enable subdomain method reverse proxy confs
|
|
||||||
include /config/nginx/proxy-confs/*.subdomain.conf;
|
|
||||||
# enable proxy cache for auth
|
|
||||||
proxy_cache_path cache/ keys_zone=auth_cache:10m;
|
|
||||||
32 root/defaults/proxy.conf Normal file
@ -0,0 +1,32 @@
|
|||||||
|
## Version 2019/10/23 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/proxy.conf
|
||||||
|
|
||||||
|
client_body_buffer_size 128k;
|
||||||
|
|
||||||
|
#Timeout if the real server is dead
|
||||||
|
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
|
||||||
|
|
||||||
|
# Advanced Proxy Config
|
||||||
|
send_timeout 5m;
|
||||||
|
proxy_read_timeout 240;
|
||||||
|
proxy_send_timeout 240;
|
||||||
|
proxy_connect_timeout 240;
|
||||||
|
|
||||||
|
# TLS 1.3 early data
|
||||||
|
proxy_set_header Early-Data $ssl_early_data;
|
||||||
|
|
||||||
|
# Basic Proxy Config
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto https;
|
||||||
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
|
proxy_set_header X-Forwarded-Ssl on;
|
||||||
|
proxy_redirect http:// $scheme://;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Connection "";
|
||||||
|
#proxy_cookie_path / "/; HTTPOnly; Secure"; # enable at your own risk, may break certain apps
|
||||||
|
proxy_cache_bypass $cookie_session;
|
||||||
|
proxy_no_cache $cookie_session;
|
||||||
|
proxy_buffers 32 4k;
|
||||||
|
proxy_headers_hash_bucket_size 128;
|
||||||
|
proxy_headers_hash_max_size 1024;
|
||||||
48 root/defaults/ssl.conf Normal file
@ -0,0 +1,48 @@
|
|||||||
|
## Version 2020/06/17 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/ssl.conf
|
||||||
|
|
||||||
|
### Mozilla Recommendations
|
||||||
|
# generated 2020-06-17, Mozilla Guideline v5.4, nginx 1.18.0-r0, OpenSSL 1.1.1g-r0, intermediate configuration
|
||||||
|
# https://ssl-config.mozilla.org/#server=nginx&version=1.18.0-r0&config=intermediate&openssl=1.1.1g-r0&guideline=5.4
|
||||||
|
|
||||||
|
ssl_session_timeout 1d;
|
||||||
|
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||||
|
ssl_session_tickets off;
|
||||||
|
|
||||||
|
# intermediate configuration
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
|
||||||
|
ssl_prefer_server_ciphers off;
|
||||||
|
|
||||||
|
# OCSP stapling
|
||||||
|
ssl_stapling on;
|
||||||
|
ssl_stapling_verify on;
|
||||||
|
|
||||||
|
|
||||||
|
### Linuxserver.io Defaults
|
||||||
|
|
||||||
|
# Certificates
|
||||||
|
ssl_certificate /config/keys/letsencrypt/fullchain.pem;
|
||||||
|
ssl_certificate_key /config/keys/letsencrypt/privkey.pem;
|
||||||
|
# verify chain of trust of OCSP response using Root CA and Intermediate certs
|
||||||
|
ssl_trusted_certificate /config/keys/letsencrypt/fullchain.pem;
|
||||||
|
|
||||||
|
# Diffie-Hellman Parameters
|
||||||
|
ssl_dhparam /config/nginx/dhparams.pem;
|
||||||
|
|
||||||
|
# Resolver
|
||||||
|
resolver 127.0.0.11 valid=30s; # Docker DNS Server
|
||||||
|
|
||||||
|
# Enable TLS 1.3 early data
|
||||||
|
ssl_early_data on;
|
||||||
|
|
||||||
|
# HSTS, remove # from the line below to enable HSTS
|
||||||
|
#add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;
|
||||||
|
|
||||||
|
# Optional additional headers
|
||||||
|
#add_header Content-Security-Policy "upgrade-insecure-requests";
|
||||||
|
#add_header X-Frame-Options "SAMEORIGIN" always;
|
||||||
|
#add_header X-XSS-Protection "1; mode=block" always;
|
||||||
|
#add_header X-Content-Type-Options "nosniff" always;
|
||||||
|
#add_header X-UA-Compatible "IE=Edge" always;
|
||||||
|
#add_header Cache-Control "no-transform" always;
|
||||||
|
#add_header Referrer-Policy "same-origin" always;
|
||||||
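As a usage illustration only (not part of this diff), the sketch below shows how a site definition typically pulls in the shared ssl.conf and proxy.conf defaults above and how the result can be validated; the container name "swag", the domain, and the upstream address are hypothetical assumptions.

#!/bin/bash
# Hypothetical sketch: create a proxied site that reuses the shared ssl.conf
# and proxy.conf defaults shipped in this image, then validate and reload.
# Assumes a container named "swag"; adjust names and paths to your setup.
cat > ./app.conf <<'EOF'
server {
    listen 443 ssl http2;
    server_name app.example.com;              # hypothetical domain
    include /config/nginx/ssl.conf;           # TLS defaults shown above
    location / {
        include /config/nginx/proxy.conf;     # proxy defaults shown above
        proxy_pass http://192.168.1.10:8080;  # hypothetical upstream
    }
}
EOF
docker cp ./app.conf swag:/config/nginx/site-confs/app.conf
docker exec swag nginx -t         # syntax check before reloading
docker exec swag nginx -s reload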
@ -1,39 +0,0 @@
<html>
<head>
<title>Welcome to your SWAG instance</title>
<style>
body{
    font-family: Helvetica, Arial, sans-serif;
}
.message{
    width:440px;
    padding:20px 40px;
    margin:0 auto;
    background-color:#f9f9f9;
    border:1px solid #ddd;
    color: #1e3d62;
}
center{
    margin:40px 0;
}
h1{
    font-size: 18px;
    line-height: 26px;
}
p{
    font-size: 12px;
}
a{
    color: rgb(207, 48, 139);
}
</style>
</head>
<body>
<div class="message">
    <h1>Welcome to your <a target="_blank" href="https://github.com/linuxserver/docker-swag">SWAG</a> instance</h1>
    <p>A webserver and reverse proxy solution brought to you by <a target="_blank" href="https://www.linuxserver.io/">linuxserver.io</a> with php support and a built-in Certbot client.</p>
    <p>We have an article on how to use swag here: <a target="_blank" href="https://docs.linuxserver.io/general/swag">docs.linuxserver.io</a></p>
    <p>For help and support, please visit: <a target="_blank" href="https://www.linuxserver.io/support">linuxserver.io/support</a></p>
</div>
</body>
</html>
302 root/etc/cont-init.d/50-config Normal file
@ -0,0 +1,302 @@
#!/usr/bin/with-contenv bash

# Display variables for troubleshooting
echo -e "Variables set:\\n\
PUID=${PUID}\\n\
PGID=${PGID}\\n\
TZ=${TZ}\\n\
URL=${URL}\\n\
SUBDOMAINS=${SUBDOMAINS}\\n\
EXTRA_DOMAINS=${EXTRA_DOMAINS}\\n\
ONLY_SUBDOMAINS=${ONLY_SUBDOMAINS}\\n\
VALIDATION=${VALIDATION}\\n\
DNSPLUGIN=${DNSPLUGIN}\\n\
EMAIL=${EMAIL}\\n\
STAGING=${STAGING}\\n"

# Echo init finish for test runs
if [ -n "${TEST_RUN}" ]; then
    echo '[services.d] done.'
fi

# Sanitize variables
SANED_VARS=( DNSPLUGIN EMAIL EXTRA_DOMAINS ONLY_SUBDOMAINS STAGING SUBDOMAINS URL VALIDATION )
for i in "${SANED_VARS[@]}"
do
    export echo "$i"="${!i//\"/}"
    export echo "$i"="$(echo "${!i}" | tr '[:upper:]' '[:lower:]')"
done

# check to make sure that the required variables are set
[[ -z "$URL" ]] && \
    echo "Please pass your URL as an environment variable in your docker run command. See docker info for more details." && \
    sleep infinity

# make our folders and links
mkdir -p \
    /config/{log/letsencrypt,log/fail2ban,etc/letsencrypt,fail2ban,crontabs,dns-conf,geoip2db} \
    /var/run/fail2ban
rm -rf /etc/letsencrypt
ln -s /config/etc/letsencrypt /etc/letsencrypt

# copy dns default configs
cp -n /defaults/dns-conf/* /config/dns-conf/
chown -R abc:abc /config/dns-conf

# copy reverse proxy configs
cp -R /defaults/proxy-confs /config/nginx/
# remove outdated files (remove this action after 2020/10/17)
rm -f /config/nginx/proxy-confs/seafile.subdomain.config.sample /config/nginx/proxy-confs/librespeed.subdomain.com.sample

# copy/update the fail2ban config defaults to/in /config
cp -R /defaults/fail2ban/filter.d /config/fail2ban/
cp -R /defaults/fail2ban/action.d /config/fail2ban/
# if jail.local is missing in /config, copy default
[[ ! -f /config/fail2ban/jail.local ]] && \
    cp /defaults/jail.local /config/fail2ban/jail.local
# Replace fail2ban config with user config
[[ -d /etc/fail2ban/filter.d ]] && \
    rm -rf /etc/fail2ban/filter.d
[[ -d /etc/fail2ban/action.d ]] && \
    rm -rf /etc/fail2ban/action.d
cp -R /config/fail2ban/filter.d /etc/fail2ban/
cp -R /config/fail2ban/action.d /etc/fail2ban/
cp /defaults/fail2ban/fail2ban.local /etc/fail2ban/
cp /config/fail2ban/jail.local /etc/fail2ban/jail.local

# copy crontab and proxy defaults if needed
[[ ! -f /config/crontabs/root ]] && \
    cp /etc/crontabs/root /config/crontabs/
[[ ! -f /config/nginx/proxy.conf ]] && \
    cp /defaults/proxy.conf /config/nginx/proxy.conf
[[ ! -f /config/nginx/ssl.conf ]] && \
    cp /defaults/ssl.conf /config/nginx/ssl.conf
[[ ! -f /config/nginx/ldap.conf ]] && \
    cp /defaults/ldap.conf /config/nginx/ldap.conf
[[ ! -f /config/nginx/authelia-server.conf ]] && \
    cp /defaults/authelia-server.conf /config/nginx/authelia-server.conf
[[ ! -f /config/nginx/authelia-location.conf ]] && \
    cp /defaults/authelia-location.conf /config/nginx/authelia-location.conf

# copy pre-generated dhparams or generate if needed
[[ ! -f /config/nginx/dhparams.pem ]] && \
    cp /defaults/dhparams.pem /config/nginx/dhparams.pem
if ! grep -q 'PARAMETERS' "/config/nginx/dhparams.pem"; then
    curl -o /config/nginx/dhparams.pem -L "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem"
fi
if ! grep -q 'PARAMETERS' "/config/nginx/dhparams.pem"; then
    echo "Generating dhparams.pem. This will take a long time. Do not stop the container until this process is completed."
    openssl dhparam -out /config/nginx/dhparams.pem 4096
fi

# check to make sure DNSPLUGIN is selected if dns validation is used
[[ "$VALIDATION" = "dns" ]] && [[ ! "$DNSPLUGIN" =~ ^(aliyun|cloudflare|cloudxns|cpanel|digitalocean|dnsimple|dnsmadeeasy|domeneshop|gandi|google|inwx|linode|luadns|nsone|ovh|rfc2136|route53|transip)$ ]] && \
    echo "Please set the DNSPLUGIN variable to a valid plugin name. See docker info for more details." && \
    sleep infinity

# import user crontabs
rm /etc/crontabs/*
cp /config/crontabs/* /etc/crontabs/

# create original config file if it doesn't exist
if [ ! -f "/config/donoteditthisfile.conf" ]; then
    echo -e "ORIGURL=\"$URL\" ORIGSUBDOMAINS=\"$SUBDOMAINS\" ORIGONLY_SUBDOMAINS=\"$ONLY_SUBDOMAINS\" ORIGEXTRA_DOMAINS=\"$EXTRA_DOMAINS\" ORIGVALIDATION=\"$VALIDATION\" ORIGDNSPLUGIN=\"$DNSPLUGIN\" ORIGPROPAGATION=\"$PROPAGATION\" ORIGSTAGING=\"$STAGING\" ORIGDUCKDNSTOKEN=\"$DUCKDNSTOKEN\"" > /config/donoteditthisfile.conf
    echo "Created donoteditthisfile.conf"
fi

# load original config settings
# shellcheck disable=SC1091
. /config/donoteditthisfile.conf

# set default validation to http
if [ -z "$VALIDATION" ]; then
    VALIDATION="http"
    echo "VALIDATION parameter not set; setting it to http"
fi

# if staging is set to true, use the staging server
if [ "$STAGING" = "true" ]; then
    echo "NOTICE: Staging is active"
    ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory"
else
    ACMESERVER="https://acme-v02.api.letsencrypt.org/directory"
fi

# figuring out url only vs url & subdomains vs subdomains only
if [ -n "$SUBDOMAINS" ]; then
    echo "SUBDOMAINS entered, processing"
    if [ "$SUBDOMAINS" = "wildcard" ]; then
        if [ "$ONLY_SUBDOMAINS" = true ]; then
            export URL_REAL="-d *.${URL}"
            echo "Wildcard cert for only the subdomains of $URL will be requested"
        else
            export URL_REAL="-d *.${URL} -d ${URL}"
            echo "Wildcard cert for $URL will be requested"
        fi
    else
        echo "SUBDOMAINS entered, processing"
        for job in $(echo "$SUBDOMAINS" | tr "," " "); do
            export SUBDOMAINS_REAL="$SUBDOMAINS_REAL -d ${job}.${URL}"
        done
        if [ "$ONLY_SUBDOMAINS" = true ]; then
            URL_REAL="$SUBDOMAINS_REAL"
            echo "Only subdomains, no URL in cert"
        else
            URL_REAL="-d ${URL}${SUBDOMAINS_REAL}"
        fi
        echo "Sub-domains processed are: $SUBDOMAINS_REAL"
    fi
else
    echo "No subdomains defined"
    URL_REAL="-d $URL"
fi

# add extra domains
if [ -n "$EXTRA_DOMAINS" ]; then
    echo "EXTRA_DOMAINS entered, processing"
    for job in $(echo "$EXTRA_DOMAINS" | tr "," " "); do
        export EXTRA_DOMAINS_REAL="$EXTRA_DOMAINS_REAL -d ${job}"
    done
    echo "Extra domains processed are: $EXTRA_DOMAINS_REAL"
    URL_REAL="$URL_REAL $EXTRA_DOMAINS_REAL"
fi

# figuring out whether to use e-mail and which
if [[ $EMAIL == *@* ]]; then
    echo "E-mail address entered: ${EMAIL}"
    EMAILPARAM="-m ${EMAIL} --no-eff-email"
else
    echo "No e-mail address entered or address invalid"
    EMAILPARAM="--register-unsafely-without-email"
fi

# setting the validation method to use
if [ "$VALIDATION" = "dns" ]; then
    if [ "$DNSPLUGIN" = "route53" ]; then
        if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
        PREFCHAL="--dns-${DNSPLUGIN} ${PROPAGATIONPARAM} --manual-public-ip-logging-ok"
    elif [[ "$DNSPLUGIN" =~ ^(cpanel)$ ]]; then
        if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--certbot-dns-${DNSPLUGIN}:${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
        PREFCHAL="-a certbot-dns-${DNSPLUGIN}:${DNSPLUGIN} --certbot-dns-${DNSPLUGIN}:${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM} --manual-public-ip-logging-ok"
    elif [[ "$DNSPLUGIN" =~ ^(gandi)$ ]]; then
        if [ -n "$PROPAGATION" ];then echo "Gandi dns plugin does not support setting propagation time"; fi
        PREFCHAL="-a certbot-plugin-${DNSPLUGIN}:dns --certbot-plugin-${DNSPLUGIN}:dns-credentials /config/dns-conf/${DNSPLUGIN}.ini --manual-public-ip-logging-ok"
    elif [[ "$DNSPLUGIN" =~ ^(google)$ ]]; then
        if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
        PREFCHAL="--dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.json ${PROPAGATIONPARAM} --manual-public-ip-logging-ok"
    elif [[ "$DNSPLUGIN" =~ ^(aliyun|domeneshop|inwx|transip)$ ]]; then
        if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--certbot-dns-${DNSPLUGIN}:dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
        PREFCHAL="-a certbot-dns-${DNSPLUGIN}:dns-${DNSPLUGIN} --certbot-dns-${DNSPLUGIN}:dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM} --manual-public-ip-logging-ok"
    else
        if [ -n "$PROPAGATION" ];then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
        PREFCHAL="--dns-${DNSPLUGIN} --dns-${DNSPLUGIN}-credentials /config/dns-conf/${DNSPLUGIN}.ini ${PROPAGATIONPARAM} --manual-public-ip-logging-ok"
    fi
    echo "${VALIDATION} validation via ${DNSPLUGIN} plugin is selected"
elif [ "$VALIDATION" = "tls-sni" ]; then
    PREFCHAL="--non-interactive --standalone --preferred-challenges http"
    echo "*****tls-sni validation has been deprecated, attempting http validation instead"
elif [ "$VALIDATION" = "duckdns" ]; then
    PREFCHAL="--non-interactive --manual --preferred-challenges dns --manual-public-ip-logging-ok --manual-auth-hook /app/duckdns-txt"
    chmod +x /app/duckdns-txt
    echo "duckdns validation is selected"
    if [ "$SUBDOMAINS" = "wildcard" ]; then
        echo "the resulting certificate will only cover the subdomains due to a limitation of duckdns, so it is advised to set the root location to use www.subdomain.duckdns.org"
        export URL_REAL="-d *.${URL}"
    else
        echo "the resulting certificate will only cover the main domain due to a limitation of duckdns, ie. subdomain.duckdns.org"
        export URL_REAL="-d ${URL}"
    fi
else
    PREFCHAL="--non-interactive --standalone --preferred-challenges http"
    echo "http validation is selected"
fi

# setting the symlink for key location
rm -rf /config/keys/letsencrypt
if [ "$ONLY_SUBDOMAINS" = "true" ] && [ ! "$SUBDOMAINS" = "wildcard" ] ; then
    DOMAIN="$(echo "$SUBDOMAINS" | tr ',' ' ' | awk '{print $1}').${URL}"
    ln -s ../etc/letsencrypt/live/"$DOMAIN" /config/keys/letsencrypt
else
    ln -s ../etc/letsencrypt/live/"$URL" /config/keys/letsencrypt
fi

# checking for changes in cert variables, revoking certs if necessary
if [ ! "$URL" = "$ORIGURL" ] || [ ! "$SUBDOMAINS" = "$ORIGSUBDOMAINS" ] || [ ! "$ONLY_SUBDOMAINS" = "$ORIGONLY_SUBDOMAINS" ] || [ ! "$EXTRA_DOMAINS" = "$ORIGEXTRA_DOMAINS" ] || [ ! "$VALIDATION" = "$ORIGVALIDATION" ] || [ ! "$DNSPLUGIN" = "$ORIGDNSPLUGIN" ] || [ ! "$PROPAGATION" = "$ORIGPROPAGATION" ] || [ ! "$STAGING" = "$ORIGSTAGING" ] || [ ! "$DUCKDNSTOKEN" = "$ORIGDUCKDNSTOKEN" ]; then
    echo "Different validation parameters entered than what was used before. Revoking and deleting existing certificate, and an updated one will be created"
    if [ "$ORIGONLY_SUBDOMAINS" = "true" ] && [ ! "$ORIGSUBDOMAINS" = "wildcard" ]; then
        ORIGDOMAIN="$(echo "$ORIGSUBDOMAINS" | tr ',' ' ' | awk '{print $1}').${ORIGURL}"
        [[ -f /config/etc/letsencrypt/live/"$ORIGDOMAIN"/fullchain.pem ]] && certbot revoke --non-interactive --cert-path /config/etc/letsencrypt/live/"$ORIGDOMAIN"/fullchain.pem
    else
        [[ -f /config/etc/letsencrypt/live/"$ORIGURL"/fullchain.pem ]] && certbot revoke --non-interactive --cert-path /config/etc/letsencrypt/live/"$ORIGURL"/fullchain.pem
    fi
    rm -rf /config/etc/letsencrypt
    mkdir -p /config/etc/letsencrypt
fi

# saving new variables
echo -e "ORIGURL=\"$URL\" ORIGSUBDOMAINS=\"$SUBDOMAINS\" ORIGONLY_SUBDOMAINS=\"$ONLY_SUBDOMAINS\" ORIGEXTRA_DOMAINS=\"$EXTRA_DOMAINS\" ORIGVALIDATION=\"$VALIDATION\" ORIGDNSPLUGIN=\"$DNSPLUGIN\" ORIGPROPAGATION=\"$PROPAGATION\" ORIGSTAGING=\"$STAGING\" ORIGDUCKDNSTOKEN=\"$DUCKDNSTOKEN\"" > /config/donoteditthisfile.conf

# alter extension for error message
if [ "$DNSPLUGIN" = "google" ]; then
    FILENAME="$DNSPLUGIN.json"
else
    FILENAME="$DNSPLUGIN.ini"
fi

# generating certs if necessary
if [ ! -f "/config/keys/letsencrypt/fullchain.pem" ]; then
    echo "Generating new certificate"
    # shellcheck disable=SC2086
    certbot certonly --renew-by-default --server $ACMESERVER $PREFCHAL --rsa-key-size 4096 $EMAILPARAM --agree-tos $URL_REAL
    if [ -d /config/keys/letsencrypt ]; then
        cd /config/keys/letsencrypt || exit
    else
        if [ "$VALIDATION" = "dns" ]; then
            echo "ERROR: Cert does not exist! Please see the validation error above. Make sure you entered correct credentials into the /config/dns-conf/${FILENAME} file."
        elif [ "$VALIDATION" = "duckdns" ]; then
            echo "ERROR: Cert does not exist! Please see the validation error above. Make sure your DUCKDNSTOKEN is correct."
        else
            echo "ERROR: Cert does not exist! Please see the validation error above. The issue may be due to incorrect dns or port forwarding settings. Please fix your settings and recreate the container"
        fi
        sleep infinity
    fi
    openssl pkcs12 -export -out privkey.pfx -inkey privkey.pem -in cert.pem -certfile chain.pem -passout pass:
    sleep 1
    cat {privkey,fullchain}.pem > priv-fullchain-bundle.pem
    echo "New certificate generated; starting nginx"
else
    echo "Certificate exists; parameters unchanged; starting nginx"
fi

# create GeoIP2 folder symlink
[[ -d /var/lib/libmaxminddb ]] && [[ ! -L /var/lib/libmaxminddb ]] && \
    rm -rf /var/lib/libmaxminddb
[[ ! -d /var/lib/libmaxminddb ]] && \
    ln -s /config/geoip2db /var/lib/libmaxminddb
# check GeoIP2 database
if [ -n "$MAXMINDDB_LICENSE_KEY" ]; then
    sed -i "s|.*MAXMINDDB_LICENSE_KEY.*|MAXMINDDB_LICENSE_KEY=\"${MAXMINDDB_LICENSE_KEY}\"|g" /etc/conf.d/libmaxminddb
    if [ ! -f /var/lib/libmaxminddb/GeoLite2-City.mmdb ]; then
        echo "Downloading GeoIP2 City database."
        /etc/periodic/weekly/libmaxminddb
    fi
elif [ -f /var/lib/libmaxminddb/GeoLite2-City.mmdb ]; then
    echo -e "Currently using the user provided GeoLite2-City.mmdb.\nIf you want to enable weekly auto-updates of the database, retrieve a free license key from MaxMind,\nand add a new env variable \"MAXMINDDB_LICENSE_KEY\", set to your license key."
else
    echo -e "Starting 2019/12/30, GeoIP2 databases require personal license key to download. Please retrieve a free license key from MaxMind,\nand add a new env variable \"MAXMINDDB_LICENSE_KEY\", set to your license key."
fi

# logfiles needed by fail2ban
[[ ! -f /config/log/nginx/error.log ]] && \
    touch /config/log/nginx/error.log
[[ ! -f /config/log/nginx/access.log ]] && \
    touch /config/log/nginx/access.log

# permissions
chown -R abc:abc \
    /config
chmod -R 0644 /etc/logrotate.d
chmod -R +r /config/log
chmod +x /app/le-renew.sh
chmod 700 /defaults/dns-conf
chmod 600 /defaults/dns-conf/*
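As an aside (an illustration, not part of the diff), the dhparams fallback in the script above can be exercised on its own: reuse a pre-generated file when present, download the shared one otherwise, and only generate locally as a last resort. The target path below is an assumption.

#!/bin/bash
# Hypothetical standalone sketch mirroring the dhparams fallback above.
DHPARAMS=./dhparams.pem              # assumption: adjust to your own path
if ! grep -q 'PARAMETERS' "${DHPARAMS}" 2>/dev/null; then
    # try the pre-generated parameters published by linuxserver.io first
    curl -o "${DHPARAMS}" -L "https://lsio.ams3.digitaloceanspaces.com/dhparams.pem"
fi
if ! grep -q 'PARAMETERS' "${DHPARAMS}" 2>/dev/null; then
    # last resort: generating 4096-bit parameters locally can take a long time
    openssl dhparam -out "${DHPARAMS}" 4096
fi
openssl dhparam -in "${DHPARAMS}" -check -noout   # sanity check the result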
5 root/etc/s6-overlay/s6-rc.d/init-renew/run → root/etc/cont-init.d/60-renew
Executable file → Normal file
@ -1,11 +1,10 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# Check if the cert is expired or expires within a day, if so, renew
if openssl x509 -in /config/keys/letsencrypt/fullchain.pem -noout -checkend 86400 >/dev/null; then
    echo "The cert does not expire within the next day. Letting the cron script handle the renewal attempts overnight (2:08am)."
else
    echo "The cert is either expired or it expires within the next day. Attempting to renew. This could take up to 10 minutes."
    /app/le-renew.sh
    sleep 1
fi
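For context (not part of the diff), openssl's -checkend flag is what drives this renewal decision; a minimal sketch with a hypothetical certificate path and a two-day window:

#!/bin/bash
# Hypothetical sketch of the -checkend pattern used in 60-renew.
# Exit status 0 means the cert is still valid for the whole window.
CERT=/path/to/fullchain.pem      # assumption: point this at your own cert
WINDOW=$((2 * 86400))            # two days, in seconds
if openssl x509 -in "${CERT}" -noout -checkend "${WINDOW}" >/dev/null; then
    echo "Certificate is valid for at least ${WINDOW} more seconds; no renewal needed."
else
    echo "Certificate expires within ${WINDOW} seconds (or is already expired); renew now."
fi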
@ -1,11 +1,11 @@
/config/log/letsencrypt/*.log {
    weekly
    rotate 52
    compress
    delaycompress
    nodateext
    missingok
    notifempty
    sharedscripts
    su abc abc
}
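As a usage note (an assumption, not part of the diff), a stanza like this can be dry-run against its config file to confirm it matches the intended logs before relying on the schedule; the file path below is an assumption.

#!/bin/bash
# Hypothetical sketch: dry-run logrotate against a single config file to see
# what it would do, without actually rotating anything.
logrotate -d /etc/logrotate.d/letsencrypt
# once the dry run looks right, force an immediate rotation to verify end to end:
logrotate -f /etc/logrotate.d/letsencrypt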
@ -1,338 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# Display variables for troubleshooting
echo -e "Variables set:\\n\
PUID=${PUID}\\n\
PGID=${PGID}\\n\
TZ=${TZ}\\n\
URL=${URL}\\n\
SUBDOMAINS=${SUBDOMAINS}\\n\
EXTRA_DOMAINS=${EXTRA_DOMAINS}\\n\
ONLY_SUBDOMAINS=${ONLY_SUBDOMAINS}\\n\
VALIDATION=${VALIDATION}\\n\
CERTPROVIDER=${CERTPROVIDER}\\n\
DNSPLUGIN=${DNSPLUGIN}\\n\
EMAIL=${EMAIL}\\n\
STAGING=${STAGING}\\n"

# Sanitize variables
SANED_VARS=(DNSPLUGIN EMAIL EXTRA_DOMAINS ONLY_SUBDOMAINS STAGING SUBDOMAINS URL VALIDATION CERTPROVIDER)
for i in "${SANED_VARS[@]}"; do
    export echo "${i}"="${!i//\"/}"
    export echo "${i}"="$(echo "${!i}" | tr '[:upper:]' '[:lower:]')"
done

# check to make sure DNSPLUGIN is selected if dns validation is used
if [[ "${VALIDATION}" = "dns" ]] && [[ ! "${DNSPLUGIN}" =~ ^(acmedns|aliyun|azure|cloudflare|cpanel|desec|digitalocean|directadmin|dnsimple|dnsmadeeasy|dnspod|do|domeneshop|duckdns|dynu|gandi|gehirn|godaddy|google|google-domains|he|hetzner|infomaniak|inwx|ionos|linode|loopia|luadns|netcup|njalla|nsone|ovh|porkbun|rfc2136|route53|sakuracloud|standalone|transip|vultr)$ ]]; then
    echo "Please set the DNSPLUGIN variable to a valid plugin name. See docker info for more details."
    sleep infinity
fi

# copy dns default configs
cp -n /defaults/dns-conf/* /config/dns-conf/
lsiown -R abc:abc /config/dns-conf

# copy default renewal hooks
chmod -R +x /defaults/etc/letsencrypt/renewal-hooks
cp -nR /defaults/etc/letsencrypt/renewal-hooks/* /config/etc/letsencrypt/renewal-hooks/
lsiown -R abc:abc /config/etc/letsencrypt/renewal-hooks

# replace nginx service location in renewal hooks
find /config/etc/letsencrypt/renewal-hooks/ -type f -exec sed -i 's|/run/service/nginx|/run/service/svc-nginx|g' {} \;
find /config/etc/letsencrypt/renewal-hooks/ -type f -exec sed -i 's|/var/run/s6/services/nginx|/run/service/svc-nginx|g' {} \;
find /config/etc/letsencrypt/renewal-hooks/ -type f -exec sed -i 's|s6-supervise nginx|s6-supervise svc-nginx|g' {} \;

# create original config file if it doesn't exist, move non-hidden legacy file to hidden
if [[ -f "/config/donoteditthisfile.conf" ]]; then
    mv /config/donoteditthisfile.conf /config/.donoteditthisfile.conf
fi
if [[ ! -f "/config/.donoteditthisfile.conf" ]]; then
    echo -e "ORIGURL=\"${URL}\" ORIGSUBDOMAINS=\"${SUBDOMAINS}\" ORIGONLY_SUBDOMAINS=\"${ONLY_SUBDOMAINS}\" ORIGEXTRA_DOMAINS=\"${EXTRA_DOMAINS}\" ORIGVALIDATION=\"${VALIDATION}\" ORIGDNSPLUGIN=\"${DNSPLUGIN}\" ORIGPROPAGATION=\"${PROPAGATION}\" ORIGSTAGING=\"${STAGING}\" ORIGCERTPROVIDER=\"${CERTPROVIDER}\" ORIGEMAIL=\"${EMAIL}\"" >/config/.donoteditthisfile.conf
    echo "Created .donoteditthisfile.conf"
fi

# load original config settings
# shellcheck source=/dev/null
. /config/.donoteditthisfile.conf

# setting ORIGDOMAIN for use in revoke sections
if [[ "${ORIGONLY_SUBDOMAINS}" = "true" ]] && [[ ! "${ORIGSUBDOMAINS}" = "wildcard" ]]; then
    ORIGDOMAIN="$(echo "${ORIGSUBDOMAINS}" | tr ',' ' ' | awk '{print $1}').${ORIGURL}"
else
    ORIGDOMAIN="${ORIGURL}"
fi

# update plugin names in dns conf inis
sed -i 's|^certbot[-_]dns[-_]aliyun:||g' /config/dns-conf/aliyun.ini
sed -i 's|^certbot[-_]dns[-_]cpanel:||g' /config/dns-conf/cpanel.ini
sed -i 's|^dns[-_]cpanel[-_]|cpanel_|g' /config/dns-conf/cpanel.ini
sed -i 's|^directadmin[-_]|dns_directadmin_|g' /config/dns-conf/directadmin.ini
sed -i 's|^certbot[-_]dns[-_]domeneshop:||g' /config/dns-conf/domeneshop.ini
sed -i 's|^certbot[-_]plugin[-_]gandi:dns[-_]|dns_gandi_|g' /config/dns-conf/gandi.ini
sed -i 's|^certbot[-_]dns[-_]inwx:||g' /config/dns-conf/inwx.ini
sed -i 's|^certbot[-_]dns[-_]transip:||g' /config/dns-conf/transip.ini

# update plugin names in renewal conf
if [[ -f "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf" ]] && [[ "${ORIGVALIDATION}" = "dns" ]]; then
    if [[ "${ORIGDNSPLUGIN}" =~ ^(aliyun)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]dns[-_]aliyun:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]dns[-_]aliyun:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(cpanel)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]dns[-_]cpanel:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]dns[-_]cpanel:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^authenticator = dns[-_]cpanel|authenticator = cpanel|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^dns[-_]cpanel[-_]|cpanel_|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(directadmin)$ ]]; then
        sed -i 's|^authenticator = directadmin|authenticator = dns-directadmin|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^directadmin[-_]|dns_directadmin_|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(domeneshop)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]dns[-_]domeneshop:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]dns[-_]domeneshop:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(gandi)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]plugin[-_]gandi:dns|authenticator = dns-gandi|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]plugin[-_]gandi:dns[-_]|dns_gandi_|g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(inwx)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]dns[-_]inwx:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]dns[-_]inwx:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
    if [[ "${ORIGDNSPLUGIN}" =~ ^(transip)$ ]]; then
        sed -i 's|^authenticator = certbot[-_]dns[-_]transip:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
        sed -i 's|^certbot[-_]dns[-_]transip:||g' "/config/etc/letsencrypt/renewal/${ORIGDOMAIN}.conf"
    fi
fi

# set default validation to http
if [[ -z "${VALIDATION}" ]]; then
    VALIDATION="http"
    echo "VALIDATION parameter not set; setting it to http"
fi

# set duckdns validation to dns
if [[ "${VALIDATION}" = "duckdns" ]]; then
    VALIDATION="dns"
    DNSPLUGIN="duckdns"
    if [[ -n "${DUCKDNSTOKEN}" ]] && ! grep -q "dns_duckdns_token=${DUCKDNSTOKEN}$" /config/dns-conf/duckdns.ini; then
        sed -i "s|^dns_duckdns_token=.*|dns_duckdns_token=${DUCKDNSTOKEN}|g" /config/dns-conf/duckdns.ini
    fi
fi
if [[ "${VALIDATION}" = "dns" ]] && [[ "${DNSPLUGIN}" = "duckdns" ]]; then
    if [[ "${SUBDOMAINS}" = "wildcard" ]]; then
        echo "the resulting certificate will only cover the subdomains due to a limitation of duckdns, so it is advised to set the root location to use www.subdomain.duckdns.org"
        export ONLY_SUBDOMAINS=true
    else
        echo "the resulting certificate will only cover the main domain due to a limitation of duckdns, ie. subdomain.duckdns.org"
        export SUBDOMAINS=""
    fi
    export EXTRA_DOMAINS=""
fi

# setting the symlink for key location
rm -rf /config/keys/letsencrypt
if [[ "${ONLY_SUBDOMAINS}" = "true" ]] && [[ ! "${SUBDOMAINS}" = "wildcard" ]]; then
    DOMAIN="$(echo "${SUBDOMAINS}" | tr ',' ' ' | awk '{print $1}').${URL}"
    ln -s ../etc/letsencrypt/live/"${DOMAIN}" /config/keys/letsencrypt
else
    ln -s ../etc/letsencrypt/live/"${URL}" /config/keys/letsencrypt
fi

# cleanup unused csr and keys folders
rm -rf /etc/letsencrypt/csr
rm -rf /etc/letsencrypt/keys

# checking for changes in cert variables, revoking certs if necessary
if [[ ! "${URL}" = "${ORIGURL}" ]] ||
    [[ ! "${SUBDOMAINS}" = "${ORIGSUBDOMAINS}" ]] ||
    [[ ! "${ONLY_SUBDOMAINS}" = "${ORIGONLY_SUBDOMAINS}" ]] ||
    [[ ! "${EXTRA_DOMAINS}" = "${ORIGEXTRA_DOMAINS}" ]] ||
    [[ ! "${VALIDATION}" = "${ORIGVALIDATION}" ]] ||
    [[ ! "${DNSPLUGIN}" = "${ORIGDNSPLUGIN}" ]] ||
    [[ ! "${PROPAGATION}" = "${ORIGPROPAGATION}" ]] ||
    [[ ! "${STAGING}" = "${ORIGSTAGING}" ]] ||
    [[ ! "${CERTPROVIDER}" = "${ORIGCERTPROVIDER}" ]]; then
    echo "Different validation parameters entered than what was used before. Revoking and deleting existing certificate, and an updated one will be created"
    if [[ "${ORIGCERTPROVIDER}" = "zerossl" ]] && [[ -n "${ORIGEMAIL}" ]]; then
        REV_EAB_CREDS=$(curl -s https://api.zerossl.com/acme/eab-credentials-email --data "email=${ORIGEMAIL}")
        REV_ZEROSSL_EAB_KID=$(echo "${REV_EAB_CREDS}" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_kid'])")
        REV_ZEROSSL_EAB_HMAC_KEY=$(echo "${REV_EAB_CREDS}" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_hmac_key'])")
        if [[ -z "${REV_ZEROSSL_EAB_KID}" ]] || [[ -z "${REV_ZEROSSL_EAB_HMAC_KEY}" ]]; then
            echo "Unable to retrieve EAB credentials from ZeroSSL. Check the outgoing connections to api.zerossl.com and dns. Sleeping."
            sleep infinity
        fi
        REV_ACMESERVER="https://acme.zerossl.com/v2/DV90 --eab-kid ${REV_ZEROSSL_EAB_KID} --eab-hmac-key ${REV_ZEROSSL_EAB_HMAC_KEY}"
    elif [[ "${ORIGSTAGING}" = "true" ]]; then
        REV_ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory"
    else
        REV_ACMESERVER="https://acme-v02.api.letsencrypt.org/directory"
    fi
    if [[ -f /config/etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem ]]; then
        certbot revoke --non-interactive --cert-path /config/etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem --server ${REV_ACMESERVER} || true
    fi
    rm -rf /config/etc/letsencrypt/{accounts,archive,live,renewal}
fi

# saving new variables
echo -e "ORIGURL=\"${URL}\" ORIGSUBDOMAINS=\"${SUBDOMAINS}\" ORIGONLY_SUBDOMAINS=\"${ONLY_SUBDOMAINS}\" ORIGEXTRA_DOMAINS=\"${EXTRA_DOMAINS}\" ORIGVALIDATION=\"${VALIDATION}\" ORIGDNSPLUGIN=\"${DNSPLUGIN}\" ORIGPROPAGATION=\"${PROPAGATION}\" ORIGSTAGING=\"${STAGING}\" ORIGCERTPROVIDER=\"${CERTPROVIDER}\" ORIGEMAIL=\"${EMAIL}\"" >/config/.donoteditthisfile.conf

# Check if the cert is using the old LE root cert, revoke and regen if necessary
if [[ -f "/config/keys/letsencrypt/chain.pem" ]] && { [[ "${CERTPROVIDER}" == "letsencrypt" ]] || [[ "${CERTPROVIDER}" == "" ]]; } && [[ "${STAGING}" != "true" ]] && ! openssl x509 -in /config/keys/letsencrypt/chain.pem -noout -issuer | grep -q "ISRG Root X"; then
    echo "The cert seems to be using the old LE root cert, which is no longer valid. Deleting and revoking."
    REV_ACMESERVER="https://acme-v02.api.letsencrypt.org/directory"
    if [[ -f /config/etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem ]]; then
        certbot revoke --non-interactive --cert-path /config/etc/letsencrypt/live/"${ORIGDOMAIN}"/fullchain.pem --server ${REV_ACMESERVER} || true
    fi
    rm -rf /config/etc/letsencrypt/{accounts,archive,live,renewal}
fi

# if zerossl is selected or staging is set to true, use the relevant server
if [[ "${CERTPROVIDER}" = "zerossl" ]] && [[ "${STAGING}" = "true" ]]; then
    echo "ZeroSSL does not support staging mode, ignoring STAGING variable"
fi
if [[ "${CERTPROVIDER}" = "zerossl" ]] && [[ -n "${EMAIL}" ]]; then
    echo "ZeroSSL is selected as the cert provider, registering cert with ${EMAIL}"
    ACMESERVER="https://acme.zerossl.com/v2/DV90"
elif [[ "${CERTPROVIDER}" = "zerossl" ]] && [[ -z "${EMAIL}" ]]; then
    echo "ZeroSSL is selected as the cert provider, but the e-mail address has not been entered. Please visit https://zerossl.com, register a new account and set the account e-mail address in the EMAIL environment variable"
    sleep infinity
elif [[ "${STAGING}" = "true" ]]; then
    echo "NOTICE: Staging is active"
    echo "Using Let's Encrypt as the cert provider"
    ACMESERVER="https://acme-staging-v02.api.letsencrypt.org/directory"
else
    echo "Using Let's Encrypt as the cert provider"
    ACMESERVER="https://acme-v02.api.letsencrypt.org/directory"
fi

# figuring out url only vs url & subdomains vs subdomains only
if [[ -n "${SUBDOMAINS}" ]]; then
    echo "SUBDOMAINS entered, processing"
    if [[ "${SUBDOMAINS}" = "wildcard" ]]; then
        if [[ "${ONLY_SUBDOMAINS}" = true ]]; then
            export URL_REAL="-d *.${URL}"
            echo "Wildcard cert for only the subdomains of ${URL} will be requested"
        else
            export URL_REAL="-d *.${URL} -d ${URL}"
            echo "Wildcard cert for ${URL} will be requested"
        fi
    else
        echo "SUBDOMAINS entered, processing"
        for job in $(echo "${SUBDOMAINS}" | tr "," " "); do
            export SUBDOMAINS_REAL="${SUBDOMAINS_REAL} -d ${job}.${URL}"
        done
        if [[ "${ONLY_SUBDOMAINS}" = true ]]; then
            URL_REAL="${SUBDOMAINS_REAL}"
            echo "Only subdomains, no URL in cert"
        else
            URL_REAL="-d ${URL}${SUBDOMAINS_REAL}"
        fi
        echo "Sub-domains processed are: ${SUBDOMAINS_REAL}"
    fi
else
    echo "No subdomains defined"
    URL_REAL="-d ${URL}"
fi

# add extra domains
if [[ -n "${EXTRA_DOMAINS}" ]]; then
    echo "EXTRA_DOMAINS entered, processing"
    for job in $(echo "${EXTRA_DOMAINS}" | tr "," " "); do
        export EXTRA_DOMAINS_REAL="${EXTRA_DOMAINS_REAL} -d ${job}"
    done
    echo "Extra domains processed are: ${EXTRA_DOMAINS_REAL}"
    URL_REAL="${URL_REAL} ${EXTRA_DOMAINS_REAL}"
fi

# figuring out whether to use e-mail and which
if [[ ${EMAIL} == *@* ]]; then
    echo "E-mail address entered: ${EMAIL}"
    EMAILPARAM="-m ${EMAIL} --no-eff-email"
else
    echo "No e-mail address entered or address invalid"
    EMAILPARAM="--register-unsafely-without-email"
fi

# alter extension for error message
if [[ "${DNSPLUGIN}" = "google" ]]; then
    DNSCREDENTIALFILE="/config/dns-conf/${DNSPLUGIN}.json"
else
    DNSCREDENTIALFILE="/config/dns-conf/${DNSPLUGIN}.ini"
fi

# setting the validation method to use
if [[ "${VALIDATION}" = "dns" ]]; then
    AUTHENTICATORPARAM="--authenticator dns-${DNSPLUGIN}"
    DNSCREDENTIALSPARAM="--dns-${DNSPLUGIN}-credentials ${DNSCREDENTIALFILE}"
    if [[ -n "${PROPAGATION}" ]]; then PROPAGATIONPARAM="--dns-${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi

    # plugins that don't support setting credentials file
    if [[ "${DNSPLUGIN}" =~ ^(route53|standalone)$ ]]; then
        DNSCREDENTIALSPARAM=""
    fi
    # plugins that don't support setting propagation
    if [[ "${DNSPLUGIN}" =~ ^(azure|gandi|route53|standalone)$ ]]; then
        if [[ -n "${PROPAGATION}" ]]; then echo "${DNSPLUGIN} dns plugin does not support setting propagation time"; fi
        PROPAGATIONPARAM=""
    fi
    # plugins that use old parameter naming convention
    if [[ "${DNSPLUGIN}" =~ ^(cpanel)$ ]]; then
        AUTHENTICATORPARAM="--authenticator ${DNSPLUGIN}"
        DNSCREDENTIALSPARAM="--${DNSPLUGIN}-credentials ${DNSCREDENTIALFILE}"
        if [[ -n "${PROPAGATION}" ]]; then PROPAGATIONPARAM="--${DNSPLUGIN}-propagation-seconds ${PROPAGATION}"; fi
    fi
    # don't restore txt records when using DuckDNS plugin
    if [[ "${DNSPLUGIN}" =~ ^(duckdns)$ ]]; then
        AUTHENTICATORPARAM="${AUTHENTICATORPARAM} --dns-${DNSPLUGIN}-no-txt-restore"
    fi

    PREFCHAL="${AUTHENTICATORPARAM} ${DNSCREDENTIALSPARAM} ${PROPAGATIONPARAM}"
    echo "${VALIDATION} validation via ${DNSPLUGIN} plugin is selected"
elif [[ "${VALIDATION}" = "tls-sni" ]]; then
    PREFCHAL="--standalone --preferred-challenges http"
    echo "*****tls-sni validation has been deprecated, attempting http validation instead"
else
    PREFCHAL="--standalone --preferred-challenges http"
    echo "http validation is selected"
fi

# generating certs if necessary
if [[ ! -f "/config/keys/letsencrypt/fullchain.pem" ]]; then
    if [[ "${CERTPROVIDER}" = "zerossl" ]] && [[ -n "${EMAIL}" ]]; then
        echo "Retrieving EAB from ZeroSSL"
        EAB_CREDS=$(curl -s https://api.zerossl.com/acme/eab-credentials-email --data "email=${EMAIL}")
        ZEROSSL_EAB_KID=$(echo "${EAB_CREDS}" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_kid'])")
        ZEROSSL_EAB_HMAC_KEY=$(echo "${EAB_CREDS}" | python3 -c "import sys, json; print(json.load(sys.stdin)['eab_hmac_key'])")
        if [[ -z "${ZEROSSL_EAB_KID}" ]] || [[ -z "${ZEROSSL_EAB_HMAC_KEY}" ]]; then
            echo "Unable to retrieve EAB credentials from ZeroSSL. Check the outgoing connections to api.zerossl.com and dns. Sleeping."
            sleep infinity
        fi
        ZEROSSL_EAB="--eab-kid ${ZEROSSL_EAB_KID} --eab-hmac-key ${ZEROSSL_EAB_HMAC_KEY}"
    fi
    echo "Generating new certificate"
    # shellcheck disable=SC2086
    certbot certonly --non-interactive --renew-by-default --server ${ACMESERVER} ${ZEROSSL_EAB} ${PREFCHAL} --rsa-key-size 4096 ${EMAILPARAM} --agree-tos ${URL_REAL}
    if [[ ! -d /config/keys/letsencrypt ]]; then
        if [[ "${VALIDATION}" = "dns" ]]; then
            echo "ERROR: Cert does not exist! Please see the validation error above. Make sure you entered correct credentials into the ${DNSCREDENTIALFILE} file."
        else
            echo "ERROR: Cert does not exist! Please see the validation error above. The issue may be due to incorrect dns or port forwarding settings. Please fix your settings and recreate the container"
        fi
        sleep infinity
    fi
    run-parts /config/etc/letsencrypt/renewal-hooks/deploy/
    echo "New certificate generated; starting nginx"
else
    echo "Certificate exists; parameters unchanged; starting nginx"
fi

# if certbot generated key exists, remove self-signed cert and replace it with symlink to live cert
if [[ -d /config/keys/letsencrypt ]]; then
    rm -rf /config/keys/cert.crt
    ln -s ./letsencrypt/fullchain.pem /config/keys/cert.crt
    rm -rf /config/keys/cert.key
    ln -s ./letsencrypt/privkey.pem /config/keys/cert.key
fi
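For orientation (an illustration, not part of the diff), the run-parts call near the end of this script is what fires the deploy hooks after a successful issuance: every executable file in /config/etc/letsencrypt/renewal-hooks/deploy/ is executed in turn. A minimal hypothetical hook could look like this; the reload command is an assumed example, not the image's actual hook.

#!/bin/bash
# Hypothetical deploy hook sketch: drop an executable file like this into
# /config/etc/letsencrypt/renewal-hooks/deploy/ and run-parts will execute it
# after certbot obtains or renews a certificate.
echo "Certificate deployed; reloading nginx"
nginx -s reload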
@ -1 +0,0 @@
oneshot
@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-certbot-config/run
@ -1,38 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# make folders
mkdir -p \
    /config/crontabs

## root
# if crontabs do not exist in config
if [[ ! -f /config/crontabs/root ]]; then
    # copy crontab from system
    if crontab -l -u root; then
        crontab -l -u root >/config/crontabs/root
    fi

    # if crontabs still do not exist in config (were not copied from system)
    # copy crontab from included defaults (using -n, do not overwrite an existing file)
    cp -n /etc/crontabs/root /config/crontabs/
fi
# set permissions and import user crontabs
lsiown root:root /config/crontabs/root
crontab -u root /config/crontabs/root

## abc
# if crontabs do not exist in config
if [[ ! -f /config/crontabs/abc ]]; then
    # copy crontab from system
    if crontab -l -u abc; then
        crontab -l -u abc >/config/crontabs/abc
    fi

    # if crontabs still do not exist in config (were not copied from system)
    # copy crontab from included defaults (using -n, do not overwrite an existing file)
    cp -n /etc/crontabs/abc /config/crontabs/
fi
# set permissions and import user crontabs
lsiown abc:abc /config/crontabs/abc
crontab -u abc /config/crontabs/abc
@ -1 +0,0 @@
oneshot
@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-crontabs-config/run
@ -1,29 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# copy/update the fail2ban config defaults to/in /config
cp -R /defaults/fail2ban/filter.d /config/fail2ban/
cp -R /defaults/fail2ban/action.d /config/fail2ban/
# if jail.local is missing in /config, copy default
if [[ ! -f /config/fail2ban/jail.local ]]; then
    cp /defaults/fail2ban/jail.local /config/fail2ban/jail.local
fi
# Replace fail2ban config with user config
if [[ -d /etc/fail2ban/filter.d ]]; then
    rm -rf /etc/fail2ban/filter.d
fi
if [[ -d /etc/fail2ban/action.d ]]; then
    rm -rf /etc/fail2ban/action.d
fi
cp -R /config/fail2ban/filter.d /etc/fail2ban/
cp -R /config/fail2ban/action.d /etc/fail2ban/
cp /defaults/fail2ban/fail2ban.local /etc/fail2ban/
cp /config/fail2ban/jail.local /etc/fail2ban/jail.local

# logfiles needed by fail2ban
if [[ ! -f /config/log/nginx/error.log ]]; then
    touch /config/log/nginx/error.log
fi
if [[ ! -f /config/log/nginx/access.log ]]; then
    touch /config/log/nginx/access.log
fi
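As a usage note (not part of the diff), a filter copied into /config/fail2ban/filter.d can be sanity-checked against the nginx log before a jail is relied on; this sketch assumes the stock fail2ban-regex and fail2ban-client tools and uses a hypothetical filter/jail name.

#!/bin/bash
# Hypothetical sketch: dry-run a fail2ban filter against the nginx access log
# to confirm its failregex actually matches. Filter/jail name is an assumption.
fail2ban-regex /config/log/nginx/access.log /config/fail2ban/filter.d/nginx-http-auth.conf
# once jails are live, inspect their status from inside the container:
fail2ban-client status
fail2ban-client status nginx-http-auth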
@ -1 +0,0 @@
oneshot
@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-fail2ban-config/run
@ -1,12 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash

# make our folders and links
mkdir -p \
    /config/{fail2ban,crontabs,dns-conf} \
    /config/etc/letsencrypt/renewal-hooks \
    /config/log/{fail2ban,letsencrypt,nginx} \
    /config/nginx/proxy-confs \
    /run/fail2ban
rm -rf /etc/letsencrypt
ln -s /config/etc/letsencrypt /etc/letsencrypt
@ -1 +0,0 @@
oneshot
Some files were not shown because too many files have changed in this diff.