Compare commits

..

46 Commits

Author SHA1 Message Date
Matt Hook
96a626324c fix(buildscripts): make build process more closely resemble EE (#8881) 2023-05-04 13:04:16 +12:00
Chaim Lev-Ari
5fd36ee986 chore(build): remove grunt and add makefile [EE-4824] (#8803) 2023-05-02 12:49:51 +07:00
cmeng
7c2fcb67eb fix(stack) add skip TLS toggle for edit stack EE-5391 (#8850) 2023-04-28 13:35:38 +12:00
matias-portainer
2eb4453487 fix(images): avoid returning null on registryId default value EE-5394 (#8842) 2023-04-26 10:24:49 -03:00
cmeng
535e499cc5 fix(webhook) remove NaN fom webhook url EE-5373 (#8815) 2023-04-21 10:56:59 +12:00
Matt Hook
fee315b07e bump version to 2.18.2 (#8808) 2023-04-20 09:05:18 +12:00
Ali
d1166b5294 fix(editor): fix styles [EE-5369] (#8810)
* fix(editor): fix styles [EE-5369]

* rm hash

---------

Co-authored-by: testa113 <testa113>
2023-04-20 08:27:31 +12:00
Matt Hook
e3b727a636 bump version to 2.18.1 (#8802) 2023-04-18 14:50:18 +12:00
Chaim Lev-Ari
d56ea05218 fix(edge/updates): add padding for edge groups [EE-5349] (#8771) 2023-04-18 13:40:06 +12:00
Dakota Walsh
8e724e3fbe feat(libhelm): allow passing optional env and http client [EE-5252] (#8798)
Co-authored-by: Matt Hook <hookenz@gmail.com>
2023-04-14 15:51:11 +12:00
cmeng
33b141bcd3 fix(backup) add description text to backup EE-5283 (#8776) 2023-04-13 16:04:59 +12:00
Matt Hook
ded8ce48a8 feat(cert): ce teasers for ca cert [EE-5252] (#8769) 2023-04-13 15:32:58 +12:00
Oscar Zhou
e60635bf32 fix(swagger): correct endpoint api annotations [EE-5333] (#8762) 2023-04-13 15:31:18 +12:00
cmeng
6fb4951949 fix(stack): upgrade docker-compose EE-5334 (#8756) 2023-04-11 17:55:53 +12:00
Oscar Zhou
c429b29216 fix(k8s/gitops): missing git auth toggle in k8s app edit page [EE-5320] (#8740) 2023-04-10 20:14:04 +12:00
Ali
8ab490f224 fix(ns): add selection caching back [EE-5273] (#8739)
Co-authored-by: testa113 <testa113>
2023-04-06 14:28:05 +12:00
Matt Hook
78b83420bf search for correct source directory when doing a restore (#8677) 2023-04-06 10:39:16 +12:00
cmeng
b4dbc341cc fix(homepage) move heartbeat logic to backend EE-5317 (#8736) 2023-04-06 09:09:13 +12:00
Matt Hook
3118c639f6 fix(docs): add missing swagger docs for upload file [EE-4886] (#8707)
* add docs for uploading files via host management features

* fix other doc issues
2023-04-04 16:59:26 +12:00
cmeng
5d7ab85473 fix(security): potential vulnerability of path traversal attacks EE-5303 (#8727) 2023-04-04 09:00:11 +12:00
Chaim Lev-Ari
99331a81d4 feat(gitops): allow to skip tls verification [EE-5023] (#8679) 2023-04-03 09:19:09 +03:00
Prabhat Khera
ab1a8c1d6a fix(ui): namespace caching issue EE-5273 (#8710)
* fix namespace caching issue

* fix(apps): add loading state [EE-5273]

* rm endpoint provider

* fix(namespace): remove caching [EE-5273]

* variable typo

---------

Co-authored-by: testa113 <testa113>
2023-03-31 13:25:00 +13:00
Chaim Lev-Ari
e063cba81b fix(ui/code-editor): stretch code editor content full height [EE-5202] (#8672) 2023-03-30 12:26:35 +03:00
Ali
23e6a982b9 fix(ns): save filter to local storage [EE-5287] (#8724)
* fix(ns): save filter to local storage [EE-5287]

* allow system ns and save per user

---------

Co-authored-by: testa113 <testa113>
2023-03-30 11:21:08 +13:00
andres-portainer
0bf75ae113 fix(snapshots): change the snapshot object to maintain backwards compatibility EE-5240 (#8704) 2023-03-23 13:30:50 -03:00
Ali
72b41dde01 fix(apps) UI release fixes [EE-5197] (#8703)
* fix(apps) searchbar flex resizing and insights

* UI fixes

* update stacks datatable

---------

Co-authored-by: testa113 <testa113>
2023-03-23 08:20:34 +13:00
Ali
36b122ca21 fix(dashboard): use faster proxy request [EE-5160] (#8694)
Co-authored-by: testa113 <testa113>
2023-03-22 15:34:48 +13:00
Prabhat Khera
649799069b fix Gpus null issue (#8691) 2023-03-21 16:05:55 +13:00
Oscar Zhou
0ca56ddbb1 fix(stack/git): fix cursor movement issue in git text fields (#8656) 2023-03-20 10:00:35 +13:00
Chaim Lev-Ari
3a30c8ed1e fix(ui/box-selector): BE link and use icons standard size [EE-5133] (#8659) 2023-03-19 13:37:44 +01:00
Ali
151db6bfe7 fix(kubeconfig): fix download checkbox [EE-5199] (#8675)
Co-authored-by: testa113 <testa113>
2023-03-17 10:34:00 +13:00
Ali
106c719a34 fix(wizard): Capitalise Kubernetes [EE-5178] (#8663)
Co-authored-by: testa113 <testa113>
2023-03-16 18:50:58 +13:00
Dakota Walsh
1cfd031db1 fix(kubernetes): Prevent rerunning initial cluster detection [EE-5170] (#8667) 2023-03-16 15:39:43 +13:00
Prabhat Khera
fbc1a2d44d fix(ui): namespace cache refresh on reload EE-5155 (#8657) 2023-03-16 10:10:26 +13:00
Oscar Zhou
47478efd1e fix(stack/git): remove duplicate code used to backup compose dir (#8620) 2023-03-15 12:27:23 +13:00
Ali
50940b7fba fix(annotations) ingress tip to match ee [EE-5158] (#8654)
Co-authored-by: testa113 <testa113>
2023-03-14 10:41:41 +13:00
matias-portainer
7468d5637b fix(upgrade): remove yellow upgrade banner EE-5141 (#8641) 2023-03-13 09:01:39 -03:00
Ali
6edc210ae7 fix(kube): check for ns on enter [EE-5160] (#8648)
Co-authored-by: testa113 <testa113>
2023-03-13 13:57:07 +13:00
Prabhat Khera
f859876cb6 fix typo in delete image modal dialog (#8622) 2023-03-13 11:05:55 +13:00
Matt Hook
5e434a82ed reduce throttling in the kube client (#8631) 2023-03-13 09:47:23 +13:00
Ali
d9f6471a00 fix(annotation): update wording/styling [EE-5158] (#8643)
Co-authored-by: testa113 <testa113>
2023-03-10 16:52:15 +13:00
cmeng
a7d1a20dfb fix(edge-stack) always show edge group selector [EE-5157] (#8638) 2023-03-10 10:48:53 +13:00
Ali
17517d7521 fix(app): restrict ns fix create app [EE-5123] (#8633)
Co-authored-by: testa113 <testa113>
2023-03-10 10:24:20 +13:00
andres-portainer
c609f6912f fix(home): disable live connect for async [EE-5000] (#8628) 2023-03-09 15:50:36 -03:00
Ali
346fe9e3f1 refactor(GPU): colocate and update UI [EE-5127] (#8634)
Co-authored-by: testa113 <testa113>
2023-03-09 22:06:49 +13:00
matias-portainer
69f14e569b fix(stacks): pass WorkingDir to deployer command EE-5142 (#8624) 2023-03-08 19:34:50 -03:00
2765 changed files with 53747 additions and 94803 deletions

View File

@@ -10,7 +10,6 @@ globals:
extends: extends:
- 'eslint:recommended' - 'eslint:recommended'
- 'plugin:storybook/recommended' - 'plugin:storybook/recommended'
- 'plugin:import/typescript'
- prettier - prettier
plugins: plugins:
@@ -24,13 +23,10 @@ parserOptions:
modules: true modules: true
rules: rules:
no-console: error
no-alert: error
no-control-regex: 'off' no-control-regex: 'off'
no-empty: warn no-empty: warn
no-empty-function: warn no-empty-function: warn
no-useless-escape: 'off' no-useless-escape: 'off'
import/named: error
import/order: import/order:
[ [
'error', 'error',
@@ -45,12 +41,6 @@ rules:
pathGroupsExcludedImportTypes: ['internal'], pathGroupsExcludedImportTypes: ['internal'],
}, },
] ]
no-restricted-imports:
- error
- patterns:
- group:
- '@/react/test-utils/*'
message: 'These utils are just for test files'
settings: settings:
'import/resolver': 'import/resolver':
@@ -59,8 +49,6 @@ settings:
- ['@@', './app/react/components'] - ['@@', './app/react/components']
- ['@', './app'] - ['@', './app']
extensions: ['.js', '.ts', '.tsx'] extensions: ['.js', '.ts', '.tsx']
typescript: true
node: true
overrides: overrides:
- files: - files:
@@ -85,7 +73,6 @@ overrides:
settings: settings:
react: react:
version: 'detect' version: 'detect'
rules: rules:
import/order: import/order:
[ [
@@ -99,8 +86,8 @@ overrides:
no-plusplus: off no-plusplus: off
func-style: [error, 'declaration'] func-style: [error, 'declaration']
import/prefer-default-export: off import/prefer-default-export: off
no-use-before-define: 'off' no-use-before-define: ['error', { functions: false }]
'@typescript-eslint/no-use-before-define': ['error', { functions: false, 'allowNamedExports': true }] '@typescript-eslint/no-use-before-define': ['error', { functions: false }]
no-shadow: 'off' no-shadow: 'off'
'@typescript-eslint/no-shadow': off '@typescript-eslint/no-shadow': off
jsx-a11y/no-autofocus: warn jsx-a11y/no-autofocus: warn
@@ -119,12 +106,6 @@ overrides:
'no-await-in-loop': 'off' 'no-await-in-loop': 'off'
'react/jsx-no-useless-fragment': ['error', { allowExpressions: true }] 'react/jsx-no-useless-fragment': ['error', { allowExpressions: true }]
'regex/invalid': ['error', [{ 'regex': '<Icon icon="(.*)"', 'message': 'Please directly import the `lucide-react` icon instead of using the string' }]] 'regex/invalid': ['error', [{ 'regex': '<Icon icon="(.*)"', 'message': 'Please directly import the `lucide-react` icon instead of using the string' }]]
'@typescript-eslint/no-restricted-imports':
- error
- patterns:
- group:
- '@/react/test-utils/*'
message: 'These utils are just for test files'
overrides: # allow props spreading for hoc files overrides: # allow props spreading for hoc files
- files: - files:
- app/**/with*.ts{,x} - app/**/with*.ts{,x}
@@ -133,16 +114,13 @@ overrides:
- files: - files:
- app/**/*.test.* - app/**/*.test.*
extends: extends:
- 'plugin:vitest/recommended' - 'plugin:jest/recommended'
- 'plugin:jest/style'
env: env:
'vitest/env': true 'jest/globals': true
rules: rules:
'react/jsx-no-constructed-context-values': off 'react/jsx-no-constructed-context-values': off
'@typescript-eslint/no-restricted-imports': off
no-restricted-imports: off
- files: - files:
- app/**/*.stories.* - app/**/*.stories.*
rules: rules:
'no-alert': off 'no-alert': off
'@typescript-eslint/no-restricted-imports': off
no-restricted-imports: off

View File

@@ -1,11 +0,0 @@
body:
- type: markdown
attributes:
value: |
Before asking a question, make sure it hasn't been already asked and answered. You can search our [discussions](https://github.com/orgs/portainer/discussions) and [bug reports](https://github.com/portainer/portainer/issues) in GitHub. Also, be sure to check our [knowledge base](https://portal.portainer.io/knowledge) and [documentation](https://docs.portainer.io/) first.
- type: textarea
attributes:
label: Ask a Question!
validations:
required: true

View File

@@ -1,38 +0,0 @@
body:
- type: markdown
attributes:
value: |
# Welcome!
Thanks for suggesting an idea for Portainer!
Before opening a new idea or feature request, make sure that we do not have any duplicates already open. You can ensure this by [searching this discussion cagetory](https://github.com/orgs/portainer/discussions/categories/ideas). If there is a duplicate, please add a comment to the existing idea instead.
Also, be sure to check our [knowledge base](https://portal.portainer.io/knowledge) and [documentation](https://docs.portainer.io) as they may point you toward a solution.
**DO NOT FILE DUPLICATE REQUESTS.**
- type: textarea
attributes:
label: Is your feature request related to a problem? Please describe
description: Short list of what the feature request aims to address.
validations:
required: true
- type: textarea
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: Add any other context or screenshots about the feature request here.
validations:
required: false

54
.github/ISSUE_TEMPLATE/Bug_report.md vendored Normal file
View File

@@ -0,0 +1,54 @@
---
name: Bug report
about: Create a bug report
title: ''
labels: bug/need-confirmation, kind/bug
assignees: ''
---
<!--
Thanks for reporting a bug for Portainer !
You can find more information about Portainer support framework policy here: https://www.portainer.io/2019/04/portainer-support-policy/
Do you need help or have a question? Come chat with us on Slack https://portainer.io/slack/
Before opening a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
Also, be sure to check our FAQ and documentation first: https://documentation.portainer.io/
-->
**Bug description**
A clear and concise description of what the bug is.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Portainer Logs**
Provide the logs of your Portainer container or Service.
You can see how [here](https://documentation.portainer.io/r/portainer-logs)
**Steps to reproduce the issue:**
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Technical details:**
- Portainer version:
- Docker version (managed by Portainer):
- Kubernetes version (managed by Portainer):
- Platform (windows/linux):
- Command used to start Portainer (`docker run -p 9443:9443 portainer/portainer`):
- Browser:
- Use Case (delete as appropriate): Using Portainer at Home, Using Portainer in a Commercial setup.
- Have you reviewed our technical documentation and knowledge base? Yes/No
**Additional context**
Add any other context about the problem here.

View File

@@ -1,167 +0,0 @@
name: Bug Report
description: Create a report to help us improve.
labels: kind/bug,bug/need-confirmation
body:
- type: markdown
attributes:
value: |
# Welcome!
The issue tracker is for reporting bugs. If you have an [idea for a new feature](https://github.com/orgs/portainer/discussions/categories/ideas) or a [general question about Portainer](https://github.com/orgs/portainer/discussions/categories/help) please post in our [GitHub Discussions](https://github.com/orgs/portainer/discussions).
You can also ask for help in our [community Slack channel](https://join.slack.com/t/portainer/shared_invite/zt-txh3ljab-52QHTyjCqbe5RibC2lcjKA).
**DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS**.
- type: checkboxes
id: terms
attributes:
label: Before you start please confirm the following.
options:
- label: Yes, I've searched similar issues on [GitHub](https://github.com/portainer/portainer/issues).
required: true
- label: Yes, I've checked whether this issue is covered in the Portainer [documentation](https://docs.portainer.io) or [knowledge base](https://portal.portainer.io/knowledge).
required: true
- type: markdown
attributes:
value: |
# About your issue
Tell us a bit about the issue you're having.
How to write a good bug report:
- Respect the issue template as much as possible.
- Summarize the issue so that we understand what is going wrong.
- Describe what you would have expected to have happened, and what actually happened instead.
- Provide easy to follow steps to reproduce the issue.
- Remain clear and concise.
- Format your messages to help the reader focus on what matters and understand the structure of your message, use [Markdown syntax](https://help.github.com/articles/github-flavored-markdown).
- type: textarea
attributes:
label: Problem Description
description: A clear and concise description of what the bug is.
validations:
required: true
- type: textarea
attributes:
label: Expected Behavior
description: A clear and concise description of what you expected to happen.
validations:
required: true
- type: textarea
attributes:
label: Actual Behavior
description: A clear and concise description of what actually happens.
validations:
required: true
- type: textarea
attributes:
label: Steps to Reproduce
description: Please be as detailed as possible when providing steps to reproduce.
placeholder: |
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
validations:
required: true
- type: textarea
attributes:
label: Portainer logs or screenshots
description: Provide Portainer container logs or any screenshots related to the issue.
validations:
required: false
- type: markdown
attributes:
value: |
# About your environment
Tell us a bit about your Portainer environment.
- type: dropdown
attributes:
label: Portainer version
description: We only provide support for the most recent version of Portainer and the previous 3 versions. If you are on an older version of Portainer we recommend [upgrading first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
multiple: false
options:
- '2.19.4'
- '2.19.3'
- '2.19.2'
- '2.19.1'
- '2.19.0'
- '2.18.4'
- '2.18.3'
- '2.18.2'
- '2.18.1'
- '2.17.1'
- '2.17.0'
- '2.16.2'
- '2.16.1'
- '2.16.0'
validations:
required: true
- type: dropdown
attributes:
label: Portainer Edition
multiple: false
options:
- 'Business Edition (BE/EE) with 5NF / 3NF license'
- 'Business Edition (BE/EE) with Home & Student license'
- 'Business Edition (BE/EE) with Starter license'
- 'Business Edition (BE/EE) with Professional or Enterprise license'
- 'Community Edition (CE)'
validations:
required: true
- type: input
attributes:
label: Platform and Version
description: |
Enter your container management platform (Docker | Swarm | Kubernetes) along with the version.
Example: Docker 24.0.3 | Docker Swarm 24.0.3 | Kubernetes 1.26
You can find our supported platforms [in our documentation](https://docs.portainer.io/start/requirements-and-prerequisites).
validations:
required: true
- type: input
attributes:
label: OS and Architecture
description: |
Enter your Operating System, Version and Architecture. Example: Ubuntu 22.04, AMD64 | Raspbian OS, ARM64
validations:
required: true
- type: input
attributes:
label: Browser
description: |
Enter your browser and version. Example: Google Chrome 114.0
validations:
required: false
- type: textarea
attributes:
label: What command did you use to deploy Portainer?
description: |
Example: `docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ce:latest`
If you deployed Portainer using a compose file or manifest you can provide this here as well.
render: bash
validations:
required: false
- type: textarea
attributes:
label: Additional Information
description: Any additional information about your environment, the bug, or anything else you think might be helpful.
validations:
required: false

View File

@@ -1,11 +1,5 @@
blank_issues_enabled: false blank_issues_enabled: false
contact_links: contact_links:
- name: Question - name: Portainer Business Edition - Get 5 nodes free
url: https://github.com/orgs/portainer/discussions/new?category=help url: https://portainer.io/pricing/take5
about: Ask us a question about Portainer usage or deployment. about: Portainer Business Edition has more features, more support and you can now get 5 nodes free for as long as you want.
- name: Idea or Feature Request
url: https://github.com/orgs/portainer/discussions/new?category=ideas
about: Suggest an idea or feature/enhancement that should be added in Portainer.
- name: Portainer Business Edition - Get 3 Nodes Free
url: https://www.portainer.io/take-3
about: Portainer Business Edition has more features, more support and you can now get 3 nodes free for as long as you want.

View File

@@ -1,176 +0,0 @@
name: ci
on:
workflow_dispatch:
push:
branches:
- 'develop'
- 'release/*'
pull_request:
branches:
- 'develop'
- 'release/*'
- 'feat/*'
- 'fix/*'
- 'refactor/*'
types:
- opened
- reopened
- synchronize
- ready_for_review
env:
DOCKER_HUB_REPO: portainerci/portainer-ce
EXTENSION_HUB_REPO: portainerci/portainer-docker-extension
GO_VERSION: 1.21.11
NODE_VERSION: 18.x
jobs:
build_images:
strategy:
matrix:
config:
- { platform: linux, arch: amd64, version: "" }
- { platform: linux, arch: arm64, version: "" }
- { platform: linux, arch: arm, version: "" }
- { platform: linux, arch: ppc64le, version: "" }
- { platform: linux, arch: s390x, version: "" }
- { platform: windows, arch: amd64, version: 1809 }
- { platform: windows, arch: amd64, version: ltsc2022 }
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps:
- name: '[preparation] checkout the current branch'
uses: actions/checkout@v4.1.1
with:
ref: ${{ github.event.inputs.branch }}
- name: '[preparation] set up golang'
uses: actions/setup-go@v5.0.0
with:
go-version: ${{ env.GO_VERSION }}
- name: '[preparation] set up node.js'
uses: actions/setup-node@v4.0.1
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'yarn'
- name: '[preparation] set up qemu'
uses: docker/setup-qemu-action@v3.0.0
- name: '[preparation] set up docker context for buildx'
run: docker context create builders
- name: '[preparation] set up docker buildx'
uses: docker/setup-buildx-action@v3.0.0
with:
endpoint: builders
- name: '[preparation] docker login'
uses: docker/login-action@v3.0.0
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: '[preparation] set the container image tag'
run: |
if [[ "${GITHUB_REF_NAME}" =~ ^release/.*$ ]]; then
# use the release branch name as the tag for release branches
# for instance, release/2.19 becomes 2.19
CONTAINER_IMAGE_TAG=$(echo $GITHUB_REF_NAME | cut -d "/" -f 2)
elif [ "${GITHUB_EVENT_NAME}" == "pull_request" ]; then
# use pr${{ github.event.number }} as the tag for pull requests
# for instance, pr123
CONTAINER_IMAGE_TAG="pr${{ github.event.number }}"
else
# replace / with - in the branch name
# for instance, feature/1.0.0 -> feature-1.0.0
CONTAINER_IMAGE_TAG=$(echo $GITHUB_REF_NAME | sed 's/\//-/g')
fi
echo "CONTAINER_IMAGE_TAG=${CONTAINER_IMAGE_TAG}-${{ matrix.config.platform }}${{ matrix.config.version }}-${{ matrix.config.arch }}" >> $GITHUB_ENV
- name: '[execution] build linux & windows portainer binaries'
run: |
export YARN_VERSION=$(yarn --version)
export WEBPACK_VERSION=$(yarn list webpack --depth=0 | grep webpack | awk -F@ '{print $2}')
export BUILDNUMBER=${GITHUB_RUN_NUMBER}
GIT_COMMIT_HASH_LONG=${{ github.sha }}
export GIT_COMMIT_HASH_SHORT={GIT_COMMIT_HASH_LONG:0:7}
NODE_ENV="testing"
if [[ "${GITHUB_REF_NAME}" =~ ^release/.*$ ]]; then
NODE_ENV="production"
fi
make build-all PLATFORM=${{ matrix.config.platform }} ARCH=${{ matrix.config.arch }} ENV=${NODE_ENV}
env:
CONTAINER_IMAGE_TAG: ${{ env.CONTAINER_IMAGE_TAG }}
- name: '[execution] build and push docker images'
run: |
if [ "${{ matrix.config.platform }}" == "windows" ]; then
mv dist/portainer dist/portainer.exe
docker buildx build --output=type=registry --platform ${{ matrix.config.platform }}/${{ matrix.config.arch }} --build-arg OSVERSION=${{ matrix.config.version }} -t "${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}" -f build/${{ matrix.config.platform }}/Dockerfile .
else
docker buildx build --output=type=registry --platform ${{ matrix.config.platform }}/${{ matrix.config.arch }} -t "${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}" -f build/${{ matrix.config.platform }}/Dockerfile .
docker buildx build --output=type=registry --platform ${{ matrix.config.platform }}/${{ matrix.config.arch }} -t "${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-alpine" -f build/${{ matrix.config.platform }}/alpine.Dockerfile .
if [[ "${GITHUB_REF_NAME}" =~ ^release/.*$ ]]; then
docker buildx build --output=type=registry --platform ${{ matrix.config.platform }}/${{ matrix.config.arch }} -t "${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}" -f build/${{ matrix.config.platform }}/Dockerfile .
docker buildx build --output=type=registry --platform ${{ matrix.config.platform }}/${{ matrix.config.arch }} -t "${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-alpine" -f build/${{ matrix.config.platform }}/alpine.Dockerfile .
fi
fi
env:
CONTAINER_IMAGE_TAG: ${{ env.CONTAINER_IMAGE_TAG }}
build_manifests:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
needs: [build_images]
steps:
- name: '[preparation] docker login'
uses: docker/login-action@v3.0.0
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: '[preparation] set up docker context for buildx'
run: docker version && docker context create builders
- name: '[preparation] set up docker buildx'
uses: docker/setup-buildx-action@v3.0.0
with:
endpoint: builders
- name: '[execution] build and push manifests'
run: |
if [[ "${GITHUB_REF_NAME}" =~ ^release/.*$ ]]; then
# use the release branch name as the tag for release branches
# for instance, release/2.19 becomes 2.19
CONTAINER_IMAGE_TAG=$(echo $GITHUB_REF_NAME | cut -d "/" -f 2)
elif [ "${GITHUB_EVENT_NAME}" == "pull_request" ]; then
# use pr${{ github.event.number }} as the tag for pull requests
# for instance, pr123
CONTAINER_IMAGE_TAG="pr${{ github.event.number }}"
else
# replace / with - in the branch name
# for instance, feature/1.0.0 -> feature-1.0.0
CONTAINER_IMAGE_TAG=$(echo $GITHUB_REF_NAME | sed 's/\//-/g')
fi
docker buildx imagetools create -t "${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-amd64" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm64" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-ppc64le" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-s390x" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-windows1809-amd64" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-windowsltsc2022-amd64"
docker buildx imagetools create -t "${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-alpine" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-amd64-alpine" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm64-alpine" \
"${DOCKER_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm-alpine"
if [[ "${GITHUB_REF_NAME}" =~ ^release/.*$ ]]; then
docker buildx imagetools create -t "${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-amd64" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm64" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-ppc64le" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-s390x"
docker buildx imagetools create -t "${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-alpine" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-amd64-alpine" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm64-alpine" \
"${EXTENSION_HUB_REPO}:${CONTAINER_IMAGE_TAG}-linux-arm-alpine"
fi

View File

@@ -11,5 +11,5 @@ jobs:
with: with:
CONFLICT_LABEL_NAME: 'has conflicts' CONFLICT_LABEL_NAME: 'has conflicts'
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
MAX_RETRIES: 10 MAX_RETRIES: 5
WAIT_MS: 60000 WAIT_MS: 5000

View File

@@ -11,32 +11,23 @@ on:
- master - master
- develop - develop
- release/* - release/*
types:
- opened
- reopened
- synchronize
- ready_for_review
env:
GO_VERSION: 1.21.9
NODE_VERSION: 18.x
jobs: jobs:
run-linters: run-linters:
name: Run linters name: Run linters
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- uses: actions/setup-node@v2 - uses: actions/setup-node@v2
with: with:
node-version: ${{ env.NODE_VERSION }} node-version: '14'
cache: 'yarn' cache: 'yarn'
- uses: actions/setup-go@v4 - uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: 1.19.4
- run: yarn --frozen-lockfile - run: yarn --frozen-lockfile
- name: Run linters - name: Run linters
uses: wearerequired/lint-action@v1 uses: wearerequired/lint-action@v1
with: with:
@@ -51,5 +42,6 @@ jobs:
- name: GolangCI-Lint - name: GolangCI-Lint
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v3
with: with:
version: v1.55.2 version: latest
args: --timeout=10m -c .golangci.yaml working-directory: api
args: -c .golangci.yaml

View File

@@ -2,25 +2,21 @@ name: Nightly Code Security Scan
on: on:
schedule: schedule:
- cron: '0 20 * * *' - cron: '0 8 * * *'
workflow_dispatch: workflow_dispatch:
env:
GO_VERSION: 1.21.9
jobs: jobs:
client-dependencies: client-dependencies:
name: Client Dependency Check name: Client dependency check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- # only run for develop branch if: >- # only run for develop branch
github.ref == 'refs/heads/develop' github.ref == 'refs/heads/develop'
outputs: outputs:
js: ${{ steps.set-matrix.outputs.js_result }} js: ${{ steps.set-matrix.outputs.js_result }}
steps: steps:
- name: checkout repository - uses: actions/checkout@master
uses: actions/checkout@master
- name: scan vulnerabilities by Snyk - name: Run Snyk to check for vulnerabilities
uses: snyk/actions/node@master uses: snyk/actions/node@master
continue-on-error: true # To make sure that artifact upload gets called continue-on-error: true # To make sure that artifact upload gets called
env: env:
@@ -28,148 +24,147 @@ jobs:
with: with:
json: true json: true
- name: upload scan result as develop artifact - name: Upload js security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: js-security-scan-develop-result name: js-security-scan-develop-result
path: snyk.json path: snyk.json
- name: develop scan report export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=snyk --path="/data/snyk.json" --output-type=table --export --export-filename="/data/js-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=snyk -path="/data/snyk.json" -output-type=table -export -export-filename="/data/js-result")
- name: upload html file as artifact - name: Upload js result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-js-result-${{github.run_id}} name: html-js-result-${{github.run_id}}
path: js-result.html path: js-result.html
- name: analyse vulnerabilities - name: Analyse the js result
id: set-matrix id: set-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=snyk --path="/data/snyk.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=snyk -path="/data/snyk.json" -output-type=matrix)
echo "js_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=js_result::${result}"
server-dependencies: server-dependencies:
name: Server Dependency Check name: Server dependency check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- # only run for develop branch if: >- # only run for develop branch
github.ref == 'refs/heads/develop' github.ref == 'refs/heads/develop'
outputs: outputs:
go: ${{ steps.set-matrix.outputs.go_result }} go: ${{ steps.set-matrix.outputs.go_result }}
steps: steps:
- name: checkout repository - uses: actions/checkout@master
uses: actions/checkout@master
- name: install Go - uses: actions/setup-go@v3
uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: '1.19.4'
- name: download Go modules - name: Download go modules
run: cd ./api && go get -t -v -d ./... run: cd ./api && go get -t -v -d ./...
- name: scan vulnerabilities by Snyk - name: Run Snyk to check for vulnerabilities
continue-on-error: true # To make sure that artifact upload gets called continue-on-error: true # To make sure that artifact upload gets called
env: env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: | run: |
yarn global add snyk yarn global add snyk
snyk test --file=./go.mod --json-file-output=snyk.json 2>/dev/null || : snyk test --file=./api/go.mod --json-file-output=snyk.json 2>/dev/null || :
- name: upload scan result as develop artifact - name: Upload go security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: go-security-scan-develop-result name: go-security-scan-develop-result
path: snyk.json path: snyk.json
- name: develop scan report export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=snyk --path="/data/snyk.json" --output-type=table --export --export-filename="/data/go-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=snyk -path="/data/snyk.json" -output-type=table -export -export-filename="/data/go-result")
- name: upload html file as artifact - name: Upload go result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-go-result-${{github.run_id}} name: html-go-result-${{github.run_id}}
path: go-result.html path: go-result.html
- name: analyse vulnerabilities - name: Analyse the go result
id: set-matrix id: set-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=snyk --path="/data/snyk.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=snyk -path="/data/snyk.json" -output-type=matrix)
echo "go_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=go_result::${result}"
image-vulnerability: image-vulnerability:
name: Image Vulnerability Check name: Build docker image and Image vulnerability check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
github.ref == 'refs/heads/develop' github.ref == 'refs/heads/develop'
outputs: outputs:
image-trivy: ${{ steps.set-trivy-matrix.outputs.image_trivy_result }} image: ${{ steps.set-matrix.outputs.image_result }}
image-docker-scout: ${{ steps.set-docker-scout-matrix.outputs.image_docker_scout_result }}
steps: steps:
- name: scan vulnerabilities by Trivy - name: Checkout code
uses: actions/checkout@master
- name: Use golang 1.19.4
uses: actions/setup-go@v3
with:
go-version: '1.19.4'
- name: Use Node.js 18.x
uses: actions/setup-node@v1
with:
node-version: 18.x
- name: Install packages
run: yarn --frozen-lockfile
- name: build
run: make build
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: build/linux/Dockerfile
tags: trivy-portainer:${{ github.sha }}
outputs: type=docker,dest=/tmp/trivy-portainer-image.tar
- name: Load docker image
run: |
docker load --input /tmp/trivy-portainer-image.tar
- name: Run Trivy vulnerability scanner
uses: docker://docker.io/aquasec/trivy:latest uses: docker://docker.io/aquasec/trivy:latest
continue-on-error: true continue-on-error: true
with: with:
args: image --ignore-unfixed=true --vuln-type="os,library" --exit-code=1 --format="json" --output="image-trivy.json" --no-progress portainerci/portainer:develop args: image --ignore-unfixed=true --vuln-type="os,library" --exit-code=1 --format="json" --output="image-trivy.json" --no-progress trivy-portainer:${{ github.sha }}
- name: upload Trivy image security scan result as artifact - name: Upload image security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: image-security-scan-develop-result name: image-security-scan-develop-result
path: image-trivy.json path: image-trivy.json
- name: develop Trivy scan report export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=trivy --path="/data/image-trivy.json" --output-type=table --export --export-filename="/data/image-trivy-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=trivy -path="/data/image-trivy.json" -output-type=table -export -export-filename="/data/image-result")
- name: upload html file as Trivy artifact - name: Upload go result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-image-result-${{github.run_id}} name: html-image-result-${{github.run_id}}
path: image-trivy-result.html path: image-result.html
- name: analyse vulnerabilities from Trivy - name: Analyse the trivy result
id: set-trivy-matrix id: set-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=trivy --path="/data/image-trivy.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 summary -report-type=trivy -path="/data/image-trivy.json" -output-type=matrix)
echo "image_trivy_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=image_result::${result}"
- name: scan vulnerabilities by Docker Scout
uses: docker/scout-action@v1
continue-on-error: true
with:
command: cves
image: portainerci/portainer:develop
sarif-file: image-docker-scout.json
dockerhub-user: ${{ secrets.DOCKER_HUB_USERNAME }}
dockerhub-password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: upload Docker Scout image security scan result as artifact
uses: actions/upload-artifact@v3
with:
name: image-security-scan-develop-result
path: image-docker-scout.json
- name: develop Docker Scout scan report export to html
run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=docker-scout --path="/data/image-docker-scout.json" --output-type=table --export --export-filename="/data/image-docker-scout-result")
- name: upload html file as Docker Scout artifact
uses: actions/upload-artifact@v3
with:
name: html-image-result-${{github.run_id}}
path: image-docker-scout-result.html
- name: analyse vulnerabilities from Docker Scout
id: set-docker-scout-matrix
run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest summary --report-type=docker-scout --path="/data/image-docker-scout.json" --output-type=matrix)
echo "image_docker_scout_result=${result}" >> $GITHUB_OUTPUT
result-analysis: result-analysis:
name: Analyse Scan Results name: Analyse scan result
needs: [client-dependencies, server-dependencies, image-vulnerability] needs: [client-dependencies, server-dependencies, image-vulnerability]
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
@@ -178,27 +173,23 @@ jobs:
matrix: matrix:
js: ${{fromJson(needs.client-dependencies.outputs.js)}} js: ${{fromJson(needs.client-dependencies.outputs.js)}}
go: ${{fromJson(needs.server-dependencies.outputs.go)}} go: ${{fromJson(needs.server-dependencies.outputs.go)}}
image-trivy: ${{fromJson(needs.image-vulnerability.outputs.image-trivy)}} image: ${{fromJson(needs.image-vulnerability.outputs.image)}}
image-docker-scout: ${{fromJson(needs.image-vulnerability.outputs.image-docker-scout)}}
steps: steps:
- name: display the results of js, Go, and image scan - name: Display the results of js, go and image
run: | run: |
echo "${{ matrix.js.status }}" echo ${{ matrix.js.status }}
echo "${{ matrix.go.status }}" echo ${{ matrix.go.status }}
echo "${{ matrix.image-trivy.status }}" echo ${{ matrix.image.status }}
echo "${{ matrix.image-docker-scout.status }}" echo ${{ matrix.js.summary }}
echo "${{ matrix.js.summary }}" echo ${{ matrix.go.summary }}
echo "${{ matrix.go.summary }}" echo ${{ matrix.image.summary }}
echo "${{ matrix.image-trivy.summary }}"
echo "${{ matrix.image-docker-scout.summary }}"
- name: send message to Slack - name: Send Slack message
if: >- if: >-
matrix.js.status == 'failure' || matrix.js.status == 'failure' ||
matrix.go.status == 'failure' || matrix.go.status == 'failure' ||
matrix.image-trivy.status == 'failure' || matrix.image.status == 'failure'
matrix.image-docker-scout.status == 'failure' uses: slackapi/slack-github-action@v1.18.0
uses: slackapi/slack-github-action@v1.23.0
with: with:
payload: | payload: |
{ {
@@ -233,14 +224,7 @@ jobs:
"type": "section", "type": "section",
"text": { "text": {
"type": "mrkdwn", "type": "mrkdwn",
"text": "*Image Trivy vulnerability check*: *${{ matrix.image-trivy.status }}*\n${{ matrix.image-trivy.summary }}\n" "text": "*Image vulnerability check*: *${{ matrix.image.status }}*\n${{ matrix.image.summary }}\n"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Image Docker Scout vulnerability check*: *${{ matrix.image-docker-scout.status }}*\n${{ matrix.image-docker-scout.summary }}\n"
} }
} }
] ]

View File

@@ -7,31 +7,25 @@ on:
- edited - edited
paths: paths:
- 'package.json' - 'package.json'
- 'go.mod' - 'api/go.mod'
- 'gruntfile.js'
- 'build/linux/Dockerfile' - 'build/linux/Dockerfile'
- 'build/linux/alpine.Dockerfile' - 'build/linux/alpine.Dockerfile'
- 'build/windows/Dockerfile' - 'build/windows/Dockerfile'
- '.github/workflows/pr-security.yml'
env:
GO_VERSION: 1.21.9
NODE_VERSION: 18.x
jobs: jobs:
client-dependencies: client-dependencies:
name: Client Dependency Check name: Client dependency check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
github.event.pull_request && github.event.pull_request &&
github.event.review.body == '/scan' && github.event.review.body == '/scan'
github.event.pull_request.draft == false
outputs: outputs:
jsdiff: ${{ steps.set-diff-matrix.outputs.js_diff_result }} jsdiff: ${{ steps.set-diff-matrix.outputs.js_diff_result }}
steps: steps:
- name: checkout repository - uses: actions/checkout@master
uses: actions/checkout@master
- name: scan vulnerabilities by Snyk - name: Run Snyk to check for vulnerabilities
uses: snyk/actions/node@master uses: snyk/actions/node@master
continue-on-error: true # To make sure that artifact upload gets called continue-on-error: true # To make sure that artifact upload gets called
env: env:
@@ -39,13 +33,13 @@ jobs:
with: with:
json: true json: true
- name: upload scan result as pull-request artifact - name: Upload js security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: js-security-scan-feat-result name: js-security-scan-feat-result
path: snyk.json path: snyk.json
- name: download artifacts from develop branch built by nightly scan - name: Download artifacts from develop branch
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: | run: |
@@ -57,58 +51,55 @@ jobs:
echo "null" > ./js-snyk-develop.json echo "null" > ./js-snyk-develop.json
fi fi
- name: pr vs develop scan report comparison export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=snyk --path="/data/js-snyk-feature.json" --compare-to="/data/js-snyk-develop.json" --output-type=table --export --export-filename="/data/js-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=snyk -path="/data/js-snyk-feature.json" -compare-to="/data/js-snyk-develop.json" -output-type=table -export -export-filename="/data/js-result")
- name: upload html file as artifact - name: Upload js result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-js-result-compare-to-develop-${{github.run_id}} name: html-js-result-compare-to-develop-${{github.run_id}}
path: js-result.html path: js-result.html
- name: analyse different vulnerabilities against develop branch - name: Analyse the js diff result
id: set-diff-matrix id: set-diff-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=snyk --path="/data/js-snyk-feature.json" --compare-to="/data/js-snyk-develop.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=snyk -path="/data/js-snyk-feature.json" -compare-to="./data/js-snyk-develop.json" -output-type=matrix)
echo "js_diff_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=js_diff_result::${result}"
server-dependencies: server-dependencies:
name: Server Dependency Check name: Server dependency check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
github.event.pull_request && github.event.pull_request &&
github.event.review.body == '/scan' && github.event.review.body == '/scan'
github.event.pull_request.draft == false
outputs: outputs:
godiff: ${{ steps.set-diff-matrix.outputs.go_diff_result }} godiff: ${{ steps.set-diff-matrix.outputs.go_diff_result }}
steps: steps:
- name: checkout repository - uses: actions/checkout@master
uses: actions/checkout@master
- name: install Go - uses: actions/setup-go@v3
uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: '1.19.4'
- name: download Go modules - name: Download go modules
run: cd ./api && go get -t -v -d ./... run: cd ./api && go get -t -v -d ./...
- name: scan vulnerabilities by Snyk - name: Run Snyk to check for vulnerabilities
continue-on-error: true # To make sure that artifact upload gets called continue-on-error: true # To make sure that artifact upload gets called
env: env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: | run: |
yarn global add snyk yarn global add snyk
snyk test --file=./go.mod --json-file-output=snyk.json 2>/dev/null || : snyk test --file=./api/go.mod --json-file-output=snyk.json 2>/dev/null || :
- name: upload scan result as pull-request artifact - name: Upload go security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: go-security-scan-feature-result name: go-security-scan-feature-result
path: snyk.json path: snyk.json
- name: download artifacts from develop branch built by nightly scan - name: Download artifacts from develop branch
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: | run: |
@@ -120,80 +111,78 @@ jobs:
echo "null" > ./go-snyk-develop.json echo "null" > ./go-snyk-develop.json
fi fi
- name: pr vs develop scan report comparison export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=snyk --path="/data/go-snyk-feature.json" --compare-to="/data/go-snyk-develop.json" --output-type=table --export --export-filename="/data/go-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=snyk -path="/data/go-snyk-feature.json" -compare-to="/data/go-snyk-develop.json" -output-type=table -export -export-filename="/data/go-result")
- name: upload html file as artifact - name: Upload go result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-go-result-compare-to-develop-${{github.run_id}} name: html-go-result-compare-to-develop-${{github.run_id}}
path: go-result.html path: go-result.html
- name: analyse different vulnerabilities against develop branch - name: Analyse the go diff result
id: set-diff-matrix id: set-diff-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=snyk --path="/data/go-snyk-feature.json" --compare-to="/data/go-snyk-develop.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=snyk -path="/data/go-snyk-feature.json" -compare-to="/data/go-snyk-develop.json" -output-type=matrix)
echo "go_diff_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=go_diff_result::${result}"
image-vulnerability: image-vulnerability:
name: Image Vulnerability Check name: Build docker image and Image vulnerability check
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
github.event.pull_request && github.event.pull_request &&
github.event.review.body == '/scan' && github.event.review.body == '/scan'
github.event.pull_request.draft == false
outputs: outputs:
imagediff-trivy: ${{ steps.set-diff-trivy-matrix.outputs.image_diff_trivy_result }} imagediff: ${{ steps.set-diff-matrix.outputs.image_diff_result }}
imagediff-docker-scout: ${{ steps.set-diff-docker-scout-matrix.outputs.image_diff_docker_scout_result }}
steps: steps:
- name: checkout code - name: Checkout code
uses: actions/checkout@master uses: actions/checkout@master
- name: install Go - name: Use golang 1.19.4
uses: actions/setup-go@v3 uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: '1.19.4'
- name: install Node.js - name: Use Node.js 18.x
uses: actions/setup-node@v3 uses: actions/setup-node@v1
with: with:
node-version: ${{ env.NODE_VERSION }} node-version: 18.x
- name: Install packages - name: Install packages
run: yarn --frozen-lockfile run: yarn --frozen-lockfile
- name: build - name: build
run: make build-all run: make build
- name: set up docker buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v1
- name: build and compress image - name: Build and push
uses: docker/build-push-action@v4 uses: docker/build-push-action@v2
with: with:
context: . context: .
file: build/linux/Dockerfile file: build/linux/Dockerfile
tags: local-portainer:${{ github.sha }} tags: trivy-portainer:${{ github.sha }}
outputs: type=docker,dest=/tmp/local-portainer-image.tar outputs: type=docker,dest=/tmp/trivy-portainer-image.tar
- name: load docker image - name: Load docker image
run: | run: |
docker load --input /tmp/local-portainer-image.tar docker load --input /tmp/trivy-portainer-image.tar
- name: scan vulnerabilities by Trivy - name: Run Trivy vulnerability scanner
uses: docker://docker.io/aquasec/trivy:latest uses: docker://docker.io/aquasec/trivy:latest
continue-on-error: true continue-on-error: true
with: with:
args: image --ignore-unfixed=true --vuln-type="os,library" --exit-code=1 --format="json" --output="image-trivy.json" --no-progress local-portainer:${{ github.sha }} args: image --ignore-unfixed=true --vuln-type="os,library" --exit-code=1 --format="json" --output="image-trivy.json" --no-progress trivy-portainer:${{ github.sha }}
- name: upload Trivy image security scan result as artifact - name: Upload image security scan result as artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: image-security-scan-feature-result name: image-security-scan-feature-result
path: image-trivy.json path: image-trivy.json
- name: download Trivy artifacts from develop branch built by nightly scan - name: Download artifacts from develop branch
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: | run: |
@@ -205,94 +194,45 @@ jobs:
echo "null" > ./image-trivy-develop.json echo "null" > ./image-trivy-develop.json
fi fi
- name: pr vs develop Trivy scan report comparison export to html - name: Export scan result to html file
run: | run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=trivy --path="/data/image-trivy-feature.json" --compare-to="/data/image-trivy-develop.json" --output-type=table --export --export-filename="/data/image-trivy-result") $(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=trivy -path="/data/image-trivy-feature.json" -compare-to="/data/image-trivy-develop.json" -output-type=table -export -export-filename="/data/image-result")
- name: upload html file as Trivy artifact - name: Upload image result html file
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
with: with:
name: html-image-result-compare-to-develop-${{github.run_id}} name: html-image-result-compare-to-develop-${{github.run_id}}
path: image-trivy-result.html path: image-result.html
- name: analyse different vulnerabilities against develop branch by Trivy - name: Analyse the image diff result
id: set-diff-trivy-matrix id: set-diff-matrix
run: | run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=trivy --path="/data/image-trivy-feature.json" --compare-to="/data/image-trivy-develop.json" --output-type=matrix) result=$(docker run --rm -v ${{ github.workspace }}:/data oscarzhou/scan-report:0.1.8 diff -report-type=trivy -path="/data/image-trivy-feature.json" -compare-to="./data/image-trivy-develop.json" -output-type=matrix)
echo "image_diff_trivy_result=${result}" >> $GITHUB_OUTPUT echo "::set-output name=image_diff_result::${result}"
- name: scan vulnerabilities by Docker Scout
uses: docker/scout-action@v1
continue-on-error: true
with:
command: cves
image: local-portainer:${{ github.sha }}
sarif-file: image-docker-scout.json
dockerhub-user: ${{ secrets.DOCKER_HUB_USERNAME }}
dockerhub-password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: upload Docker Scout image security scan result as artifact
uses: actions/upload-artifact@v3
with:
name: image-security-scan-feature-result
path: image-docker-scout.json
- name: download Docker Scout artifacts from develop branch built by nightly scan
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
mv ./image-docker-scout.json ./image-docker-scout-feature.json
(gh run download -n image-security-scan-develop-result -R ${{ github.repository }} 2>&1 >/dev/null) || :
if [[ -e ./image-docker-scout.json ]]; then
mv ./image-docker-scout.json ./image-docker-scout-develop.json
else
echo "null" > ./image-docker-scout-develop.json
fi
- name: pr vs develop Docker Scout scan report comparison export to html
run: |
$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=docker-scout --path="/data/image-docker-scout-feature.json" --compare-to="/data/image-docker-scout-develop.json" --output-type=table --export --export-filename="/data/image-docker-scout-result")
- name: upload html file as Docker Scout artifact
uses: actions/upload-artifact@v3
with:
name: html-image-result-compare-to-develop-${{github.run_id}}
path: image-docker-scout-result.html
- name: analyse different vulnerabilities against develop branch by Docker Scout
id: set-diff-docker-scout-matrix
run: |
result=$(docker run --rm -v ${{ github.workspace }}:/data portainerci/code-security-report:latest diff --report-type=docker-scout --path="/data/image-docker-scout-feature.json" --compare-to="/data/image-docker-scout-develop.json" --output-type=matrix)
echo "image_diff_docker_scout_result=${result}" >> $GITHUB_OUTPUT
result-analysis: result-analysis:
name: Analyse Scan Result Against develop Branch name: Analyse scan result compared to develop
needs: [client-dependencies, server-dependencies, image-vulnerability] needs: [client-dependencies, server-dependencies, image-vulnerability]
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: >- if: >-
github.event.pull_request && github.event.pull_request &&
github.event.review.body == '/scan' && github.event.review.body == '/scan'
github.event.pull_request.draft == false
strategy: strategy:
matrix: matrix:
jsdiff: ${{fromJson(needs.client-dependencies.outputs.jsdiff)}} jsdiff: ${{fromJson(needs.client-dependencies.outputs.jsdiff)}}
godiff: ${{fromJson(needs.server-dependencies.outputs.godiff)}} godiff: ${{fromJson(needs.server-dependencies.outputs.godiff)}}
imagediff-trivy: ${{fromJson(needs.image-vulnerability.outputs.imagediff-trivy)}} imagediff: ${{fromJson(needs.image-vulnerability.outputs.imagediff)}}
imagediff-docker-scout: ${{fromJson(needs.image-vulnerability.outputs.imagediff-docker-scout)}}
steps: steps:
- name: check job status of diff result - name: Check job status of diff result
if: >- if: >-
matrix.jsdiff.status == 'failure' || matrix.jsdiff.status == 'failure' ||
matrix.godiff.status == 'failure' || matrix.godiff.status == 'failure' ||
matrix.imagediff-trivy.status == 'failure' || matrix.imagediff.status == 'failure'
matrix.imagediff-docker-scout.status == 'failure'
run: | run: |
echo "${{ matrix.jsdiff.status }}" echo ${{ matrix.jsdiff.status }}
echo "${{ matrix.godiff.status }}" echo ${{ matrix.godiff.status }}
echo "${{ matrix.imagediff-trivy.status }}" echo ${{ matrix.imagediff.status }}
echo "${{ matrix.imagediff-docker-scout.status }}" echo ${{ matrix.jsdiff.summary }}
echo "${{ matrix.jsdiff.summary }}" echo ${{ matrix.godiff.summary }}
echo "${{ matrix.godiff.summary }}" echo ${{ matrix.imagediff.summary }}
echo "${{ matrix.imagediff-trivy.summary }}"
echo "${{ matrix.imagediff-docker-scout.summary }}"
exit 1 exit 1

View File

@@ -2,7 +2,6 @@ name: Close Stale Issues
on: on:
schedule: schedule:
- cron: '0 12 * * *' - cron: '0 12 * * *'
workflow_dispatch:
jobs: jobs:
stale: stale:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -10,7 +9,7 @@ jobs:
issues: write issues: write
steps: steps:
- uses: actions/stale@v8 - uses: actions/stale@v4.0.0
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,76 +1,29 @@
name: Test name: Test
on: push
env:
GO_VERSION: 1.21.9
NODE_VERSION: 18.x
on:
workflow_dispatch:
pull_request:
branches:
- master
- develop
- release/*
types:
- opened
- reopened
- synchronize
- ready_for_review
push:
branches:
- master
- develop
- release/*
jobs: jobs:
test-client: test-client:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps: steps:
- name: 'checkout the current branch' - uses: actions/checkout@v2
uses: actions/checkout@v4.1.1 - uses: actions/setup-node@v2
with: with:
ref: ${{ github.event.inputs.branch }} node-version: '18'
- name: 'set up node.js'
uses: actions/setup-node@v4.0.1
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'yarn' cache: 'yarn'
- run: yarn --frozen-lockfile - run: yarn --frozen-lockfile
- name: Run tests - name: Run tests
run: make test-client ARGS="--maxWorkers=2 --minWorkers=1" run: yarn jest --maxWorkers=2
# test-server:
test-server: # runs-on: ubuntu-latest
strategy: # env:
matrix: # GOPRIVATE: "github.com/portainer"
config: # steps:
- { platform: linux, arch: amd64 } # - uses: actions/checkout@v3
- { platform: linux, arch: arm64 } # - uses: actions/setup-go@v3
- { platform: windows, arch: amd64, version: 1809 } # with:
- { platform: windows, arch: amd64, version: ltsc2022 } # go-version: '1.18'
runs-on: ubuntu-latest # - name: Run tests
if: github.event.pull_request.draft == false # run: |
# cd api
steps: # go test ./...
- name: 'checkout the current branch'
uses: actions/checkout@v4.1.1
with:
ref: ${{ github.event.inputs.branch }}
- name: 'set up golang'
uses: actions/setup-go@v5.0.0
with:
go-version: ${{ env.GO_VERSION }}
- name: 'install dependencies'
run: make test-deps PLATFORM=linux ARCH=amd64
- name: 'update $PATH'
run: echo "$(pwd)/dist" >> $GITHUB_PATH
- name: 'run tests'
run: make test-server

View File

@@ -6,32 +6,22 @@ on:
- master - master
- develop - develop
- 'release/*' - 'release/*'
types:
- opened
- reopened
- synchronize
- ready_for_review
env:
GO_VERSION: 1.21.9
NODE_VERSION: 18.x
jobs: jobs:
openapi-spec: openapi-spec:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: actions/setup-go@v3 - uses: actions/setup-go@v3
with: with:
go-version: ${{ env.GO_VERSION }} go-version: '1.18'
- name: Download golang modules - name: Download golang modules
run: cd ./api && go get -t -v -d ./... run: cd ./api && go get -t -v -d ./...
- uses: actions/setup-node@v3 - uses: actions/setup-node@v3
with: with:
node-version: ${{ env.NODE_VERSION }} node-version: '14'
cache: 'yarn' cache: 'yarn'
- run: yarn --frozen-lockfile - run: yarn --frozen-lockfile

4
.gitignore vendored
View File

@@ -11,10 +11,8 @@ storybook-static
*.DS_Store *.DS_Store
.eslintcache .eslintcache
__debug_bin* __debug_bin
api/docs api/docs
.idea .idea
.env .env
go.work.sum

View File

@@ -1,40 +0,0 @@
linters:
# Disable all linters, the defaults don't pass on our code yet
disable-all: true
# Enable these for now
enable:
- unused
- depguard
- gosimple
- govet
- errorlint
- exportloopref
linters-settings:
depguard:
rules:
main:
deny:
- pkg: 'encoding/json'
desc: 'use github.com/segmentio/encoding/json'
- pkg: 'github.com/sirupsen/logrus'
desc: 'logging is allowed only by github.com/rs/zerolog'
- pkg: 'golang.org/x/exp'
desc: 'exp is not allowed'
- pkg: 'github.com/portainer/libcrypto'
desc: 'use github.com/portainer/portainer/pkg/libcrypto'
- pkg: 'github.com/portainer/libhttp'
desc: 'use github.com/portainer/portainer/pkg/libhttp'
files:
- '!**/*_test.go'
- '!**/base.go'
- '!**/base_tx.go'
# errorlint is causing a typecheck error for some reason. The go compiler will report these
# anyway, so ignore them from the linter
issues:
exclude-rules:
- path: ./
linters:
- typecheck

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
yarn lint-staged

View File

@@ -2,21 +2,15 @@
"printWidth": 180, "printWidth": 180,
"singleQuote": true, "singleQuote": true,
"htmlWhitespaceSensitivity": "strict", "htmlWhitespaceSensitivity": "strict",
"trailingComma": "es5",
"overrides": [ "overrides": [
{ {
"files": [ "files": ["*.html"],
"*.html"
],
"options": { "options": {
"parser": "angular" "parser": "angular"
} }
}, },
{ {
"files": [ "files": ["*.{j,t}sx", "*.ts"],
"*.{j,t}sx",
"*.ts"
],
"options": { "options": {
"printWidth": 80 "printWidth": 80
} }

55
.storybook/main.js Normal file
View File

@@ -0,0 +1,55 @@
const TsconfigPathsPlugin = require('tsconfig-paths-webpack-plugin');
module.exports = {
stories: ['../app/**/*.stories.mdx', '../app/**/*.stories.@(ts|tsx)'],
addons: [
'@storybook/addon-links',
'@storybook/addon-essentials',
{
name: '@storybook/addon-postcss',
options: {
cssLoaderOptions: {
importLoaders: 1,
modules: {
localIdentName: '[path][name]__[local]',
auto: true,
exportLocalsConvention: 'camelCaseOnly',
},
},
postcssLoaderOptions: {
implementation: require('postcss'),
},
},
},
],
webpackFinal: (config) => {
config.resolve.plugins = [
...(config.resolve.plugins || []),
new TsconfigPathsPlugin({
extensions: config.resolve.extensions,
}),
];
const svgRule = config.module.rules.find((rule) => rule.test && typeof rule.test.test === 'function' && rule.test.test('.svg'));
svgRule.test = new RegExp(svgRule.test.source.replace('svg|', ''));
config.module.rules.unshift({
test: /\.svg$/i,
type: 'asset',
resourceQuery: { not: [/c/] }, // exclude react component if *.svg?url
});
config.module.rules.unshift({
test: /\.svg$/i,
issuer: /\.(js|ts)(x)?$/,
resourceQuery: /c/, // *.svg?c
use: [{ loader: '@svgr/webpack', options: { icon: true } }],
});
return config;
},
core: {
builder: 'webpack5',
},
staticDirs: ['./public'],
};

View File

@@ -1,93 +0,0 @@
import { StorybookConfig } from '@storybook/react-webpack5';
import TsconfigPathsPlugin from 'tsconfig-paths-webpack-plugin';
import { Configuration } from 'webpack';
import postcss from 'postcss';
const config: StorybookConfig = {
stories: ['../app/**/*.stories.@(ts|tsx)'],
addons: [
'@storybook/addon-links',
'@storybook/addon-essentials',
{
name: '@storybook/addon-styling',
options: {
cssLoaderOptions: {
importLoaders: 1,
modules: {
localIdentName: '[path][name]__[local]',
auto: true,
exportLocalsConvention: 'camelCaseOnly',
},
},
postCss: {
implementation: postcss,
},
},
},
],
webpackFinal: (config) => {
const rules = config?.module?.rules || [];
const imageRule = rules.find((rule) => {
const test = (rule as { test: RegExp }).test;
if (!test) {
return false;
}
return test.test('.svg');
}) as { [key: string]: any };
imageRule.exclude = /\.svg$/;
rules.unshift({
test: /\.svg$/i,
type: 'asset',
resourceQuery: {
not: [/c/],
}, // exclude react component if *.svg?url
});
rules.unshift({
test: /\.svg$/i,
issuer: /\.(js|ts)(x)?$/,
resourceQuery: /c/,
// *.svg?c
use: [
{
loader: '@svgr/webpack',
options: {
icon: true,
},
},
],
});
return {
...config,
resolve: {
...config.resolve,
plugins: [
...(config.resolve?.plugins || []),
new TsconfigPathsPlugin({
extensions: config.resolve?.extensions,
}),
],
},
module: {
...config.module,
rules,
},
} satisfies Configuration;
},
staticDirs: ['./public'],
typescript: {
reactDocgen: 'react-docgen-typescript',
},
framework: {
name: '@storybook/react-webpack5',
options: {},
},
};
export default config;

View File

@@ -1,14 +1,14 @@
import '../app/assets/css'; import '../app/assets/css';
import React from 'react';
import { pushStateLocationPlugin, UIRouter } from '@uirouter/react'; import { pushStateLocationPlugin, UIRouter } from '@uirouter/react';
import { initialize as initMSW, mswLoader } from 'msw-storybook-addon'; import { initialize as initMSW, mswDecorator } from 'msw-storybook-addon';
import { handlers } from '../app/setup-tests/server-handlers'; import { handlers } from '@/setup-tests/server-handlers';
import { QueryClient, QueryClientProvider } from 'react-query'; import { QueryClient, QueryClientProvider } from 'react-query';
initMSW( // Initialize MSW
{ initMSW({
onUnhandledRequest: ({ method, url }) => { onUnhandledRequest: ({ method, url }) => {
if (url.startsWith('/api')) { if (url.pathname.startsWith('/api')) {
console.error(`Unhandled ${method} request to ${url}. console.error(`Unhandled ${method} request to ${url}.
This exception has been only logged in the console, however, it's strongly recommended to resolve this error as you don't want unmocked data in Storybook stories. This exception has been only logged in the console, however, it's strongly recommended to resolve this error as you don't want unmocked data in Storybook stories.
@@ -17,9 +17,7 @@ initMSW(
`); `);
} }
}, },
}, });
handlers
);
export const parameters = { export const parameters = {
actions: { argTypesRegex: '^on[A-Z].*' }, actions: { argTypesRegex: '^on[A-Z].*' },
@@ -46,6 +44,5 @@ export const decorators = [
</UIRouter> </UIRouter>
</QueryClientProvider> </QueryClientProvider>
), ),
mswDecorator,
]; ];
export const loaders = [mswLoader];

View File

@@ -2,22 +2,22 @@
/* tslint:disable */ /* tslint:disable */
/** /**
* Mock Service Worker (2.0.11). * Mock Service Worker (0.36.3).
* @see https://github.com/mswjs/msw * @see https://github.com/mswjs/msw
* - Please do NOT modify this file. * - Please do NOT modify this file.
* - Please do NOT serve this file on production. * - Please do NOT serve this file on production.
*/ */
const INTEGRITY_CHECKSUM = 'c5f7f8e188b673ea4e677df7ea3c5a39'; const INTEGRITY_CHECKSUM = '02f4ad4a2797f85668baf196e553d929';
const IS_MOCKED_RESPONSE = Symbol('isMockedResponse'); const bypassHeaderName = 'x-msw-bypass';
const activeClientIds = new Set(); const activeClientIds = new Set();
self.addEventListener('install', function () { self.addEventListener('install', function () {
self.skipWaiting(); return self.skipWaiting();
}); });
self.addEventListener('activate', function (event) { self.addEventListener('activate', async function (event) {
event.waitUntil(self.clients.claim()); return self.clients.claim();
}); });
self.addEventListener('message', async function (event) { self.addEventListener('message', async function (event) {
@@ -33,9 +33,7 @@ self.addEventListener('message', async function (event) {
return; return;
} }
const allClients = await self.clients.matchAll({ const allClients = await self.clients.matchAll();
type: 'window',
});
switch (event.data) { switch (event.data) {
case 'KEEPALIVE_REQUEST': { case 'KEEPALIVE_REQUEST': {
@@ -85,8 +83,165 @@ self.addEventListener('message', async function (event) {
} }
}); });
// Resolve the "main" client for the given event.
// Client that issues a request doesn't necessarily equal the client
// that registered the worker. It's with the latter the worker should
// communicate with during the response resolving phase.
async function resolveMainClient(event) {
const client = await self.clients.get(event.clientId);
if (client.frameType === 'top-level') {
return client;
}
const allClients = await self.clients.matchAll();
return allClients
.filter((client) => {
// Get only those clients that are currently visible.
return client.visibilityState === 'visible';
})
.find((client) => {
// Find the client ID that's recorded in the
// set of clients that have registered the worker.
return activeClientIds.has(client.id);
});
}
async function handleRequest(event, requestId) {
const client = await resolveMainClient(event);
const response = await getResponse(event, client, requestId);
// Send back the response clone for the "response:*" life-cycle events.
// Ensure MSW is active and ready to handle the message, otherwise
// this message will pend indefinitely.
if (client && activeClientIds.has(client.id)) {
(async function () {
const clonedResponse = response.clone();
sendToClient(client, {
type: 'RESPONSE',
payload: {
requestId,
type: clonedResponse.type,
ok: clonedResponse.ok,
status: clonedResponse.status,
statusText: clonedResponse.statusText,
body: clonedResponse.body === null ? null : await clonedResponse.text(),
headers: serializeHeaders(clonedResponse.headers),
redirected: clonedResponse.redirected,
},
});
})();
}
return response;
}
async function getResponse(event, client, requestId) {
const { request } = event;
const requestClone = request.clone();
const getOriginalResponse = () => fetch(requestClone);
// Bypass mocking when the request client is not active.
if (!client) {
return getOriginalResponse();
}
// Bypass initial page load requests (i.e. static assets).
// The absence of the immediate/parent client in the map of the active clients
// means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet
// and is not ready to handle requests.
if (!activeClientIds.has(client.id)) {
return await getOriginalResponse();
}
// Bypass requests with the explicit bypass header
if (requestClone.headers.get(bypassHeaderName) === 'true') {
const cleanRequestHeaders = serializeHeaders(requestClone.headers);
// Remove the bypass header to comply with the CORS preflight check.
delete cleanRequestHeaders[bypassHeaderName];
const originalRequest = new Request(requestClone, {
headers: new Headers(cleanRequestHeaders),
});
return fetch(originalRequest);
}
// Send the request to the client-side MSW.
const reqHeaders = serializeHeaders(request.headers);
const body = await request.text();
const clientMessage = await sendToClient(client, {
type: 'REQUEST',
payload: {
id: requestId,
url: request.url,
method: request.method,
headers: reqHeaders,
cache: request.cache,
mode: request.mode,
credentials: request.credentials,
destination: request.destination,
integrity: request.integrity,
redirect: request.redirect,
referrer: request.referrer,
referrerPolicy: request.referrerPolicy,
body,
bodyUsed: request.bodyUsed,
keepalive: request.keepalive,
},
});
switch (clientMessage.type) {
case 'MOCK_SUCCESS': {
return delayPromise(() => respondWithMock(clientMessage), clientMessage.payload.delay);
}
case 'MOCK_NOT_FOUND': {
return getOriginalResponse();
}
case 'NETWORK_ERROR': {
const { name, message } = clientMessage.payload;
const networkError = new Error(message);
networkError.name = name;
// Rejecting a request Promise emulates a network error.
throw networkError;
}
case 'INTERNAL_ERROR': {
const parsedBody = JSON.parse(clientMessage.payload.body);
console.error(
`\
[MSW] Uncaught exception in the request handler for "%s %s":
${parsedBody.location}
This exception has been gracefully handled as a 500 response, however, it's strongly recommended to resolve this error, as it indicates a mistake in your code. If you wish to mock an error response, please see this guide: https://mswjs.io/docs/recipes/mocking-error-responses\
`,
request.method,
request.url
);
return respondWithMock(clientMessage);
}
}
return getOriginalResponse();
}
self.addEventListener('fetch', function (event) { self.addEventListener('fetch', function (event) {
const { request } = event; const { request } = event;
const accept = request.headers.get('accept') || '';
// Bypass server-sent events.
if (accept.includes('text/event-stream')) {
return;
}
// Bypass navigation requests. // Bypass navigation requests.
if (request.mode === 'navigate') { if (request.mode === 'navigate') {
@@ -106,149 +261,36 @@ self.addEventListener('fetch', function (event) {
return; return;
} }
// Generate unique request ID. const requestId = uuidv4();
const requestId = crypto.randomUUID();
event.respondWith(handleRequest(event, requestId)); return event.respondWith(
handleRequest(event, requestId).catch((error) => {
if (error.name === 'NetworkError') {
console.warn('[MSW] Successfully emulated a network error for the "%s %s" request.', request.method, request.url);
return;
}
// At this point, any exception indicates an issue with the original request/response.
console.error(
`\
[MSW] Caught an exception from the "%s %s" request (%s). This is probably not a problem with Mock Service Worker. There is likely an additional logging output above.`,
request.method,
request.url,
`${error.name}: ${error.message}`
);
})
);
}); });
async function handleRequest(event, requestId) { function serializeHeaders(headers) {
const client = await resolveMainClient(event); const reqHeaders = {};
const response = await getResponse(event, client, requestId); headers.forEach((value, name) => {
reqHeaders[name] = reqHeaders[name] ? [].concat(reqHeaders[name]).concat(value) : value;
// Send back the response clone for the "response:*" life-cycle events.
// Ensure MSW is active and ready to handle the message, otherwise
// this message will pend indefinitely.
if (client && activeClientIds.has(client.id)) {
(async function () {
const responseClone = response.clone();
sendToClient(
client,
{
type: 'RESPONSE',
payload: {
requestId,
isMockedResponse: IS_MOCKED_RESPONSE in response,
type: responseClone.type,
status: responseClone.status,
statusText: responseClone.statusText,
body: responseClone.body,
headers: Object.fromEntries(responseClone.headers.entries()),
},
},
[responseClone.body]
);
})();
}
return response;
}
// Resolve the main client for the given event.
// Client that issues a request doesn't necessarily equal the client
// that registered the worker. It's with the latter the worker should
// communicate with during the response resolving phase.
async function resolveMainClient(event) {
const client = await self.clients.get(event.clientId);
if (client?.frameType === 'top-level') {
return client;
}
const allClients = await self.clients.matchAll({
type: 'window',
});
return allClients
.filter((client) => {
// Get only those clients that are currently visible.
return client.visibilityState === 'visible';
})
.find((client) => {
// Find the client ID that's recorded in the
// set of clients that have registered the worker.
return activeClientIds.has(client.id);
}); });
return reqHeaders;
} }
async function getResponse(event, client, requestId) { function sendToClient(client, message) {
const { request } = event;
// Clone the request because it might've been already used
// (i.e. its body has been read and sent to the client).
const requestClone = request.clone();
function passthrough() {
const headers = Object.fromEntries(requestClone.headers.entries());
// Remove internal MSW request header so the passthrough request
// complies with any potential CORS preflight checks on the server.
// Some servers forbid unknown request headers.
delete headers['x-msw-intention'];
return fetch(requestClone, { headers });
}
// Bypass mocking when the client is not active.
if (!client) {
return passthrough();
}
// Bypass initial page load requests (i.e. static assets).
// The absence of the immediate/parent client in the map of the active clients
// means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet
// and is not ready to handle requests.
if (!activeClientIds.has(client.id)) {
return passthrough();
}
// Bypass requests with the explicit bypass header.
// Such requests can be issued by "ctx.fetch()".
const mswIntention = request.headers.get('x-msw-intention');
if (['bypass', 'passthrough'].includes(mswIntention)) {
return passthrough();
}
// Notify the client that a request has been intercepted.
const requestBuffer = await request.arrayBuffer();
const clientMessage = await sendToClient(
client,
{
type: 'REQUEST',
payload: {
id: requestId,
url: request.url,
mode: request.mode,
method: request.method,
headers: Object.fromEntries(request.headers.entries()),
cache: request.cache,
credentials: request.credentials,
destination: request.destination,
integrity: request.integrity,
redirect: request.redirect,
referrer: request.referrer,
referrerPolicy: request.referrerPolicy,
body: requestBuffer,
keepalive: request.keepalive,
},
},
[requestBuffer]
);
switch (clientMessage.type) {
case 'MOCK_RESPONSE': {
return respondWithMock(clientMessage.data);
}
case 'MOCK_NOT_FOUND': {
return passthrough();
}
}
return passthrough();
}
function sendToClient(client, message, transferrables = []) {
return new Promise((resolve, reject) => { return new Promise((resolve, reject) => {
const channel = new MessageChannel(); const channel = new MessageChannel();
@@ -260,25 +302,27 @@ function sendToClient(client, message, transferrables = []) {
resolve(event.data); resolve(event.data);
}; };
client.postMessage(message, [channel.port2].concat(transferrables.filter(Boolean))); client.postMessage(JSON.stringify(message), [channel.port2]);
}); });
} }
async function respondWithMock(response) { function delayPromise(cb, duration) {
// Setting response status code to 0 is a no-op. return new Promise((resolve) => {
// However, when responding with a "Response.error()", the produced Response setTimeout(() => resolve(cb()), duration);
// instance will have status code set to 0. Since it's not possible to create });
// a Response instance with status code 0, handle that use-case separately. }
if (response.status === 0) {
return Response.error(); function respondWithMock(clientMessage) {
} return new Response(clientMessage.payload.body, {
...clientMessage.payload,
const mockedResponse = new Response(response.body, response); headers: clientMessage.payload.headers,
});
Reflect.defineProperty(mockedResponse, IS_MOCKED_RESPONSE, { }
value: true,
enumerable: true, function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
const r = (Math.random() * 16) | 0;
const v = c == 'x' ? r : (r & 0x3) | 0x8;
return v.toString(16);
}); });
return mockedResponse;
} }

View File

@@ -15,15 +15,6 @@
// ], // ],
// "description": "Log output to console" // "description": "Log output to console"
// } // }
"React Named Export Component": {
"prefix": "rnec",
"body": [
"export function $TM_FILENAME_BASE() {",
" return <div>$TM_FILENAME_BASE</div>;",
"}"
],
"description": "React Named Export Component"
},
"Component": { "Component": {
"scope": "javascript", "scope": "javascript",
"prefix": "mycomponent", "prefix": "mycomponent",

View File

@@ -79,33 +79,25 @@ The feature request process is similar to the bug report process but has an extr
Ensure you have Docker, Node.js, yarn, and Golang installed in the correct versions. Ensure you have Docker, Node.js, yarn, and Golang installed in the correct versions.
Install dependencies: Install dependencies with yarn:
```sh ```sh
$ make deps $ yarn
``` ```
Then build and run the project in a Docker container: Then build and run the project in a Docker container:
```sh ```sh
$ make dev $ yarn start
``` ```
Portainer server can now be accessed at <https://localhost:9443>. and UI dev server runs on <http://localhost:8999>. Portainer can now be accessed at <https://localhost:9443>.
if you want to build the project you can run:
```sh
make build-all
```
For additional make commands, run `make help`.
Find more detailed steps at <https://docs.portainer.io/contribute/build>. Find more detailed steps at <https://docs.portainer.io/contribute/build>.
### Build customization ### Build customisation
You can customize the following settings: You can customise the following settings:
- `PORTAINER_DATA`: The host dir or volume name used by portainer (default is `/tmp/portainer`, which won't persist over reboots). - `PORTAINER_DATA`: The host dir or volume name used by portainer (default is `/tmp/portainer`, which won't persist over reboots).
- `PORTAINER_PROJECT`: The root dir of the repository - `${portainerRoot}/dist/` is imported into the container to get the build artifacts and external tools (defaults to `your current dir`). - `PORTAINER_PROJECT`: The root dir of the repository - `${portainerRoot}/dist/` is imported into the container to get the build artifacts and external tools (defaults to `your current dir`).

View File

@@ -4,126 +4,119 @@
PLATFORM=$(shell go env GOOS) PLATFORM=$(shell go env GOOS)
ARCH=$(shell go env GOARCH) ARCH=$(shell go env GOARCH)
TAG=latest
SWAG_VERSION=v1.8.11
# build target, can be one of "production", "testing", "development" # build target, can be one of "production", "testing", "development"
ENV=development ENV=development
WEBPACK_CONFIG=webpack/webpack.$(ENV).js WEBPACK_CONFIG=webpack/webpack.$(ENV).js
TAG=local
SWAG=go run github.com/swaggo/swag/cmd/swag@v1.16.2
GOTESTSUM=go run gotest.tools/gotestsum@latest
# Don't change anything below this line unless you know what you're doing
.DEFAULT_GOAL := help .DEFAULT_GOAL := help
.PHONY: help build-storybook build-client devops download-binaries tidy clean client-deps
##@ Building ##@ Building
.PHONY: init-dist build-storybook build build-client build-server build-image devops
init-dist: init-dist:
@mkdir -p dist @mkdir -p dist
build-all: deps build-server build-client ## Build the client, server and download external dependancies (doesn't build an image) build-storybook:
yarn storybook:build
build-client: init-dist ## Build the client build: build-server build-client ## Build the server and client
build-client: init-dist client-deps ## Build the client
export NODE_ENV=$(ENV) && yarn build --config $(WEBPACK_CONFIG) export NODE_ENV=$(ENV) && yarn build --config $(WEBPACK_CONFIG)
build-server: init-dist ## Build the server binary build-server: init-dist ## Build the server binary
./build/build_binary.sh "$(PLATFORM)" "$(ARCH)" ./build/build_binary.sh "$(PLATFORM)" "$(ARCH)"
build-image: build-all ## Build the Portainer image locally build-image: build ## Build the Portainer image
docker buildx build --load -t portainerci/portainer:$(TAG) -f build/linux/Dockerfile . docker buildx build --load -t portainerci/portainer:$(TAG) -f build/linux/Dockerfile .
build-storybook: ## Build and serve the storybook files devops: clean init-dist download-binaries build-client ## Build the server binary for CI
yarn storybook:build
devops: clean deps build-client ## Build the everything target specifically for CI
echo "Building the devops binary..." echo "Building the devops binary..."
@./build/build_binary_azuredevops.sh "$(PLATFORM)" "$(ARCH)" @./build/build_binary_azuredevops.sh "$(PLATFORM)" "$(ARCH)"
##@ Build dependencies ##@ Dependencies
.PHONY: deps server-deps client-deps tidy
deps: server-deps client-deps ## Download all client and server build dependancies
server-deps: init-dist ## Download dependant server binaries download-binaries: ## Download dependant binaries
@./build/download_binaries.sh $(PLATFORM) $(ARCH) @./build/download_binaries.sh $(PLATFORM) $(ARCH)
client-deps: ## Install client dependencies
yarn
tidy: ## Tidy up the go.mod file tidy: ## Tidy up the go.mod file
cd api && go mod tidy cd api && go mod tidy
client-deps: ## Install client dependencies
yarn
##@ Cleanup ##@ Cleanup
.PHONY: clean
clean: ## Remove all build and download artifacts clean: ## Remove all build and download artifacts
@echo "Clearing the dist directory..." @echo "Clearing the dist directory..."
@rm -rf dist/* @rm -rf dist/*
##@ Testing ##@ Testing
.PHONY: test test-client test-server
test: test-server test-client ## Run all tests
test-deps: init-dist
./build/download_docker_compose_binary.sh $(PLATFORM) $(ARCH) $(shell jq -r '.dockerCompose' < "./binary-version.json")
test-client: ## Run client tests test-client: ## Run client tests
yarn test $(ARGS) yarn test
test-server: ## Run server tests test-server: ## Run server tests
$(GOTESTSUM) --format pkgname-and-test-fails --format-hide-empty-pkg --hide-summary skipped -- -cover ./... cd api && go test -v ./...
test: test-client test-server ## Run all tests
##@ Dev ##@ Dev
.PHONY: dev dev-client dev-server
dev: ## Run both the client and server in development mode
make dev-server
make dev-client
dev-client: ## Run the client in development mode dev-client: ## Run the client in development mode
yarn dev yarn dev
dev-server: build-server ## Run the server in development mode dev-server: build-image ## Run the server in development mode
@./dev/run_container.sh @./dev/run_container.sh
##@ Format ##@ Format
.PHONY: format format-client format-server
format: format-client format-server ## Format all code
format-client: ## Format client code format-client: ## Format client code
yarn format yarn format
format-server: ## Format server code format-server: ## Format server code
go fmt ./... cd api && go fmt ./...
format: format-client format-server ## Format all code
##@ Lint ##@ Lint
.PHONY: lint lint-client lint-server
lint: lint-client lint-server ## Lint all code lint: lint-client lint-server ## Lint all code
lint-client: ## Lint client code lint-client: ## Lint client code
yarn lint yarn lint
lint-server: ## Lint server code lint-server: ## Lint server code
golangci-lint run --timeout=10m -c .golangci.yaml cd api && go vet ./...
##@ Extension ##@ Extension
.PHONY: dev-extension
dev-extension: build-server build-client ## Run the extension in development mode dev-extension: build-server build-client ## Run the extension in development mode
make local -f build/docker-extension/Makefile make local -f build/docker-extension/Makefile
##@ Docs ##@ Docs
.PHONY: docs-build docs-validate docs-clean docs-validate-clean
docs-build: init-dist ## Build docs docs-deps: ## Install docs dependencies
cd api && $(SWAG) init -o "../dist/docs" -ot "yaml" -g ./http/handler/handler.go --parseDependency --parseInternal --parseDepth 2 -p pascalcase --markdownFiles ./ go install github.com/swaggo/swag/cmd/swag@$(SWAG_VERSION)
docs-build: docs-deps ## Build docs
cd api && swag init -g ./http/handler/handler.go --parseDependency --parseInternal --parseDepth 2 --markdownFiles ./
docs-validate: docs-build ## Validate docs docs-validate: docs-build ## Validate docs
yarn swagger2openapi --warnOnly dist/docs/swagger.yaml -o dist/docs/openapi.yaml yarn swagger2openapi --warnOnly api/docs/swagger.yaml -o api/docs/openapi.yaml
yarn swagger-cli validate dist/docs/openapi.yaml yarn swagger-cli validate api/docs/openapi.yaml
docs-clean: ## Clean docs
rm -rf api/docs
docs-validate-clean: docs-validate docs-clean ## Validate and clean docs
##@ Helpers ##@ Helpers
.PHONY: help
help: ## Display this help help: ## Display this help
@awk 'BEGIN {FS = ":.*##"; printf "Usage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

View File

@@ -9,7 +9,7 @@ Portainer consists of a single container that can run on any cluster. It can be
**Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users. **Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users.
- [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products) - [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products)
- [Take3 get 3 free nodes of Portainer Business for as long as you want them](https://www.portainer.io/take-3) - [Take5 get 5 free nodes of Portainer Business for as long as you want them](https://portainer.io/pricing/take5)
- [Portainer BE install guide](https://install.portainer.io) - [Portainer BE install guide](https://install.portainer.io)
## Latest Version ## Latest Version
@@ -21,8 +21,8 @@ Portainer CE is updated regularly. We aim to do an update release every couple o
## Getting started ## Getting started
- [Deploy Portainer](https://docs.portainer.io/start/install) - [Deploy Portainer](https://docs.portainer.io/start/install)
- [Documentation](https://docs.portainer.io) - [Documentation](https://documentation.portainer.io)
- [Contribute to the project](https://docs.portainer.io/contribute/contribute) - [Contribute to the project](https://documentation.portainer.io/contributing/instructions/)
## Features & Functions ## Features & Functions
@@ -30,22 +30,23 @@ View [this](https://www.portainer.io/products) table to see all of the Portainer
- [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker) - [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker)
- [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui) - [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui)
- [Portainer CE for Azure ACI](https://www.portainer.io/solutions/serverless-containers)
## Getting help ## Getting help
Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io
Learn more about Portainer's community support channels [here.](https://www.portainer.io/get-support-for-portainer) Learn more about Portainer's community support channels [here.](https://www.portainer.io/community_help)
- Issues: https://github.com/portainer/portainer/issues - Issues: https://github.com/portainer/portainer/issues
- Slack (chat): [https://portainer.io/slack](https://portainer.io/slack) - Slack (chat): [https://portainer.io/slack](https://portainer.io/slack)
You can join the Portainer Community by visiting [https://www.portainer.io/join-our-community](https://www.portainer.io/join-our-community). This will give you advance notice of events, content and other related Portainer content. You can join the Portainer Community by visiting community.portainer.io. This will give you advance notice of events, content and other related Portainer content.
## Reporting bugs and contributing ## Reporting bugs and contributing
- Want to report a bug or request a feature? Please open [an issue](https://github.com/portainer/portainer/issues/new). - Want to report a bug or request a feature? Please open [an issue](https://github.com/portainer/portainer/issues/new).
- Want to help us build **_portainer_**? Follow our [contribution guidelines](https://docs.portainer.io/contribute/contribute) to build it locally and make a pull request. - Want to help us build **_portainer_**? Follow our [contribution guidelines](https://documentation.portainer.io/contributing/instructions/) to build it locally and make a pull request.
## Security ## Security
@@ -59,7 +60,7 @@ If you are a developer, and our code in this repo makes sense to you, we would l
**To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.** **To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.**
When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer. When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/documentation/in-app-analytics-and-privacy-policy/). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
## Limitations ## Limitations

26
api/.golangci.yaml Normal file
View File

@@ -0,0 +1,26 @@
linters:
# Disable all linters.
disable-all: true
enable:
- depguard
linters-settings:
depguard:
list-type: denylist
include-go-root: true
packages:
- github.com/sirupsen/logrus
- golang.org/x/exp
packages-with-error-message:
- github.com/sirupsen/logrus: 'logging is allowed only by github.com/rs/zerolog'
ignore-file-rules:
- "**/*_test.go"
# Create additional guards that follow the same configuration pattern.
# Results from all guards are aggregated together.
# additional-guards:
# - list-type: allowlist
# include-go-root: false
# packages:
# - github.com/sirupsen/logrus
# # Specify rules by which the linter ignores certain files for consideration.
# ignore-file-rules:
# - "!**/*_test.go"

View File

@@ -7,9 +7,9 @@ import (
"sync" "sync"
"time" "time"
httperror "github.com/portainer/libhttp/error"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )

View File

@@ -4,7 +4,6 @@ import (
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
"io"
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
@@ -43,9 +42,7 @@ func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (port
if err != nil { if err != nil {
return 0, "", err return 0, "", err
} }
defer resp.Body.Close()
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
if resp.StatusCode != http.StatusNoContent { if resp.StatusCode != http.StatusNoContent {
return 0, "", fmt.Errorf("Failed request with status %d", resp.StatusCode) return 0, "", fmt.Errorf("Failed request with status %d", resp.StatusCode)

View File

@@ -1,17 +1,30 @@
package apikey package apikey
import ( import (
"crypto/rand"
"io"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
) )
// APIKeyService represents a service for managing API keys. // APIKeyService represents a service for managing API keys.
type APIKeyService interface { type APIKeyService interface {
HashRaw(rawKey string) string HashRaw(rawKey string) []byte
GenerateApiKey(user portainer.User, description string) (string, *portainer.APIKey, error) GenerateApiKey(user portainer.User, description string) (string, *portainer.APIKey, error)
GetAPIKey(apiKeyID portainer.APIKeyID) (*portainer.APIKey, error) GetAPIKey(apiKeyID portainer.APIKeyID) (*portainer.APIKey, error)
GetAPIKeys(userID portainer.UserID) ([]portainer.APIKey, error) GetAPIKeys(userID portainer.UserID) ([]portainer.APIKey, error)
GetDigestUserAndKey(digest string) (portainer.User, portainer.APIKey, error) GetDigestUserAndKey(digest []byte) (portainer.User, portainer.APIKey, error)
UpdateAPIKey(apiKey *portainer.APIKey) error UpdateAPIKey(apiKey *portainer.APIKey) error
DeleteAPIKey(apiKeyID portainer.APIKeyID) error DeleteAPIKey(apiKeyID portainer.APIKeyID) error
InvalidateUserKeyCache(userId portainer.UserID) bool InvalidateUserKeyCache(userId portainer.UserID) bool
} }
// generateRandomKey generates a random key of specified length
// source: https://github.com/gorilla/securecookie/blob/master/securecookie.go#L515
func generateRandomKey(length int) []byte {
k := make([]byte, length)
if _, err := io.ReadFull(rand.Reader, k); err != nil {
return nil
}
return k
}

View File

@@ -3,7 +3,6 @@ package apikey
import ( import (
"testing" "testing"
"github.com/portainer/portainer/api/internal/securecookie"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -34,7 +33,7 @@ func Test_generateRandomKey(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got := securecookie.GenerateRandomKey(tt.wantLenth) got := generateRandomKey(tt.wantLenth)
is.Equal(tt.wantLenth, len(got)) is.Equal(tt.wantLenth, len(got))
}) })
} }
@@ -42,7 +41,7 @@ func Test_generateRandomKey(t *testing.T) {
t.Run("Generated keys are unique", func(t *testing.T) { t.Run("Generated keys are unique", func(t *testing.T) {
keys := make(map[string]bool) keys := make(map[string]bool)
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
key := securecookie.GenerateRandomKey(8) key := generateRandomKey(8)
_, ok := keys[string(key)] _, ok := keys[string(key)]
is.False(ok) is.False(ok)
keys[string(key)] = true keys[string(key)] = true

View File

@@ -33,8 +33,8 @@ func NewAPIKeyCache(cacheSize int) *apiKeyCache {
// Get returns the user/key associated to an api-key's digest // Get returns the user/key associated to an api-key's digest
// This is required because HTTP requests will contain the digest of the API key in header, // This is required because HTTP requests will contain the digest of the API key in header,
// the digest value must be mapped to a portainer user. // the digest value must be mapped to a portainer user.
func (c *apiKeyCache) Get(digest string) (portainer.User, portainer.APIKey, bool) { func (c *apiKeyCache) Get(digest []byte) (portainer.User, portainer.APIKey, bool) {
val, ok := c.cache.Get(digest) val, ok := c.cache.Get(string(digest))
if !ok { if !ok {
return portainer.User{}, portainer.APIKey{}, false return portainer.User{}, portainer.APIKey{}, false
} }
@@ -44,23 +44,23 @@ func (c *apiKeyCache) Get(digest string) (portainer.User, portainer.APIKey, bool
} }
// Set persists a user/key entry to the cache // Set persists a user/key entry to the cache
func (c *apiKeyCache) Set(digest string, user portainer.User, apiKey portainer.APIKey) { func (c *apiKeyCache) Set(digest []byte, user portainer.User, apiKey portainer.APIKey) {
c.cache.Add(digest, entry{ c.cache.Add(string(digest), entry{
user: user, user: user,
apiKey: apiKey, apiKey: apiKey,
}) })
} }
// Delete evicts a digest's user/key entry key from the cache // Delete evicts a digest's user/key entry key from the cache
func (c *apiKeyCache) Delete(digest string) { func (c *apiKeyCache) Delete(digest []byte) {
c.cache.Remove(digest) c.cache.Remove(string(digest))
} }
// InvalidateUserKeyCache loops through all the api-keys associated to a user and removes them from the cache // InvalidateUserKeyCache loops through all the api-keys associated to a user and removes them from the cache
func (c *apiKeyCache) InvalidateUserKeyCache(userId portainer.UserID) bool { func (c *apiKeyCache) InvalidateUserKeyCache(userId portainer.UserID) bool {
present := false present := false
for _, k := range c.cache.Keys() { for _, k := range c.cache.Keys() {
user, _, _ := c.Get(k.(string)) user, _, _ := c.Get([]byte(k.(string)))
if user.ID == userId { if user.ID == userId {
present = c.cache.Remove(k) present = c.cache.Remove(k)
} }

View File

@@ -17,19 +17,19 @@ func Test_apiKeyCacheGet(t *testing.T) {
keyCache.cache.Add(string(""), entry{user: portainer.User{}, apiKey: portainer.APIKey{}}) keyCache.cache.Add(string(""), entry{user: portainer.User{}, apiKey: portainer.APIKey{}})
tests := []struct { tests := []struct {
digest string digest []byte
found bool found bool
}{ }{
{ {
digest: "foo", digest: []byte("foo"),
found: true, found: true,
}, },
{ {
digest: "", digest: []byte(""),
found: true, found: true,
}, },
{ {
digest: "bar", digest: []byte("bar"),
found: false, found: false,
}, },
} }
@@ -48,11 +48,11 @@ func Test_apiKeyCacheSet(t *testing.T) {
keyCache := NewAPIKeyCache(10) keyCache := NewAPIKeyCache(10)
// pre-populate cache // pre-populate cache
keyCache.Set("bar", portainer.User{ID: 2}, portainer.APIKey{}) keyCache.Set([]byte("bar"), portainer.User{ID: 2}, portainer.APIKey{})
keyCache.Set("foo", portainer.User{ID: 1}, portainer.APIKey{}) keyCache.Set([]byte("foo"), portainer.User{ID: 1}, portainer.APIKey{})
// overwrite existing entry // overwrite existing entry
keyCache.Set("foo", portainer.User{ID: 3}, portainer.APIKey{}) keyCache.Set([]byte("foo"), portainer.User{ID: 3}, portainer.APIKey{})
val, ok := keyCache.cache.Get(string("bar")) val, ok := keyCache.cache.Get(string("bar"))
is.True(ok) is.True(ok)
@@ -74,14 +74,14 @@ func Test_apiKeyCacheDelete(t *testing.T) {
t.Run("Delete an existing entry", func(t *testing.T) { t.Run("Delete an existing entry", func(t *testing.T) {
keyCache.cache.Add(string("foo"), entry{user: portainer.User{ID: 1}, apiKey: portainer.APIKey{}}) keyCache.cache.Add(string("foo"), entry{user: portainer.User{ID: 1}, apiKey: portainer.APIKey{}})
keyCache.Delete("foo") keyCache.Delete([]byte("foo"))
_, ok := keyCache.cache.Get(string("foo")) _, ok := keyCache.cache.Get(string("foo"))
is.False(ok) is.False(ok)
}) })
t.Run("Delete a non-existing entry", func(t *testing.T) { t.Run("Delete a non-existing entry", func(t *testing.T) {
nonPanicFunc := func() { keyCache.Delete("non-existent-key") } nonPanicFunc := func() { keyCache.Delete([]byte("non-existent-key")) }
is.NotPanics(nonPanicFunc) is.NotPanics(nonPanicFunc)
}) })
} }
@@ -131,16 +131,16 @@ func Test_apiKeyCacheLRU(t *testing.T) {
keyCache := NewAPIKeyCache(test.cacheLen) keyCache := NewAPIKeyCache(test.cacheLen)
for _, key := range test.key { for _, key := range test.key {
keyCache.Set(key, portainer.User{ID: 1}, portainer.APIKey{}) keyCache.Set([]byte(key), portainer.User{ID: 1}, portainer.APIKey{})
} }
for _, key := range test.foundKeys { for _, key := range test.foundKeys {
_, _, found := keyCache.Get(key) _, _, found := keyCache.Get([]byte(key))
is.True(found, "Key %s not found", key) is.True(found, "Key %s not found", key)
} }
for _, key := range test.evictedKeys { for _, key := range test.evictedKeys {
_, _, found := keyCache.Get(key) _, _, found := keyCache.Get([]byte(key))
is.False(found, "key %s should have been evicted", key) is.False(found, "key %s should have been evicted", key)
} }
}) })

View File

@@ -8,7 +8,6 @@ import (
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/securecookie"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -32,15 +31,15 @@ func NewAPIKeyService(apiKeyRepository dataservices.APIKeyRepository, userReposi
} }
// HashRaw computes a hash digest of provided raw API key. // HashRaw computes a hash digest of provided raw API key.
func (a *apiKeyService) HashRaw(rawKey string) string { func (a *apiKeyService) HashRaw(rawKey string) []byte {
hashDigest := sha256.Sum256([]byte(rawKey)) hashDigest := sha256.Sum256([]byte(rawKey))
return base64.StdEncoding.EncodeToString(hashDigest[:]) return hashDigest[:]
} }
// GenerateApiKey generates a raw API key for a user (for one-time display). // GenerateApiKey generates a raw API key for a user (for one-time display).
// The generated API key is stored in the cache and database. // The generated API key is stored in the cache and database.
func (a *apiKeyService) GenerateApiKey(user portainer.User, description string) (string, *portainer.APIKey, error) { func (a *apiKeyService) GenerateApiKey(user portainer.User, description string) (string, *portainer.APIKey, error) {
randKey := securecookie.GenerateRandomKey(32) randKey := generateRandomKey(32)
encodedRawAPIKey := base64.StdEncoding.EncodeToString(randKey) encodedRawAPIKey := base64.StdEncoding.EncodeToString(randKey)
prefixedAPIKey := portainerAPIKeyPrefix + encodedRawAPIKey prefixedAPIKey := portainerAPIKeyPrefix + encodedRawAPIKey
@@ -54,7 +53,7 @@ func (a *apiKeyService) GenerateApiKey(user portainer.User, description string)
Digest: hashDigest, Digest: hashDigest,
} }
err := a.apiKeyRepository.Create(apiKey) err := a.apiKeyRepository.CreateAPIKey(apiKey)
if err != nil { if err != nil {
return "", nil, errors.Wrap(err, "Unable to create API key") return "", nil, errors.Wrap(err, "Unable to create API key")
} }
@@ -67,7 +66,7 @@ func (a *apiKeyService) GenerateApiKey(user portainer.User, description string)
// GetAPIKey returns an API key by its ID. // GetAPIKey returns an API key by its ID.
func (a *apiKeyService) GetAPIKey(apiKeyID portainer.APIKeyID) (*portainer.APIKey, error) { func (a *apiKeyService) GetAPIKey(apiKeyID portainer.APIKeyID) (*portainer.APIKey, error) {
return a.apiKeyRepository.Read(apiKeyID) return a.apiKeyRepository.GetAPIKey(apiKeyID)
} }
// GetAPIKeys returns all the API keys associated to a user. // GetAPIKeys returns all the API keys associated to a user.
@@ -77,7 +76,7 @@ func (a *apiKeyService) GetAPIKeys(userID portainer.UserID) ([]portainer.APIKey,
// GetDigestUserAndKey returns the user and api-key associated to a specified hash digest. // GetDigestUserAndKey returns the user and api-key associated to a specified hash digest.
// A cache lookup is performed first; if the user/api-key is not found in the cache, respective database lookups are performed. // A cache lookup is performed first; if the user/api-key is not found in the cache, respective database lookups are performed.
func (a *apiKeyService) GetDigestUserAndKey(digest string) (portainer.User, portainer.APIKey, error) { func (a *apiKeyService) GetDigestUserAndKey(digest []byte) (portainer.User, portainer.APIKey, error) {
// get api key from cache if possible // get api key from cache if possible
cachedUser, cachedKey, ok := a.cache.Get(digest) cachedUser, cachedKey, ok := a.cache.Get(digest)
if ok { if ok {
@@ -89,7 +88,7 @@ func (a *apiKeyService) GetDigestUserAndKey(digest string) (portainer.User, port
return portainer.User{}, portainer.APIKey{}, errors.Wrap(err, "Unable to retrieve API key") return portainer.User{}, portainer.APIKey{}, errors.Wrap(err, "Unable to retrieve API key")
} }
user, err := a.userRepository.Read(apiKey.UserID) user, err := a.userRepository.User(apiKey.UserID)
if err != nil { if err != nil {
return portainer.User{}, portainer.APIKey{}, errors.Wrap(err, "Unable to retrieve digest user") return portainer.User{}, portainer.APIKey{}, errors.Wrap(err, "Unable to retrieve digest user")
} }
@@ -107,20 +106,20 @@ func (a *apiKeyService) UpdateAPIKey(apiKey *portainer.APIKey) error {
return errors.Wrap(err, "Unable to retrieve API key") return errors.Wrap(err, "Unable to retrieve API key")
} }
a.cache.Set(apiKey.Digest, user, *apiKey) a.cache.Set(apiKey.Digest, user, *apiKey)
return a.apiKeyRepository.Update(apiKey.ID, apiKey) return a.apiKeyRepository.UpdateAPIKey(apiKey)
} }
// DeleteAPIKey deletes an API key and removes the digest/api-key entry from the cache. // DeleteAPIKey deletes an API key and removes the digest/api-key entry from the cache.
func (a *apiKeyService) DeleteAPIKey(apiKeyID portainer.APIKeyID) error { func (a *apiKeyService) DeleteAPIKey(apiKeyID portainer.APIKeyID) error {
// get api-key digest to remove from cache // get api-key digest to remove from cache
apiKey, err := a.apiKeyRepository.Read(apiKeyID) apiKey, err := a.apiKeyRepository.GetAPIKey(apiKeyID)
if err != nil { if err != nil {
return errors.Wrap(err, fmt.Sprintf("Unable to retrieve API key: %d", apiKeyID)) return errors.Wrap(err, fmt.Sprintf("Unable to retrieve API key: %d", apiKeyID))
} }
// delete the user/api-key from cache // delete the user/api-key from cache
a.cache.Delete(apiKey.Digest) a.cache.Delete(apiKey.Digest)
return a.apiKeyRepository.Delete(apiKeyID) return a.apiKeyRepository.DeleteAPIKey(apiKeyID)
} }
func (a *apiKeyService) InvalidateUserKeyCache(userId portainer.UserID) bool { func (a *apiKeyService) InvalidateUserKeyCache(userId portainer.UserID) bool {

View File

@@ -2,7 +2,6 @@ package apikey
import ( import (
"crypto/sha256" "crypto/sha256"
"encoding/base64"
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
@@ -23,7 +22,8 @@ func Test_SatisfiesAPIKeyServiceInterface(t *testing.T) {
func Test_GenerateApiKey(t *testing.T) { func Test_GenerateApiKey(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -69,14 +69,15 @@ func Test_GenerateApiKey(t *testing.T) {
generatedDigest := sha256.Sum256([]byte(rawKey)) generatedDigest := sha256.Sum256([]byte(rawKey))
is.Equal(apiKey.Digest, base64.StdEncoding.EncodeToString(generatedDigest[:])) is.Equal(apiKey.Digest, generatedDigest[:])
}) })
} }
func Test_GetAPIKey(t *testing.T) { func Test_GetAPIKey(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -95,7 +96,8 @@ func Test_GetAPIKey(t *testing.T) {
func Test_GetAPIKeys(t *testing.T) { func Test_GetAPIKeys(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -115,7 +117,8 @@ func Test_GetAPIKeys(t *testing.T) {
func Test_GetDigestUserAndKey(t *testing.T) { func Test_GetDigestUserAndKey(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -150,7 +153,8 @@ func Test_GetDigestUserAndKey(t *testing.T) {
func Test_UpdateAPIKey(t *testing.T) { func Test_UpdateAPIKey(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -195,7 +199,8 @@ func Test_UpdateAPIKey(t *testing.T) {
func Test_DeleteAPIKey(t *testing.T) { func Test_DeleteAPIKey(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())
@@ -235,7 +240,8 @@ func Test_DeleteAPIKey(t *testing.T) {
func Test_InvalidateUserKeyCache(t *testing.T) { func Test_InvalidateUserKeyCache(t *testing.T) {
is := assert.New(t) is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
service := NewAPIKeyService(store.APIKeyRepository(), store.User()) service := NewAPIKeyService(store.APIKeyRepository(), store.User())

View File

@@ -3,7 +3,6 @@ package archive
import ( import (
"archive/tar" "archive/tar"
"compress/gzip" "compress/gzip"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@@ -48,6 +47,18 @@ func TarGzDir(absolutePath string) (string, error) {
} }
func addToArchive(tarWriter *tar.Writer, pathInArchive string, path string, info os.FileInfo) error { func addToArchive(tarWriter *tar.Writer, pathInArchive string, path string, info os.FileInfo) error {
header, err := tar.FileInfoHeader(info, info.Name())
if err != nil {
return err
}
header.Name = pathInArchive // use relative paths in archive
err = tarWriter.WriteHeader(header)
if err != nil {
return err
}
if info.IsDir() { if info.IsDir() {
return nil return nil
} }
@@ -56,26 +67,6 @@ func addToArchive(tarWriter *tar.Writer, pathInArchive string, path string, info
if err != nil { if err != nil {
return err return err
} }
stat, err := file.Stat()
if err != nil {
return err
}
header, err := tar.FileInfoHeader(stat, stat.Name())
if err != nil {
return err
}
header.Name = pathInArchive // use relative paths in archive
err = tarWriter.WriteHeader(header)
if err != nil {
return err
}
if stat.IsDir() {
return nil
}
_, err = io.Copy(tarWriter, file) _, err = io.Copy(tarWriter, file)
return err return err
} }
@@ -93,7 +84,7 @@ func ExtractTarGz(r io.Reader, outputDirPath string) error {
for { for {
header, err := tarReader.Next() header, err := tarReader.Next()
if errors.Is(err, io.EOF) { if err == io.EOF {
break break
} }
@@ -106,7 +97,7 @@ func ExtractTarGz(r io.Reader, outputDirPath string) error {
// skip, dir will be created with a file // skip, dir will be created with a file
case tar.TypeReg: case tar.TypeReg:
p := filepath.Clean(filepath.Join(outputDirPath, header.Name)) p := filepath.Clean(filepath.Join(outputDirPath, header.Name))
if err := os.MkdirAll(filepath.Dir(p), 0o744); err != nil { if err := os.MkdirAll(filepath.Dir(p), 0744); err != nil {
return fmt.Errorf("Failed to extract dir %s", filepath.Dir(p)) return fmt.Errorf("Failed to extract dir %s", filepath.Dir(p))
} }
outFile, err := os.Create(p) outFile, err := os.Create(p)
@@ -118,7 +109,7 @@ func ExtractTarGz(r io.Reader, outputDirPath string) error {
} }
outFile.Close() outFile.Close()
default: default:
return fmt.Errorf("tar: unknown type: %v in %s", return fmt.Errorf("Tar: uknown type: %v in %s",
header.Typeflag, header.Typeflag,
header.Name) header.Name)
} }

View File

@@ -17,7 +17,7 @@ import (
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
const rwxr__r__ os.FileMode = 0o744 const rwxr__r__ os.FileMode = 0744
var filesToBackup = []string{ var filesToBackup = []string{
"certs", "certs",
@@ -30,7 +30,6 @@ var filesToBackup = []string{
"portainer.key", "portainer.key",
"portainer.pub", "portainer.pub",
"tls", "tls",
"chisel",
} }
// Creates a tar.gz system archive and encrypts it if password is not empty. Returns a path to the archive file. // Creates a tar.gz system archive and encrypts it if password is not empty. Returns a path to the archive file.
@@ -82,9 +81,14 @@ func CreateBackupArchive(password string, gate *offlinegate.OfflineGate, datasto
} }
func backupDb(backupDirPath string, datastore dataservices.DataStore) error { func backupDb(backupDirPath string, datastore dataservices.DataStore) error {
dbFileName := datastore.Connection().GetDatabaseFileName() backupWriter, err := os.Create(filepath.Join(backupDirPath, "portainer.db"))
_, err := datastore.Backup(filepath.Join(backupDirPath, dbFileName)) if err != nil {
return err return err
}
if err = datastore.BackupTo(backupWriter); err != nil {
return err
}
return backupWriter.Close()
} }
func encrypt(path string, passphrase string) (string, error) { func encrypt(path string, passphrase string) (string, error) {

View File

@@ -26,7 +26,7 @@ func RestoreArchive(archive io.Reader, password string, filestorePath string, ga
if password != "" { if password != "" {
archive, err = decrypt(archive, password) archive, err = decrypt(archive, password)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to decrypt the archive. Please ensure the password is correct and try again") return errors.Wrap(err, "failed to decrypt the archive")
} }
} }

View File

@@ -1,12 +1,9 @@
package build package build
import "runtime"
// Variables to be set during the build time // Variables to be set during the build time
var BuildNumber string var BuildNumber string
var ImageTag string var ImageTag string
var NodejsVersion string var NodejsVersion string
var YarnVersion string var YarnVersion string
var WebpackVersion string var WebpackVersion string
var GoVersion string = runtime.Version() var GoVersion string
var GitCommit string

View File

@@ -1,61 +0,0 @@
package crypto
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"math/big"
chshare "github.com/jpillora/chisel/share"
)
var one = new(big.Int).SetInt64(1)
// GenerateGo119CompatibleKey This function is basically copied from chshare.GenerateKey.
func GenerateGo119CompatibleKey(seed string) ([]byte, error) {
r := chshare.NewDetermRand([]byte(seed))
priv, err := ecdsaGenerateKey(elliptic.P256(), r)
if err != nil {
return nil, err
}
b, err := x509.MarshalECPrivateKey(priv)
if err != nil {
return nil, fmt.Errorf("Unable to marshal ECDSA private key: %w", err)
}
return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: b}), nil
}
// This function is copied from Go1.19
func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
params := c.Params()
// Note that for P-521 this will actually be 63 bits more than the order, as
// division rounds down, but the extra bit is inconsequential.
b := make([]byte, params.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return
}
k = new(big.Int).SetBytes(b)
n := new(big.Int).Sub(params.N, one)
k.Mod(k, n)
k.Add(k, one)
return
}
// This function is copied from Go1.19
func ecdsaGenerateKey(c elliptic.Curve, rand io.Reader) (*ecdsa.PrivateKey, error) {
k, err := randFieldElement(c, rand)
if err != nil {
return nil, err
}
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = c
priv.D = k
priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
return priv, nil
}

View File

@@ -1,37 +0,0 @@
package crypto
import (
"reflect"
"testing"
)
func TestGenerateGo119CompatibleKey(t *testing.T) {
type args struct {
seed string
}
tests := []struct {
name string
args args
want []byte
wantErr bool
}{
{
name: "Generate Go 1.19 compatible private key with a given seed",
args: args{seed: "94qh17MCIk8BOkiI"},
want: []byte("-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIHeohwk0Gy3RHVVViaHz7pz/HOiqA7fkv1FTM3mGgfT3oAoGCCqGSM49\nAwEHoUQDQgAEN7riX06xDsLNPuUmOvYFluNEakcFwZZRVvOcIYk/9VYnanDzW0Km\n8/BUUiKyJDuuGdS4fj9SlQ4iL8yBK01uKg==\n-----END EC PRIVATE KEY-----\n"),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GenerateGo119CompatibleKey(tt.args.seed)
if (err != nil) != tt.wantErr {
t.Errorf("GenerateGo119CompatibleKey() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GenerateGo119CompatibleKey()\ngot: Z %v\nwant: %v", got, tt.want)
}
})
}
}

View File

@@ -5,17 +5,6 @@ import (
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
) )
// EdgeJobs retrieves the edge jobs for the given environment
func (service *Service) EdgeJobs(endpointID portainer.EndpointID) []portainer.EdgeJob {
service.mu.RLock()
defer service.mu.RUnlock()
return append(
make([]portainer.EdgeJob, 0, len(service.edgeJobs[endpointID])),
service.edgeJobs[endpointID]...,
)
}
// AddEdgeJob register an EdgeJob inside the tunnel details associated to an environment(endpoint). // AddEdgeJob register an EdgeJob inside the tunnel details associated to an environment(endpoint).
func (service *Service) AddEdgeJob(endpoint *portainer.Endpoint, edgeJob *portainer.EdgeJob) { func (service *Service) AddEdgeJob(endpoint *portainer.Endpoint, edgeJob *portainer.EdgeJob) {
if endpoint.Edge.AsyncMode { if endpoint.Edge.AsyncMode {
@@ -23,40 +12,41 @@ func (service *Service) AddEdgeJob(endpoint *portainer.Endpoint, edgeJob *portai
} }
service.mu.Lock() service.mu.Lock()
defer service.mu.Unlock() tunnel := service.getTunnelDetails(endpoint.ID)
existingJobIndex := -1 existingJobIndex := -1
for idx, existingJob := range service.edgeJobs[endpoint.ID] { for idx, existingJob := range tunnel.Jobs {
if existingJob.ID == edgeJob.ID { if existingJob.ID == edgeJob.ID {
existingJobIndex = idx existingJobIndex = idx
break break
} }
} }
if existingJobIndex == -1 { if existingJobIndex == -1 {
service.edgeJobs[endpoint.ID] = append(service.edgeJobs[endpoint.ID], *edgeJob) tunnel.Jobs = append(tunnel.Jobs, *edgeJob)
} else { } else {
service.edgeJobs[endpoint.ID][existingJobIndex] = *edgeJob tunnel.Jobs[existingJobIndex] = *edgeJob
} }
cache.Del(endpoint.ID) cache.Del(endpoint.ID)
service.mu.Unlock()
} }
// RemoveEdgeJob will remove the specified Edge job from each tunnel it was registered with. // RemoveEdgeJob will remove the specified Edge job from each tunnel it was registered with.
func (service *Service) RemoveEdgeJob(edgeJobID portainer.EdgeJobID) { func (service *Service) RemoveEdgeJob(edgeJobID portainer.EdgeJobID) {
service.mu.Lock() service.mu.Lock()
for endpointID := range service.edgeJobs { for endpointID, tunnel := range service.tunnelDetailsMap {
n := 0 n := 0
for _, edgeJob := range service.edgeJobs[endpointID] { for _, edgeJob := range tunnel.Jobs {
if edgeJob.ID != edgeJobID { if edgeJob.ID != edgeJobID {
service.edgeJobs[endpointID][n] = edgeJob tunnel.Jobs[n] = edgeJob
n++ n++
} }
} }
service.edgeJobs[endpointID] = service.edgeJobs[endpointID][:n] tunnel.Jobs = tunnel.Jobs[:n]
cache.Del(endpointID) cache.Del(endpointID)
} }
@@ -66,17 +56,19 @@ func (service *Service) RemoveEdgeJob(edgeJobID portainer.EdgeJobID) {
func (service *Service) RemoveEdgeJobFromEndpoint(endpointID portainer.EndpointID, edgeJobID portainer.EdgeJobID) { func (service *Service) RemoveEdgeJobFromEndpoint(endpointID portainer.EndpointID, edgeJobID portainer.EdgeJobID) {
service.mu.Lock() service.mu.Lock()
defer service.mu.Unlock() tunnel := service.getTunnelDetails(endpointID)
n := 0 n := 0
for _, edgeJob := range service.edgeJobs[endpointID] { for _, edgeJob := range tunnel.Jobs {
if edgeJob.ID != edgeJobID { if edgeJob.ID != edgeJobID {
service.edgeJobs[endpointID][n] = edgeJob tunnel.Jobs[n] = edgeJob
n++ n++
} }
} }
service.edgeJobs[endpointID] = service.edgeJobs[endpointID][:n] tunnel.Jobs = tunnel.Jobs[:n]
cache.Del(endpointID) cache.Del(endpointID)
service.mu.Unlock()
} }

View File

@@ -3,7 +3,6 @@ package chisel
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"net/http" "net/http"
"sync" "sync"
"time" "time"
@@ -12,15 +11,15 @@ import (
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/http/proxy" "github.com/portainer/portainer/api/http/proxy"
"github.com/dchest/uniuri"
chserver "github.com/jpillora/chisel/server" chserver "github.com/jpillora/chisel/server"
"github.com/jpillora/chisel/share/ccrypto"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
const ( const (
tunnelCleanupInterval = 10 * time.Second tunnelCleanupInterval = 10 * time.Second
requiredTimeout = 15 * time.Second
activeTimeout = 4*time.Minute + 30*time.Second activeTimeout = 4*time.Minute + 30*time.Second
pingTimeout = 3 * time.Second
) )
// Service represents a service to manage the state of multiple reverse tunnels. // Service represents a service to manage the state of multiple reverse tunnels.
@@ -29,105 +28,69 @@ const (
type Service struct { type Service struct {
serverFingerprint string serverFingerprint string
serverPort string serverPort string
activeTunnels map[portainer.EndpointID]*portainer.TunnelDetails tunnelDetailsMap map[portainer.EndpointID]*portainer.TunnelDetails
edgeJobs map[portainer.EndpointID][]portainer.EdgeJob
dataStore dataservices.DataStore dataStore dataservices.DataStore
snapshotService portainer.SnapshotService snapshotService portainer.SnapshotService
chiselServer *chserver.Server chiselServer *chserver.Server
shutdownCtx context.Context shutdownCtx context.Context
ProxyManager *proxy.Manager ProxyManager *proxy.Manager
mu sync.RWMutex mu sync.Mutex
fileService portainer.FileService
defaultCheckinInterval int
} }
// NewService returns a pointer to a new instance of Service // NewService returns a pointer to a new instance of Service
func NewService(dataStore dataservices.DataStore, shutdownCtx context.Context, fileService portainer.FileService) *Service { func NewService(dataStore dataservices.DataStore, shutdownCtx context.Context) *Service {
defaultCheckinInterval := portainer.DefaultEdgeAgentCheckinIntervalInSeconds
settings, err := dataStore.Settings().Settings()
if err == nil {
defaultCheckinInterval = settings.EdgeAgentCheckinInterval
} else {
log.Error().Err(err).Msg("unable to retrieve the settings from the database")
}
return &Service{ return &Service{
activeTunnels: make(map[portainer.EndpointID]*portainer.TunnelDetails), tunnelDetailsMap: make(map[portainer.EndpointID]*portainer.TunnelDetails),
edgeJobs: make(map[portainer.EndpointID][]portainer.EdgeJob),
dataStore: dataStore, dataStore: dataStore,
shutdownCtx: shutdownCtx, shutdownCtx: shutdownCtx,
fileService: fileService,
defaultCheckinInterval: defaultCheckinInterval,
} }
} }
// pingAgent ping the given agent so that the agent can keep the tunnel alive // pingAgent ping the given agent so that the agent can keep the tunnel alive
func (service *Service) pingAgent(endpointID portainer.EndpointID) error { func (service *Service) pingAgent(endpointID portainer.EndpointID) error {
endpoint, err := service.dataStore.Endpoint().Endpoint(endpointID) tunnel := service.GetTunnelDetails(endpointID)
if err != nil { requestURL := fmt.Sprintf("http://127.0.0.1:%d/ping", tunnel.Port)
return err
}
tunnelAddr, err := service.TunnelAddr(endpoint)
if err != nil {
return err
}
requestURL := fmt.Sprintf("http://%s/ping", tunnelAddr)
req, err := http.NewRequest(http.MethodHead, requestURL, nil) req, err := http.NewRequest(http.MethodHead, requestURL, nil)
if err != nil { if err != nil {
return err return err
} }
httpClient := &http.Client{ httpClient := &http.Client{
Timeout: pingTimeout, Timeout: 3 * time.Second,
} }
_, err = httpClient.Do(req)
resp, err := httpClient.Do(req)
if err != nil {
return err return err
}
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return nil
} }
// KeepTunnelAlive keeps the tunnel of the given environment for maxAlive duration, or until ctx is done // KeepTunnelAlive keeps the tunnel of the given environment for maxAlive duration, or until ctx is done
func (service *Service) KeepTunnelAlive(endpointID portainer.EndpointID, ctx context.Context, maxAlive time.Duration) { func (service *Service) KeepTunnelAlive(endpointID portainer.EndpointID, ctx context.Context, maxAlive time.Duration) {
go service.keepTunnelAlive(endpointID, ctx, maxAlive) go func() {
}
func (service *Service) keepTunnelAlive(endpointID portainer.EndpointID, ctx context.Context, maxAlive time.Duration) {
log.Debug(). log.Debug().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Float64("max_alive_minutes", maxAlive.Minutes()). Float64("max_alive_minutes", maxAlive.Minutes()).
Msg("KeepTunnelAlive: start") Msg("start")
maxAliveTicker := time.NewTicker(maxAlive) maxAliveTicker := time.NewTicker(maxAlive)
defer maxAliveTicker.Stop() defer maxAliveTicker.Stop()
pingTicker := time.NewTicker(tunnelCleanupInterval) pingTicker := time.NewTicker(tunnelCleanupInterval)
defer pingTicker.Stop() defer pingTicker.Stop()
for { for {
select { select {
case <-pingTicker.C: case <-pingTicker.C:
service.UpdateLastActivity(endpointID) service.SetTunnelStatusToActive(endpointID)
err := service.pingAgent(endpointID)
if err := service.pingAgent(endpointID); err != nil { if err != nil {
log.Debug(). log.Debug().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Err(err). Err(err).
Msg("KeepTunnelAlive: ping agent") Msg("ping agent")
} }
case <-maxAliveTicker.C: case <-maxAliveTicker.C:
log.Debug(). log.Debug().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Float64("timeout_minutes", maxAlive.Minutes()). Float64("timeout_minutes", maxAlive.Minutes()).
Msg("KeepTunnelAlive: tunnel keep alive timeout") Msg("tunnel keep alive timeout")
return return
case <-ctx.Done(): case <-ctx.Done():
@@ -135,11 +98,12 @@ func (service *Service) keepTunnelAlive(endpointID portainer.EndpointID, ctx con
log.Debug(). log.Debug().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Err(err). Err(err).
Msg("KeepTunnelAlive: tunnel stop") Msg("tunnel stop")
return return
} }
} }
}()
} }
// StartTunnelServer starts a tunnel server on the specified addr and port. // StartTunnelServer starts a tunnel server on the specified addr and port.
@@ -148,14 +112,14 @@ func (service *Service) keepTunnelAlive(endpointID portainer.EndpointID, ctx con
// It starts the tunnel status verification process in the background. // It starts the tunnel status verification process in the background.
// The snapshotter is used in the tunnel status verification process. // The snapshotter is used in the tunnel status verification process.
func (service *Service) StartTunnelServer(addr, port string, snapshotService portainer.SnapshotService) error { func (service *Service) StartTunnelServer(addr, port string, snapshotService portainer.SnapshotService) error {
privateKeyFile, err := service.retrievePrivateKeyFile() keySeed, err := service.retrievePrivateKeySeed()
if err != nil { if err != nil {
return err return err
} }
config := &chserver.Config{ config := &chserver.Config{
Reverse: true, Reverse: true,
KeyFile: privateKeyFile, KeySeed: keySeed,
} }
chiselServer, err := chserver.NewServer(config) chiselServer, err := chserver.NewServer(config)
@@ -166,21 +130,21 @@ func (service *Service) StartTunnelServer(addr, port string, snapshotService por
service.serverFingerprint = chiselServer.GetFingerprint() service.serverFingerprint = chiselServer.GetFingerprint()
service.serverPort = port service.serverPort = port
if err := chiselServer.Start(addr, port); err != nil { err = chiselServer.Start(addr, port)
if err != nil {
return err return err
} }
service.chiselServer = chiselServer service.chiselServer = chiselServer
// TODO: work-around Chisel default behavior. // TODO: work-around Chisel default behavior.
// By default, Chisel will allow anyone to connect if no user exists. // By default, Chisel will allow anyone to connect if no user exists.
username, password := generateRandomCredentials() username, password := generateRandomCredentials()
if err = service.chiselServer.AddUser(username, password, "127.0.0.1"); err != nil { err = service.chiselServer.AddUser(username, password, "127.0.0.1")
if err != nil {
return err return err
} }
service.snapshotService = snapshotService service.snapshotService = snapshotService
go service.startTunnelVerificationLoop() go service.startTunnelVerificationLoop()
return nil return nil
@@ -191,43 +155,26 @@ func (service *Service) StopTunnelServer() error {
return service.chiselServer.Close() return service.chiselServer.Close()
} }
func (service *Service) retrievePrivateKeyFile() (string, error) { func (service *Service) retrievePrivateKeySeed() (string, error) {
privateKeyFile := service.fileService.GetDefaultChiselPrivateKeyPath() var serverInfo *portainer.TunnelServerInfo
if exists, _ := service.fileService.FileExists(privateKeyFile); exists { serverInfo, err := service.dataStore.TunnelServer().Info()
log.Info(). if service.dataStore.IsErrObjectNotFound(err) {
Str("private-key", privateKeyFile). keySeed := uniuri.NewLen(16)
Msg("found Chisel private key file on disk")
return privateKeyFile, nil serverInfo = &portainer.TunnelServerInfo{
PrivateKeySeed: keySeed,
} }
log.Debug(). err := service.dataStore.TunnelServer().UpdateInfo(serverInfo)
Str("private-key", privateKeyFile).
Msg("chisel private key file does not exist")
privateKey, err := ccrypto.GenerateKey("")
if err != nil { if err != nil {
log.Error(). return "", err
Err(err). }
Msg("failed to generate chisel private key") } else if err != nil {
return "", err return "", err
} }
if err = service.fileService.StoreChiselPrivateKey(privateKey); err != nil { return serverInfo.PrivateKeySeed, nil
log.Error().
Err(err).
Msg("failed to save Chisel private key to disk")
return "", err
}
log.Info().
Str("private-key", privateKeyFile).
Msg("generated a new Chisel private key file")
return privateKeyFile, nil
} }
func (service *Service) startTunnelVerificationLoop() { func (service *Service) startTunnelVerificationLoop() {
@@ -254,45 +201,63 @@ func (service *Service) startTunnelVerificationLoop() {
} }
} }
// checkTunnels finds the first tunnel that has not had any activity recently
// and attempts to take a snapshot, then closes it and returns
func (service *Service) checkTunnels() { func (service *Service) checkTunnels() {
service.mu.RLock() tunnels := make(map[portainer.EndpointID]portainer.TunnelDetails)
for endpointID, tunnel := range service.activeTunnels { service.mu.Lock()
elapsed := time.Since(tunnel.LastActivity) for key, tunnel := range service.tunnelDetailsMap {
log.Debug(). if tunnel.LastActivity.IsZero() || tunnel.Status == portainer.EdgeAgentIdle {
Int("endpoint_id", int(endpointID)).
Float64("last_activity_seconds", elapsed.Seconds()).
Msg("environment tunnel monitoring")
if tunnel.Status == portainer.EdgeAgentManagementRequired && elapsed < activeTimeout {
continue continue
} }
tunnelPort := tunnel.Port if tunnel.Status == portainer.EdgeAgentManagementRequired && time.Since(tunnel.LastActivity) < requiredTimeout {
continue
}
service.mu.RUnlock() if tunnel.Status == portainer.EdgeAgentActive && time.Since(tunnel.LastActivity) < activeTimeout {
continue
}
tunnels[key] = *tunnel
}
service.mu.Unlock()
for endpointID, tunnel := range tunnels {
elapsed := time.Since(tunnel.LastActivity)
log.Debug(). log.Debug().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Float64("last_activity_seconds", elapsed.Seconds()). Str("status", tunnel.Status).
Float64("timeout_seconds", activeTimeout.Seconds()). Float64("status_time_seconds", elapsed.Seconds()).
Msg("last activity timeout exceeded") Msg("environment tunnel monitoring")
if err := service.snapshotEnvironment(endpointID, tunnelPort); err != nil { if tunnel.Status == portainer.EdgeAgentManagementRequired && elapsed > requiredTimeout {
log.Debug().
Int("endpoint_id", int(endpointID)).
Str("status", tunnel.Status).
Float64("status_time_seconds", elapsed.Seconds()).
Float64("timeout_seconds", requiredTimeout.Seconds()).
Msg("REQUIRED state timeout exceeded")
}
if tunnel.Status == portainer.EdgeAgentActive && elapsed > activeTimeout {
log.Debug().
Int("endpoint_id", int(endpointID)).
Str("status", tunnel.Status).
Float64("status_time_seconds", elapsed.Seconds()).
Float64("timeout_seconds", activeTimeout.Seconds()).
Msg("ACTIVE state timeout exceeded")
err := service.snapshotEnvironment(endpointID, tunnel.Port)
if err != nil {
log.Error(). log.Error().
Int("endpoint_id", int(endpointID)). Int("endpoint_id", int(endpointID)).
Err(err). Err(err).
Msg("unable to snapshot Edge environment") Msg("unable to snapshot Edge environment")
} }
service.close(portainer.EndpointID(endpointID))
return
} }
service.mu.RUnlock() service.SetTunnelStatusToIdle(portainer.EndpointID(endpointID))
}
} }
func (service *Service) snapshotEnvironment(endpointID portainer.EndpointID, tunnelPort int) error { func (service *Service) snapshotEnvironment(endpointID portainer.EndpointID, tunnelPort int) error {
@@ -301,7 +266,14 @@ func (service *Service) snapshotEnvironment(endpointID portainer.EndpointID, tun
return err return err
} }
endpoint.URL = fmt.Sprintf("tcp://127.0.0.1:%d", tunnelPort) endpointURL := endpoint.URL
return service.snapshotService.SnapshotEndpoint(endpoint) endpoint.URL = fmt.Sprintf("tcp://127.0.0.1:%d", tunnelPort)
err = service.snapshotService.SnapshotEndpoint(endpoint)
if err != nil {
return err
}
endpoint.URL = endpointURL
return service.dataStore.Endpoint().UpdateEndpoint(endpoint.ID, endpoint)
} }

View File

@@ -1,48 +0,0 @@
package chisel
import (
"net"
"net/http"
"testing"
"time"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/stretchr/testify/require"
)
func TestPingAgentPanic(t *testing.T) {
endpoint := &portainer.Endpoint{
ID: 1,
EdgeID: "test-edge-id",
Type: portainer.EdgeAgentOnDockerEnvironment,
UserTrusted: true,
}
_, store := datastore.MustNewTestStore(t, true, true)
s := NewService(store, nil, nil)
defer func() {
require.Nil(t, recover())
}()
mux := http.NewServeMux()
mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
time.Sleep(pingTimeout + 1*time.Second)
})
ln, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
require.NoError(t, err)
go func() {
require.NoError(t, http.Serve(ln, mux))
}()
err = s.Open(endpoint)
require.NoError(t, err)
s.activeTunnels[endpoint.ID].Port = ln.Addr().(*net.TCPAddr).Port
require.Error(t, s.pingAgent(endpoint.ID))
}

View File

@@ -5,18 +5,14 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/rand" "math/rand"
"net"
"strings" "strings"
"time" "time"
"github.com/portainer/libcrypto"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/internal/edge"
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/pkg/libcrypto"
"github.com/dchest/uniuri" "github.com/dchest/uniuri"
"github.com/rs/zerolog/log"
) )
const ( const (
@@ -24,191 +20,18 @@ const (
maxAvailablePort = 65535 maxAvailablePort = 65535
) )
var (
ErrNonEdgeEnv = errors.New("cannot open a tunnel for non-edge environments")
ErrAsyncEnv = errors.New("cannot open a tunnel for async edge environments")
ErrInvalidEnv = errors.New("cannot open a tunnel for an invalid environment")
)
// Open will mark the tunnel as REQUIRED so the agent opens it
func (s *Service) Open(endpoint *portainer.Endpoint) error {
if !endpointutils.IsEdgeEndpoint(endpoint) {
return ErrNonEdgeEnv
}
if endpoint.Edge.AsyncMode {
return ErrAsyncEnv
}
if endpoint.ID == 0 || endpoint.EdgeID == "" || !endpoint.UserTrusted {
return ErrInvalidEnv
}
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.activeTunnels[endpoint.ID]; ok {
return nil
}
defer cache.Del(endpoint.ID)
tun := &portainer.TunnelDetails{
Status: portainer.EdgeAgentManagementRequired,
Port: s.getUnusedPort(),
LastActivity: time.Now(),
}
username, password := generateRandomCredentials()
if s.chiselServer != nil {
authorizedRemote := fmt.Sprintf("^R:0.0.0.0:%d$", tun.Port)
if err := s.chiselServer.AddUser(username, password, authorizedRemote); err != nil {
return err
}
}
credentials, err := encryptCredentials(username, password, endpoint.EdgeID)
if err != nil {
return err
}
tun.Credentials = credentials
s.activeTunnels[endpoint.ID] = tun
return nil
}
// close removes the tunnel from the map so the agent will close it
func (s *Service) close(endpointID portainer.EndpointID) {
s.mu.Lock()
defer s.mu.Unlock()
tun, ok := s.activeTunnels[endpointID]
if !ok {
return
}
if len(tun.Credentials) > 0 && s.chiselServer != nil {
user, _, _ := strings.Cut(tun.Credentials, ":")
s.chiselServer.DeleteUser(user)
}
if s.ProxyManager != nil {
s.ProxyManager.DeleteEndpointProxy(endpointID)
}
delete(s.activeTunnels, endpointID)
cache.Del(endpointID)
}
// Config returns the tunnel details needed for the agent to connect
func (s *Service) Config(endpointID portainer.EndpointID) portainer.TunnelDetails {
s.mu.RLock()
defer s.mu.RUnlock()
if tun, ok := s.activeTunnels[endpointID]; ok {
return *tun
}
return portainer.TunnelDetails{Status: portainer.EdgeAgentIdle}
}
// TunnelAddr returns the address of the local tunnel, including the port, it
// will block until the tunnel is ready
func (s *Service) TunnelAddr(endpoint *portainer.Endpoint) (string, error) {
if err := s.Open(endpoint); err != nil {
return "", err
}
tun := s.Config(endpoint.ID)
checkinInterval := time.Duration(s.tryEffectiveCheckinInterval(endpoint)) * time.Second
for t0 := time.Now(); ; {
if time.Since(t0) > 2*checkinInterval {
s.close(endpoint.ID)
return "", errors.New("unable to open the tunnel")
}
// Check if the tunnel is established
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: tun.Port})
if err != nil {
time.Sleep(checkinInterval / 100)
continue
}
conn.Close()
break
}
s.UpdateLastActivity(endpoint.ID)
return fmt.Sprintf("127.0.0.1:%d", tun.Port), nil
}
// tryEffectiveCheckinInterval avoids a potential deadlock by returning a
// previous known value after a timeout
func (s *Service) tryEffectiveCheckinInterval(endpoint *portainer.Endpoint) int {
ch := make(chan int, 1)
go func() {
ch <- edge.EffectiveCheckinInterval(s.dataStore, endpoint)
}()
select {
case <-time.After(50 * time.Millisecond):
s.mu.RLock()
defer s.mu.RUnlock()
return s.defaultCheckinInterval
case i := <-ch:
s.mu.Lock()
s.defaultCheckinInterval = i
s.mu.Unlock()
return i
}
}
// UpdateLastActivity sets the current timestamp to avoid the tunnel timeout
func (s *Service) UpdateLastActivity(endpointID portainer.EndpointID) {
s.mu.Lock()
defer s.mu.Unlock()
if tun, ok := s.activeTunnels[endpointID]; ok {
tun.LastActivity = time.Now()
}
}
// NOTE: it needs to be called with the lock acquired // NOTE: it needs to be called with the lock acquired
// getUnusedPort is used to generate an unused random port in the dynamic port range. // getUnusedPort is used to generate an unused random port in the dynamic port range.
// Dynamic ports (also called private ports) are 49152 to 65535. // Dynamic ports (also called private ports) are 49152 to 65535.
func (service *Service) getUnusedPort() int { func (service *Service) getUnusedPort() int {
port := randomInt(minAvailablePort, maxAvailablePort) port := randomInt(minAvailablePort, maxAvailablePort)
for _, tunnel := range service.activeTunnels { for _, tunnel := range service.tunnelDetailsMap {
if tunnel.Port == port { if tunnel.Port == port {
return service.getUnusedPort() return service.getUnusedPort()
} }
} }
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
conn.Close()
log.Debug().
Int("port", port).
Msg("selected port is in use, trying a different one")
return service.getUnusedPort()
}
return port return port
} }
@@ -216,10 +39,146 @@ func randomInt(min, max int) int {
return min + rand.Intn(max-min) return min + rand.Intn(max-min)
} }
// NOTE: it needs to be called with the lock acquired
func (service *Service) getTunnelDetails(endpointID portainer.EndpointID) *portainer.TunnelDetails {
if tunnel, ok := service.tunnelDetailsMap[endpointID]; ok {
return tunnel
}
tunnel := &portainer.TunnelDetails{
Status: portainer.EdgeAgentIdle,
}
service.tunnelDetailsMap[endpointID] = tunnel
cache.Del(endpointID)
return tunnel
}
// GetTunnelDetails returns information about the tunnel associated to an environment(endpoint).
func (service *Service) GetTunnelDetails(endpointID portainer.EndpointID) portainer.TunnelDetails {
service.mu.Lock()
defer service.mu.Unlock()
return *service.getTunnelDetails(endpointID)
}
// GetActiveTunnel retrieves an active tunnel which allows communicating with edge agent
func (service *Service) GetActiveTunnel(endpoint *portainer.Endpoint) (portainer.TunnelDetails, error) {
if endpoint.Edge.AsyncMode {
return portainer.TunnelDetails{}, errors.New("cannot open tunnel on async endpoint")
}
tunnel := service.GetTunnelDetails(endpoint.ID)
if tunnel.Status == portainer.EdgeAgentActive {
// update the LastActivity
service.SetTunnelStatusToActive(endpoint.ID)
}
if tunnel.Status == portainer.EdgeAgentIdle || tunnel.Status == portainer.EdgeAgentManagementRequired {
err := service.SetTunnelStatusToRequired(endpoint.ID)
if err != nil {
return portainer.TunnelDetails{}, fmt.Errorf("failed opening tunnel to endpoint: %w", err)
}
if endpoint.EdgeCheckinInterval == 0 {
settings, err := service.dataStore.Settings().Settings()
if err != nil {
return portainer.TunnelDetails{}, fmt.Errorf("failed fetching settings from db: %w", err)
}
endpoint.EdgeCheckinInterval = settings.EdgeAgentCheckinInterval
}
time.Sleep(2 * time.Duration(endpoint.EdgeCheckinInterval) * time.Second)
}
return service.GetTunnelDetails(endpoint.ID), nil
}
// SetTunnelStatusToActive update the status of the tunnel associated to the specified environment(endpoint).
// It sets the status to ACTIVE.
func (service *Service) SetTunnelStatusToActive(endpointID portainer.EndpointID) {
service.mu.Lock()
tunnel := service.getTunnelDetails(endpointID)
tunnel.Status = portainer.EdgeAgentActive
tunnel.Credentials = ""
tunnel.LastActivity = time.Now()
service.mu.Unlock()
cache.Del(endpointID)
}
// SetTunnelStatusToIdle update the status of the tunnel associated to the specified environment(endpoint).
// It sets the status to IDLE.
// It removes any existing credentials associated to the tunnel.
func (service *Service) SetTunnelStatusToIdle(endpointID portainer.EndpointID) {
service.mu.Lock()
tunnel := service.getTunnelDetails(endpointID)
tunnel.Status = portainer.EdgeAgentIdle
tunnel.Port = 0
tunnel.LastActivity = time.Now()
credentials := tunnel.Credentials
if credentials != "" {
tunnel.Credentials = ""
service.chiselServer.DeleteUser(strings.Split(credentials, ":")[0])
}
service.ProxyManager.DeleteEndpointProxy(endpointID)
service.mu.Unlock()
cache.Del(endpointID)
}
// SetTunnelStatusToRequired update the status of the tunnel associated to the specified environment(endpoint).
// It sets the status to REQUIRED.
// If no port is currently associated to the tunnel, it will associate a random unused port to the tunnel
// and generate temporary credentials that can be used to establish a reverse tunnel on that port.
// Credentials are encrypted using the Edge ID associated to the environment(endpoint).
func (service *Service) SetTunnelStatusToRequired(endpointID portainer.EndpointID) error {
defer cache.Del(endpointID)
tunnel := service.getTunnelDetails(endpointID)
service.mu.Lock()
defer service.mu.Unlock()
if tunnel.Port == 0 {
endpoint, err := service.dataStore.Endpoint().Endpoint(endpointID)
if err != nil {
return err
}
tunnel.Status = portainer.EdgeAgentManagementRequired
tunnel.Port = service.getUnusedPort()
tunnel.LastActivity = time.Now()
username, password := generateRandomCredentials()
authorizedRemote := fmt.Sprintf("^R:0.0.0.0:%d$", tunnel.Port)
err = service.chiselServer.AddUser(username, password, authorizedRemote)
if err != nil {
return err
}
credentials, err := encryptCredentials(username, password, endpoint.EdgeID)
if err != nil {
return err
}
tunnel.Credentials = credentials
}
return nil
}
func generateRandomCredentials() (string, string) { func generateRandomCredentials() (string, string) {
username := uniuri.NewLen(8) username := uniuri.NewLen(8)
password := uniuri.NewLen(8) password := uniuri.NewLen(8)
return username, password return username, password
} }

View File

@@ -49,7 +49,7 @@ func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
SSL: kingpin.Flag("ssl", "Secure Portainer instance using SSL (deprecated)").Default(defaultSSL).Bool(), SSL: kingpin.Flag("ssl", "Secure Portainer instance using SSL (deprecated)").Default(defaultSSL).Bool(),
SSLCert: kingpin.Flag("sslcert", "Path to the SSL certificate used to secure the Portainer instance").String(), SSLCert: kingpin.Flag("sslcert", "Path to the SSL certificate used to secure the Portainer instance").String(),
SSLKey: kingpin.Flag("sslkey", "Path to the SSL key used to secure the Portainer instance").String(), SSLKey: kingpin.Flag("sslkey", "Path to the SSL key used to secure the Portainer instance").String(),
Rollback: kingpin.Flag("rollback", "Rollback the database to the previous backup").Bool(), Rollback: kingpin.Flag("rollback", "Rollback the database store to the previous version").Bool(),
SnapshotInterval: kingpin.Flag("snapshot-interval", "Duration between each environment snapshot job").String(), SnapshotInterval: kingpin.Flag("snapshot-interval", "Duration between each environment snapshot job").String(),
AdminPassword: kingpin.Flag("admin-password", "Set admin password with provided hash").String(), AdminPassword: kingpin.Flag("admin-password", "Set admin password with provided hash").String(),
AdminPasswordFile: kingpin.Flag("admin-password-file", "Path to the file containing the password for the admin user").String(), AdminPasswordFile: kingpin.Flag("admin-password-file", "Path to the file containing the password for the admin user").String(),
@@ -62,7 +62,7 @@ func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
MaxBatchDelay: kingpin.Flag("max-batch-delay", "Maximum delay before a batch starts").Duration(), MaxBatchDelay: kingpin.Flag("max-batch-delay", "Maximum delay before a batch starts").Duration(),
SecretKeyName: kingpin.Flag("secret-key-name", "Secret key name for encryption and will be used as /run/secrets/<secret-key-name>.").Default(defaultSecretKeyName).String(), SecretKeyName: kingpin.Flag("secret-key-name", "Secret key name for encryption and will be used as /run/secrets/<secret-key-name>.").Default(defaultSecretKeyName).String(),
LogLevel: kingpin.Flag("log-level", "Set the minimum logging level to show").Default("INFO").Enum("DEBUG", "INFO", "WARN", "ERROR"), LogLevel: kingpin.Flag("log-level", "Set the minimum logging level to show").Default("INFO").Enum("DEBUG", "INFO", "WARN", "ERROR"),
LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("NOCOLOR", "PRETTY", "JSON"), LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("PRETTY", "JSON"),
} }
kingpin.Parse() kingpin.Parse()
@@ -72,7 +72,6 @@ func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
*flags.Assets = filepath.Join(filepath.Dir(ex), *flags.Assets) *flags.Assets = filepath.Join(filepath.Dir(ex), *flags.Assets)
} }
@@ -81,6 +80,7 @@ func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
// ValidateFlags validates the values of the flags. // ValidateFlags validates the values of the flags.
func (*Service) ValidateFlags(flags *portainer.CLIFlags) error { func (*Service) ValidateFlags(flags *portainer.CLIFlags) error {
displayDeprecationWarnings(flags) displayDeprecationWarnings(flags)
err := validateEndpointURL(*flags.EndpointURL) err := validateEndpointURL(*flags.EndpointURL)
@@ -111,10 +111,7 @@ func displayDeprecationWarnings(flags *portainer.CLIFlags) {
} }
func validateEndpointURL(endpointURL string) error { func validateEndpointURL(endpointURL string) error {
if endpointURL == "" { if endpointURL != "" {
return nil
}
if !strings.HasPrefix(endpointURL, "unix://") && !strings.HasPrefix(endpointURL, "tcp://") && !strings.HasPrefix(endpointURL, "npipe://") { if !strings.HasPrefix(endpointURL, "unix://") && !strings.HasPrefix(endpointURL, "tcp://") && !strings.HasPrefix(endpointURL, "npipe://") {
return errInvalidEndpointProtocol return errInvalidEndpointProtocol
} }
@@ -126,23 +123,19 @@ func validateEndpointURL(endpointURL string) error {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errSocketOrNamedPipeNotFound return errSocketOrNamedPipeNotFound
} }
return err return err
} }
} }
}
return nil return nil
} }
func validateSnapshotInterval(snapshotInterval string) error { func validateSnapshotInterval(snapshotInterval string) error {
if snapshotInterval == "" { if snapshotInterval != "" {
return nil
}
_, err := time.ParseDuration(snapshotInterval) _, err := time.ParseDuration(snapshotInterval)
if err != nil { if err != nil {
return errInvalidSnapshotInterval return errInvalidSnapshotInterval
} }
}
return nil return nil
} }

View File

@@ -9,17 +9,16 @@ import (
// Confirm starts a rollback db cli application // Confirm starts a rollback db cli application
func Confirm(message string) (bool, error) { func Confirm(message string) (bool, error) {
fmt.Printf("%s [y/N] ", message) fmt.Printf("%s [y/N]", message)
reader := bufio.NewReader(os.Stdin) reader := bufio.NewReader(os.Stdin)
answer, err := reader.ReadString('\n') answer, err := reader.ReadString('\n')
if err != nil { if err != nil {
return false, err return false, err
} }
answer = strings.Replace(answer, "\n", "", -1)
answer = strings.ReplaceAll(answer, "\n", "")
answer = strings.ToLower(answer) answer = strings.ToLower(answer)
return answer == "y" || answer == "yes", nil return answer == "y" || answer == "yes", nil
} }

View File

@@ -39,16 +39,9 @@ func setLoggingMode(mode string) {
case "PRETTY": case "PRETTY":
log.Logger = log.Output(zerolog.ConsoleWriter{ log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stderr, Out: os.Stderr,
TimeFormat: "2006/01/02 03:04PM",
FormatMessage: formatMessage,
})
case "NOCOLOR":
log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stderr,
TimeFormat: "2006/01/02 03:04PM",
FormatMessage: formatMessage,
NoColor: true, NoColor: true,
}) TimeFormat: "2006/01/02 03:04PM",
FormatMessage: formatMessage})
case "JSON": case "JSON":
log.Logger = log.Output(os.Stderr) log.Logger = log.Output(os.Stderr)
} }
@@ -58,6 +51,5 @@ func formatMessage(i interface{}) string {
if i == nil { if i == nil {
return "" return ""
} }
return fmt.Sprintf("%s |", i) return fmt.Sprintf("%s |", i)
} }

View File

@@ -3,10 +3,14 @@ package main
import ( import (
"context" "context"
"crypto/sha256" "crypto/sha256"
"math/rand"
"os" "os"
"path" "path"
"strings" "strings"
"time"
libstack "github.com/portainer/docker-compose-wrapper"
"github.com/portainer/docker-compose-wrapper/compose"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/apikey" "github.com/portainer/portainer/api/apikey"
"github.com/portainer/portainer/api/build" "github.com/portainer/portainer/api/build"
@@ -18,22 +22,19 @@ import (
"github.com/portainer/portainer/api/database/models" "github.com/portainer/portainer/api/database/models"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/datastore" "github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/datastore/migrator"
"github.com/portainer/portainer/api/datastore/postinit"
"github.com/portainer/portainer/api/demo" "github.com/portainer/portainer/api/demo"
"github.com/portainer/portainer/api/docker" "github.com/portainer/portainer/api/docker"
dockerclient "github.com/portainer/portainer/api/docker/client"
"github.com/portainer/portainer/api/exec" "github.com/portainer/portainer/api/exec"
"github.com/portainer/portainer/api/filesystem" "github.com/portainer/portainer/api/filesystem"
"github.com/portainer/portainer/api/git" "github.com/portainer/portainer/api/git"
"github.com/portainer/portainer/api/hostmanagement/openamt" "github.com/portainer/portainer/api/hostmanagement/openamt"
"github.com/portainer/portainer/api/http" "github.com/portainer/portainer/api/http"
"github.com/portainer/portainer/api/http/client"
"github.com/portainer/portainer/api/http/proxy" "github.com/portainer/portainer/api/http/proxy"
kubeproxy "github.com/portainer/portainer/api/http/proxy/factory/kubernetes" kubeproxy "github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/internal/authorization" "github.com/portainer/portainer/api/internal/authorization"
"github.com/portainer/portainer/api/internal/edge" "github.com/portainer/portainer/api/internal/edge"
"github.com/portainer/portainer/api/internal/edge/edgestacks" "github.com/portainer/portainer/api/internal/edge/edgestacks"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/api/internal/snapshot" "github.com/portainer/portainer/api/internal/snapshot"
"github.com/portainer/portainer/api/internal/ssl" "github.com/portainer/portainer/api/internal/ssl"
"github.com/portainer/portainer/api/internal/upgrade" "github.com/portainer/portainer/api/internal/upgrade"
@@ -42,13 +43,10 @@ import (
kubecli "github.com/portainer/portainer/api/kubernetes/cli" kubecli "github.com/portainer/portainer/api/kubernetes/cli"
"github.com/portainer/portainer/api/ldap" "github.com/portainer/portainer/api/ldap"
"github.com/portainer/portainer/api/oauth" "github.com/portainer/portainer/api/oauth"
"github.com/portainer/portainer/api/pendingactions"
"github.com/portainer/portainer/api/scheduler" "github.com/portainer/portainer/api/scheduler"
"github.com/portainer/portainer/api/stacks/deployments" "github.com/portainer/portainer/api/stacks/deployments"
"github.com/portainer/portainer/pkg/featureflags" "github.com/portainer/portainer/pkg/featureflags"
"github.com/portainer/portainer/pkg/libhelm" "github.com/portainer/portainer/pkg/libhelm"
"github.com/portainer/portainer/pkg/libstack"
"github.com/portainer/portainer/pkg/libstack/compose"
"github.com/gofrs/uuid" "github.com/gofrs/uuid"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
@@ -120,15 +118,11 @@ func initDataStore(flags *portainer.CLIFlags, secretKey []byte, fileService port
log.Fatal().Err(err).Msg("failed generating instance id") log.Fatal().Err(err).Msg("failed generating instance id")
} }
migratorInstance := migrator.NewMigrator(&migrator.MigratorParameters{})
migratorCount := migratorInstance.GetMigratorCountOfCurrentAPIVersion()
// from MigrateData // from MigrateData
v := models.Version{ v := models.Version{
SchemaVersion: portainer.APIVersion, SchemaVersion: portainer.APIVersion,
Edition: int(portainer.PortainerCE), Edition: int(portainer.PortainerCE),
InstanceID: instanceId.String(), InstanceID: instanceId.String(),
MigratorCount: migratorCount,
} }
store.VersionService.UpdateVersion(&v) store.VersionService.UpdateVersion(&v)
@@ -157,17 +151,7 @@ func initDataStore(flags *portainer.CLIFlags, secretKey []byte, fileService port
return store return store
} }
// checkDBSchemaServerVersionMatch checks if the server version matches the db scehma version func initComposeStackManager(composeDeployer libstack.Deployer, reverseTunnelService portainer.ReverseTunnelService, proxyManager *proxy.Manager) portainer.ComposeStackManager {
// checkDBSchemaServerVersionMatch reports whether the schema version and
// edition recorded in the database match the running server's version and
// edition. Any error reading the stored version is treated as a mismatch.
func checkDBSchemaServerVersionMatch(dbStore dataservices.DataStore, serverVersion string, serverEdition int) bool {
	storedVersion, err := dbStore.Version().Version()
	if err != nil {
		return false
	}

	return storedVersion.Edition == serverEdition && storedVersion.SchemaVersion == serverVersion
}
func initComposeStackManager(composeDeployer libstack.Deployer, proxyManager *proxy.Manager) portainer.ComposeStackManager {
composeWrapper, err := exec.NewComposeStackManager(composeDeployer, proxyManager) composeWrapper, err := exec.NewComposeStackManager(composeDeployer, proxyManager)
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("failed creating compose manager") log.Fatal().Err(err).Msg("failed creating compose manager")
@@ -199,7 +183,7 @@ func initAPIKeyService(datastore dataservices.DataStore) apikey.APIKeyService {
return apikey.NewAPIKeyService(datastore.APIKeyRepository(), datastore.User()) return apikey.NewAPIKeyService(datastore.APIKeyRepository(), datastore.User())
} }
func initJWTService(userSessionTimeout string, dataStore dataservices.DataStore) (portainer.JWTService, error) { func initJWTService(userSessionTimeout string, dataStore dataservices.DataStore) (dataservices.JWTService, error) {
if userSessionTimeout == "" { if userSessionTimeout == "" {
userSessionTimeout = portainer.DefaultUserSessionTimeout userSessionTimeout = portainer.DefaultUserSessionTimeout
} }
@@ -249,8 +233,8 @@ func initSSLService(addr, certPath, keyPath string, fileService portainer.FileSe
return sslService, nil return sslService, nil
} }
func initDockerClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService) *dockerclient.ClientFactory { func initDockerClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService) *docker.ClientFactory {
return dockerclient.NewClientFactory(signatureService, reverseTunnelService) return docker.NewClientFactory(signatureService, reverseTunnelService)
} }
func initKubernetesClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, dataStore dataservices.DataStore, instanceID, addrHTTPS, userSessionTimeout string) (*kubecli.ClientFactory, error) { func initKubernetesClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, dataStore dataservices.DataStore, instanceID, addrHTTPS, userSessionTimeout string) (*kubecli.ClientFactory, error) {
@@ -260,15 +244,14 @@ func initKubernetesClientFactory(signatureService portainer.DigitalSignatureServ
func initSnapshotService( func initSnapshotService(
snapshotIntervalFromFlag string, snapshotIntervalFromFlag string,
dataStore dataservices.DataStore, dataStore dataservices.DataStore,
dockerClientFactory *dockerclient.ClientFactory, dockerClientFactory *docker.ClientFactory,
kubernetesClientFactory *kubecli.ClientFactory, kubernetesClientFactory *kubecli.ClientFactory,
shutdownCtx context.Context, shutdownCtx context.Context,
pendingActionsService *pendingactions.PendingActionsService,
) (portainer.SnapshotService, error) { ) (portainer.SnapshotService, error) {
dockerSnapshotter := docker.NewSnapshotter(dockerClientFactory) dockerSnapshotter := docker.NewSnapshotter(dockerClientFactory)
kubernetesSnapshotter := kubernetes.NewSnapshotter(kubernetesClientFactory) kubernetesSnapshotter := kubernetes.NewSnapshotter(kubernetesClientFactory)
snapshotService, err := snapshot.NewService(snapshotIntervalFromFlag, dataStore, dockerSnapshotter, kubernetesSnapshotter, shutdownCtx, pendingActionsService) snapshotService, err := snapshot.NewService(snapshotIntervalFromFlag, dataStore, dockerSnapshotter, kubernetesSnapshotter, shutdownCtx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -363,6 +346,147 @@ func initKeyPair(fileService portainer.FileService, signatureService portainer.D
return generateAndStoreKeyPair(fileService, signatureService) return generateAndStoreKeyPair(fileService, signatureService)
} }
// createTLSSecuredEndpoint creates and persists the default "primary"
// environment from the CLI flags with a TLS configuration. For tcp://
// endpoints it pings the target to detect whether a Portainer agent is
// running there, then takes a best-effort initial snapshot before storing
// the environment.
func createTLSSecuredEndpoint(flags *portainer.CLIFlags, dataStore dataservices.DataStore, snapshotService portainer.SnapshotService) error {
	tlsConfiguration := portainer.TLSConfiguration{
		TLS:           *flags.TLS,
		TLSSkipVerify: *flags.TLSSkipVerify,
	}

	if *flags.TLS {
		tlsConfiguration.TLSCACertPath = *flags.TLSCacert
		tlsConfiguration.TLSCertPath = *flags.TLSCert
		tlsConfiguration.TLSKeyPath = *flags.TLSKey
	} else if *flags.TLSSkipVerify {
		// --tlsskipverify without --tls still implies a TLS connection, just
		// without certificate verification. (The previous `!*flags.TLS &&`
		// guard was redundant inside this else branch.)
		tlsConfiguration.TLS = true
	}

	endpointID := dataStore.Endpoint().GetNextIdentifier()
	endpoint := &portainer.Endpoint{
		ID:                 portainer.EndpointID(endpointID),
		Name:               "primary",
		URL:                *flags.EndpointURL,
		GroupID:            portainer.EndpointGroupID(1),
		Type:               portainer.DockerEnvironment,
		TLSConfig:          tlsConfiguration,
		UserAccessPolicies: portainer.UserAccessPolicies{},
		TeamAccessPolicies: portainer.TeamAccessPolicies{},
		TagIDs:             []portainer.TagID{},
		Status:             portainer.EndpointStatusUp,
		Snapshots:          []portainer.DockerSnapshot{},
		Kubernetes:         portainer.KubernetesDefault(),

		SecuritySettings: portainer.EndpointSecuritySettings{
			AllowVolumeBrowserForRegularUsers: false,
			EnableHostManagementFeatures:      false,

			AllowSysctlSettingForRegularUsers:         true,
			AllowBindMountsForRegularUsers:            true,
			AllowPrivilegedModeForRegularUsers:        true,
			AllowHostNamespaceForRegularUsers:         true,
			AllowContainerCapabilitiesForRegularUsers: true,
			AllowDeviceMappingForRegularUsers:         true,
			AllowStackManagementForRegularUsers:       true,
		},
	}

	if strings.HasPrefix(endpoint.URL, "tcp://") {
		tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration.TLSCACertPath, tlsConfiguration.TLSCertPath, tlsConfiguration.TLSKeyPath, tlsConfiguration.TLSSkipVerify)
		if err != nil {
			return err
		}

		agentOnDockerEnvironment, err := client.ExecutePingOperation(endpoint.URL, tlsConfig)
		if err != nil {
			return err
		}

		if agentOnDockerEnvironment {
			endpoint.Type = portainer.AgentOnDockerEnvironment
		}
	}

	// A failed snapshot is logged but does not block environment creation -
	// the environment can be snapshotted again later.
	if err := snapshotService.SnapshotEndpoint(endpoint); err != nil {
		log.Error().
			Str("endpoint", endpoint.Name).
			Str("URL", endpoint.URL).
			Err(err).
			Msg("environment snapshot error")
	}

	return dataStore.Endpoint().Create(endpoint)
}
// createUnsecuredEndpoint creates and persists the default "primary"
// environment pointing at endpointURL, without any TLS configuration.
// tcp:// endpoints are pinged first; the initial snapshot is best effort.
func createUnsecuredEndpoint(endpointURL string, dataStore dataservices.DataStore, snapshotService portainer.SnapshotService) error {
	if strings.HasPrefix(endpointURL, "tcp://") {
		if _, err := client.ExecutePingOperation(endpointURL, nil); err != nil {
			return err
		}
	}

	nextID := dataStore.Endpoint().GetNextIdentifier()

	endpoint := &portainer.Endpoint{
		ID:                 portainer.EndpointID(nextID),
		Name:               "primary",
		URL:                endpointURL,
		GroupID:            portainer.EndpointGroupID(1),
		Type:               portainer.DockerEnvironment,
		TLSConfig:          portainer.TLSConfiguration{},
		UserAccessPolicies: portainer.UserAccessPolicies{},
		TeamAccessPolicies: portainer.TeamAccessPolicies{},
		TagIDs:             []portainer.TagID{},
		Status:             portainer.EndpointStatusUp,
		Snapshots:          []portainer.DockerSnapshot{},
		Kubernetes:         portainer.KubernetesDefault(),

		SecuritySettings: portainer.EndpointSecuritySettings{
			AllowVolumeBrowserForRegularUsers: false,
			EnableHostManagementFeatures:      false,

			AllowSysctlSettingForRegularUsers:         true,
			AllowBindMountsForRegularUsers:            true,
			AllowPrivilegedModeForRegularUsers:        true,
			AllowHostNamespaceForRegularUsers:         true,
			AllowContainerCapabilitiesForRegularUsers: true,
			AllowDeviceMappingForRegularUsers:         true,
			AllowStackManagementForRegularUsers:       true,
		},
	}

	// Snapshot failures are only logged; the environment is stored regardless.
	if err := snapshotService.SnapshotEndpoint(endpoint); err != nil {
		log.Error().
			Str("endpoint", endpoint.Name).
			Str("URL", endpoint.URL).Err(err).
			Msg("environment snapshot error")
	}

	return dataStore.Endpoint().Create(endpoint)
}
// initEndpoint creates the environment specified via the CLI flags. It is a
// no-op when no endpoint URL was given, or when the instance already has
// environments defined.
func initEndpoint(flags *portainer.CLIFlags, dataStore dataservices.DataStore, snapshotService portainer.SnapshotService) error {
	if *flags.EndpointURL == "" {
		return nil
	}

	existingEndpoints, err := dataStore.Endpoint().Endpoints()
	if err != nil {
		return err
	}

	if len(existingEndpoints) > 0 {
		log.Info().Msg("instance already has defined environments, skipping the environment defined via CLI")

		return nil
	}

	if !*flags.TLS && !*flags.TLSSkipVerify {
		return createUnsecuredEndpoint(*flags.EndpointURL, dataStore, snapshotService)
	}

	return createTLSSecuredEndpoint(flags, dataStore, snapshotService)
}
func loadEncryptionSecretKey(keyfilename string) []byte { func loadEncryptionSecretKey(keyfilename string) []byte {
content, err := os.ReadFile(path.Join("/run/secrets", keyfilename)) content, err := os.ReadFile(path.Join("/run/secrets", keyfilename))
if err != nil { if err != nil {
@@ -399,11 +523,6 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("") log.Fatal().Err(err).Msg("")
} }
// check if the db schema version matches with server version
if !checkDBSchemaServerVersionMatch(dataStore, portainer.APIVersion, int(portainer.Edition)) {
log.Fatal().Msg("The database schema version does not align with the server version. Please consider reverting to the previous server version or addressing the database migration issue.")
}
instanceID, err := dataStore.Version().InstanceID() instanceID, err := dataStore.Version().InstanceID()
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("failed getting instance id") log.Fatal().Err(err).Msg("failed getting instance id")
@@ -450,11 +569,17 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing key pair") log.Fatal().Err(err).Msg("failed initializing key pair")
} }
reverseTunnelService := chisel.NewService(dataStore, shutdownCtx, fileService) reverseTunnelService := chisel.NewService(dataStore, shutdownCtx)
dockerClientFactory := initDockerClientFactory(digitalSignatureService, reverseTunnelService) dockerClientFactory := initDockerClientFactory(digitalSignatureService, reverseTunnelService)
kubernetesClientFactory, err := initKubernetesClientFactory(digitalSignatureService, reverseTunnelService, dataStore, instanceID, *flags.AddrHTTPS, settings.UserSessionTimeout) kubernetesClientFactory, err := initKubernetesClientFactory(digitalSignatureService, reverseTunnelService, dataStore, instanceID, *flags.AddrHTTPS, settings.UserSessionTimeout)
snapshotService, err := initSnapshotService(*flags.SnapshotInterval, dataStore, dockerClientFactory, kubernetesClientFactory, shutdownCtx)
if err != nil {
log.Fatal().Err(err).Msg("failed initializing snapshot service")
}
snapshotService.Start()
authorizationService := authorization.NewService(dataStore) authorizationService := authorization.NewService(dataStore)
authorizationService.K8sClientFactory = kubernetesClientFactory authorizationService.K8sClientFactory = kubernetesClientFactory
@@ -462,7 +587,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
kubeClusterAccessService := kubernetes.NewKubeClusterAccessService(*flags.BaseURL, *flags.AddrHTTPS, sslSettings.CertPath) kubeClusterAccessService := kubernetes.NewKubeClusterAccessService(*flags.BaseURL, *flags.AddrHTTPS, sslSettings.CertPath)
proxyManager := proxy.NewManager(kubernetesClientFactory) proxyManager := proxy.NewManager(dataStore, digitalSignatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService)
reverseTunnelService.ProxyManager = proxyManager reverseTunnelService.ProxyManager = proxyManager
@@ -473,7 +598,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing compose deployer") log.Fatal().Err(err).Msg("failed initializing compose deployer")
} }
composeStackManager := initComposeStackManager(composeDeployer, proxyManager) composeStackManager := initComposeStackManager(composeDeployer, reverseTunnelService, proxyManager)
swarmStackManager, err := initSwarmStackManager(*flags.Assets, dockerConfigPath, digitalSignatureService, fileService, reverseTunnelService, dataStore) swarmStackManager, err := initSwarmStackManager(*flags.Assets, dockerConfigPath, digitalSignatureService, fileService, reverseTunnelService, dataStore)
if err != nil { if err != nil {
@@ -482,16 +607,6 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, digitalSignatureService, proxyManager, *flags.Assets) kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, digitalSignatureService, proxyManager, *flags.Assets)
pendingActionsService := pendingactions.NewService(dataStore, kubernetesClientFactory, dockerClientFactory, authorizationService, shutdownCtx, *flags.Assets, kubernetesDeployer)
snapshotService, err := initSnapshotService(*flags.SnapshotInterval, dataStore, dockerClientFactory, kubernetesClientFactory, shutdownCtx, pendingActionsService)
if err != nil {
log.Fatal().Err(err).Msg("failed initializing snapshot service")
}
snapshotService.Start()
proxyManager.NewProxyFactory(dataStore, digitalSignatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService)
helmPackageManager, err := initHelmPackageManager(*flags.Assets) helmPackageManager, err := initHelmPackageManager(*flags.Assets)
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("failed initializing helm package manager") log.Fatal().Err(err).Msg("failed initializing helm package manager")
@@ -512,10 +627,10 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
} }
} }
// channel to control when the admin user is created err = initEndpoint(flags, dataStore, snapshotService)
adminCreationDone := make(chan struct{}, 1) if err != nil {
log.Fatal().Err(err).Msg("failed initializing environment")
go endpointutils.InitEndpoint(shutdownCtx, adminCreationDone, flags, dataStore, snapshotService) }
adminPasswordHash := "" adminPasswordHash := ""
if *flags.AdminPasswordFile != "" { if *flags.AdminPasswordFile != "" {
@@ -550,9 +665,6 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("failed creating admin user") log.Fatal().Err(err).Msg("failed creating admin user")
} }
// notify the admin user is created, the endpoint initialization can start
adminCreationDone <- struct{}{}
} else { } else {
log.Info().Msg("instance already has an administrator user defined, skipping admin password related flags.") log.Info().Msg("instance already has an administrator user defined, skipping admin password related flags.")
} }
@@ -564,7 +676,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
} }
scheduler := scheduler.NewScheduler(shutdownCtx) scheduler := scheduler.NewScheduler(shutdownCtx)
stackDeployer := deployments.NewStackDeployer(swarmStackManager, composeStackManager, kubernetesDeployer, dockerClientFactory, dataStore) stackDeployer := deployments.NewStackDeployer(swarmStackManager, composeStackManager, kubernetesDeployer)
deployments.StartStackSchedules(scheduler, stackDeployer, dataStore, gitService) deployments.StartStackSchedules(scheduler, stackDeployer, dataStore, gitService)
sslDBSettings, err := dataStore.SSLSettings().Settings() sslDBSettings, err := dataStore.SSLSettings().Settings()
@@ -572,7 +684,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Msg("failed to fetch SSL settings from DB") log.Fatal().Msg("failed to fetch SSL settings from DB")
} }
upgradeService, err := upgrade.NewService(*flags.Assets, composeDeployer, kubernetesClientFactory) upgradeService, err := upgrade.NewService(*flags.Assets, composeDeployer)
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("failed initializing upgrade service") log.Fatal().Err(err).Msg("failed initializing upgrade service")
} }
@@ -581,12 +693,10 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
// but some more complex migrations require access to a kubernetes or docker // but some more complex migrations require access to a kubernetes or docker
// client. Therefore we run a separate migration process just before // client. Therefore we run a separate migration process just before
// starting the server. // starting the server.
postInitMigrator := postinit.NewPostInitMigrator( postInitMigrator := datastore.NewPostInitMigrator(
kubernetesClientFactory, kubernetesClientFactory,
dockerClientFactory, dockerClientFactory,
dataStore, dataStore,
*flags.Assets,
kubernetesDeployer,
) )
if err := postInitMigrator.PostInitMigrate(); err != nil { if err := postInitMigrator.PostInitMigrate(); err != nil {
log.Fatal().Err(err).Msg("failure during post init migrations") log.Fatal().Err(err).Msg("failure during post init migrations")
@@ -628,12 +738,12 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
StackDeployer: stackDeployer, StackDeployer: stackDeployer,
DemoService: demoService, DemoService: demoService,
UpgradeService: upgradeService, UpgradeService: upgradeService,
AdminCreationDone: adminCreationDone,
PendingActionsService: pendingActionsService,
} }
} }
func main() { func main() {
rand.Seed(time.Now().UnixNano())
configureLogger() configureLogger()
setLoggingMode("PRETTY") setLoggingMode("PRETTY")
@@ -655,7 +765,6 @@ func main() {
Msg("starting Portainer") Msg("starting Portainer")
err := server.Start() err := server.Start()
log.Info().Err(err).Msg("HTTP server exited") log.Info().Err(err).Msg("HTTP server exited")
} }
} }

View File

@@ -1,216 +1,52 @@
package crypto package crypto
import ( import (
"bufio"
"bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand"
"errors"
"fmt"
"io" "io"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
) )
const ( // NOTE: has to go with what is considered to be a simplistic in that it omits any
// AES GCM settings // authentication of the encrypted data.
aesGcmHeader = "AES256-GCM" // The encrypted file header // Person with better knowledge is welcomed to improve it.
aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm // sourced from https://golang.org/src/crypto/cipher/example_test.go
// Argon2 settings var emptySalt []byte = make([]byte, 0)
// Recommded settings lower memory hardware according to current OWASP recommendations
// Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware
// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id
argon2MemoryCost = 12 * 1024
argon2TimeCost = 3
argon2Threads = 1
argon2KeyLength = 32
)
// AesEncrypt reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key // AesEncrypt reads from input, encrypts with AES-256 and writes to the output.
// AesEncrypt reads from input, encrypts with AES-256-GCM and writes to
// output. passphrase is used to derive the encryption key.
func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
	if err := aesEncryptGCM(input, output, passphrase); err != nil {
		return fmt.Errorf("error encrypting file: %w", err)
	}

	return nil
}
// AesDecrypt reads from input, decrypts with AES-256 and returns a reader
// for the decrypted content. The file header decides the format: archives
// starting with the GCM header are decrypted with AES-GCM, anything else is
// assumed to be a legacy (header-less) OFB archive.
func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) {
	// Peek (not consume) the header so the chosen routine can re-read it.
	buffered := bufio.NewReader(input)

	header, err := buffered.Peek(len(aesGcmHeader))
	if err != nil {
		return nil, fmt.Errorf("error reading encrypted backup file header: %w", err)
	}

	if string(header) != aesGcmHeader {
		// Older archives were written without a header; fall back to OFB.
		reader, err := aesDecryptOFB(buffered, passphrase)
		if err != nil {
			return nil, fmt.Errorf("error decrypting legacy file backup: %w", err)
		}

		return reader, nil
	}

	reader, err := aesDecryptGCM(buffered, passphrase)
	if err != nil {
		return nil, fmt.Errorf("error decrypting file: %w", err)
	}

	return reader, nil
}
// aesEncryptGCM reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key.
//
// Output layout (must stay in sync with aesDecryptGCM):
//
//	aesGcmHeader || 16-byte salt || nonce || sealed block(s)
//
// The plaintext is sealed in aesGcmBlockSize chunks; the nonce is incremented
// after every chunk so no nonce is reused under the same key.
func aesEncryptGCM(input io.Reader, output io.Writer, passphrase []byte) error {
	// Derive key using argon2 with a random salt
	salt := make([]byte, 16) // 16 bytes salt
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		return err
	}

	// AES-256 key (32 bytes) derived per the argon2 cost constants above.
	key := argon2.IDKey(passphrase, salt, argon2TimeCost, argon2MemoryCost, argon2Threads, 32)

	block, err := aes.NewCipher(key)
	if err != nil {
		return err
	}

	aesgcm, err := cipher.NewGCM(block)
	if err != nil {
		return err
	}

	// Generate nonce - NewRandomNonce is a package helper; presumably it
	// returns a randomly initialized, incrementable nonce (defined elsewhere).
	nonce, err := NewRandomNonce(aesgcm.NonceSize())
	if err != nil {
		return err
	}

	// write the header
	if _, err := output.Write([]byte(aesGcmHeader)); err != nil {
		return err
	}

	// Write nonce and salt to the output file
	if _, err := output.Write(salt); err != nil {
		return err
	}
	if _, err := output.Write(nonce.Value()); err != nil {
		return err
	}

	// Buffer for reading plaintext blocks
	buf := make([]byte, aesGcmBlockSize) // Adjust buffer size as needed
	// Reused output slice; Seal appends the GCM auth tag, hence the overhead.
	ciphertext := make([]byte, len(buf)+aesgcm.Overhead())

	// Encrypt plaintext in blocks
	for {
		n, err := io.ReadFull(input, buf)
		if n == 0 {
			break // end of plaintext input
		}
		// EOF / ErrUnexpectedEOF just mean a short final block - not an error.
		if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
			return err
		}

		// Seal encrypts the plaintext using the nonce returning the updated slice.
		ciphertext = aesgcm.Seal(ciphertext[:0], nonce.Value(), buf[:n], nil)

		_, err = output.Write(ciphertext)
		if err != nil {
			return err
		}

		// Advance the nonce so the next block is sealed with a fresh value.
		nonce.Increment()
	}

	return nil
}
// aesDecryptGCM reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from.
//
// It expects the layout written by aesEncryptGCM:
//
//	aesGcmHeader || 16-byte salt || nonce || sealed block(s)
//
// NOTE(review): the entire plaintext is buffered in memory before a reader is
// returned, so peak memory is proportional to the archive size.
func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) {
	// Read & verify header (the caller only peeked at it, so consume it here)
	header := make([]byte, len(aesGcmHeader))
	if _, err := io.ReadFull(input, header); err != nil {
		return nil, err
	}
	if string(header) != aesGcmHeader {
		return nil, fmt.Errorf("invalid header")
	}

	// Read salt
	salt := make([]byte, 16) // Salt size - must match aesEncryptGCM
	if _, err := io.ReadFull(input, salt); err != nil {
		return nil, err
	}

	// Re-derive the same AES-256 key from the passphrase and stored salt.
	key := argon2.IDKey(passphrase, salt, argon2TimeCost, argon2MemoryCost, argon2Threads, 32)

	// Initialize AES cipher block
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	// Create GCM mode with the cipher block
	aesgcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// Read nonce from the input reader
	nonce := NewNonce(aesgcm.NonceSize())
	if err := nonce.Read(input); err != nil {
		return nil, err
	}

	// Initialize a buffer to store decrypted data
	buf := bytes.Buffer{}
	plaintext := make([]byte, aesGcmBlockSize)

	// Decrypt the ciphertext in blocks, incrementing the nonce after each
	// block exactly as the encryption side does.
	for {
		// Read a block of ciphertext from the input reader
		ciphertextBlock := make([]byte, aesGcmBlockSize+aesgcm.Overhead()) // Adjust block size as needed
		n, err := io.ReadFull(input, ciphertextBlock)
		if n == 0 {
			break // end of ciphertext
		}
		// A short final block surfaces as EOF/ErrUnexpectedEOF - not an error.
		if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
			return nil, err
		}

		// Decrypt the block of ciphertext (Open also verifies the auth tag).
		plaintext, err = aesgcm.Open(plaintext[:0], nonce.Value(), ciphertextBlock[:n], nil)
		if err != nil {
			return nil, err
		}

		_, err = buf.Write(plaintext)
		if err != nil {
			return nil, err
		}

		nonce.Increment()
	}

	return &buf, nil
}
// aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from.
// passphrase is used to generate an encryption key. // passphrase is used to generate an encryption key.
// note: This function used to decrypt files that were encrypted without a header i.e. old archives func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
func aesDecryptOFB(input io.Reader, passphrase []byte) (io.Reader, error) { // making a 32 bytes key that would correspond to AES-256
var emptySalt []byte = make([]byte, 0) // don't necessarily need a salt, so just kept in empty
key, err := scrypt.Key(passphrase, emptySalt, 32768, 8, 1, 32)
if err != nil {
return err
}
block, err := aes.NewCipher(key)
if err != nil {
return err
}
// If the key is unique for each ciphertext, then it's ok to use a zero
// IV.
var iv [aes.BlockSize]byte
stream := cipher.NewOFB(block, iv[:])
writer := &cipher.StreamWriter{S: stream, W: output}
// Copy the input to the output, encrypting as we go.
if _, err := io.Copy(writer, input); err != nil {
return err
}
return nil
}
// AesDecrypt reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from.
// passphrase is used to generate an encryption key.
func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) {
// making a 32 bytes key that would correspond to AES-256 // making a 32 bytes key that would correspond to AES-256
// don't necessarily need a salt, so just kept in empty // don't necessarily need a salt, so just kept in empty
key, err := scrypt.Key(passphrase, emptySalt, 32768, 8, 1, 32) key, err := scrypt.Key(passphrase, emptySalt, 32768, 8, 1, 32)
@@ -223,9 +59,11 @@ func aesDecryptOFB(input io.Reader, passphrase []byte) (io.Reader, error) {
return nil, err return nil, err
} }
// If the key is unique for each ciphertext, then it's ok to use a zero IV. // If the key is unique for each ciphertext, then it's ok to use a zero
// IV.
var iv [aes.BlockSize]byte var iv [aes.BlockSize]byte
stream := cipher.NewOFB(block, iv[:]) stream := cipher.NewOFB(block, iv[:])
reader := &cipher.StreamReader{S: stream, R: input} reader := &cipher.StreamReader{S: stream, R: input}
return reader, nil return reader, nil

View File

@@ -2,7 +2,6 @@ package crypto
import ( import (
"io" "io"
"math/rand"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@@ -10,19 +9,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
// letterBytes is the alphabet random test payloads are drawn from.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// randBytes returns n pseudo-random bytes, each picked from letterBytes.
func randBytes(n int) []byte {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = letterBytes[rand.Intn(len(letterBytes))]
	}

	return out
}
func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) { func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) {
const passphrase = "passphrase"
tmpdir := t.TempDir() tmpdir := t.TempDir()
var ( var (
@@ -31,99 +18,17 @@ func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) {
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1024*1024*100 + 523) content := []byte("content")
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
// Test_encryptAndDecrypt_withStrongPassphrase verifies that an
// encrypt-then-decrypt round trip with a passphrase containing special
// characters restores the original content exactly.
func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) {
	const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+"

	tmpdir := t.TempDir()
	srcPath := filepath.Join(tmpdir, "origin2")
	encPath := filepath.Join(tmpdir, "encrypted2")
	decPath := filepath.Join(tmpdir, "decrypted2")

	content := randBytes(500)
	os.WriteFile(srcPath, content, 0600)

	srcFile, _ := os.Open(srcPath)
	defer srcFile.Close()

	encWriter, _ := os.Create(encPath)
	err := AesEncrypt(srcFile, encWriter, []byte(passphrase))
	assert.Nil(t, err, "Failed to encrypt a file")
	encWriter.Close()

	encContent, err := os.ReadFile(encPath)
	assert.Nil(t, err, "Couldn't read encrypted file")
	assert.NotEqual(t, encContent, content, "Content wasn't encrypted")

	encReader, _ := os.Open(encPath)
	defer encReader.Close()

	decWriter, _ := os.Create(decPath)
	defer decWriter.Close()

	decReader, err := AesDecrypt(encReader, []byte(passphrase))
	assert.Nil(t, err, "Failed to decrypt file")
	io.Copy(decWriter, decReader)

	decContent, _ := os.ReadFile(decPath)
	assert.Equal(t, content, decContent, "Original and decrypted content should match")
}
func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
defer originFile.Close() defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath) encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase")) err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file") assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath) encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file") assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
@@ -152,7 +57,7 @@ func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) {
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1024 * 50) content := []byte("content")
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
@@ -191,7 +96,7 @@ func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T)
decryptedFilePath = filepath.Join(tmpdir, "decrypted") decryptedFilePath = filepath.Join(tmpdir, "decrypted")
) )
content := randBytes(1034) content := []byte("content")
os.WriteFile(originFilePath, content, 0600) os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath) originFile, _ := os.Open(originFilePath)
@@ -212,6 +117,11 @@ func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T)
decryptedFileWriter, _ := os.Create(decryptedFilePath) decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close() defer decryptedFileWriter.Close()
_, err = AesDecrypt(encryptedFileReader, []byte("garbage")) decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("garbage"))
assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase") assert.Nil(t, err, "Should allow to decrypt with wrong passphrase")
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.NotEqual(t, content, decryptedContent, "Original and decrypted content should NOT match")
} }

View File

@@ -7,8 +7,9 @@ import (
"crypto/x509" "crypto/x509"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"math/big"
"github.com/portainer/portainer/pkg/libcrypto" "github.com/portainer/libcrypto"
) )
const ( const (
@@ -114,6 +115,9 @@ func (service *ECDSAService) CreateSignature(message string) (string, error) {
hash := libcrypto.HashFromBytes([]byte(message)) hash := libcrypto.HashFromBytes([]byte(message))
r := big.NewInt(0)
s := big.NewInt(0)
r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash) r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash)
if err != nil { if err != nil {
return "", err return "", err

View File

@@ -1,61 +0,0 @@
package crypto
import (
"crypto/rand"
"errors"
"io"
)
// Nonce is a fixed-size, incrementable byte counter suitable for use as
// an AES-GCM style nonce.
type Nonce struct {
	val []byte
}

// NewNonce returns a zeroed nonce of the requested size.
func NewNonce(size int) *Nonce {
	n := Nonce{val: make([]byte, size)}
	return &n
}

// NewRandomNonce generates a new initial nonce whose high-order byte is
// random and whose remaining (low-order) bytes are zero. This leaves plenty
// of nonce values available via Increment before the counter rolls over.
// Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier
// https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html
func NewRandomNonce(size int) (*Nonce, error) {
	const randomBytes = 1
	if size <= randomBytes {
		return nil, errors.New("nonce size must be greater than the number of random bytes")
	}

	// Fill only the leading randomBytes bytes; the rest stays zero.
	val := make([]byte, size)
	if _, err := rand.Read(val[:randomBytes]); err != nil {
		return nil, err
	}

	return &Nonce{val: val}, nil
}

// Read replaces the nonce value with exactly len(val) bytes from stream.
func (n *Nonce) Read(stream io.Reader) error {
	if _, err := io.ReadFull(stream, n.val); err != nil {
		return err
	}
	return nil
}

// Value exposes the underlying nonce bytes.
func (n *Nonce) Value() []byte {
	return n.val
}

// Increment advances the nonce by one, treating it as a big-endian counter.
// It returns an error only when every byte wraps around (full overflow).
func (n *Nonce) Increment() error {
	// Walk from the least significant byte towards the most significant one.
	for i := len(n.val); i > 0; i-- {
		n.val[i-1]++

		// A non-zero result means no carry was produced; we are done.
		if n.val[i-1] != 0 {
			return nil
		}
	}

	// Every byte carried over: the counter wrapped back to all zeroes.
	return errors.New("nonce overflow")
}

View File

@@ -20,14 +20,6 @@ func CreateTLSConfiguration() *tls.Config {
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
}, },
} }
} }

View File

@@ -5,7 +5,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"os" "os"
"path" "path"
"time" "time"
@@ -144,8 +143,6 @@ func (connection *DbConnection) Open() error {
// Close closes the BoltDB database. // Close closes the BoltDB database.
// Safe to being called multiple times. // Safe to being called multiple times.
func (connection *DbConnection) Close() error { func (connection *DbConnection) Close() error {
log.Info().Msg("closing PortainerDB")
if connection.DB != nil { if connection.DB != nil {
return connection.DB.Close() return connection.DB.Close()
} }
@@ -185,7 +182,7 @@ func (connection *DbConnection) BackupTo(w io.Writer) error {
func (connection *DbConnection) ExportRaw(filename string) error { func (connection *DbConnection) ExportRaw(filename string) error {
databasePath := connection.GetDatabaseFilePath() databasePath := connection.GetDatabaseFilePath()
if _, err := os.Stat(databasePath); err != nil { if _, err := os.Stat(databasePath); err != nil {
return fmt.Errorf("stat on %s failed, error: %w", databasePath, err) return fmt.Errorf("stat on %s failed: %s", databasePath, err)
} }
b, err := connection.ExportJSON(databasePath, true) b, err := connection.ExportJSON(databasePath, true)
@@ -204,20 +201,6 @@ func (connection *DbConnection) ConvertToKey(v int) []byte {
return b return b
} }
// keyToString Converts a key to a string value suitable for logging
func keyToString(b []byte) string {
if len(b) != 8 {
return string(b)
}
v := binary.BigEndian.Uint64(b)
if v <= math.MaxInt32 {
return fmt.Sprintf("%d", v)
}
return string(b)
}
// CreateBucket is a generic function used to create a bucket inside a database. // CreateBucket is a generic function used to create a bucket inside a database.
func (connection *DbConnection) SetServiceName(bucketName string) error { func (connection *DbConnection) SetServiceName(bucketName string) error {
return connection.UpdateTx(func(tx portainer.Transaction) error { return connection.UpdateTx(func(tx portainer.Transaction) error {
@@ -254,10 +237,10 @@ func (connection *DbConnection) UpdateObjectFunc(bucketName string, key []byte,
data := bucket.Get(key) data := bucket.Get(key)
if data == nil { if data == nil {
return fmt.Errorf("%w (bucket=%s, key=%s)", dserrors.ErrObjectNotFound, bucketName, keyToString(key)) return dserrors.ErrObjectNotFound
} }
err := connection.UnmarshalObject(data, object) err := connection.UnmarshalObjectWithJsoniter(data, object)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,10 +1,10 @@
package boltdb package boltdb
import ( import (
"encoding/json"
"time" "time"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"github.com/segmentio/encoding/json"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
) )

View File

@@ -1,41 +1,34 @@
package boltdb package boltdb
import ( import (
"bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand" "crypto/rand"
"encoding/json"
"fmt" "fmt"
"io" "io"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/segmentio/encoding/json"
) )
var errEncryptedStringTooShort = fmt.Errorf("encrypted string too short") var errEncryptedStringTooShort = fmt.Errorf("encrypted string too short")
// MarshalObject encodes an object to binary format // MarshalObject encodes an object to binary format
func (connection *DbConnection) MarshalObject(object interface{}) ([]byte, error) { func (connection *DbConnection) MarshalObject(object interface{}) (data []byte, err error) {
buf := &bytes.Buffer{}
// Special case for the VERSION bucket. Here we're not using json // Special case for the VERSION bucket. Here we're not using json
if v, ok := object.(string); ok { if v, ok := object.(string); ok {
buf.WriteString(v) data = []byte(v)
} else { } else {
enc := json.NewEncoder(buf) data, err = json.Marshal(object)
enc.SetSortMapKeys(false) if err != nil {
enc.SetAppendNewline(false) return data, err
if err := enc.Encode(object); err != nil {
return nil, err
} }
} }
if connection.getEncryptionKey() == nil { if connection.getEncryptionKey() == nil {
return buf.Bytes(), nil return data, nil
} }
return encrypt(data, connection.getEncryptionKey())
return encrypt(buf.Bytes(), connection.getEncryptionKey())
} }
// UnmarshalObject decodes an object from binary data // UnmarshalObject decodes an object from binary data
@@ -61,6 +54,31 @@ func (connection *DbConnection) UnmarshalObject(data []byte, object interface{})
return err return err
} }
// UnmarshalObjectWithJsoniter decodes an object from binary data
// using the jsoniter library. It is mainly used to accelerate environment(endpoint)
// decoding at the moment.
func (connection *DbConnection) UnmarshalObjectWithJsoniter(data []byte, object interface{}) error {
if connection.getEncryptionKey() != nil {
var err error
data, err = decrypt(data, connection.getEncryptionKey())
if err != nil {
return err
}
}
var jsoni = jsoniter.ConfigCompatibleWithStandardLibrary
err := jsoni.Unmarshal(data, &object)
if err != nil {
if s, ok := object.(*string); ok {
*s = string(data)
return nil
}
return err
}
return nil
}
// mmm, don't have a KMS .... aes GCM seems the most likely from // mmm, don't have a KMS .... aes GCM seems the most likely from
// https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption // https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption

View File

@@ -129,7 +129,7 @@ func Test_UnMarshalObjectUnencrypted(t *testing.T) {
var object string var object string
err := conn.UnmarshalObject(test.object, &object) err := conn.UnmarshalObject(test.object, &object)
is.NoError(err) is.NoError(err)
is.Equal(test.expected, object) is.Equal(test.expected, string(object))
}) })
} }
} }

View File

@@ -2,7 +2,6 @@ package boltdb
import ( import (
"bytes" "bytes"
"fmt"
dserrors "github.com/portainer/portainer/api/dataservices/errors" dserrors "github.com/portainer/portainer/api/dataservices/errors"
@@ -25,10 +24,13 @@ func (tx *DbTransaction) GetObject(bucketName string, key []byte, object interfa
value := bucket.Get(key) value := bucket.Get(key)
if value == nil { if value == nil {
return fmt.Errorf("%w (bucket=%s, key=%s)", dserrors.ErrObjectNotFound, bucketName, keyToString(key)) return dserrors.ErrObjectNotFound
} }
return tx.conn.UnmarshalObject(value, object) data := make([]byte, len(value))
copy(data, value)
return tx.conn.UnmarshalObjectWithJsoniter(data, object)
} }
func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object interface{}) error { func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object interface{}) error {
@@ -46,9 +48,7 @@ func (tx *DbTransaction) DeleteObject(bucketName string, key []byte) error {
return bucket.Delete(key) return bucket.Delete(key)
} }
func (tx *DbTransaction) DeleteAllObjects(bucketName string, obj interface{}, matchingFn func(o interface{}) (id int, ok bool)) error { func (tx *DbTransaction) DeleteAllObjects(bucketName string, obj interface{}, matching func(o interface{}) (id int, ok bool)) error {
var ids []int
bucket := tx.tx.Bucket([]byte(bucketName)) bucket := tx.tx.Bucket([]byte(bucketName))
cursor := bucket.Cursor() cursor := bucket.Cursor()
@@ -58,16 +58,13 @@ func (tx *DbTransaction) DeleteAllObjects(bucketName string, obj interface{}, ma
return err return err
} }
if id, ok := matchingFn(obj); ok { if id, ok := matching(obj); ok {
ids = append(ids, id) err := bucket.Delete(tx.conn.ConvertToKey(id))
} if err != nil {
}
for _, id := range ids {
if err := bucket.Delete(tx.conn.ConvertToKey(id)); err != nil {
return err return err
} }
} }
}
return nil return nil
} }
@@ -77,6 +74,7 @@ func (tx *DbTransaction) GetNextIdentifier(bucketName string) int {
id, err := bucket.NextSequence() id, err := bucket.NextSequence()
if err != nil { if err != nil {
log.Error().Err(err).Str("bucket", bucketName).Msg("failed to get the next identifer") log.Error().Err(err).Str("bucket", bucketName).Msg("failed to get the next identifer")
return 0 return 0
} }
@@ -94,7 +92,7 @@ func (tx *DbTransaction) CreateObject(bucketName string, fn func(uint64) (int, i
return err return err
} }
return bucket.Put(tx.conn.ConvertToKey(id), data) return bucket.Put(tx.conn.ConvertToKey(int(id)), data)
} }
func (tx *DbTransaction) CreateObjectWithId(bucketName string, id int, obj interface{}) error { func (tx *DbTransaction) CreateObjectWithId(bucketName string, id int, obj interface{}) error {
@@ -117,42 +115,54 @@ func (tx *DbTransaction) CreateObjectWithStringId(bucketName string, id []byte,
return bucket.Put(id, data) return bucket.Put(id, data)
} }
func (tx *DbTransaction) GetAll(bucketName string, obj interface{}, appendFn func(o interface{}) (interface{}, error)) error { func (tx *DbTransaction) GetAll(bucketName string, obj interface{}, append func(o interface{}) (interface{}, error)) error {
bucket := tx.tx.Bucket([]byte(bucketName)) bucket := tx.tx.Bucket([]byte(bucketName))
return bucket.ForEach(func(k []byte, v []byte) error { cursor := bucket.Cursor()
err := tx.conn.UnmarshalObject(v, obj) for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
if err == nil {
obj, err = appendFn(obj)
}
return err
})
}
func (tx *DbTransaction) GetAllWithJsoniter(bucketName string, obj interface{}, appendFn func(o interface{}) (interface{}, error)) error {
bucket := tx.tx.Bucket([]byte(bucketName))
return bucket.ForEach(func(k []byte, v []byte) error {
err := tx.conn.UnmarshalObject(v, obj)
if err == nil {
obj, err = appendFn(obj)
}
return err
})
}
func (tx *DbTransaction) GetAllWithKeyPrefix(bucketName string, keyPrefix []byte, obj interface{}, appendFn func(o interface{}) (interface{}, error)) error {
cursor := tx.tx.Bucket([]byte(bucketName)).Cursor()
for k, v := cursor.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cursor.Next() {
err := tx.conn.UnmarshalObject(v, obj) err := tx.conn.UnmarshalObject(v, obj)
if err != nil { if err != nil {
return err return err
} }
obj, err = appendFn(obj) obj, err = append(obj)
if err != nil {
return err
}
}
return nil
}
func (tx *DbTransaction) GetAllWithJsoniter(bucketName string, obj interface{}, append func(o interface{}) (interface{}, error)) error {
bucket := tx.tx.Bucket([]byte(bucketName))
cursor := bucket.Cursor()
for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
err := tx.conn.UnmarshalObjectWithJsoniter(v, obj)
if err != nil {
return err
}
obj, err = append(obj)
if err != nil {
return err
}
}
return nil
}
func (tx *DbTransaction) GetAllWithKeyPrefix(bucketName string, keyPrefix []byte, obj interface{}, append func(o interface{}) (interface{}, error)) error {
cursor := tx.tx.Bucket([]byte(bucketName)).Cursor()
for k, v := cursor.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cursor.Next() {
err := tx.conn.UnmarshalObjectWithJsoniter(v, obj)
if err != nil {
return err
}
obj, err = append(obj)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -5,7 +5,7 @@ import (
"testing" "testing"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" dserrors "github.com/portainer/portainer/api/dataservices/errors"
) )
const testBucketName = "test-bucket" const testBucketName = "test-bucket"
@@ -97,7 +97,7 @@ func TestTxs(t *testing.T) {
err = conn.ViewTx(func(tx portainer.Transaction) error { err = conn.ViewTx(func(tx portainer.Transaction) error {
return tx.GetObject(testBucketName, conn.ConvertToKey(testId), &obj) return tx.GetObject(testBucketName, conn.ConvertToKey(testId), &obj)
}) })
if !dataservices.IsErrObjectNotFound(err) { if err != dserrors.ErrObjectNotFound {
t.Fatal(err) t.Fatal(err)
} }

View File

@@ -9,7 +9,8 @@ import (
// NewDatabase should use config options to return a connection to the requested database // NewDatabase should use config options to return a connection to the requested database
func NewDatabase(storeType, storePath string, encryptionKey []byte) (connection portainer.Connection, err error) { func NewDatabase(storeType, storePath string, encryptionKey []byte) (connection portainer.Connection, err error) {
if storeType == "boltdb" { switch storeType {
case "boltdb":
return &boltdb.DbConnection{ return &boltdb.DbConnection{
Path: storePath, Path: storePath,
EncryptionKey: encryptionKey, EncryptionKey: encryptionKey,

View File

@@ -1,22 +1,23 @@
package apikeyrepository package apikeyrepository
import ( import (
"errors" "bytes"
"fmt" "fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices/errors"
dserrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "api_key" // BucketName represents the name of the bucket where this service stores data.
BucketName = "api_key"
)
// Service represents a service for managing api-key data. // Service represents a service for managing api-key data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.APIKey, portainer.APIKeyID] connection portainer.Connection
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -27,25 +28,22 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.APIKey, portainer.APIKeyID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
// GetAPIKeysByUserID returns a slice containing all the APIKeys a user has access to. // GetAPIKeysByUserID returns a slice containing all the APIKeys a user has access to.
func (service *Service) GetAPIKeysByUserID(userID portainer.UserID) ([]portainer.APIKey, error) { func (service *Service) GetAPIKeysByUserID(userID portainer.UserID) ([]portainer.APIKey, error) {
result := make([]portainer.APIKey, 0) var result = make([]portainer.APIKey, 0)
err := service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.APIKey{}, &portainer.APIKey{},
func(obj interface{}) (interface{}, error) { func(obj interface{}) (interface{}, error) {
record, ok := obj.(*portainer.APIKey) record, ok := obj.(*portainer.APIKey)
if !ok { if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to APIKey object") log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to APIKey object")
return nil, fmt.Errorf("failed to convert to APIKey object: %s", obj) return nil, fmt.Errorf("Failed to convert to APIKey object: %s", obj)
} }
if record.UserID == userID { if record.UserID == userID {
@@ -60,19 +58,19 @@ func (service *Service) GetAPIKeysByUserID(userID portainer.UserID) ([]portainer
// GetAPIKeyByDigest returns the API key for the associated digest. // GetAPIKeyByDigest returns the API key for the associated digest.
// Note: there is a 1-to-1 mapping of api-key and digest // Note: there is a 1-to-1 mapping of api-key and digest
func (service *Service) GetAPIKeyByDigest(digest string) (*portainer.APIKey, error) { func (service *Service) GetAPIKeyByDigest(digest []byte) (*portainer.APIKey, error) {
var k *portainer.APIKey var k *portainer.APIKey
stop := fmt.Errorf("ok") stop := fmt.Errorf("ok")
err := service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.APIKey{}, &portainer.APIKey{},
func(obj interface{}) (interface{}, error) { func(obj interface{}) (interface{}, error) {
key, ok := obj.(*portainer.APIKey) key, ok := obj.(*portainer.APIKey)
if !ok { if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to APIKey object") log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to APIKey object")
return nil, fmt.Errorf("failed to convert to APIKey object: %s", obj) return nil, fmt.Errorf("Failed to convert to APIKey object: %s", obj)
} }
if key.Digest == digest { if bytes.Equal(key.Digest, digest) {
k = key k = key
return nil, stop return nil, stop
} }
@@ -80,20 +78,20 @@ func (service *Service) GetAPIKeyByDigest(digest string) (*portainer.APIKey, err
return &portainer.APIKey{}, nil return &portainer.APIKey{}, nil
}) })
if errors.Is(err, stop) { if err == stop {
return k, nil return k, nil
} }
if err == nil { if err == nil {
return nil, dserrors.ErrObjectNotFound return nil, errors.ErrObjectNotFound
} }
return nil, err return nil, err
} }
// Create creates a new APIKey object. // CreateAPIKey creates a new APIKey object.
func (service *Service) Create(record *portainer.APIKey) error { func (service *Service) CreateAPIKey(record *portainer.APIKey) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
record.ID = portainer.APIKeyID(id) record.ID = portainer.APIKeyID(id)
@@ -102,3 +100,26 @@ func (service *Service) Create(record *portainer.APIKey) error {
}, },
) )
} }
// GetAPIKey retrieves an existing APIKey object by api key ID.
func (service *Service) GetAPIKey(keyID portainer.APIKeyID) (*portainer.APIKey, error) {
var key portainer.APIKey
identifier := service.connection.ConvertToKey(int(keyID))
err := service.connection.GetObject(BucketName, identifier, &key)
if err != nil {
return nil, err
}
return &key, nil
}
func (service *Service) UpdateAPIKey(key *portainer.APIKey) error {
identifier := service.connection.ConvertToKey(int(key.ID))
return service.connection.UpdateObject(BucketName, identifier, key)
}
func (service *Service) DeleteAPIKey(ID portainer.APIKeyID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}

View File

@@ -1,66 +0,0 @@
package dataservices
import (
portainer "github.com/portainer/portainer/api"
"golang.org/x/exp/constraints"
)
// BaseCRUD describes the generic create/read/update/delete operations
// shared by every bucket-backed data service.
type BaseCRUD[T any, I constraints.Integer] interface {
	Create(element *T) error
	Read(ID I) (*T, error)
	ReadAll() ([]T, error)
	Update(ID I, element *T) error
	Delete(ID I) error
}

// BaseDataService implements the generic CRUD operations for a single
// database bucket over a portainer.Connection. Each call runs inside its
// own transaction via the Tx helper.
type BaseDataService[T any, I constraints.Integer] struct {
	Bucket     string
	Connection portainer.Connection
}

// BucketName returns the name of the bucket this service operates on.
// NOTE: receivers unified to value receivers with a consistent name; the
// original mixed pointer receivers (BucketName, Tx) with value receivers
// (Read, ReadAll, Update, Delete) on the same type. Value receivers remain
// callable through pointers, so this is backward compatible.
func (service BaseDataService[T, I]) BucketName() string {
	return service.Bucket
}

// Tx returns a transaction-scoped view of this service bound to tx.
func (service BaseDataService[T, I]) Tx(tx portainer.Transaction) BaseDataServiceTx[T, I] {
	return BaseDataServiceTx[T, I]{
		Bucket:     service.Bucket,
		Connection: service.Connection,
		Tx:         tx,
	}
}

// Read fetches the element stored under ID inside a read-only transaction.
func (service BaseDataService[T, I]) Read(ID I) (*T, error) {
	var element *T

	return element, service.Connection.ViewTx(func(tx portainer.Transaction) error {
		var err error
		element, err = service.Tx(tx).Read(ID)

		return err
	})
}

// ReadAll returns every element in the bucket. The result is always a
// non-nil (possibly empty) slice.
func (service BaseDataService[T, I]) ReadAll() ([]T, error) {
	var collection = make([]T, 0)

	return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error {
		var err error
		collection, err = service.Tx(tx).ReadAll()

		return err
	})
}

// Update overwrites the element stored under ID inside a write transaction.
func (service BaseDataService[T, I]) Update(ID I, element *T) error {
	return service.Connection.UpdateTx(func(tx portainer.Transaction) error {
		return service.Tx(tx).Update(ID, element)
	})
}

// Delete removes the element stored under ID inside a write transaction.
func (service BaseDataService[T, I]) Delete(ID I) error {
	return service.Connection.UpdateTx(func(tx portainer.Transaction) error {
		return service.Tx(tx).Delete(ID)
	})
}

View File

@@ -1,49 +0,0 @@
package dataservices
import (
portainer "github.com/portainer/portainer/api"
"golang.org/x/exp/constraints"
)
// BaseDataServiceTx is a transaction-bound variant of BaseDataService:
// every operation runs against the already-open transaction Tx instead
// of opening a new one.
type BaseDataServiceTx[T any, I constraints.Integer] struct {
	Bucket     string
	Connection portainer.Connection
	Tx         portainer.Transaction
}

// BucketName returns the name of the bucket this service operates on.
func (s BaseDataServiceTx[T, I]) BucketName() string {
	return s.Bucket
}

// Read fetches the element stored under ID from the bucket.
func (s BaseDataServiceTx[T, I]) Read(ID I) (*T, error) {
	var element T

	key := s.Connection.ConvertToKey(int(ID))
	if err := s.Tx.GetObject(s.Bucket, key, &element); err != nil {
		return nil, err
	}

	return &element, nil
}

// ReadAll collects every element in the bucket into a non-nil slice.
func (s BaseDataServiceTx[T, I]) ReadAll() ([]T, error) {
	collection := make([]T, 0)
	err := s.Tx.GetAllWithJsoniter(s.Bucket, new(T), AppendFn(&collection))

	return collection, err
}

// Update overwrites the element stored under ID.
func (s BaseDataServiceTx[T, I]) Update(ID I, element *T) error {
	return s.Tx.UpdateObject(s.Bucket, s.Connection.ConvertToKey(int(ID)), element)
}

// Delete removes the element stored under ID.
func (s BaseDataServiceTx[T, I]) Delete(ID I) error {
	return s.Tx.DeleteObject(s.Bucket, s.Connection.ConvertToKey(int(ID)))
}

View File

@@ -1,16 +1,25 @@
package customtemplate package customtemplate
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "customtemplates" // BucketName represents the name of the bucket where this service stores data.
BucketName = "customtemplates"
)
// Service represents a service for managing custom template data. // Service represents a service for managing custom template data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.CustomTemplate, portainer.CustomTemplateID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,20 +30,64 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.CustomTemplate, portainer.CustomTemplateID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
// CustomTemplates return an array containing all the custom templates.
func (service *Service) CustomTemplates() ([]portainer.CustomTemplate, error) {
var customTemplates = make([]portainer.CustomTemplate, 0)
err := service.connection.GetAll(
BucketName,
&portainer.CustomTemplate{},
func(obj interface{}) (interface{}, error) {
//var tag portainer.Tag
customTemplate, ok := obj.(*portainer.CustomTemplate)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to CustomTemplate object")
return nil, fmt.Errorf("Failed to convert to CustomTemplate object: %s", obj)
}
customTemplates = append(customTemplates, *customTemplate)
return &portainer.CustomTemplate{}, nil
})
return customTemplates, err
}
// CustomTemplate returns an custom template by ID.
func (service *Service) CustomTemplate(ID portainer.CustomTemplateID) (*portainer.CustomTemplate, error) {
var customTemplate portainer.CustomTemplate
identifier := service.connection.ConvertToKey(int(ID))
err := service.connection.GetObject(BucketName, identifier, &customTemplate)
if err != nil {
return nil, err
}
return &customTemplate, nil
}
// UpdateCustomTemplate updates an custom template.
func (service *Service) UpdateCustomTemplate(ID portainer.CustomTemplateID, customTemplate *portainer.CustomTemplate) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, customTemplate)
}
// DeleteCustomTemplate deletes an custom template.
func (service *Service) DeleteCustomTemplate(ID portainer.CustomTemplateID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}
// CreateCustomTemplate uses the existing id and saves it. // CreateCustomTemplate uses the existing id and saves it.
// TODO: where does the ID come from, and is it safe? // TODO: where does the ID come from, and is it safe?
func (service *Service) Create(customTemplate *portainer.CustomTemplate) error { func (service *Service) Create(customTemplate *portainer.CustomTemplate) error {
return service.Connection.CreateObjectWithId(BucketName, int(customTemplate.ID), customTemplate) return service.connection.CreateObjectWithId(BucketName, int(customTemplate.ID), customTemplate)
} }
// GetNextIdentifier returns the next identifier for a custom template. // GetNextIdentifier returns the next identifier for a custom template.
func (service *Service) GetNextIdentifier() int { func (service *Service) GetNextIdentifier() int {
return service.Connection.GetNextIdentifier(BucketName) return service.connection.GetNextIdentifier(BucketName)
} }

View File

@@ -2,7 +2,6 @@ package edgegroup
import ( import (
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
) )
// BucketName represents the name of the bucket where this service stores data. // BucketName represents the name of the bucket where this service stores data.
@@ -10,7 +9,7 @@ const BucketName = "edgegroups"
// Service represents a service for managing Edge group data. // Service represents a service for managing Edge group data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.EdgeGroup, portainer.EdgeGroupID] connection portainer.Connection
} }
func (service *Service) BucketName() string { func (service *Service) BucketName() string {
@@ -25,36 +24,69 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.EdgeGroup, portainer.EdgeGroupID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{ return ServiceTx{
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.EdgeGroup, portainer.EdgeGroupID]{ service: service,
Bucket: BucketName, tx: tx,
Connection: service.Connection,
Tx: tx,
},
} }
} }
// EdgeGroups return a slice containing all the Edge groups.
func (service *Service) EdgeGroups() ([]portainer.EdgeGroup, error) {
var groups []portainer.EdgeGroup
var err error
err = service.connection.ViewTx(func(tx portainer.Transaction) error {
groups, err = service.Tx(tx).EdgeGroups()
return err
})
return groups, err
}
// EdgeGroup returns an Edge group by ID.
func (service *Service) EdgeGroup(ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error) {
var group *portainer.EdgeGroup
var err error
err = service.connection.ViewTx(func(tx portainer.Transaction) error {
group, err = service.Tx(tx).EdgeGroup(ID)
return err
})
return group, err
}
// UpdateEdgeGroup updates an edge group.
func (service *Service) UpdateEdgeGroup(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, group)
}
// Deprecated: UpdateEdgeGroupFunc updates an edge group inside a transaction avoiding data races. // Deprecated: UpdateEdgeGroupFunc updates an edge group inside a transaction avoiding data races.
func (service *Service) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFunc func(edgeGroup *portainer.EdgeGroup)) error { func (service *Service) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFunc func(edgeGroup *portainer.EdgeGroup)) error {
id := service.Connection.ConvertToKey(int(ID)) id := service.connection.ConvertToKey(int(ID))
edgeGroup := &portainer.EdgeGroup{} edgeGroup := &portainer.EdgeGroup{}
return service.Connection.UpdateObjectFunc(BucketName, id, edgeGroup, func() { return service.connection.UpdateObjectFunc(BucketName, id, edgeGroup, func() {
updateFunc(edgeGroup) updateFunc(edgeGroup)
}) })
} }
// DeleteEdgeGroup deletes an Edge group.
func (service *Service) DeleteEdgeGroup(ID portainer.EdgeGroupID) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).DeleteEdgeGroup(ID)
})
}
// CreateEdgeGroup assign an ID to a new Edge group and saves it. // CreateEdgeGroup assign an ID to a new Edge group and saves it.
func (service *Service) Create(group *portainer.EdgeGroup) error { func (service *Service) Create(group *portainer.EdgeGroup) error {
return service.Connection.UpdateTx(func(tx portainer.Transaction) error { return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).Create(group) return service.Tx(tx).Create(group)
}) })
} }

View File

@@ -2,13 +2,60 @@ package edgegroup
import ( import (
"errors" "errors"
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
type ServiceTx struct { type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.EdgeGroup, portainer.EdgeGroupID] service *Service
tx portainer.Transaction
}
func (service ServiceTx) BucketName() string {
return BucketName
}
// EdgeGroups return a slice containing all the Edge groups.
func (service ServiceTx) EdgeGroups() ([]portainer.EdgeGroup, error) {
var groups = make([]portainer.EdgeGroup, 0)
err := service.tx.GetAllWithJsoniter(
BucketName,
&portainer.EdgeGroup{},
func(obj interface{}) (interface{}, error) {
group, ok := obj.(*portainer.EdgeGroup)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeGroup object")
return nil, fmt.Errorf("Failed to convert to EdgeGroup object: %s", obj)
}
groups = append(groups, *group)
return &portainer.EdgeGroup{}, nil
})
return groups, err
}
// EdgeGroup returns an Edge group by ID.
func (service ServiceTx) EdgeGroup(ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error) {
var group portainer.EdgeGroup
identifier := service.service.connection.ConvertToKey(int(ID))
err := service.tx.GetObject(BucketName, identifier, &group)
if err != nil {
return nil, err
}
return &group, nil
}
// UpdateEdgeGroup updates an edge group.
func (service ServiceTx) UpdateEdgeGroup(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.UpdateObject(BucketName, identifier, group)
} }
// UpdateEdgeGroupFunc is a no-op inside a transaction. // UpdateEdgeGroupFunc is a no-op inside a transaction.
@@ -16,8 +63,14 @@ func (service ServiceTx) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFun
return errors.New("cannot be called inside a transaction") return errors.New("cannot be called inside a transaction")
} }
// DeleteEdgeGroup deletes an Edge group.
func (service ServiceTx) DeleteEdgeGroup(ID portainer.EdgeGroupID) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.DeleteObject(BucketName, identifier)
}
func (service ServiceTx) Create(group *portainer.EdgeGroup) error { func (service ServiceTx) Create(group *portainer.EdgeGroup) error {
return service.Tx.CreateObject( return service.tx.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
group.ID = portainer.EdgeGroupID(id) group.ID = portainer.EdgeGroupID(id)

View File

@@ -1,8 +1,11 @@
package edgejob package edgejob
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. // BucketName represents the name of the bucket where this service stores data.
@@ -10,7 +13,11 @@ const BucketName = "edgejobs"
// Service represents a service for managing edge jobs data. // Service represents a service for managing edge jobs data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.EdgeJob, portainer.EdgeJobID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,50 +28,86 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.EdgeJob, portainer.EdgeJobID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{ return ServiceTx{
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.EdgeJob, portainer.EdgeJobID]{ service: service,
Bucket: BucketName, tx: tx,
Connection: service.Connection,
Tx: tx,
},
} }
} }
// Create creates a new EdgeJob // EdgeJobs returns a list of Edge jobs
func (service *Service) Create(edgeJob *portainer.EdgeJob) error { func (service *Service) EdgeJobs() ([]portainer.EdgeJob, error) {
return service.CreateWithID(portainer.EdgeJobID(service.GetNextIdentifier()), edgeJob) var edgeJobs = make([]portainer.EdgeJob, 0)
err := service.connection.GetAll(
BucketName,
&portainer.EdgeJob{},
func(obj interface{}) (interface{}, error) {
job, ok := obj.(*portainer.EdgeJob)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeJob object")
return nil, fmt.Errorf("Failed to convert to EdgeJob object: %s", obj)
}
edgeJobs = append(edgeJobs, *job)
return &portainer.EdgeJob{}, nil
})
return edgeJobs, err
} }
// CreateWithID creates a new EdgeJob // EdgeJob returns an Edge job by ID
func (service *Service) CreateWithID(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error { func (service *Service) EdgeJob(ID portainer.EdgeJobID) (*portainer.EdgeJob, error) {
var edgeJob portainer.EdgeJob
identifier := service.connection.ConvertToKey(int(ID))
err := service.connection.GetObject(BucketName, identifier, &edgeJob)
if err != nil {
return nil, err
}
return &edgeJob, nil
}
// Create creates a new EdgeJob
func (service *Service) Create(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error {
edgeJob.ID = ID edgeJob.ID = ID
return service.Connection.CreateObjectWithId( return service.connection.CreateObjectWithId(
BucketName, BucketName,
int(edgeJob.ID), int(edgeJob.ID),
edgeJob, edgeJob,
) )
} }
// Deprecated: use UpdateEdgeJobFunc instead
func (service *Service) UpdateEdgeJob(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, edgeJob)
}
// UpdateEdgeJobFunc updates an edge job inside a transaction avoiding data races. // UpdateEdgeJobFunc updates an edge job inside a transaction avoiding data races.
func (service *Service) UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc func(edgeJob *portainer.EdgeJob)) error { func (service *Service) UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc func(edgeJob *portainer.EdgeJob)) error {
id := service.Connection.ConvertToKey(int(ID)) id := service.connection.ConvertToKey(int(ID))
edgeJob := &portainer.EdgeJob{} edgeJob := &portainer.EdgeJob{}
return service.Connection.UpdateObjectFunc(BucketName, id, edgeJob, func() { return service.connection.UpdateObjectFunc(BucketName, id, edgeJob, func() {
updateFunc(edgeJob) updateFunc(edgeJob)
}) })
} }
// DeleteEdgeJob deletes an Edge job
func (service *Service) DeleteEdgeJob(ID portainer.EdgeJobID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}
// GetNextIdentifier returns the next identifier for an environment(endpoint). // GetNextIdentifier returns the next identifier for an environment(endpoint).
func (service *Service) GetNextIdentifier() int { func (service *Service) GetNextIdentifier() int {
return service.Connection.GetNextIdentifier(BucketName) return service.connection.GetNextIdentifier(BucketName)
} }

View File

@@ -2,25 +2,68 @@ package edgejob
import ( import (
"errors" "errors"
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
type ServiceTx struct { type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.EdgeJob, portainer.EdgeJobID] service *Service
tx portainer.Transaction
}
func (service ServiceTx) BucketName() string {
return BucketName
}
// EdgeJobs returns a list of Edge jobs
func (service ServiceTx) EdgeJobs() ([]portainer.EdgeJob, error) {
var edgeJobs = make([]portainer.EdgeJob, 0)
err := service.tx.GetAll(
BucketName,
&portainer.EdgeJob{},
func(obj interface{}) (interface{}, error) {
job, ok := obj.(*portainer.EdgeJob)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeJob object")
return nil, fmt.Errorf("failed to convert to EdgeJob object: %s", obj)
}
edgeJobs = append(edgeJobs, *job)
return &portainer.EdgeJob{}, nil
})
return edgeJobs, err
}
// EdgeJob returns an Edge job by ID
func (service ServiceTx) EdgeJob(ID portainer.EdgeJobID) (*portainer.EdgeJob, error) {
var edgeJob portainer.EdgeJob
identifier := service.service.connection.ConvertToKey(int(ID))
err := service.tx.GetObject(BucketName, identifier, &edgeJob)
if err != nil {
return nil, err
}
return &edgeJob, nil
} }
// Create creates a new EdgeJob // Create creates a new EdgeJob
func (service ServiceTx) Create(edgeJob *portainer.EdgeJob) error { func (service ServiceTx) Create(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error {
return service.CreateWithID(portainer.EdgeJobID(service.GetNextIdentifier()), edgeJob)
}
// CreateWithID creates a new EdgeJob
func (service ServiceTx) CreateWithID(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error {
edgeJob.ID = ID edgeJob.ID = ID
return service.Tx.CreateObjectWithId(BucketName, int(edgeJob.ID), edgeJob) return service.tx.CreateObjectWithId(BucketName, int(edgeJob.ID), edgeJob)
}
// UpdateEdgeJob updates an edge job
func (service ServiceTx) UpdateEdgeJob(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.UpdateObject(BucketName, identifier, edgeJob)
} }
// UpdateEdgeJobFunc is a no-op inside a transaction. // UpdateEdgeJobFunc is a no-op inside a transaction.
@@ -28,7 +71,14 @@ func (service ServiceTx) UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc fu
return errors.New("cannot be called inside a transaction") return errors.New("cannot be called inside a transaction")
} }
// DeleteEdgeJob deletes an Edge job
func (service ServiceTx) DeleteEdgeJob(ID portainer.EdgeJobID) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.DeleteObject(BucketName, identifier)
}
// GetNextIdentifier returns the next identifier for an environment(endpoint). // GetNextIdentifier returns the next identifier for an environment(endpoint).
func (service ServiceTx) GetNextIdentifier() int { func (service ServiceTx) GetNextIdentifier() int {
return service.Tx.GetNextIdentifier(BucketName) return service.tx.GetNextIdentifier(BucketName)
} }

View File

@@ -1,10 +1,12 @@
package edgestack package edgestack
import ( import (
"fmt"
"sync" "sync"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. // BucketName represents the name of the bucket where this service stores data.
@@ -62,11 +64,22 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
func (service *Service) EdgeStacks() ([]portainer.EdgeStack, error) { func (service *Service) EdgeStacks() ([]portainer.EdgeStack, error) {
var stacks = make([]portainer.EdgeStack, 0) var stacks = make([]portainer.EdgeStack, 0)
return stacks, service.connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.EdgeStack{}, &portainer.EdgeStack{},
dataservices.AppendFn(&stacks), func(obj interface{}) (interface{}, error) {
) stack, ok := obj.(*portainer.EdgeStack)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeStack object")
return nil, fmt.Errorf("Failed to convert to EdgeStack object: %s", obj)
}
stacks = append(stacks, *stack)
return &portainer.EdgeStack{}, nil
})
return stacks, err
} }
// EdgeStack returns an Edge stack by ID. // EdgeStack returns an Edge stack by ID.
@@ -146,11 +159,6 @@ func (service *Service) UpdateEdgeStackFunc(ID portainer.EdgeStackID, updateFunc
}) })
} }
// UpdateEdgeStackFuncTx is a helper function used to call UpdateEdgeStackFunc inside a transaction.
func (service *Service) UpdateEdgeStackFuncTx(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error {
return service.Tx(tx).UpdateEdgeStackFunc(ID, updateFunc)
}
// DeleteEdgeStack deletes an Edge stack. // DeleteEdgeStack deletes an Edge stack.
func (service *Service) DeleteEdgeStack(ID portainer.EdgeStackID) error { func (service *Service) DeleteEdgeStack(ID portainer.EdgeStackID) error {
service.mu.Lock() service.mu.Lock()

View File

@@ -1,6 +1,7 @@
package edgestack package edgestack
import ( import (
"errors"
"fmt" "fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
@@ -28,7 +29,7 @@ func (service ServiceTx) EdgeStacks() ([]portainer.EdgeStack, error) {
stack, ok := obj.(*portainer.EdgeStack) stack, ok := obj.(*portainer.EdgeStack)
if !ok { if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeStack object") log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EdgeStack object")
return nil, fmt.Errorf("failed to convert to EdgeStack object: %s", obj) return nil, fmt.Errorf("Failed to convert to EdgeStack object: %s", obj)
} }
stacks = append(stacks, *stack) stacks = append(stacks, *stack)
@@ -100,16 +101,9 @@ func (service ServiceTx) UpdateEdgeStack(ID portainer.EdgeStackID, edgeStack *po
return nil return nil
} }
// Deprecated: use UpdateEdgeStack inside a transaction instead. // UpdateEdgeStackFunc is a no-op inside a transaction.
func (service ServiceTx) UpdateEdgeStackFunc(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error { func (service ServiceTx) UpdateEdgeStackFunc(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error {
edgeStack, err := service.EdgeStack(ID) return errors.New("cannot be called inside a transaction")
if err != nil {
return err
}
updateFunc(edgeStack)
return service.UpdateEdgeStack(ID, edgeStack)
} }
// DeleteEdgeStack deletes an Edge stack. // DeleteEdgeStack deletes an Edge stack.

View File

@@ -5,7 +5,6 @@ import (
"time" "time"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
) )
// BucketName represents the name of the bucket where this service stores data. // BucketName represents the name of the bucket where this service stores data.
@@ -35,7 +34,7 @@ func NewService(connection portainer.Connection) (*Service, error) {
idxEdgeID: make(map[string]portainer.EndpointID), idxEdgeID: make(map[string]portainer.EndpointID),
} }
es, err := s.endpoints() es, err := s.Endpoints()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -90,7 +89,8 @@ func (service *Service) DeleteEndpoint(ID portainer.EndpointID) error {
}) })
} }
func (service *Service) endpoints() ([]portainer.Endpoint, error) { // Endpoints return an array containing all the environments(endpoints).
func (service *Service) Endpoints() ([]portainer.Endpoint, error) {
var endpoints []portainer.Endpoint var endpoints []portainer.Endpoint
var err error var err error
@@ -99,14 +99,8 @@ func (service *Service) endpoints() ([]portainer.Endpoint, error) {
return err return err
}) })
return endpoints, err
}
// Endpoints return an array containing all the environments(endpoints).
func (service *Service) Endpoints() ([]portainer.Endpoint, error) {
endpoints, err := service.endpoints()
if err != nil { if err != nil {
return nil, err return endpoints, err
} }
for i, e := range endpoints { for i, e := range endpoints {
@@ -145,23 +139,6 @@ func (service *Service) Create(endpoint *portainer.Endpoint) error {
}) })
} }
func (service *Service) EndpointsByTeamID(teamID portainer.TeamID) ([]portainer.Endpoint, error) {
var endpoints = make([]portainer.Endpoint, 0)
return endpoints, service.connection.GetAll(
BucketName,
&portainer.Endpoint{},
dataservices.FilterFn(&endpoints, func(e portainer.Endpoint) bool {
for t := range e.TeamAccessPolicies {
if t == teamID {
return true
}
}
return false
}),
)
}
// GetNextIdentifier returns the next identifier for an environment(endpoint). // GetNextIdentifier returns the next identifier for an environment(endpoint).
func (service *Service) GetNextIdentifier() int { func (service *Service) GetNextIdentifier() int {
var identifier int var identifier int

View File

@@ -1,8 +1,9 @@
package endpoint package endpoint
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
@@ -27,8 +28,6 @@ func (service ServiceTx) Endpoint(ID portainer.EndpointID) (*portainer.Endpoint,
return nil, err return nil, err
} }
endpoint.LastCheckInDate, _ = service.service.Heartbeat(ID)
return &endpoint, nil return &endpoint, nil
} }
@@ -66,7 +65,6 @@ func (service ServiceTx) DeleteEndpoint(ID portainer.EndpointID) error {
for edgeID, endpointID := range service.service.idxEdgeID { for edgeID, endpointID := range service.service.idxEdgeID {
if endpointID == ID { if endpointID == ID {
delete(service.service.idxEdgeID, edgeID) delete(service.service.idxEdgeID, edgeID)
break break
} }
} }
@@ -82,11 +80,22 @@ func (service ServiceTx) DeleteEndpoint(ID portainer.EndpointID) error {
func (service ServiceTx) Endpoints() ([]portainer.Endpoint, error) { func (service ServiceTx) Endpoints() ([]portainer.Endpoint, error) {
var endpoints = make([]portainer.Endpoint, 0) var endpoints = make([]portainer.Endpoint, 0)
return endpoints, service.tx.GetAllWithJsoniter( err := service.tx.GetAllWithJsoniter(
BucketName, BucketName,
&portainer.Endpoint{}, &portainer.Endpoint{},
dataservices.AppendFn(&endpoints), func(obj interface{}) (interface{}, error) {
) endpoint, ok := obj.(*portainer.Endpoint)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Endpoint object")
return nil, fmt.Errorf("failed to convert to Endpoint object: %s", obj)
}
endpoints = append(endpoints, *endpoint)
return &portainer.Endpoint{}, nil
})
return endpoints, err
} }
func (service ServiceTx) EndpointIDByEdgeID(edgeID string) (portainer.EndpointID, bool) { func (service ServiceTx) EndpointIDByEdgeID(edgeID string) (portainer.EndpointID, bool) {
@@ -122,23 +131,6 @@ func (service ServiceTx) Create(endpoint *portainer.Endpoint) error {
return nil return nil
} }
func (service ServiceTx) EndpointsByTeamID(teamID portainer.TeamID) ([]portainer.Endpoint, error) {
var endpoints = make([]portainer.Endpoint, 0)
return endpoints, service.tx.GetAll(
BucketName,
&portainer.Endpoint{},
dataservices.FilterFn(&endpoints, func(e portainer.Endpoint) bool {
for t := range e.TeamAccessPolicies {
if t == teamID {
return true
}
}
return false
}),
)
}
// GetNextIdentifier returns the next identifier for an environment(endpoint). // GetNextIdentifier returns the next identifier for an environment(endpoint).
func (service ServiceTx) GetNextIdentifier() int { func (service ServiceTx) GetNextIdentifier() int {
return service.tx.GetNextIdentifier(BucketName) return service.tx.GetNextIdentifier(BucketName)

View File

@@ -1,15 +1,25 @@
package endpointgroup package endpointgroup
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
const BucketName = "endpoint_groups" const (
// BucketName represents the name of the bucket where this service stores data.
BucketName = "endpoint_groups"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.EndpointGroup, portainer.EndpointGroupID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -20,26 +30,67 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.EndpointGroup, portainer.EndpointGroupID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{ return ServiceTx{
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.EndpointGroup, portainer.EndpointGroupID]{ service: service,
Bucket: BucketName, tx: tx,
Connection: service.Connection,
Tx: tx,
},
} }
} }
// EndpointGroup returns an environment(endpoint) group by ID.
func (service *Service) EndpointGroup(ID portainer.EndpointGroupID) (*portainer.EndpointGroup, error) {
var endpointGroup portainer.EndpointGroup
identifier := service.connection.ConvertToKey(int(ID))
err := service.connection.GetObject(BucketName, identifier, &endpointGroup)
if err != nil {
return nil, err
}
return &endpointGroup, nil
}
// UpdateEndpointGroup updates an environment(endpoint) group.
func (service *Service) UpdateEndpointGroup(ID portainer.EndpointGroupID, endpointGroup *portainer.EndpointGroup) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, endpointGroup)
}
// DeleteEndpointGroup deletes an environment(endpoint) group.
func (service *Service) DeleteEndpointGroup(ID portainer.EndpointGroupID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}
// EndpointGroups return an array containing all the environment(endpoint) groups.
func (service *Service) EndpointGroups() ([]portainer.EndpointGroup, error) {
var endpointGroups = make([]portainer.EndpointGroup, 0)
err := service.connection.GetAll(
BucketName,
&portainer.EndpointGroup{},
func(obj interface{}) (interface{}, error) {
endpointGroup, ok := obj.(*portainer.EndpointGroup)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EndpointGroup object")
return nil, fmt.Errorf("Failed to convert to EndpointGroup object: %s", obj)
}
endpointGroups = append(endpointGroups, *endpointGroup)
return &portainer.EndpointGroup{}, nil
})
return endpointGroups, err
}
// CreateEndpointGroup assign an ID to a new environment(endpoint) group and saves it. // CreateEndpointGroup assign an ID to a new environment(endpoint) group and saves it.
func (service *Service) Create(endpointGroup *portainer.EndpointGroup) error { func (service *Service) Create(endpointGroup *portainer.EndpointGroup) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
endpointGroup.ID = portainer.EndpointGroupID(id) endpointGroup.ID = portainer.EndpointGroupID(id)

View File

@@ -1,17 +1,72 @@
package endpointgroup package endpointgroup
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
type ServiceTx struct { type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.EndpointGroup, portainer.EndpointGroupID] service *Service
tx portainer.Transaction
}
func (service ServiceTx) BucketName() string {
return BucketName
}
// EndpointGroup returns an environment(endpoint) group by ID.
func (service ServiceTx) EndpointGroup(ID portainer.EndpointGroupID) (*portainer.EndpointGroup, error) {
var endpointGroup portainer.EndpointGroup
identifier := service.service.connection.ConvertToKey(int(ID))
err := service.tx.GetObject(BucketName, identifier, &endpointGroup)
if err != nil {
return nil, err
}
return &endpointGroup, nil
}
// UpdateEndpointGroup updates an environment(endpoint) group.
func (service ServiceTx) UpdateEndpointGroup(ID portainer.EndpointGroupID, endpointGroup *portainer.EndpointGroup) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.UpdateObject(BucketName, identifier, endpointGroup)
}
// DeleteEndpointGroup deletes an environment(endpoint) group.
func (service ServiceTx) DeleteEndpointGroup(ID portainer.EndpointGroupID) error {
identifier := service.service.connection.ConvertToKey(int(ID))
return service.tx.DeleteObject(BucketName, identifier)
}
// EndpointGroups return an array containing all the environment(endpoint) groups.
func (service ServiceTx) EndpointGroups() ([]portainer.EndpointGroup, error) {
var endpointGroups = make([]portainer.EndpointGroup, 0)
err := service.tx.GetAll(
BucketName,
&portainer.EndpointGroup{},
func(obj interface{}) (interface{}, error) {
endpointGroup, ok := obj.(*portainer.EndpointGroup)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EndpointGroup object")
return nil, fmt.Errorf("failed to convert to EndpointGroup object: %s", obj)
}
endpointGroups = append(endpointGroups, *endpointGroup)
return &portainer.EndpointGroup{}, nil
})
return endpointGroups, err
} }
// CreateEndpointGroup assign an ID to a new environment(endpoint) group and saves it. // CreateEndpointGroup assign an ID to a new environment(endpoint) group and saves it.
func (service ServiceTx) Create(endpointGroup *portainer.EndpointGroup) error { func (service ServiceTx) Create(endpointGroup *portainer.EndpointGroup) error {
return service.Tx.CreateObject( return service.tx.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
endpointGroup.ID = portainer.EndpointGroupID(id) endpointGroup.ID = portainer.EndpointGroupID(id)

View File

@@ -1,8 +1,9 @@
package endpointrelation package endpointrelation
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
@@ -15,19 +16,14 @@ const BucketName = "endpoint_relations"
type Service struct { type Service struct {
connection portainer.Connection connection portainer.Connection
updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
} }
func (service *Service) BucketName() string { func (service *Service) BucketName() string {
return BucketName return BucketName
} }
func (service *Service) RegisterUpdateStackFunction( func (service *Service) RegisterUpdateStackFunction(updateFunc func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error) {
updateFunc func(portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
) {
service.updateStackFn = updateFunc service.updateStackFn = updateFunc
service.updateStackFnTx = updateFuncTx
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -53,11 +49,22 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
func (service *Service) EndpointRelations() ([]portainer.EndpointRelation, error) { func (service *Service) EndpointRelations() ([]portainer.EndpointRelation, error) {
var all = make([]portainer.EndpointRelation, 0) var all = make([]portainer.EndpointRelation, 0)
return all, service.connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.EndpointRelation{}, &portainer.EndpointRelation{},
dataservices.AppendFn(&all), func(obj interface{}) (interface{}, error) {
) r, ok := obj.(*portainer.EndpointRelation)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EndpointRelation object")
return nil, fmt.Errorf("Failed to convert to EndpointRelation object: %s", obj)
}
all = append(all, *r)
return &portainer.EndpointRelation{}, nil
})
return all, err
} }
// EndpointRelation returns a Environment(Endpoint) relation object by EndpointID // EndpointRelation returns a Environment(Endpoint) relation object by EndpointID

View File

@@ -1,8 +1,9 @@
package endpointrelation package endpointrelation
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache" "github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
@@ -21,11 +22,22 @@ func (service ServiceTx) BucketName() string {
func (service ServiceTx) EndpointRelations() ([]portainer.EndpointRelation, error) { func (service ServiceTx) EndpointRelations() ([]portainer.EndpointRelation, error) {
var all = make([]portainer.EndpointRelation, 0) var all = make([]portainer.EndpointRelation, 0)
return all, service.tx.GetAll( err := service.tx.GetAll(
BucketName, BucketName,
&portainer.EndpointRelation{}, &portainer.EndpointRelation{},
dataservices.AppendFn(&all), func(obj interface{}) (interface{}, error) {
) r, ok := obj.(*portainer.EndpointRelation)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to EndpointRelation object")
return nil, fmt.Errorf("failed to convert to EndpointRelation object: %s", obj)
}
all = append(all, *r)
return &portainer.EndpointRelation{}, nil
})
return all, err
} }
// EndpointRelation returns an Environment(Endpoint) relation object by EndpointID // EndpointRelation returns an Environment(Endpoint) relation object by EndpointID
@@ -139,7 +151,7 @@ func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationSta
} }
} }
service.service.updateStackFnTx(service.tx, refStackId, func(edgeStack *portainer.EdgeStack) { service.service.updateStackFn(refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments edgeStack.NumDeployments = numDeployments
}) })
} }

View File

@@ -1,10 +1,9 @@
package errors package errors
import ( import "errors"
"errors"
)
var ( var (
// TODO: i'm pretty sure this needs wrapping at several levels
ErrObjectNotFound = errors.New("object not found inside the database") ErrObjectNotFound = errors.New("object not found inside the database")
ErrWrongDBEdition = errors.New("the Portainer database is set for Portainer Business Edition, please follow the instructions in our documentation to downgrade it: https://documentation.portainer.io/v2.0-be/downgrade/be-to-ce/") ErrWrongDBEdition = errors.New("the Portainer database is set for Portainer Business Edition, please follow the instructions in our documentation to downgrade it: https://documentation.portainer.io/v2.0-be/downgrade/be-to-ce/")
ErrDBImportFailed = errors.New("importing backup failed") ErrDBImportFailed = errors.New("importing backup failed")

View File

@@ -1,12 +1,17 @@
package extension package extension
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "extension" // BucketName represents the name of the bucket where this service stores data.
BucketName = "extension"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
@@ -46,12 +51,22 @@ func (service *Service) Extension(ID portainer.ExtensionID) (*portainer.Extensio
func (service *Service) Extensions() ([]portainer.Extension, error) { func (service *Service) Extensions() ([]portainer.Extension, error) {
var extensions = make([]portainer.Extension, 0) var extensions = make([]portainer.Extension, 0)
return extensions, service.connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Extension{}, &portainer.Extension{},
dataservices.AppendFn(&extensions), func(obj interface{}) (interface{}, error) {
) extension, ok := obj.(*portainer.Extension)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Extension object")
return nil, fmt.Errorf("Failed to convert to Extension object: %s", obj)
}
extensions = append(extensions, *extension)
return &portainer.Extension{}, nil
})
return extensions, err
} }
// Persist persists a extension inside the database. // Persist persists a extension inside the database.

View File

@@ -1,16 +1,25 @@
package fdoprofile package fdoprofile
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "fdo_profiles" // BucketName represents the name of the bucket where this service stores data.
BucketName = "fdo_profiles"
)
// Service represents a service for managingFDO Profiles data. // Service represents a service for managingFDO Profiles data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.FDOProfile, portainer.FDOProfileID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,23 +30,66 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.FDOProfile, portainer.FDOProfileID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
// FDOProfiles return an array containing all the FDO Profiles.
func (service *Service) FDOProfiles() ([]portainer.FDOProfile, error) {
var fdoProfiles = make([]portainer.FDOProfile, 0)
err := service.connection.GetAll(
BucketName,
&portainer.FDOProfile{},
func(obj interface{}) (interface{}, error) {
fdoProfile, ok := obj.(*portainer.FDOProfile)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to FDOProfile object")
return nil, fmt.Errorf("Failed to convert to FDOProfile object: %s", obj)
}
fdoProfiles = append(fdoProfiles, *fdoProfile)
return &portainer.FDOProfile{}, nil
})
return fdoProfiles, err
}
// FDOProfile returns an FDO Profile by ID.
func (service *Service) FDOProfile(ID portainer.FDOProfileID) (*portainer.FDOProfile, error) {
var FDOProfile portainer.FDOProfile
identifier := service.connection.ConvertToKey(int(ID))
err := service.connection.GetObject(BucketName, identifier, &FDOProfile)
if err != nil {
return nil, err
}
return &FDOProfile, nil
}
// Create assign an ID to a new FDO Profile and saves it. // Create assign an ID to a new FDO Profile and saves it.
func (service *Service) Create(FDOProfile *portainer.FDOProfile) error { func (service *Service) Create(FDOProfile *portainer.FDOProfile) error {
return service.Connection.CreateObjectWithId( return service.connection.CreateObjectWithId(
BucketName, BucketName,
int(FDOProfile.ID), int(FDOProfile.ID),
FDOProfile, FDOProfile,
) )
} }
// Update updates an FDO Profile.
func (service *Service) Update(ID portainer.FDOProfileID, FDOProfile *portainer.FDOProfile) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, FDOProfile)
}
// Delete deletes an FDO Profile.
func (service *Service) Delete(ID portainer.FDOProfileID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}
// GetNextIdentifier returns the next identifier for a FDO Profile. // GetNextIdentifier returns the next identifier for a FDO Profile.
func (service *Service) GetNextIdentifier() int { func (service *Service) GetNextIdentifier() int {
return service.Connection.GetNextIdentifier(BucketName) return service.connection.GetNextIdentifier(BucketName)
} }

View File

@@ -1,16 +1,25 @@
package helmuserrepository package helmuserrepository
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "helm_user_repository" // BucketName represents the name of the bucket where this service stores data.
BucketName = "helm_user_repository"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.HelmUserRepository, portainer.HelmUserRepositoryID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,29 +30,59 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.HelmUserRepository, portainer.HelmUserRepositoryID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
// HelmUserRepository returns an array of all HelmUserRepository
func (service *Service) HelmUserRepositories() ([]portainer.HelmUserRepository, error) {
var repos = make([]portainer.HelmUserRepository, 0)
err := service.connection.GetAll(
BucketName,
&portainer.HelmUserRepository{},
func(obj interface{}) (interface{}, error) {
r, ok := obj.(*portainer.HelmUserRepository)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to HelmUserRepository object")
return nil, fmt.Errorf("Failed to convert to HelmUserRepository object: %s", obj)
}
repos = append(repos, *r)
return &portainer.HelmUserRepository{}, nil
})
return repos, err
}
// HelmUserRepositoryByUserID return an array containing all the HelmUserRepository objects where the specified userID is present. // HelmUserRepositoryByUserID return an array containing all the HelmUserRepository objects where the specified userID is present.
func (service *Service) HelmUserRepositoryByUserID(userID portainer.UserID) ([]portainer.HelmUserRepository, error) { func (service *Service) HelmUserRepositoryByUserID(userID portainer.UserID) ([]portainer.HelmUserRepository, error) {
var result = make([]portainer.HelmUserRepository, 0) var result = make([]portainer.HelmUserRepository, 0)
return result, service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.HelmUserRepository{}, &portainer.HelmUserRepository{},
dataservices.FilterFn(&result, func(e portainer.HelmUserRepository) bool { func(obj interface{}) (interface{}, error) {
return e.UserID == userID record, ok := obj.(*portainer.HelmUserRepository)
}), if !ok {
) log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to HelmUserRepository object")
return nil, fmt.Errorf("Failed to convert to HelmUserRepository object: %s", obj)
}
if record.UserID == userID {
result = append(result, *record)
}
return &portainer.HelmUserRepository{}, nil
})
return result, err
} }
// CreateHelmUserRepository creates a new HelmUserRepository object. // CreateHelmUserRepository creates a new HelmUserRepository object.
func (service *Service) Create(record *portainer.HelmUserRepository) error { func (service *Service) Create(record *portainer.HelmUserRepository) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
record.ID = portainer.HelmUserRepositoryID(id) record.ID = portainer.HelmUserRepositoryID(id)
@@ -51,3 +90,15 @@ func (service *Service) Create(record *portainer.HelmUserRepository) error {
}, },
) )
} }
// UpdateHelmUserRepostory updates an registry.
func (service *Service) UpdateHelmUserRepository(ID portainer.HelmUserRepositoryID, registry *portainer.HelmUserRepository) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, registry)
}
// DeleteHelmUserRepository deletes an registry.
func (service *Service) DeleteHelmUserRepository(ID portainer.HelmUserRepositoryID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}

View File

@@ -1,68 +0,0 @@
package dataservices
import (
"errors"
"fmt"
perrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/rs/zerolog/log"
)
// ErrStop signals the stop of computation when filtering results
var ErrStop = errors.New("stop")
func IsErrObjectNotFound(e error) bool {
return errors.Is(e, perrors.ErrObjectNotFound)
}
// AppendFn appends elements to the given collection slice
func AppendFn[T any](collection *[]T) func(obj interface{}) (interface{}, error) {
return func(obj interface{}) (interface{}, error) {
element, ok := obj.(*T)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("type assertion failed")
return nil, fmt.Errorf("failed to convert to %T object: %#v", new(T), obj)
}
*collection = append(*collection, *element)
return new(T), nil
}
}
// FilterFn appends elements to the given collection when the predicate is true
func FilterFn[T any](collection *[]T, predicate func(T) bool) func(obj interface{}) (interface{}, error) {
return func(obj interface{}) (interface{}, error) {
element, ok := obj.(*T)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("type assertion failed")
return nil, fmt.Errorf("failed to convert to %T object: %#v", new(T), obj)
}
if predicate(*element) {
*collection = append(*collection, *element)
}
return new(T), nil
}
}
// FirstFn sets the element to the first one that satisfies the predicate and stops the computation, returns ErrStop on
// success
func FirstFn[T any](element *T, predicate func(T) bool) func(obj interface{}) (interface{}, error) {
return func(obj interface{}) (interface{}, error) {
e, ok := obj.(*T)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("type assertion failed")
return nil, fmt.Errorf("failed to convert to %T object: %#v", new(T), obj)
}
if predicate(*e) {
*element = *e
return new(T), ErrStop
}
return new(T), nil
}
}

View File

@@ -1,8 +1,15 @@
package dataservices package dataservices
// "github.com/portainer/portainer/api/dataservices"
import ( import (
portainer "github.com/portainer/portainer/api" "io"
"time"
"github.com/portainer/portainer/api/database/models" "github.com/portainer/portainer/api/database/models"
"github.com/portainer/portainer/api/dataservices/errors"
portainer "github.com/portainer/portainer/api"
) )
type ( type (
@@ -32,11 +39,10 @@ type (
User() UserService User() UserService
Version() VersionService Version() VersionService
Webhook() WebhookService Webhook() WebhookService
PendingActions() PendingActionsService
} }
// DataStore defines the interface to manage the data
DataStore interface { DataStore interface {
Connection() portainer.Connection
Open() (newStore bool, err error) Open() (newStore bool, err error)
Init() error Init() error
Close() error Close() error
@@ -45,7 +51,7 @@ type (
MigrateData() error MigrateData() error
Rollback(force bool) error Rollback(force bool) error
CheckCurrentEdition() error CheckCurrentEdition() error
Backup(path string) (string, error) BackupTo(w io.Writer) error
Export(filename string) (err error) Export(filename string) (err error)
DataStoreTx DataStoreTx
@@ -53,28 +59,36 @@ type (
// CustomTemplateService represents a service to manage custom templates // CustomTemplateService represents a service to manage custom templates
CustomTemplateService interface { CustomTemplateService interface {
BaseCRUD[portainer.CustomTemplate, portainer.CustomTemplateID]
GetNextIdentifier() int GetNextIdentifier() int
CustomTemplates() ([]portainer.CustomTemplate, error)
CustomTemplate(ID portainer.CustomTemplateID) (*portainer.CustomTemplate, error)
Create(customTemplate *portainer.CustomTemplate) error
UpdateCustomTemplate(ID portainer.CustomTemplateID, customTemplate *portainer.CustomTemplate) error
DeleteCustomTemplate(ID portainer.CustomTemplateID) error
BucketName() string
} }
// EdgeGroupService represents a service to manage Edge groups // EdgeGroupService represents a service to manage Edge groups
EdgeGroupService interface { EdgeGroupService interface {
BaseCRUD[portainer.EdgeGroup, portainer.EdgeGroupID] EdgeGroups() ([]portainer.EdgeGroup, error)
EdgeGroup(ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error)
Create(group *portainer.EdgeGroup) error
UpdateEdgeGroup(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error
UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFunc func(group *portainer.EdgeGroup)) error UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFunc func(group *portainer.EdgeGroup)) error
DeleteEdgeGroup(ID portainer.EdgeGroupID) error
BucketName() string
} }
// EdgeJobService represents a service to manage Edge jobs // EdgeJobService represents a service to manage Edge jobs
EdgeJobService interface { EdgeJobService interface {
BaseCRUD[portainer.EdgeJob, portainer.EdgeJobID] EdgeJobs() ([]portainer.EdgeJob, error)
CreateWithID(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error EdgeJob(ID portainer.EdgeJobID) (*portainer.EdgeJob, error)
Create(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error
UpdateEdgeJob(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error
UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc func(edgeJob *portainer.EdgeJob)) error UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc func(edgeJob *portainer.EdgeJob)) error
DeleteEdgeJob(ID portainer.EdgeJobID) error
GetNextIdentifier() int GetNextIdentifier() int
} BucketName() string
PendingActionsService interface {
BaseCRUD[portainer.PendingActions, portainer.PendingActionsID]
GetNextIdentifier() int
DeleteByEndpointID(ID portainer.EndpointID) error
} }
// EdgeStackService represents a service to manage Edge stacks // EdgeStackService represents a service to manage Edge stacks
@@ -94,7 +108,6 @@ type (
EndpointService interface { EndpointService interface {
Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error) Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error)
EndpointIDByEdgeID(edgeID string) (portainer.EndpointID, bool) EndpointIDByEdgeID(edgeID string) (portainer.EndpointID, bool)
EndpointsByTeamID(teamID portainer.TeamID) ([]portainer.Endpoint, error)
Heartbeat(endpointID portainer.EndpointID) (int64, bool) Heartbeat(endpointID portainer.EndpointID) (int64, bool)
UpdateHeartbeat(endpointID portainer.EndpointID) UpdateHeartbeat(endpointID portainer.EndpointID)
Endpoints() ([]portainer.Endpoint, error) Endpoints() ([]portainer.Endpoint, error)
@@ -107,7 +120,12 @@ type (
// EndpointGroupService represents a service for managing environment(endpoint) group data // EndpointGroupService represents a service for managing environment(endpoint) group data
EndpointGroupService interface { EndpointGroupService interface {
BaseCRUD[portainer.EndpointGroup, portainer.EndpointGroupID] EndpointGroup(ID portainer.EndpointGroupID) (*portainer.EndpointGroup, error)
EndpointGroups() ([]portainer.EndpointGroup, error)
Create(group *portainer.EndpointGroup) error
UpdateEndpointGroup(ID portainer.EndpointGroupID, group *portainer.EndpointGroup) error
DeleteEndpointGroup(ID portainer.EndpointGroupID) error
BucketName() string
} }
// EndpointRelationService represents a service for managing environment(endpoint) relations data // EndpointRelationService represents a service for managing environment(endpoint) relations data
@@ -122,37 +140,72 @@ type (
// FDOProfileService represents a service to manage FDO Profiles // FDOProfileService represents a service to manage FDO Profiles
FDOProfileService interface { FDOProfileService interface {
BaseCRUD[portainer.FDOProfile, portainer.FDOProfileID] FDOProfiles() ([]portainer.FDOProfile, error)
FDOProfile(ID portainer.FDOProfileID) (*portainer.FDOProfile, error)
Create(FDOProfile *portainer.FDOProfile) error
Update(ID portainer.FDOProfileID, FDOProfile *portainer.FDOProfile) error
Delete(ID portainer.FDOProfileID) error
GetNextIdentifier() int GetNextIdentifier() int
BucketName() string
} }
// HelmUserRepositoryService represents a service to manage HelmUserRepositories // HelmUserRepositoryService represents a service to manage HelmUserRepositories
HelmUserRepositoryService interface { HelmUserRepositoryService interface {
BaseCRUD[portainer.HelmUserRepository, portainer.HelmUserRepositoryID] HelmUserRepositories() ([]portainer.HelmUserRepository, error)
HelmUserRepositoryByUserID(userID portainer.UserID) ([]portainer.HelmUserRepository, error) HelmUserRepositoryByUserID(userID portainer.UserID) ([]portainer.HelmUserRepository, error)
Create(record *portainer.HelmUserRepository) error
UpdateHelmUserRepository(ID portainer.HelmUserRepositoryID, repository *portainer.HelmUserRepository) error
DeleteHelmUserRepository(ID portainer.HelmUserRepositoryID) error
BucketName() string
}
// JWTService represents a service for managing JWT tokens
JWTService interface {
GenerateToken(data *portainer.TokenData) (string, error)
GenerateTokenForOAuth(data *portainer.TokenData, expiryTime *time.Time) (string, error)
GenerateTokenForKubeconfig(data *portainer.TokenData) (string, error)
ParseAndVerifyToken(token string) (*portainer.TokenData, error)
SetUserSessionDuration(userSessionDuration time.Duration)
} }
// RegistryService represents a service for managing registry data // RegistryService represents a service for managing registry data
RegistryService interface { RegistryService interface {
BaseCRUD[portainer.Registry, portainer.RegistryID] Registry(ID portainer.RegistryID) (*portainer.Registry, error)
Registries() ([]portainer.Registry, error)
Create(registry *portainer.Registry) error
UpdateRegistry(ID portainer.RegistryID, registry *portainer.Registry) error
DeleteRegistry(ID portainer.RegistryID) error
BucketName() string
} }
// ResourceControlService represents a service for managing resource control data // ResourceControlService represents a service for managing resource control data
ResourceControlService interface { ResourceControlService interface {
BaseCRUD[portainer.ResourceControl, portainer.ResourceControlID] ResourceControl(ID portainer.ResourceControlID) (*portainer.ResourceControl, error)
ResourceControlByResourceIDAndType(resourceID string, resourceType portainer.ResourceControlType) (*portainer.ResourceControl, error) ResourceControlByResourceIDAndType(resourceID string, resourceType portainer.ResourceControlType) (*portainer.ResourceControl, error)
ResourceControls() ([]portainer.ResourceControl, error)
Create(rc *portainer.ResourceControl) error
UpdateResourceControl(ID portainer.ResourceControlID, resourceControl *portainer.ResourceControl) error
DeleteResourceControl(ID portainer.ResourceControlID) error
BucketName() string
} }
// RoleService represents a service for managing user roles // RoleService represents a service for managing user roles
RoleService interface { RoleService interface {
BaseCRUD[portainer.Role, portainer.RoleID] Role(ID portainer.RoleID) (*portainer.Role, error)
Roles() ([]portainer.Role, error)
Create(role *portainer.Role) error
UpdateRole(ID portainer.RoleID, role *portainer.Role) error
BucketName() string
} }
// APIKeyRepositoryService // APIKeyRepositoryService
APIKeyRepository interface { APIKeyRepository interface {
BaseCRUD[portainer.APIKey, portainer.APIKeyID] CreateAPIKey(key *portainer.APIKey) error
GetAPIKey(keyID portainer.APIKeyID) (*portainer.APIKey, error)
UpdateAPIKey(key *portainer.APIKey) error
DeleteAPIKey(ID portainer.APIKeyID) error
GetAPIKeysByUserID(userID portainer.UserID) ([]portainer.APIKey, error) GetAPIKeysByUserID(userID portainer.UserID) ([]portainer.APIKey, error)
GetAPIKeyByDigest(digest string) (*portainer.APIKey, error) GetAPIKeyByDigest(digest []byte) (*portainer.APIKey, error)
} }
// SettingsService represents a service for managing application settings // SettingsService represents a service for managing application settings
@@ -163,7 +216,12 @@ type (
} }
SnapshotService interface { SnapshotService interface {
BaseCRUD[portainer.Snapshot, portainer.EndpointID] Snapshot(endpointID portainer.EndpointID) (*portainer.Snapshot, error)
Snapshots() ([]portainer.Snapshot, error)
UpdateSnapshot(snapshot *portainer.Snapshot) error
DeleteSnapshot(endpointID portainer.EndpointID) error
Create(snapshot *portainer.Snapshot) error
BucketName() string
} }
// SSLSettingsService represents a service for managing application settings // SSLSettingsService represents a service for managing application settings
@@ -175,33 +233,53 @@ type (
// StackService represents a service for managing stack data // StackService represents a service for managing stack data
StackService interface { StackService interface {
BaseCRUD[portainer.Stack, portainer.StackID] Stack(ID portainer.StackID) (*portainer.Stack, error)
StackByName(name string) (*portainer.Stack, error) StackByName(name string) (*portainer.Stack, error)
StacksByName(name string) ([]portainer.Stack, error) StacksByName(name string) ([]portainer.Stack, error)
Stacks() ([]portainer.Stack, error)
Create(stack *portainer.Stack) error
UpdateStack(ID portainer.StackID, stack *portainer.Stack) error
DeleteStack(ID portainer.StackID) error
GetNextIdentifier() int GetNextIdentifier() int
StackByWebhookID(ID string) (*portainer.Stack, error) StackByWebhookID(ID string) (*portainer.Stack, error)
RefreshableStacks() ([]portainer.Stack, error) RefreshableStacks() ([]portainer.Stack, error)
BucketName() string
} }
// TagService represents a service for managing tag data // TagService represents a service for managing tag data
TagService interface { TagService interface {
BaseCRUD[portainer.Tag, portainer.TagID] Tags() ([]portainer.Tag, error)
Tag(ID portainer.TagID) (*portainer.Tag, error)
Create(tag *portainer.Tag) error
UpdateTag(ID portainer.TagID, tag *portainer.Tag) error
UpdateTagFunc(ID portainer.TagID, updateFunc func(tag *portainer.Tag)) error UpdateTagFunc(ID portainer.TagID, updateFunc func(tag *portainer.Tag)) error
DeleteTag(ID portainer.TagID) error
BucketName() string
} }
// TeamService represents a service for managing user data // TeamService represents a service for managing user data
TeamService interface { TeamService interface {
BaseCRUD[portainer.Team, portainer.TeamID] Team(ID portainer.TeamID) (*portainer.Team, error)
TeamByName(name string) (*portainer.Team, error) TeamByName(name string) (*portainer.Team, error)
Teams() ([]portainer.Team, error)
Create(team *portainer.Team) error
UpdateTeam(ID portainer.TeamID, team *portainer.Team) error
DeleteTeam(ID portainer.TeamID) error
BucketName() string
} }
// TeamMembershipService represents a service for managing team membership data // TeamMembershipService represents a service for managing team membership data
TeamMembershipService interface { TeamMembershipService interface {
BaseCRUD[portainer.TeamMembership, portainer.TeamMembershipID] TeamMembership(ID portainer.TeamMembershipID) (*portainer.TeamMembership, error)
TeamMemberships() ([]portainer.TeamMembership, error)
TeamMembershipsByUserID(userID portainer.UserID) ([]portainer.TeamMembership, error) TeamMembershipsByUserID(userID portainer.UserID) ([]portainer.TeamMembership, error)
TeamMembershipsByTeamID(teamID portainer.TeamID) ([]portainer.TeamMembership, error) TeamMembershipsByTeamID(teamID portainer.TeamID) ([]portainer.TeamMembership, error)
Create(membership *portainer.TeamMembership) error
UpdateTeamMembership(ID portainer.TeamMembershipID, membership *portainer.TeamMembership) error
DeleteTeamMembership(ID portainer.TeamMembershipID) error
DeleteTeamMembershipByUserID(userID portainer.UserID) error DeleteTeamMembershipByUserID(userID portainer.UserID) error
DeleteTeamMembershipByTeamID(teamID portainer.TeamID) error DeleteTeamMembershipByTeamID(teamID portainer.TeamID) error
BucketName() string
DeleteTeamMembershipByTeamIDAndUserID(teamID portainer.TeamID, userID portainer.UserID) error DeleteTeamMembershipByTeamIDAndUserID(teamID portainer.TeamID, userID portainer.UserID) error
} }
@@ -214,24 +292,38 @@ type (
// UserService represents a service for managing user data // UserService represents a service for managing user data
UserService interface { UserService interface {
BaseCRUD[portainer.User, portainer.UserID] User(ID portainer.UserID) (*portainer.User, error)
UserByUsername(username string) (*portainer.User, error) UserByUsername(username string) (*portainer.User, error)
Users() ([]portainer.User, error)
UsersByRole(role portainer.UserRole) ([]portainer.User, error) UsersByRole(role portainer.UserRole) ([]portainer.User, error)
Create(user *portainer.User) error
UpdateUser(ID portainer.UserID, user *portainer.User) error
DeleteUser(ID portainer.UserID) error
BucketName() string
} }
// VersionService represents a service for managing version data // VersionService represents a service for managing version data
VersionService interface { VersionService interface {
Edition() (portainer.SoftwareEdition, error)
InstanceID() (string, error) InstanceID() (string, error)
UpdateInstanceID(ID string) error UpdateInstanceID(ID string) error
Edition() (portainer.SoftwareEdition, error)
Version() (*models.Version, error) Version() (*models.Version, error)
UpdateVersion(*models.Version) error UpdateVersion(*models.Version) error
} }
// WebhookService represents a service for managing webhook data. // WebhookService represents a service for managing webhook data.
WebhookService interface { WebhookService interface {
BaseCRUD[portainer.Webhook, portainer.WebhookID] Webhooks() ([]portainer.Webhook, error)
Webhook(ID portainer.WebhookID) (*portainer.Webhook, error)
Create(portainer *portainer.Webhook) error
UpdateWebhook(ID portainer.WebhookID, webhook *portainer.Webhook) error
WebhookByResourceID(resourceID string) (*portainer.Webhook, error) WebhookByResourceID(resourceID string) (*portainer.Webhook, error)
WebhookByToken(token string) (*portainer.Webhook, error) WebhookByToken(token string) (*portainer.Webhook, error)
DeleteWebhook(ID portainer.WebhookID) error
BucketName() string
} }
) )
func IsErrObjectNotFound(e error) bool {
return e == errors.ErrObjectNotFound
}

View File

@@ -1,105 +0,0 @@
package pendingactions
import (
"fmt"
"time"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
)
const (
BucketName = "pending_actions"
)
type Service struct {
dataservices.BaseDataService[portainer.PendingActions, portainer.PendingActionsID]
}
type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.PendingActions, portainer.PendingActionsID]
}
func NewService(connection portainer.Connection) (*Service, error) {
err := connection.SetServiceName(BucketName)
if err != nil {
return nil, err
}
return &Service{
BaseDataService: dataservices.BaseDataService[portainer.PendingActions, portainer.PendingActionsID]{
Bucket: BucketName,
Connection: connection,
},
}, nil
}
func (s Service) Create(config *portainer.PendingActions) error {
return s.Connection.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Create(config)
})
}
func (s Service) Update(ID portainer.PendingActionsID, config *portainer.PendingActions) error {
return s.Connection.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Update(ID, config)
})
}
func (s Service) DeleteByEndpointID(ID portainer.EndpointID) error {
return s.Connection.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).DeleteByEndpointID(ID)
})
}
func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.PendingActions, portainer.PendingActionsID]{
Bucket: BucketName,
Connection: service.Connection,
Tx: tx,
},
}
}
func (s ServiceTx) Create(config *portainer.PendingActions) error {
return s.Tx.CreateObject(BucketName, func(id uint64) (int, interface{}) {
config.ID = portainer.PendingActionsID(id)
config.CreatedAt = time.Now().Unix()
return int(config.ID), config
})
}
func (s ServiceTx) Update(ID portainer.PendingActionsID, config *portainer.PendingActions) error {
return s.BaseDataServiceTx.Update(ID, config)
}
func (s ServiceTx) DeleteByEndpointID(ID portainer.EndpointID) error {
log.Debug().Int("endpointId", int(ID)).Msg("deleting pending actions for endpoint")
pendingActions, err := s.BaseDataServiceTx.ReadAll()
if err != nil {
return fmt.Errorf("failed to retrieve pending-actions for endpoint (%d): %w", ID, err)
}
for _, pendingAction := range pendingActions {
if pendingAction.EndpointID == ID {
err := s.BaseDataServiceTx.Delete(pendingAction.ID)
if err != nil {
log.Debug().Int("endpointId", int(ID)).Msgf("failed to delete pending action: %v", err)
}
}
}
return nil
}
// GetNextIdentifier returns the next identifier for a custom template.
func (service ServiceTx) GetNextIdentifier() int {
return service.Tx.GetNextIdentifier(BucketName)
}
// GetNextIdentifier returns the next identifier for a custom template.
func (service *Service) GetNextIdentifier() int {
return service.Connection.GetNextIdentifier(BucketName)
}

View File

@@ -1,16 +1,25 @@
package registry package registry
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "registries" // BucketName represents the name of the bucket where this service stores data.
BucketName = "registries"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.Registry, portainer.RegistryID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,26 +30,48 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.Registry, portainer.RegistryID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { // Registry returns an registry by ID.
return ServiceTx{ func (service *Service) Registry(ID portainer.RegistryID) (*portainer.Registry, error) {
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.Registry, portainer.RegistryID]{ var registry portainer.Registry
Bucket: BucketName, identifier := service.connection.ConvertToKey(int(ID))
Connection: service.Connection,
Tx: tx, err := service.connection.GetObject(BucketName, identifier, &registry)
}, if err != nil {
return nil, err
} }
return &registry, nil
} }
// Create creates a new registry. // Registries returns an array containing all the registries.
func (service *Service) Registries() ([]portainer.Registry, error) {
var registries = make([]portainer.Registry, 0)
err := service.connection.GetAll(
BucketName,
&portainer.Registry{},
func(obj interface{}) (interface{}, error) {
registry, ok := obj.(*portainer.Registry)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Registry object")
return nil, fmt.Errorf("Failed to convert to Registry object: %s", obj)
}
registries = append(registries, *registry)
return &portainer.Registry{}, nil
})
return registries, err
}
// CreateRegistry creates a new registry.
func (service *Service) Create(registry *portainer.Registry) error { func (service *Service) Create(registry *portainer.Registry) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
registry.ID = portainer.RegistryID(id) registry.ID = portainer.RegistryID(id)
@@ -48,3 +79,15 @@ func (service *Service) Create(registry *portainer.Registry) error {
}, },
) )
} }
// UpdateRegistry updates an registry.
func (service *Service) UpdateRegistry(ID portainer.RegistryID, registry *portainer.Registry) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, registry)
}
// DeleteRegistry deletes an registry.
func (service *Service) DeleteRegistry(ID portainer.RegistryID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}

View File

@@ -1,21 +0,0 @@
package registry
import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.Registry, portainer.RegistryID]
}
// Create creates a new registry.
func (service ServiceTx) Create(registry *portainer.Registry) error {
return service.Tx.CreateObject(
BucketName,
func(id uint64) (int, interface{}) {
registry.ID = portainer.RegistryID(id)
return int(registry.ID), registry
},
)
}

View File

@@ -1,21 +1,25 @@
package resourcecontrol package resourcecontrol
import ( import (
"errors"
"fmt" "fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "resource_control" // BucketName represents the name of the bucket where this service stores data.
BucketName = "resource_control"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.ResourceControl, portainer.ResourceControlID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -26,21 +30,21 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.ResourceControl, portainer.ResourceControlID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { // ResourceControl returns a ResourceControl object by ID
return ServiceTx{ func (service *Service) ResourceControl(ID portainer.ResourceControlID) (*portainer.ResourceControl, error) {
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.ResourceControl, portainer.ResourceControlID]{ var resourceControl portainer.ResourceControl
Bucket: BucketName, identifier := service.connection.ConvertToKey(int(ID))
Connection: service.Connection,
Tx: tx, err := service.connection.GetObject(BucketName, identifier, &resourceControl)
}, if err != nil {
return nil, err
} }
return &resourceControl, nil
} }
// ResourceControlByResourceIDAndType returns a ResourceControl object by checking if the resourceID is equal // ResourceControlByResourceIDAndType returns a ResourceControl object by checking if the resourceID is equal
@@ -49,14 +53,14 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
func (service *Service) ResourceControlByResourceIDAndType(resourceID string, resourceType portainer.ResourceControlType) (*portainer.ResourceControl, error) { func (service *Service) ResourceControlByResourceIDAndType(resourceID string, resourceType portainer.ResourceControlType) (*portainer.ResourceControl, error) {
var resourceControl *portainer.ResourceControl var resourceControl *portainer.ResourceControl
stop := fmt.Errorf("ok") stop := fmt.Errorf("ok")
err := service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.ResourceControl{}, &portainer.ResourceControl{},
func(obj interface{}) (interface{}, error) { func(obj interface{}) (interface{}, error) {
rc, ok := obj.(*portainer.ResourceControl) rc, ok := obj.(*portainer.ResourceControl)
if !ok { if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to ResourceControl object") log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to ResourceControl object")
return nil, fmt.Errorf("failed to convert to ResourceControl object: %s", obj) return nil, fmt.Errorf("Failed to convert to ResourceControl object: %s", obj)
} }
if rc.ResourceID == resourceID && rc.Type == resourceType { if rc.ResourceID == resourceID && rc.Type == resourceType {
@@ -73,16 +77,38 @@ func (service *Service) ResourceControlByResourceIDAndType(resourceID string, re
return &portainer.ResourceControl{}, nil return &portainer.ResourceControl{}, nil
}) })
if errors.Is(err, stop) { if err == stop {
return resourceControl, nil return resourceControl, nil
} }
return nil, err return nil, err
} }
// ResourceControls returns all the ResourceControl objects
func (service *Service) ResourceControls() ([]portainer.ResourceControl, error) {
var rcs = make([]portainer.ResourceControl, 0)
err := service.connection.GetAll(
BucketName,
&portainer.ResourceControl{},
func(obj interface{}) (interface{}, error) {
rc, ok := obj.(*portainer.ResourceControl)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to ResourceControl object")
return nil, fmt.Errorf("Failed to convert to ResourceControl object: %s", obj)
}
rcs = append(rcs, *rc)
return &portainer.ResourceControl{}, nil
})
return rcs, err
}
// CreateResourceControl creates a new ResourceControl object // CreateResourceControl creates a new ResourceControl object
func (service *Service) Create(resourceControl *portainer.ResourceControl) error { func (service *Service) Create(resourceControl *portainer.ResourceControl) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
resourceControl.ID = portainer.ResourceControlID(id) resourceControl.ID = portainer.ResourceControlID(id)
@@ -90,3 +116,15 @@ func (service *Service) Create(resourceControl *portainer.ResourceControl) error
}, },
) )
} }
// UpdateResourceControl saves a ResourceControl object.
func (service *Service) UpdateResourceControl(ID portainer.ResourceControlID, resourceControl *portainer.ResourceControl) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, resourceControl)
}
// DeleteResourceControl deletes a ResourceControl object by ID
func (service *Service) DeleteResourceControl(ID portainer.ResourceControlID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
}

View File

@@ -1,63 +0,0 @@
package resourcecontrol
import (
"errors"
"fmt"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
)
type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.ResourceControl, portainer.ResourceControlID]
}
// ResourceControlByResourceIDAndType returns a ResourceControl object by checking if the resourceID is equal
// to the main ResourceID or in SubResourceIDs. It also performs a check on the resource type. Return nil
// if no ResourceControl was found.
func (service ServiceTx) ResourceControlByResourceIDAndType(resourceID string, resourceType portainer.ResourceControlType) (*portainer.ResourceControl, error) {
var resourceControl *portainer.ResourceControl
stop := fmt.Errorf("ok")
err := service.Tx.GetAll(
BucketName,
&portainer.ResourceControl{},
func(obj interface{}) (interface{}, error) {
rc, ok := obj.(*portainer.ResourceControl)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to ResourceControl object")
return nil, fmt.Errorf("failed to convert to ResourceControl object: %s", obj)
}
if rc.ResourceID == resourceID && rc.Type == resourceType {
resourceControl = rc
return nil, stop
}
for _, subResourceID := range rc.SubResourceIDs {
if subResourceID == resourceID {
resourceControl = rc
return nil, stop
}
}
return &portainer.ResourceControl{}, nil
})
if errors.Is(err, stop) {
return resourceControl, nil
}
return nil, err
}
// CreateResourceControl creates a new ResourceControl object
func (service ServiceTx) Create(resourceControl *portainer.ResourceControl) error {
return service.Tx.CreateObject(
BucketName,
func(id uint64) (int, interface{}) {
resourceControl.ID = portainer.ResourceControlID(id)
return int(resourceControl.ID), resourceControl
},
)
}

View File

@@ -1,16 +1,25 @@
package role package role
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "roles" // BucketName represents the name of the bucket where this service stores data.
BucketName = "roles"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.Role, portainer.RoleID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -21,26 +30,48 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.Role, portainer.RoleID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { // Role returns a Role by ID
return ServiceTx{ func (service *Service) Role(ID portainer.RoleID) (*portainer.Role, error) {
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.Role, portainer.RoleID]{ var set portainer.Role
Bucket: BucketName, identifier := service.connection.ConvertToKey(int(ID))
Connection: service.Connection,
Tx: tx, err := service.connection.GetObject(BucketName, identifier, &set)
}, if err != nil {
return nil, err
} }
return &set, nil
}
// Roles return an array containing all the sets.
func (service *Service) Roles() ([]portainer.Role, error) {
var sets = make([]portainer.Role, 0)
err := service.connection.GetAll(
BucketName,
&portainer.Role{},
func(obj interface{}) (interface{}, error) {
set, ok := obj.(*portainer.Role)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Role object")
return nil, fmt.Errorf("Failed to convert to Role object: %s", obj)
}
sets = append(sets, *set)
return &portainer.Role{}, nil
})
return sets, err
} }
// CreateRole creates a new Role. // CreateRole creates a new Role.
func (service *Service) Create(role *portainer.Role) error { func (service *Service) Create(role *portainer.Role) error {
return service.Connection.CreateObject( return service.connection.CreateObject(
BucketName, BucketName,
func(id uint64) (int, interface{}) { func(id uint64) (int, interface{}) {
role.ID = portainer.RoleID(id) role.ID = portainer.RoleID(id)
@@ -48,3 +79,9 @@ func (service *Service) Create(role *portainer.Role) error {
}, },
) )
} }
// UpdateRole updates a role.
func (service *Service) UpdateRole(ID portainer.RoleID, role *portainer.Role) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, role)
}

View File

@@ -1,21 +0,0 @@
package role
import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.Role, portainer.RoleID]
}
// CreateRole creates a new Role.
func (service ServiceTx) Create(role *portainer.Role) error {
return service.Tx.CreateObject(
BucketName,
func(id uint64) (int, interface{}) {
role.ID = portainer.RoleID(id)
return int(role.ID), role
},
)
}

View File

@@ -1,12 +1,17 @@
package schedule package schedule
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "schedules" // BucketName represents the name of the bucket where this service stores data.
BucketName = "schedules"
)
// Service represents a service for managing schedule data. // Service represents a service for managing schedule data.
type Service struct { type Service struct {
@@ -58,11 +63,22 @@ func (service *Service) DeleteSchedule(ID portainer.ScheduleID) error {
func (service *Service) Schedules() ([]portainer.Schedule, error) { func (service *Service) Schedules() ([]portainer.Schedule, error) {
var schedules = make([]portainer.Schedule, 0) var schedules = make([]portainer.Schedule, 0)
return schedules, service.connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Schedule{}, &portainer.Schedule{},
dataservices.AppendFn(&schedules), func(obj interface{}) (interface{}, error) {
) schedule, ok := obj.(*portainer.Schedule)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Schedule object")
return nil, fmt.Errorf("Failed to convert to Schedule object: %s", obj)
}
schedules = append(schedules, *schedule)
return &portainer.Schedule{}, nil
})
return schedules, err
} }
// SchedulesByJobType return a array containing all the schedules // SchedulesByJobType return a array containing all the schedules
@@ -70,13 +86,24 @@ func (service *Service) Schedules() ([]portainer.Schedule, error) {
func (service *Service) SchedulesByJobType(jobType portainer.JobType) ([]portainer.Schedule, error) { func (service *Service) SchedulesByJobType(jobType portainer.JobType) ([]portainer.Schedule, error) {
var schedules = make([]portainer.Schedule, 0) var schedules = make([]portainer.Schedule, 0)
return schedules, service.connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Schedule{}, &portainer.Schedule{},
dataservices.FilterFn(&schedules, func(e portainer.Schedule) bool { func(obj interface{}) (interface{}, error) {
return e.JobType == jobType schedule, ok := obj.(*portainer.Schedule)
}), if !ok {
) log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Schedule object")
return nil, fmt.Errorf("Failed to convert to Schedule object: %s", obj)
}
if schedule.JobType == jobType {
schedules = append(schedules, *schedule)
}
return &portainer.Schedule{}, nil
})
return schedules, err
} }
// Create assign an ID to a new schedule and saves it. // Create assign an ID to a new schedule and saves it.

View File

@@ -31,13 +31,6 @@ func NewService(connection portainer.Connection) (*Service, error) {
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{
service: service,
tx: tx,
}
}
// Settings retrieve the settings object. // Settings retrieve the settings object.
func (service *Service) Settings() (*portainer.Settings, error) { func (service *Service) Settings() (*portainer.Settings, error) {
var settings portainer.Settings var settings portainer.Settings

View File

@@ -1,31 +0,0 @@
package settings
import (
portainer "github.com/portainer/portainer/api"
)
type ServiceTx struct {
service *Service
tx portainer.Transaction
}
func (service ServiceTx) BucketName() string {
return BucketName
}
// Settings retrieve the settings object.
func (service ServiceTx) Settings() (*portainer.Settings, error) {
var settings portainer.Settings
err := service.tx.GetObject(BucketName, []byte(settingsKey), &settings)
if err != nil {
return nil, err
}
return &settings, nil
}
// UpdateSettings persists a Settings object.
func (service ServiceTx) UpdateSettings(settings *portainer.Settings) error {
return service.tx.UpdateObject(BucketName, []byte(settingsKey), settings)
}

View File

@@ -1,14 +1,23 @@
package snapshot package snapshot
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
const BucketName = "snapshots" const (
BucketName = "snapshots"
)
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.Snapshot, portainer.EndpointID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
func NewService(connection portainer.Connection) (*Service, error) { func NewService(connection portainer.Connection) (*Service, error) {
@@ -18,23 +27,58 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.Snapshot, portainer.EndpointID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{ return ServiceTx{
BaseDataServiceTx: dataservices.BaseDataServiceTx[portainer.Snapshot, portainer.EndpointID]{ service: service,
Bucket: BucketName, tx: tx,
Connection: service.Connection,
Tx: tx,
},
} }
} }
func (service *Service) Create(snapshot *portainer.Snapshot) error { func (service *Service) Snapshot(endpointID portainer.EndpointID) (*portainer.Snapshot, error) {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) var snapshot portainer.Snapshot
identifier := service.connection.ConvertToKey(int(endpointID))
err := service.connection.GetObject(BucketName, identifier, &snapshot)
if err != nil {
return nil, err
}
return &snapshot, nil
}
func (service *Service) Snapshots() ([]portainer.Snapshot, error) {
var snapshots = make([]portainer.Snapshot, 0)
err := service.connection.GetAllWithJsoniter(
BucketName,
&portainer.Snapshot{},
func(obj interface{}) (interface{}, error) {
snapshot, ok := obj.(*portainer.Snapshot)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Snapshot object")
return nil, fmt.Errorf("failed to convert to Snapshot object: %s", obj)
}
snapshots = append(snapshots, *snapshot)
return &portainer.Snapshot{}, nil
})
return snapshots, err
}
func (service *Service) UpdateSnapshot(snapshot *portainer.Snapshot) error {
identifier := service.connection.ConvertToKey(int(snapshot.EndpointID))
return service.connection.UpdateObject(BucketName, identifier, snapshot)
}
func (service *Service) DeleteSnapshot(endpointID portainer.EndpointID) error {
identifier := service.connection.ConvertToKey(int(endpointID))
return service.connection.DeleteObject(BucketName, identifier)
}
func (service *Service) Create(snapshot *portainer.Snapshot) error {
return service.connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
} }

View File

@@ -1,14 +1,63 @@
package snapshot package snapshot
import ( import (
"fmt"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
) )
type ServiceTx struct { type ServiceTx struct {
dataservices.BaseDataServiceTx[portainer.Snapshot, portainer.EndpointID] service *Service
tx portainer.Transaction
}
func (service ServiceTx) BucketName() string {
return BucketName
}
func (service ServiceTx) Snapshot(endpointID portainer.EndpointID) (*portainer.Snapshot, error) {
var snapshot portainer.Snapshot
identifier := service.service.connection.ConvertToKey(int(endpointID))
err := service.tx.GetObject(BucketName, identifier, &snapshot)
if err != nil {
return nil, err
}
return &snapshot, nil
}
func (service ServiceTx) Snapshots() ([]portainer.Snapshot, error) {
var snapshots = make([]portainer.Snapshot, 0)
err := service.tx.GetAllWithJsoniter(
BucketName,
&portainer.Snapshot{},
func(obj interface{}) (interface{}, error) {
snapshot, ok := obj.(*portainer.Snapshot)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Snapshot object")
return nil, fmt.Errorf("failed to convert to Snapshot object: %s", obj)
}
snapshots = append(snapshots, *snapshot)
return &portainer.Snapshot{}, nil
})
return snapshots, err
}
func (service ServiceTx) UpdateSnapshot(snapshot *portainer.Snapshot) error {
identifier := service.service.connection.ConvertToKey(int(snapshot.EndpointID))
return service.tx.UpdateObject(BucketName, identifier, snapshot)
}
func (service ServiceTx) DeleteSnapshot(endpointID portainer.EndpointID) error {
identifier := service.service.connection.ConvertToKey(int(endpointID))
return service.tx.DeleteObject(BucketName, identifier)
} }
func (service ServiceTx) Create(snapshot *portainer.Snapshot) error { func (service ServiceTx) Create(snapshot *portainer.Snapshot) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) return service.tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
} }

View File

@@ -1,20 +1,27 @@
package stack package stack
import ( import (
"errors" "fmt"
"strings" "strings"
portainer "github.com/portainer/portainer/api" portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices/errors"
dserrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/rs/zerolog/log"
) )
// BucketName represents the name of the bucket where this service stores data. const (
const BucketName = "stacks" // BucketName represents the name of the bucket where this service stores data.
BucketName = "stacks"
)
// Service represents a service for managing environment(endpoint) data. // Service represents a service for managing environment(endpoint) data.
type Service struct { type Service struct {
dataservices.BaseDataService[portainer.Stack, portainer.StackID] connection portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
} }
// NewService creates a new instance of a service. // NewService creates a new instance of a service.
@@ -25,37 +32,50 @@ func NewService(connection portainer.Connection) (*Service, error) {
} }
return &Service{ return &Service{
BaseDataService: dataservices.BaseDataService[portainer.Stack, portainer.StackID]{ connection: connection,
Bucket: BucketName,
Connection: connection,
},
}, nil }, nil
} }
func (service *Service) Tx(tx portainer.Transaction) ServiceTx { // Stack returns a stack object by ID.
return ServiceTx{ func (service *Service) Stack(ID portainer.StackID) (*portainer.Stack, error) {
BaseDataServiceTx: service.BaseDataService.Tx(tx), var stack portainer.Stack
identifier := service.connection.ConvertToKey(int(ID))
err := service.connection.GetObject(BucketName, identifier, &stack)
if err != nil {
return nil, err
} }
return &stack, nil
} }
// StackByName returns a stack object by name. // StackByName returns a stack object by name.
func (service *Service) StackByName(name string) (*portainer.Stack, error) { func (service *Service) StackByName(name string) (*portainer.Stack, error) {
var s portainer.Stack var s *portainer.Stack
err := service.Connection.GetAll( stop := fmt.Errorf("ok")
err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Stack{}, &portainer.Stack{},
dataservices.FirstFn(&s, func(e portainer.Stack) bool { func(obj interface{}) (interface{}, error) {
return e.Name == name stack, ok := obj.(*portainer.Stack)
}), if !ok {
) log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Stack object")
return nil, fmt.Errorf("Failed to convert to Stack object: %s", obj)
if errors.Is(err, dataservices.ErrStop) {
return &s, nil
} }
if stack.Name == name {
s = stack
return nil, stop
}
return &portainer.Stack{}, nil
})
if err == stop {
return s, nil
}
if err == nil { if err == nil {
return nil, dserrors.ErrObjectNotFound return nil, errors.ErrObjectNotFound
} }
return nil, err return nil, err
@@ -65,58 +85,125 @@ func (service *Service) StackByName(name string) (*portainer.Stack, error) {
func (service *Service) StacksByName(name string) ([]portainer.Stack, error) { func (service *Service) StacksByName(name string) ([]portainer.Stack, error) {
var stacks = make([]portainer.Stack, 0) var stacks = make([]portainer.Stack, 0)
return stacks, service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Stack{}, &portainer.Stack{},
dataservices.FilterFn(&stacks, func(e portainer.Stack) bool { func(obj interface{}) (interface{}, error) {
return e.Name == name stack, ok := obj.(portainer.Stack)
}), if !ok {
) log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Stack object")
return nil, fmt.Errorf("Failed to convert to Stack object: %s", obj)
}
if stack.Name == name {
stacks = append(stacks, stack)
}
return &portainer.Stack{}, nil
})
return stacks, err
}
// Stacks returns an array containing all the stacks.
func (service *Service) Stacks() ([]portainer.Stack, error) {
var stacks = make([]portainer.Stack, 0)
err := service.connection.GetAll(
BucketName,
&portainer.Stack{},
func(obj interface{}) (interface{}, error) {
stack, ok := obj.(*portainer.Stack)
if !ok {
log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Stack object")
return nil, fmt.Errorf("Failed to convert to Stack object: %s", obj)
}
stacks = append(stacks, *stack)
return &portainer.Stack{}, nil
})
return stacks, err
} }
// GetNextIdentifier returns the next identifier for a stack. // GetNextIdentifier returns the next identifier for a stack.
func (service *Service) GetNextIdentifier() int { func (service *Service) GetNextIdentifier() int {
return service.Connection.GetNextIdentifier(BucketName) return service.connection.GetNextIdentifier(BucketName)
} }
// CreateStack creates a new stack. // CreateStack creates a new stack.
func (service *Service) Create(stack *portainer.Stack) error { func (service *Service) Create(stack *portainer.Stack) error {
return service.Connection.CreateObjectWithId(BucketName, int(stack.ID), stack) return service.connection.CreateObjectWithId(BucketName, int(stack.ID), stack)
}
// UpdateStack updates a stack.
func (service *Service) UpdateStack(ID portainer.StackID, stack *portainer.Stack) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.UpdateObject(BucketName, identifier, stack)
}
// DeleteStack deletes a stack.
func (service *Service) DeleteStack(ID portainer.StackID) error {
identifier := service.connection.ConvertToKey(int(ID))
return service.connection.DeleteObject(BucketName, identifier)
} }
// StackByWebhookID returns a pointer to a stack object by webhook ID. // StackByWebhookID returns a pointer to a stack object by webhook ID.
// It returns nil, errors.ErrObjectNotFound if there's no stack associated with the webhook ID. // It returns nil, errors.ErrObjectNotFound if there's no stack associated with the webhook ID.
func (service *Service) StackByWebhookID(id string) (*portainer.Stack, error) { func (service *Service) StackByWebhookID(id string) (*portainer.Stack, error) {
var s portainer.Stack var s *portainer.Stack
stop := fmt.Errorf("ok")
err := service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Stack{}, &portainer.Stack{},
dataservices.FirstFn(&s, func(e portainer.Stack) bool { func(obj interface{}) (interface{}, error) {
return e.AutoUpdate != nil && strings.EqualFold(e.AutoUpdate.Webhook, id) var ok bool
}), s, ok = obj.(*portainer.Stack)
)
if errors.Is(err, dataservices.ErrStop) { if !ok {
return &s, nil log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Stack object")
return &portainer.Stack{}, nil
} }
if s.AutoUpdate != nil && strings.EqualFold(s.AutoUpdate.Webhook, id) {
return nil, stop
}
return &portainer.Stack{}, nil
})
if err == stop {
return s, nil
}
if err == nil { if err == nil {
return nil, dserrors.ErrObjectNotFound return nil, errors.ErrObjectNotFound
} }
return nil, err return nil, err
} }
// RefreshableStacks returns stacks that are configured for a periodic update // RefreshableStacks returns stacks that are configured for a periodic update
func (service *Service) RefreshableStacks() ([]portainer.Stack, error) { func (service *Service) RefreshableStacks() ([]portainer.Stack, error) {
stacks := make([]portainer.Stack, 0) stacks := make([]portainer.Stack, 0)
return stacks, service.Connection.GetAll( err := service.connection.GetAll(
BucketName, BucketName,
&portainer.Stack{}, &portainer.Stack{},
dataservices.FilterFn(&stacks, func(e portainer.Stack) bool { func(obj interface{}) (interface{}, error) {
return e.AutoUpdate != nil && e.AutoUpdate.Interval != "" stack, ok := obj.(*portainer.Stack)
}), if !ok {
) log.Debug().Str("obj", fmt.Sprintf("%#v", obj)).Msg("failed to convert to Stack object")
return nil, fmt.Errorf("Failed to convert to Stack object: %s", obj)
}
if stack.AutoUpdate != nil && stack.AutoUpdate.Interval != "" {
stacks = append(stacks, *stack)
}
return &portainer.Stack{}, nil
})
return stacks, err
} }

View File

@@ -29,7 +29,8 @@ func TestService_StackByWebhookID(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping test in short mode. Normally takes ~1s to run.") t.Skip("skipping test in short mode. Normally takes ~1s to run.")
} }
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
b := stackBuilder{t: t, store: store} b := stackBuilder{t: t, store: store}
b.createNewStack(newGuidString(t)) b.createNewStack(newGuidString(t))
@@ -58,7 +59,7 @@ func (b *stackBuilder) createNewStack(webhookID string) portainer.Stack {
Type: portainer.DockerComposeStack, Type: portainer.DockerComposeStack,
EndpointID: 2, EndpointID: 2,
EntryPoint: filesystem.ComposeFileDefaultName, EntryPoint: filesystem.ComposeFileDefaultName,
Env: []portainer.Pair{{Name: "Name1", Value: "Value1"}}, Env: []portainer.Pair{{"Name1", "Value1"}},
Status: portainer.StackStatusActive, Status: portainer.StackStatusActive,
CreationDate: time.Now().Unix(), CreationDate: time.Now().Unix(),
ProjectPath: "/tmp/project", ProjectPath: "/tmp/project",
@@ -86,7 +87,8 @@ func Test_RefreshableStacks(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping test in short mode. Normally takes ~1s to run.") t.Skip("skipping test in short mode. Normally takes ~1s to run.")
} }
_, store := datastore.MustNewTestStore(t, true, true) _, store, teardown := datastore.MustNewTestStore(t, true, true)
defer teardown()
staticStack := portainer.Stack{ID: 1} staticStack := portainer.Stack{ID: 1}
stackWithWebhook := portainer.Stack{ID: 2, AutoUpdate: &portainer.AutoUpdateSettings{Webhook: "webhook"}} stackWithWebhook := portainer.Stack{ID: 2, AutoUpdate: &portainer.AutoUpdateSettings{Webhook: "webhook"}}

View File

@@ -1,98 +0,0 @@
package stack
import (
"errors"
"strings"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
dserrors "github.com/portainer/portainer/api/dataservices/errors"
)
// ServiceTx is the transaction-scoped stack service; it embeds the generic
// transactional data service for stacks and operates through service.Tx.
type ServiceTx struct {
	dataservices.BaseDataServiceTx[portainer.Stack, portainer.StackID]
}
// StackByName returns a stack object by name.
// It returns nil, dserrors.ErrObjectNotFound when no stack has the given name.
func (service ServiceTx) StackByName(name string) (*portainer.Stack, error) {
	var match portainer.Stack

	err := service.Tx.GetAll(
		BucketName,
		&portainer.Stack{},
		dataservices.FirstFn(&match, func(candidate portainer.Stack) bool {
			return candidate.Name == name
		}),
	)

	switch {
	case errors.Is(err, dataservices.ErrStop):
		// FirstFn stops the scan with ErrStop once a match is found.
		return &match, nil
	case err == nil:
		// Scan finished without stopping: nothing matched.
		return nil, dserrors.ErrObjectNotFound
	default:
		return nil, err
	}
}
// StacksByName returns an array containing all the stacks with same name
func (service ServiceTx) StacksByName(name string) ([]portainer.Stack, error) {
	matches := make([]portainer.Stack, 0)

	// FilterFn appends every stack whose name equals the requested one.
	filter := dataservices.FilterFn(&matches, func(candidate portainer.Stack) bool {
		return candidate.Name == name
	})

	err := service.Tx.GetAll(BucketName, &portainer.Stack{}, filter)

	return matches, err
}
// GetNextIdentifier returns the next identifier for a stack.
// Delegates to the underlying transaction for the stack bucket.
func (service ServiceTx) GetNextIdentifier() int {
	return service.Tx.GetNextIdentifier(BucketName)
}
// Create creates a new stack, stored under the stack's pre-assigned ID.
func (service ServiceTx) Create(stack *portainer.Stack) error {
	return service.Tx.CreateObjectWithId(BucketName, int(stack.ID), stack)
}
// StackByWebhookID returns a pointer to a stack object by webhook ID.
// It returns nil, errors.ErrObjectNotFound if there's no stack associated with the webhook ID.
func (service ServiceTx) StackByWebhookID(id string) (*portainer.Stack, error) {
	var match portainer.Stack

	err := service.Tx.GetAll(
		BucketName,
		&portainer.Stack{},
		dataservices.FirstFn(&match, func(candidate portainer.Stack) bool {
			if candidate.AutoUpdate == nil {
				return false
			}

			// Webhook IDs are compared case-insensitively.
			return strings.EqualFold(candidate.AutoUpdate.Webhook, id)
		}),
	)

	switch {
	case errors.Is(err, dataservices.ErrStop):
		// FirstFn stops the scan with ErrStop once a match is found.
		return &match, nil
	case err == nil:
		// Scan finished without stopping: nothing matched.
		return nil, dserrors.ErrObjectNotFound
	default:
		return nil, err
	}
}
// RefreshableStacks returns stacks that are configured for a periodic update
func (service ServiceTx) RefreshableStacks() ([]portainer.Stack, error) {
	refreshable := make([]portainer.Stack, 0)

	// Keep only stacks that have an auto-update interval configured.
	filter := dataservices.FilterFn(&refreshable, func(candidate portainer.Stack) bool {
		if candidate.AutoUpdate == nil {
			return false
		}

		return candidate.AutoUpdate.Interval != ""
	})

	return refreshable, service.Tx.GetAll(BucketName, &portainer.Stack{}, filter)
}

Some files were not shown because too many files have changed in this diff Show More