tests/ceph: cephfs testing for restricted users
Add an `INCUS_CEPH_CLIENT` variable for testing with a restricted-caps
client.

Moved cephfs testing to a non-standard fs name to exercise the mount syntax.

Signed-off-by: Michael 'ASAP' Weinrich <[email protected]>
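
For a local run against this change, the restricted client referenced by `INCUS_CEPH_CLIENT` has to exist on the cluster first. A minimal provisioning sketch, assuming the `cephfs-incus` filesystem and `incus` client names used in the CI workflow below (`ceph fs authorize` creates the client entity if it is missing):

```bash
# Create the pools and the dedicated test filesystem.
ceph osd pool create cephfs_meta 32
ceph osd pool create cephfs_data 32
ceph fs new cephfs-incus cephfs_meta cephfs_data

# Create a client whose caps are scoped to this filesystem only.
ceph fs authorize cephfs-incus client.incus / rw

# Save the keyring where the mount helpers expect to find it.
ceph auth get client.incus | sudo tee /etc/ceph/ceph.client.incus.keyring
```
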
MadnessASAP committed Dec 24, 2024
1 parent 3edd98e commit ee4955e
Showing 5 changed files with 51 additions and 12 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/tests.yml
@@ -119,7 +119,8 @@ jobs:
env:
CGO_LDFLAGS_ALLOW: "(-Wl,-wrap,pthread_create)|(-Wl,-z,now)"
INCUS_CEPH_CLUSTER: "ceph"
INCUS_CEPH_CEPHFS: "cephfs"
INCUS_CEPH_CEPHFS: "cephfs-incus"
INCUS_CEPH_CLIENT: "incus"
INCUS_CEPH_CEPHOBJECT_RADOSGW: "http://127.0.0.1"
INCUS_CONCURRENT: "1"
INCUS_VERBOSE: "1"
@@ -335,8 +336,7 @@ jobs:
sudo microceph enable rgw
sudo microceph.ceph osd pool create cephfs_meta 32
sudo microceph.ceph osd pool create cephfs_data 32
-sudo microceph.ceph fs new cephfs cephfs_meta cephfs_data
-sudo microceph.ceph fs ls
+sudo microceph.ceph fs new ${INCUS_CEPH_CEPHFS} cephfs_meta cephfs_data
sleep 30
sudo microceph.ceph status
# Wait until there are no more "unknown" pgs
@@ -389,7 +389,7 @@ jobs:

- name: Create build directory
run: |
-mkdir bin
+mkdir bin
- name: Build static x86_64 incus
env:
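
The `unknown` PGs wait referenced in the workflow comment above can be reproduced locally; one hedged way to poll for the same condition (assuming `pg dump pgs_brief` lists one PG state per line):

```bash
# Block until no placement group reports the "unknown" state.
while sudo microceph.ceph pg dump pgs_brief 2>/dev/null | grep -q unknown; do
    sleep 5
done
sudo microceph.ceph status
```
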
1 change: 1 addition & 0 deletions test/README.md
@@ -16,6 +16,7 @@ Name | Default | Description
`INCUS_CEPH_CLUSTER` | ceph | The name of the ceph cluster to create osd pools in
`INCUS_CEPH_CEPHFS` | "" | Enables the CephFS tests using the specified cephfs filesystem for `cephfs` pools
`INCUS_CEPH_CEPHOBJECT_RADOSGW` | "" | Enables the Ceph Object tests using the specified radosgw HTTP endpoint for `cephobject` pools
+`INCUS_CEPH_CLIENT` | "admin" | User to use when authenticating to the Ceph storage cluster
`INCUS_CONCURRENT` | 0 | Run concurrency tests, very CPU intensive
`INCUS_DEBUG` | 0 | Run incusd, incus and the shell in debug mode (very verbose)
`INCUS_INSPECT` | 0 | Don't teardown the test environment on failure
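
Taken together, a local invocation of the cephfs suite with a restricted client might look like this (a sketch; the values mirror the CI workflow, and the suite name is passed as an argument to `main.sh`):

```bash
cd test
export INCUS_BACKEND=ceph
export INCUS_CEPH_CLUSTER=ceph
export INCUS_CEPH_CEPHFS=cephfs-incus
export INCUS_CEPH_CLIENT=incus
sudo -E ./main.sh storage_driver_cephfs
```
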
7 changes: 7 additions & 0 deletions test/main.sh
@@ -63,6 +63,13 @@ if [ "$INCUS_BACKEND" != "random" ] && ! storage_backend_available "$INCUS_BACKE
fi
echo "==> Using storage backend ${INCUS_BACKEND}"

if [ "${INCUS_BACKEND}" = "ceph" ]; then
if [ -z "${INCUS_CEPH_CLIENT:-}" ]; then
INCUS_CEPH_CLIENT="admin"
fi
echo "==> Using ceph client ${INCUS_CEPH_CLIENT}"
fi

import_storage_backends

cleanup() {
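
The defaulting added to `main.sh` above could also be written with plain parameter expansion; an equivalent one-line sketch:

```bash
# Fall back to the admin client when INCUS_CEPH_CLIENT is unset or empty.
INCUS_CEPH_CLIENT="${INCUS_CEPH_CLIENT:-admin}"
```
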
13 changes: 12 additions & 1 deletion test/suites/container_devices_disk.sh
@@ -157,10 +157,21 @@ test_container_devices_disk_cephfs() {
fi

incus launch testimage ceph-fs -c security.privileged=true
-incus config device add ceph-fs fs disk source=cephfs:"${INCUS_CEPH_CEPHFS}"/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs
+ceph fs authorize "${INCUS_CEPH_CEPHFS}" "client.${INCUS_CEPH_CLIENT}" / rw
+incus config device add ceph-fs fs disk \
+    source=cephfs:"${INCUS_CEPH_CEPHFS}"/ \
+    ceph.user_name="${INCUS_CEPH_CLIENT}" \
+    ceph.cluster_name="${INCUS_CEPH_CLUSTER}" \
+    path=/cephfs
incus exec ceph-fs -- stat /cephfs
incus restart ceph-fs --force
incus exec ceph-fs -- stat /cephfs
+incus exec ceph-fs -- mkdir /cephfs/ro
+incus stop ceph-fs
+ceph fs authorize "${INCUS_CEPH_CEPHFS}" "client.${INCUS_CEPH_CLIENT}" / rw /ro r
+incus start ceph-fs
+! incus exec ceph-fs -- touch /cephfs/ro/fail || false
+incus exec ceph-fs -- touch /cephfs/succeed
incus delete -f ceph-fs
}

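
The read-only check in this suite hinges on the path-scoped MDS caps that `ceph fs authorize` accumulates for the client. When debugging a failure here, inspecting the caps the client actually ended up with is a sensible first step (the client name matches the CI value):

```bash
# Show the keyring entry and the mds/mon/osd caps for the test client.
ceph auth get client.incus

# List every auth entity on the cluster for comparison.
ceph auth ls
```
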
34 changes: 27 additions & 7 deletions test/suites/storage_driver_cephfs.sh
@@ -7,22 +7,42 @@ test_storage_driver_cephfs() {
return
fi

if [ "${INCUS_CEPH_CLIENT}" != "admin" ]
ceph fs authorize "${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" / rw
fi

# Simple create/delete attempt
-incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")"
+incus storage create cephfs cephfs \
+    source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+    cephfs.user.name="${INCUS_CEPH_CLIENT}"
incus storage delete cephfs

# Test invalid key combinations for auto-creation of cephfs entities.
-! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.osd_pg_num=32 || false
-! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.meta_pool=xyz || false
-! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.data_pool=xyz || false
-! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || false
+! incus storage create cephfs cephfs \
+    source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+    cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+    cephfs.osd_pg_num=32 || false
+! incus storage create cephfs cephfs \
+    source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+    cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+    cephfs.meta_pool=xyz || false
+! incus storage create cephfs cephfs \
+    source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+    cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+    cephfs.data_pool=xyz || false
+! incus storage create cephfs cephfs \
+    source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+    cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+    cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || false


# Test cephfs storage volumes.
for fs in "cephfs" "cephfs2" ; do
if [ "${fs}" = "cephfs" ]; then
# Create one cephfs with pre-existing OSDs.
incus storage create "${fs}" cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")"
incus storage create "${fs}" cephfs \
source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
cephfs.user.name="${INCUS_CEPH_CLIENT}"
else
# Create one cephfs by creating the OSDs and the cephfs itself.
incus storage create "${fs}" cephfs source=cephfs2 cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta
@@ -64,5 +84,5 @@ test_storage_driver_cephfs() {
done

# Recreate the fs for other tests.
-ceph fs new cephfs cephfs_meta cephfs_data --force
+ceph fs new "${INCUS_CEPH_CEPHFS}" cephfs_meta cephfs_data --force
}
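
If a run dies partway through, the scratch filesystem and the restricted client can be left behind. A hedged cleanup sketch (names match the values used above; `ceph fs rm` deletes data irrecoverably):

```bash
# Take the filesystem offline, remove it, and drop the test client.
ceph fs fail cephfs-incus
ceph fs rm cephfs-incus --yes-i-really-mean-it
ceph auth del client.incus
```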
