Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Deploy Metadata in Backbeat tests #2610

Merged
merged 7 commits into from
Mar 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .github/dockerfiles/cloudserver/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
ARG CLOUDSERVER_IMAGE=ghcr.io/scality/cloudserver:7.70.47

FROM ${CLOUDSERVER_IMAGE}

# Cloudserver configuration tailored for the Backbeat functional-test stack.
# COPY is preferred over ADD for plain local files (no archive/URL magic).
COPY ./config.json /conf/config.json

# curl is required by the compose healthcheck. Use apt-get (plain `apt`
# warns that it has no stable CLI for scripts) and drop the package lists
# afterwards so they do not bloat the image layer.
RUN apt-get update \
    && apt-get install -y --no-install-recommends curl \
    && rm -rf /var/lib/apt/lists/*
84 changes: 84 additions & 0 deletions .github/dockerfiles/cloudserver/config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
{
"port": 8000,
"listenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
"cloudserver-front": "us-east-1",
"s3.docker.test": "us-east-1",
"127.0.0.2": "us-east-1",
"s3.amazonaws.com": "us-east-1"
},
"websiteEndpoints": [
"s3-website-us-east-1.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
"s3-website-us-west-1.amazonaws.com",
"s3-website-us-west-2.amazonaws.com",
"s3-website.ap-south-1.amazonaws.com",
"s3-website.ap-northeast-2.amazonaws.com",
"s3-website-ap-southeast-1.amazonaws.com",
"s3-website-ap-southeast-2.amazonaws.com",
"s3-website-ap-northeast-1.amazonaws.com",
"s3-website.eu-central-1.amazonaws.com",
"s3-website-eu-west-1.amazonaws.com",
"s3-website-sa-east-1.amazonaws.com",
"s3-website.localhost",
"s3-website.scality.test"
],
"replicationEndpoints": [
{
"site": "aws-location",
"servers": ["127.0.0.1:8001"],
"default": true
}
],
"cdmi": {
"host": "localhost",
"port": 81,
"path": "/dewpoint",
"readonly": true
},
"bucketd": {
"bootstrap": ["metadata:9000"]
},
"vaultd": {
"host": "localhost",
"port": 8500
},
"clusters": 1,
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"metadataClient": {
"host": "localhost",
"port": 9990
},
"dataClient": {
"host": "localhost",
"port": 9991
},
"metadataDaemon": {
"bindAddress": "localhost",
"port": 9990
},
"dataDaemon": {
"bindAddress": "localhost",
"port": 9991
},
"recordLog": {
"enabled": false,
"recordLogName": "s3-recordlog"
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [],
"testingMode": true
}
60 changes: 60 additions & 0 deletions .github/dockerfiles/ft/docker-compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
services:
  sproxyd:
    profiles: ['s3c']
    build:
      context: ../sproxyd
    # Quoted: a bare 0:0 is read by YAML 1.1 loaders as the sexagesimal
    # integer 0, not the "UID:GID" string compose expects.
    user: "0:0"
    entrypoint: /usr/bin/sproxyd
    command: -dlw -Vinfo -c /conf/sproxyd.conf -P /run/sproxyd-0 --no-sysfs
  metadata:
    profiles: ['s3c']
    image: ${METADATA_IMAGE}
    depends_on:
      - sproxyd
    ports:
      # Port mappings quoted so they always parse as "host:container"
      # strings and never hit YAML's number-like scalar traps.
      - "9000:9000"
    environment:
      SPROXYD: "sproxyd:8181"
    volumes:
      - ./metadata-config.json:/mnt/standalone_workdir/config.json:ro
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:9000 || exit 1"]
      interval: 10s
      timeout: 30s
      retries: 12
      start_period: 10s
  cloudserver:
    profiles: ['s3c']
    build:
      context: ../cloudserver
    depends_on:
      - metadata
    ports:
      - "8000:8000"
    environment:
      S3DATA: file
      S3METADATA: scality
      S3VAULT: mem
      S3_CONFIG_FILE: /conf/config.json
      # Environment values are strings; quote so the integer is not
      # re-typed by the YAML parser before reaching the container.
      REMOTE_MANAGEMENT_DISABLE: "1"
    command: yarn start
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:8000 || exit 1"]
      interval: 10s
      timeout: 30s
      retries: 12
      start_period: 10s
  redis:
    profiles: ['s3c']
    image: ${REDIS_IMAGE}
    ports:
      - "6379:6379"
  kafka:
    profiles: ['s3c']
    image: ${KAFKA_IMAGE}
    ports:
      - "9092:9092"
      - "2181:2181"
    environment:
      ADVERTISED_HOST: "localhost"
      ADVERTISED_PORT: "9092"
15 changes: 15 additions & 0 deletions .github/dockerfiles/ft/metadata-config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
{
"raftSessions": 3,
"raftMembers": 5,
"bucketdCount": 1,
"bucketdWorkers": 1,
"basePorts": {
"bucketd": 9000,
"repd": 4200,
"repdAdmin": 4250
},
"logLevel": "info",
"env": {
"METADATA_NEW_BUCKETS_VFORMAT": "v0"
}
}
11 changes: 11 additions & 0 deletions .github/dockerfiles/sproxyd/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
ARG SPROXYD_VERSION=7.10.11.0

FROM ghcr.io/scality/federation/sproxyd:${SPROXYD_VERSION}

# COPY is preferred over ADD for plain local files (no archive/URL magic).
COPY ./sproxyd.conf /conf/sproxyd.conf

# Single RUN layer instead of three: fix config ownership and pre-create
# the admin-API socket path that sproxyd expects at startup.
RUN chown root:root /conf/sproxyd.conf \
    && mkdir -p /var/run/scality/ \
    && touch /var/run/scality/scality-sproxyd-adminapi
28 changes: 28 additions & 0 deletions .github/dockerfiles/sproxyd/sproxyd.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
{
"general": {
"bind": "127.0.0.1",
"conn_max": 3000,
"conn_max_reuse": 100000,
"consistent_reads": false,
"consistent_writes": true,
"max_proc_fd": 40960,
"n_workers": 100,
"n_responders": 100,
"port": 8181,
"split_chunk_size": 33554432,
"split_control_by_request": false,
"split_enabled": true,
"split_gc_cos": 2,
"split_memory_limit": 671088640,
"split_n_get_workers": 20,
"split_n_io_workers": 20,
"split_n_put_workers": 20,
"split_threshold": 67108864
},
"ring_driver:0": {
"alias": "arc",
"type": "local",
"queue_path": "/data",
"trusted": 0
}
}
73 changes: 72 additions & 1 deletion .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -140,9 +140,11 @@ jobs:
run: yarn run --silent lint

- name: Run unit tests
run: yarn run cover:test
run: yarn run cover
env:
BACKBEAT_CONFIG_FILE: tests/config.json
TEST_SUITE: test

- uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
Expand Down Expand Up @@ -213,3 +215,72 @@ jobs:
continue-on-error: true
timeout-minutes: 60
if: failure() && runner.debug == '1'

ft-tests:
  needs: build
  runs-on: ubuntu-latest
  strategy:
    fail-fast: false
    matrix:
      profile: ['s3c']
      cloudserver_tag: ['7.70.47']
  steps:
    - name: Checkout
      uses: actions/checkout@v4
    - uses: actions/setup-node@v4
      with:
        node-version: '22'
        cache: yarn
    - name: Install node dependencies
      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
    - name: Login to Registry
      uses: docker/login-action@v3
      with:
        registry: ghcr.io
        username: ${{ github.repository_owner }}
        password: ${{ github.token }}
    - name: Setup services
      env:
        KAFKA_IMAGE: ghcr.io/${{ github.repository }}/ci-kafka:${{ github.sha }}
        REDIS_IMAGE: redis:alpine
        METADATA_IMAGE: ghcr.io/scality/metadata:7.70.45-standalone
        CLOUDSERVER_IMAGE: ghcr.io/scality/cloudserver:${{ matrix.cloudserver_tag }}
      run: |-
        set -e -o pipefail;
        docker compose --profile ${{ matrix.profile }} up -d --quiet-pull --wait
      working-directory: .github/dockerfiles/ft
    - name: Create Zookeeper paths for tests with metadata
      run: |-
        # Setup zookeeper paths for backbeat like we do in federation.
        zkcreate() {
          docker exec ft-kafka-1 /opt/kafka_2.11-0.10.1.0/bin/zookeeper-shell.sh localhost:2181 create "$1" ""
        }
        for path in /backbeat \
                    /backbeat/replication-populator \
                    /backbeat/replication-populator/raft-id-dispatcher \
                    /backbeat/replication-populator/raft-id-dispatcher/owners \
                    /backbeat/replication-populator/raft-id-dispatcher/leaders \
                    /backbeat/replication-populator/raft-id-dispatcher/provisions; do
          zkcreate "$path"
        done
        # Provision raft ids 0-3. NOTE(review): keep this range in sync with
        # the raft session count in .github/dockerfiles/ft/metadata-config.json
        # (which currently sets raftSessions: 3) — confirm the intended count.
        for id in 0 1 2 3; do
          zkcreate "/backbeat/replication-populator/raft-id-dispatcher/provisions/$id"
        done
      if: ${{ matrix.profile == 's3c' }}
    - name: Run QueuePopulator functional tests
      env:
        PROFILE: ${{ matrix.profile }}
        TEST_SUITE: ft_test:queuepopulator
      run: yarn run cover
    # v5 to match the unit-test job in this workflow, which already uses v5.
    - uses: codecov/codecov-action@v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        directory: ./coverage/ft_test:queuepopulator
        # NOTE(review): codecov flag names disallow ':' — confirm this flag
        # is accepted by the uploader or rename it (e.g. ft_test-queuepopulator).
        flags: ft_test:queuepopulator
    - name: Debug wait
      uses: scality/actions/[email protected]
      with:
        tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
        tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
        tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
        tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
      continue-on-error: true
      timeout-minutes: 60
      if: failure() && runner.debug == '1'
4 changes: 3 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
"mongo_queue_processor": "node extensions/mongoProcessor/mongoProcessorTask.js",
"garbage_collector": "node extensions/gc/service.js",
"test": "mocha --recursive tests/unit --timeout 30000 --exit",
"cover:test": "nyc --clean --silent yarn run test && nyc report --report-dir ./coverage/test --reporter=lcov",
"cover": "cross-env TEST_SUITE=${TEST_SUITE:-test} nyc --clean --silent yarn run ${TEST_SUITE:-test} && nyc report --reporter=lcov --report-dir ./coverage/${TEST_SUITE:-test}",
"ft_test": "mocha --recursive $(find tests/functional -name '*.js') --timeout 30000 --exit",
"ft_test:notification": "mocha --recursive $(find tests/functional/notification -name '*.js') --timeout 30000 --exit",
"ft_test:replication": "mocha --recursive $(find tests/functional/replication -name '*.js') --timeout 30000 --exit",
Expand All @@ -27,6 +27,7 @@
"ft_test:ingestion": "mocha --recursive $(find tests/functional/ingestion -name '*.js') --timeout 30000 --exit",
"ft_test:api:routes": "mocha tests/functional/api/routes.js --timeout 30000 --exit",
"ft_test:api:retry": "mocha tests/functional/api/retry.js --timeout 30000 --exit",
"ft_test:queuepopulator": "mocha --recursive $(find tests/functional/queuePopulator -name '*.js') --timeout 30000 --exit",
"bh_test": "mocha --recursive tests/behavior --exit",
"lint": "eslint $(git ls-files '*.js')",
"lint_md": "mdlint $(git ls-files '[^bucket-scanner/]*.md')",
Expand Down Expand Up @@ -76,6 +77,7 @@
"devDependencies": {
"@zenko/cloudserver": "git+https://github.com/scality/cloudserver#9.0.0",
"c8": "^10.1.3",
"cross-env": "^7.0.3",
"eslint": "^9.12.0",
"eslint-config-airbnb-base": "^13.1.0",
"eslint-config-scality": "scality/Guidelines#8.3.0",
Expand Down
Loading