diff --git a/.asf.yaml b/.asf.yaml index fa106d0cab42..69f3f2c8d43e 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -53,6 +53,14 @@ github: dismiss_stale_reviews: true require_code_owner_reviews: true required_approving_review_count: 2 + release/3.6: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 + release/3.5: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 release/3.4: required_pull_request_reviews: require_code_owner_reviews: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c90a8e90082d..95e72fe578b9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,7 +31,7 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc runs-on: ${{ matrix.platform }} @@ -42,7 +42,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive @@ -136,7 +136,7 @@ jobs: [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after echo "Linux launch services, done." 
- name: Start Dubbo Backend - if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin' + if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last') run: | sudo apt install -y maven cd t/lib/dubbo-backend diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml index dc08b0fd384e..2be0c39cbb94 100644 --- a/.github/workflows/centos7-ci.yml +++ b/.github/workflows/centos7-ci.yml @@ -30,12 +30,12 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml index 2bf3518dd2ab..7b47664c55e9 100644 --- a/.github/workflows/chaos.yml +++ b/.github/workflows/chaos.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 35 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml index dd77dcd1537c..d521a9d7a103 100644 --- a/.github/workflows/cli-master.yml +++ b/.github/workflows/cli-master.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 7aa6554095b9..7c50e3fc58d2 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -38,7 +38,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git 
a/.github/workflows/close-unresponded.yml b/.github/workflows/close-unresponded.yml index 52e81228eba2..9508af7ded1c 100644 --- a/.github/workflows/close-unresponded.yml +++ b/.github/workflows/close-unresponded.yml @@ -20,7 +20,7 @@ jobs: - name: Prune Stale uses: actions/stale@v8 with: - days-before-issue-stale: 14 + days-before-issue-stale: 60 days-before-issue-close: 3 stale-issue-message: > Due to lack of the reporter's response this issue has been labeled with "no response". @@ -35,4 +35,5 @@ jobs: # Issues with these labels will never be considered stale. only-labels: 'wait for update' stale-issue-label: 'no response' + exempt-issue-labels: "don't close" ascending: true diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 07a1807f811e..9e2befc9ac1f 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: Install run: | . 
./ci/common.sh @@ -37,7 +37,7 @@ jobs: timeout-minutes: 5 steps: - name: Checkout code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: Shellcheck code run: | diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml index 99340f991e2c..f644f59ce03e 100644 --- a/.github/workflows/doc-lint.yml +++ b/.github/workflows/doc-lint.yml @@ -5,11 +5,13 @@ on: paths: - "docs/**" - "**/*.md" + - ".github/workflows/doc-lint.yml" pull_request: branches: [master, "release/**"] paths: - "docs/**" - "**/*.md" + - ".github/workflows/doc-lint.yml" permissions: contents: read @@ -20,9 +22,9 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 1 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: 🚀 Use Node.js - uses: actions/setup-node@v3.8.0 + uses: actions/setup-node@v4.0.0 with: node-version: "12.x" - run: npm install -g markdownlint-cli@0.25.0 @@ -47,7 +49,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: recursive - name: Check Chinese copywriting diff --git a/.github/workflows/fips.yml b/.github/workflows/fips.yml index aeaf121f1fe3..2115b0a7b8c8 100644 --- a/.github/workflows/fips.yml +++ b/.github/workflows/fips.yml @@ -30,7 +30,7 @@ jobs: # The RSA and SHA tests are fully covered by jwt-auth and hmac-auth plugin tests, while other plugins only repeat such tests. - t/plugin/jwt-auth2.t t/plugin/jwt-auth.t t/plugin/hmac-auth.t # all SSL related core tests are covered by below two lists. 
- - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/deployment/conf_server.t t/misc/patch.t + - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/misc/patch.t - t/node/grpc-proxy-unary.t t/node/upstream-keepalive-pool.t t/node/upstream-websocket.t t/node/client-mtls.t t/node/upstream-mtls.t t/pubsub/kafka.t t/router/radixtree-sni2.t t/router/multi-ssl-certs.t t/router/radixtree-sni.t t/stream-node/mtls.t t/stream-node/tls.t t/stream-node/upstream-tls.t t/stream-node/sni.t - t/fips @@ -42,7 +42,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml index ec3701532d77..4d313ebfeb1a 100644 --- a/.github/workflows/fuzzing-ci.yaml +++ b/.github/workflows/fuzzing-ci.yaml @@ -27,7 +27,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/gm-cron.yaml b/.github/workflows/gm-cron.yaml index 669a21798be3..f0327540498e 100644 --- a/.github/workflows/gm-cron.yaml +++ b/.github/workflows/gm-cron.yaml @@ -20,7 +20,7 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc runs-on: ${{ matrix.platform }} @@ -33,7 +33,7 @@ jobs: # scripts or a separate action? 
steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/gm.yml b/.github/workflows/gm.yml index 003e567bace8..297c746caebd 100644 --- a/.github/workflows/gm.yml +++ b/.github/workflows/gm.yml @@ -39,7 +39,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml index b8d33af7c956..dc8857739b85 100644 --- a/.github/workflows/kubernetes-ci.yml +++ b/.github/workflows/kubernetes-ci.yml @@ -37,7 +37,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml index 2122e0db8fbc..830f1a1802e3 100644 --- a/.github/workflows/license-checker.yml +++ b/.github/workflows/license-checker.yml @@ -30,8 +30,8 @@ jobs: timeout-minutes: 3 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: Check License Header - uses: apache/skywalking-eyes@v0.4.0 + uses: apache/skywalking-eyes@v0.5.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml old mode 100755 new mode 100644 index 106a9d582c49..20b2f16ec94e --- a/.github/workflows/link-check.yml +++ b/.github/workflows/link-check.yml @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get script run: | diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 10f852db30a3..56cd00c02c8b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code. 
- uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: spell check run: | pip install codespell==2.1.0 @@ -30,10 +30,10 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: Setup Nodejs env - uses: actions/setup-node@v3.8.0 + uses: actions/setup-node@v4.0.0 with: node-version: '12' diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index c054303ccd74..17cf28691a4a 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/redhat-ci.yaml b/.github/workflows/redhat-ci.yaml index cf03ae002186..9bd8d39e35aa 100644 --- a/.github/workflows/redhat-ci.yaml +++ b/.github/workflows/redhat-ci.yaml @@ -26,12 +26,12 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index a2b606667fad..85df2c0816f7 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive - uses: ./.github/actions/action-semantic-pull-request diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml index a646d86ce1fe..9e1c9fa2963e 100644 --- a/.github/workflows/tars-ci.yml +++ b/.github/workflows/tars-ci.yml @@ -37,7 
+37,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/update-labels.yml b/.github/workflows/update-labels.yml index 262604f23cf0..80919aefb841 100644 --- a/.github/workflows/update-labels.yml +++ b/.github/workflows/update-labels.yml @@ -1,19 +1,21 @@ -name: Update label when user responds +name: Update labels when user responds in issue and pr permissions: issues: write + pull-requests: write on: issue_comment: types: [created] + pull_request_review_comment: + types: [created] jobs: - run-check: - if: ${{ !github.event.issue.pull_request }} # don't execute for PR comments + issue_commented: + if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded') runs-on: ubuntu-latest steps: - name: update labels when user responds uses: actions/github-script@v6 - if: ${{ github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded') }} with: script: | github.rest.issues.addLabels({ @@ -28,3 +30,33 @@ jobs: repo: context.repo.repo, name: "wait for update" }) + + pr_commented: + if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.issue.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update label when user responds + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: 
context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) + + pr_review_commented: + if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update label when user responds + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) diff --git a/.licenserc.yaml b/.licenserc.yaml index 315fa71bcf42..8b423f25cdd1 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -23,6 +23,8 @@ header: paths-ignore: - '.gitignore' + - '.gitattributes' + - '.gitmodules' - 'LICENSE' - 'NOTICE' - '**/*.json' @@ -46,7 +48,11 @@ header: # Exclude plugin-specific configuration files - 't/plugin/authz-casbin' - 't/coredns' + - 't/fuzzing/requirements.txt' + - 't/perf/requirements.txt' - 'autodocs/' - 'docs/**/*.md' + - '.ignore_words' + - '.luacheckrc' comment: on-failure diff --git a/CHANGELOG.md b/CHANGELOG.md index 78f9cc93a4dd..ce5f1d01726b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,8 @@ title: Changelog ## Table of Contents +- [3.6.0](#360) +- [3.5.0](#350) - [3.4.0](#340) - [3.3.0](#330) - [3.2.1](#321) @@ -71,6 +73,73 @@ title: Changelog - [0.7.0](#070) - [0.6.0](#060) +## 3.6.0 + +### Change + +- :warning: Remove gRPC support between APISIX and etcd and remove `etcd.use_grpc` configuration option: [#10015](https://github.com/apache/apisix/pull/10015) +- :warning: Remove conf server. 
The data plane no longer supports direct communication with the control plane, and the configuration should be adjusted from `config_provider: control_plane` to `config_provider: etcd`: [#10012](https://github.com/apache/apisix/pull/10012) +- :warning: Enforce strict schema validation on the properties of the core APISIX resources: [#10233](https://github.com/apache/apisix/pull/10233) + +### Core + +- :sunrise: Support configuring the buffer size of the access log: [#10225](https://github.com/apache/apisix/pull/10225) +- :sunrise: Support the use of local DNS resolvers in service discovery by configuring `resolv_conf`: [#9770](https://github.com/apache/apisix/pull/9770) +- :sunrise: Remove Rust dependency for installation: [#10121](https://github.com/apache/apisix/pull/10121) +- :sunrise: Support Dubbo protocol in xRPC [#9660](https://github.com/apache/apisix/pull/9660) + +### Plugins + +- :sunrise: Support https in traffic-split plugin: [#9115](https://github.com/apache/apisix/pull/9115) +- :sunrise: Support rewrite request body in external plugin:[#9990](https://github.com/apache/apisix/pull/9990) +- :sunrise: Support set nginx variables in opentelemetry plugin: [#8871](https://github.com/apache/apisix/pull/8871) +- :sunrise: Support unix sock host pattern in the chaitin-waf plugin: [#10161](https://github.com/apache/apisix/pull/10161) + +### Bugfixes + +- Fix GraphQL POST request route matching exception: [#10198](https://github.com/apache/apisix/pull/10198) +- Fix error on array of multiline string in `apisix.yaml`: [#10193](https://github.com/apache/apisix/pull/10193) +- Add error handlers for invalid `cache_zone` configuration in the `proxy-cache` plugin: [#10138](https://github.com/apache/apisix/pull/10138) + +## 3.5.0 + +### Change + +- :warning: remove snowflake algorithm in the request-id plugin: [#9715](https://github.com/apache/apisix/pull/9715) +- :warning: No longer compatible with OpenResty 1.19, it needs to be upgraded to 1.21+: 
[#9913](https://github.com/apache/apisix/pull/9913) +- :warning: Remove the configuration item `apisix.stream_proxy.only`, the L4/L7 proxy needs to be enabled through the configuration item `apisix.proxy_mode`: [#9607](https://github.com/apache/apisix/pull/9607) +- :warning: The admin-api `/apisix/admin/plugins?all=true` marked as deprecated: [#9580](https://github.com/apache/apisix/pull/9580) +- :warning: allowlist and denylist can't be enabled at the same time in ua-restriction plugin: [#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: Support host level dynamic setting of tls protocol version: [#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: Support force delete resource: [#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: Support pulling env vars from yaml keys: [#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: Add schema validate API in admin-api: [#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: Add chaitin-waf plugin: [#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: Support vars for file-logger plugin: [#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: Support adding response headers for mock plugin: [#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: Support regex_uri with unsafe_uri for proxy-rewrite plugin: [#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support set client_email field for google-cloud-logging plugin: [#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support sending headers upstream returned by OPA server for opa plugin: [#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: Support configuring proxy server for openid-connect plugin: [#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- Fix(log-rotate): the max_kept configuration doesn't work when using custom name: [#9749](https://github.com/apache/apisix/pull/9749) +- 
Fix(limit_conn): do not use the http variable in stream mode: [#9816](https://github.com/apache/apisix/pull/9816) +- Fix(loki-logger): getting an error with log_labels: [#9850](https://github.com/apache/apisix/pull/9850) +- Fix(limit-count): X-RateLimit-Reset shouldn't be set to 0 after request be rejected: [#9978](https://github.com/apache/apisix/pull/9978) +- Fix(nacos): attempt to index upvalue 'applications' (a nil value): [#9960](https://github.com/apache/apisix/pull/9960) +- Fix(etcd): can't sync etcd data if key has special character: [#9967](https://github.com/apache/apisix/pull/9967) +- Fix(tencent-cloud-cls): dns parsing failure: [#9843](https://github.com/apache/apisix/pull/9843) +- Fix(reload): worker not exited when executing quit or reload command [#9909](https://github.com/apache/apisix/pull/9909) +- Fix(traffic-split): upstream_id validity verification [#10008](https://github.com/apache/apisix/pull/10008) + ## 3.4.0 ### Core diff --git a/Makefile b/Makefile index c6979cd6f906..4031e314300c 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,7 @@ VERSION ?= master project_name ?= apache-apisix project_release_name ?= $(project_name)-$(VERSION)-src +OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json # Hyperconverged Infrastructure ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') @@ -68,6 +69,8 @@ endif ifeq ($(ENV_OS_NAME), darwin) ifeq ($(ENV_OS_ARCH), arm64) ENV_HOMEBREW_PREFIX := /opt/homebrew + ENV_INST_BINDIR := $(ENV_INST_PREFIX)/local/bin + ENV_INST_LUADIR := $(shell which lua | xargs realpath | sed 's/bin\/lua//g') endif # OSX archive `._` cache file @@ -147,14 +150,6 @@ help: fi @echo -### check-rust : check if Rust is installed in the environment -.PHONY: check-rust -check-rust: - @if ! [ $(shell command -v rustc) ]; then \ - echo "ERROR: Rust is not installed. Please install Rust before continuing." 
>&2; \ - exit 1; \ - fi; - ### deps : Installing dependencies .PHONY: deps @@ -382,6 +377,9 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis $(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo + $(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils $(ENV_INSTALL) apisix/utils/*.lua $(ENV_INST_LUADIR)/apisix/utils/ @@ -450,6 +448,8 @@ compress-tar: .PHONY: ci-env-up ci-env-up: @$(call func_echo_status, "$@ -> [ Start ]") + touch $(OTEL_CONFIG) + chmod 777 $(OTEL_CONFIG) $(ENV_DOCKER_COMPOSE) up -d @$(call func_echo_success_status, "$@ -> [ Done ]") @@ -474,5 +474,6 @@ ci-env-rebuild: .PHONY: ci-env-down ci-env-down: @$(call func_echo_status, "$@ -> [ Start ]") + rm $(OTEL_CONFIG) $(ENV_DOCKER_COMPOSE) down @$(call func_echo_success_status, "$@ -> [ Done ]") diff --git a/README.md b/README.md index 9d304c5fed92..46e630743d61 100644 --- a/README.md +++ b/README.md @@ -217,6 +217,7 @@ A wide variety of companies and organizations use APISIX API Gateway for researc - HONOR - Horizon Robotics - iQIYI +- Lenovo - NASA JPL - Nayuki - OPPO @@ -226,6 +227,7 @@ A wide variety of companies and organizations use APISIX API Gateway for researc - Travelsky - vivo - Sina Weibo +- WeCity - WPS - XPENG - Zoom diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua index 0d4ef932362f..333c798e6ada 100644 --- a/apisix/admin/init.lua +++ b/apisix/admin/init.lua @@ -376,6 +376,41 @@ local function reload_plugins(data, event, source, pid) end +local function schema_validate() + local uri_segs = core.utils.split_uri(ngx.var.uri) + core.log.info("uri: ", core.json.delay_encode(uri_segs)) + + local seg_res = uri_segs[6] + local resource = resources[seg_res] + if not resource then + core.response.exit(404, 
{error_msg = "Unsupported resource type: ".. seg_res}) + end + + local req_body, err = core.request.get_body(MAX_REQ_BODY) + if err then + core.log.error("failed to read request body: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err}) + end + + if req_body then + local data, err = core.json.decode(req_body) + if err then + core.log.error("invalid request body: ", req_body, " err: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err, + req_body = req_body}) + end + + req_body = data + end + + local ok, err = core.schema.check(resource.schema, req_body) + if ok then + core.response.exit(200) + end + core.response.exit(400, {error_msg = err}) +end + + local uri_route = { { paths = [[/apisix/admin]], @@ -392,6 +427,11 @@ local uri_route = { methods = {"GET"}, handler = get_plugins_list, }, + { + paths = [[/apisix/admin/schema/validate/*]], + methods = {"POST"}, + handler = schema_validate, + }, { paths = reload_event, methods = {"PUT"}, diff --git a/apisix/admin/resource.lua b/apisix/admin/resource.lua index 35fe3bba2476..b03f1b069ea6 100644 --- a/apisix/admin/resource.lua +++ b/apisix/admin/resource.lua @@ -19,6 +19,7 @@ local utils = require("apisix.admin.utils") local apisix_ssl = require("apisix.ssl") local setmetatable = setmetatable local tostring = tostring +local ipairs = ipairs local type = type @@ -49,7 +50,38 @@ local function split_typ_and_id(id, sub_path) end -function _M:check_conf(id, conf, need_id, typ) +local function check_forbidden_properties(conf, forbidden_properties) + local not_allow_properties = "the property is forbidden: " + + if conf then + for _, v in ipairs(forbidden_properties) do + if conf[v] then + return not_allow_properties .. " " .. v + end + end + + if conf.upstream then + for _, v in ipairs(forbidden_properties) do + if conf.upstream[v] then + return not_allow_properties .. " upstream." .. 
v + end + end + end + + if conf.plugins then + for _, v in ipairs(forbidden_properties) do + if conf.plugins[v] then + return not_allow_properties .. " plugins." .. v + end + end + end + end + + return nil +end + + +function _M:check_conf(id, conf, need_id, typ, allow_time) if self.name == "secrets" then id = typ .. "/" .. id end @@ -76,6 +108,15 @@ function _M:check_conf(id, conf, need_id, typ) conf.id = id end + -- check create time and update time + if not allow_time then + local forbidden_properties = {"create_time", "update_time"} + local err = check_forbidden_properties(conf, forbidden_properties) + if err then + return nil, {error_msg = err} + end + end + core.log.info("conf : ", core.json.delay_encode(conf)) -- check the resource own rules @@ -355,7 +396,7 @@ function _M:patch(id, conf, sub_path, args) core.log.info("new conf: ", core.json.delay_encode(node_value, true)) - local ok, err = self:check_conf(id, node_value, true, typ) + local ok, err = self:check_conf(id, node_value, true, typ, true) if not ok then return 400, err end diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua index dc14bda44ec6..4218b77f22dd 100644 --- a/apisix/admin/services.lua +++ b/apisix/admin/services.lua @@ -16,6 +16,7 @@ -- local core = require("apisix.core") local get_routes = require("apisix.router").http_routes +local get_stream_routes = require("apisix.router").stream_routes local apisix_upstream = require("apisix.upstream") local resource = require("apisix.admin.resource") local schema_plugin = require("apisix.admin.plugins").check_schema @@ -99,6 +100,21 @@ local function delete_checker(id) end end + local stream_routes, stream_routes_ver = get_stream_routes() + core.log.info("stream_routes: ", core.json.delay_encode(stream_routes, true)) + core.log.info("stream_routes_ver: ", stream_routes_ver) + if stream_routes_ver and stream_routes then + for _, route in ipairs(stream_routes) do + if type(route) == "table" and route.value + and route.value.service_id + 
and tostring(route.value.service_id) == id then + return 400, {error_msg = "can not delete this service directly," + .. " stream_route [" .. route.value.id + .. "] is still using it now"} + end + end + end + return nil, nil end diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua index c16a9a7938c3..6e1c6e6385c3 100644 --- a/apisix/admin/stream_routes.lua +++ b/apisix/admin/stream_routes.lua @@ -42,6 +42,23 @@ local function check_conf(id, conf, need_id, schema) end end + local service_id = conf.service_id + if service_id then + local key = "/services/" .. service_id + local res, err = core.etcd.get(key) + if not res then + return nil, {error_msg = "failed to fetch service info by " + .. "service id [" .. service_id .. "]: " + .. err} + end + + if res.status ~= 200 then + return nil, {error_msg = "failed to fetch service info by " + .. "service id [" .. service_id .. "], " + .. "response code: " .. res.status} + end + end + local ok, err = stream_route_checker(conf, true) if not ok then return nil, {error_msg = err} diff --git a/apisix/balancer.lua b/apisix/balancer.lua index f836533171e7..0fe2e6539922 100644 --- a/apisix/balancer.lua +++ b/apisix/balancer.lua @@ -79,7 +79,7 @@ local function fetch_health_nodes(upstream, checker) if ok then up_nodes = transform_node(up_nodes, node) elseif err then - core.log.error("failed to get health check target status, addr: ", + core.log.warn("failed to get health check target status, addr: ", node.host, ":", port or node.port, ", host: ", host, ", err: ", err) end end diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua index 51cac2a508e6..b67248095d92 100644 --- a/apisix/cli/etcd.lua +++ b/apisix/cli/etcd.lua @@ -280,103 +280,7 @@ local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count) end -local function grpc_request(url, yaml_conf, key) - local cmd - - local auth = "" - if yaml_conf.etcd.user then - local user = yaml_conf.etcd.user - local password = yaml_conf.etcd.password 
- auth = str_format("--user=%s:%s", user, password) - end - - if str_sub(url, 1, 8) == "https://" then - local host = url:sub(9) - - local verify = true - local certificate, pkey, cafile - if yaml_conf.etcd.tls then - local cfg = yaml_conf.etcd.tls - - if cfg.verify == false then - verify = false - end - - certificate = cfg.cert - pkey = cfg.key - - local apisix_ssl = yaml_conf.apisix.ssl - if apisix_ssl and apisix_ssl.ssl_trusted_certificate then - cafile = apisix_ssl.ssl_trusted_certificate - end - end - - cmd = str_format( - "etcdctl --insecure-transport=false %s %s %s %s " .. - "%s --endpoints=%s put %s init_dir", - verify and "" or "--insecure-skip-tls-verify", - certificate and "--cert " .. certificate or "", - pkey and "--key " .. pkey or "", - cafile and "--cacert " .. cafile or "", - auth, host, key) - else - local host = url:sub(#("http://") + 1) - - cmd = str_format( - "etcdctl %s --endpoints=%s put %s init_dir", - auth, host, key) - end - - local res, err = util.execute_cmd(cmd) - return res, err -end - - -local function prepare_dirs_via_grpc(yaml_conf, args, index, host) - local is_success = true - - local errmsg - local dirs = {} - for name in pairs(constants.HTTP_ETCD_DIRECTORY) do - dirs[name] = true - end - for name in pairs(constants.STREAM_ETCD_DIRECTORY) do - dirs[name] = true - end - - for dir_name in pairs(dirs) do - local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/" - local res, err - local retry_time = 0 - while retry_time < 2 do - res, err = grpc_request(host, yaml_conf, key) - retry_time = retry_time + 1 - if res then - break - end - print(str_format("Warning! 
Request etcd endpoint \'%s\' error, %s, retry time=%s", - host, err, retry_time)) - end - - if not res then - errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", host, err) - util.die(errmsg) - end - - if args and args["verbose"] then - print(res) - end - end - - return is_success -end - - -local function prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) - if use_grpc then - return prepare_dirs_via_grpc(yaml_conf, args, index, host) - end - +local function prepare_dirs(yaml_conf, args, index, host, host_count) return prepare_dirs_via_http(yaml_conf, args, index, host, host_count) end @@ -400,8 +304,6 @@ function _M.init(env, args) util.die("failed to read `etcd` field from yaml file when init etcd") end - local etcd_conf = yaml_conf.etcd - -- convert old single etcd config to multiple etcd config if type(yaml_conf.etcd.host) == "string" then yaml_conf.etcd.host = {yaml_conf.etcd.host} @@ -477,22 +379,9 @@ function _M.init(env, args) util.die("the etcd cluster needs at least 50% and above healthy nodes\n") end - if etcd_conf.use_grpc and not env.use_apisix_base then - io_stderr:write("'use_grpc: true' in the etcd configuration " .. 
- "is not supported by vanilla OpenResty\n") - end - - local use_grpc = etcd_conf.use_grpc and env.use_apisix_base - if use_grpc then - local ok, err = util.execute_cmd("command -v etcdctl") - if not ok then - util.die("can't find etcdctl: ", err, "\n") - end - end - local etcd_ok = false for index, host in ipairs(etcd_healthy_hosts) do - if prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) then + if prepare_dirs(yaml_conf, args, index, host, host_count) then etcd_ok = true break end diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua index 149c4e913c35..94e790db65e3 100644 --- a/apisix/cli/file.lua +++ b/apisix/cli/file.lua @@ -292,26 +292,14 @@ function _M.read_yaml_conf(apisix_home) default_conf.apisix.enable_admin = true elseif default_conf.deployment.role == "data_plane" then + default_conf.etcd = default_conf.deployment.etcd if default_conf.deployment.role_data_plane.config_provider == "yaml" then default_conf.deployment.config_provider = "yaml" elseif default_conf.deployment.role_data_plane.config_provider == "xds" then default_conf.deployment.config_provider = "xds" - else - default_conf.etcd = default_conf.deployment.role_data_plane.control_plane end default_conf.apisix.enable_admin = false end - - if default_conf.etcd and default_conf.deployment.certs then - -- copy certs configuration to keep backward compatible - local certs = default_conf.deployment.certs - local etcd = default_conf.etcd - if not etcd.tls then - etcd.tls = {} - end - etcd.tls.cert = certs.cert - etcd.tls.key = certs.cert_key - end end if default_conf.deployment.config_provider == "yaml" then diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua index ab8407b572ec..3e1aadd9b543 100644 --- a/apisix/cli/ngx_tpl.lua +++ b/apisix/cli/ngx_tpl.lua @@ -115,10 +115,6 @@ http { } } {% end %} - - {% if conf_server then %} - {* conf_server *} - {% end %} } {% end %} @@ -369,8 +365,12 @@ http { log_format main escape={* http.access_log_format_escape *} '{* 
http.access_log_format *}'; uninitialized_variable_warn off; + {% if http.access_log_buffer then %} + access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3; + {% else %} access_log {* http.access_log *} main buffer=16384 flush=3; {% end %} + {% end %} open_file_cache max=1000 inactive=60; client_max_body_size {* http.client_max_body_size *}; keepalive_timeout {* http.keepalive_timeout *}; @@ -469,7 +469,7 @@ http { } apisix.http_init(args) - -- set apisix_lua_home into constans module + -- set apisix_lua_home into constants module -- it may be used by plugins to determine the work path of apisix local constants = require("apisix.constants") constants.apisix_lua_home = "{*apisix_lua_home*}" @@ -576,10 +576,6 @@ http { } {% end %} - {% if conf_server then %} - {* conf_server *} - {% end %} - {% if deployment_role ~= "control_plane" then %} {% if enabled_plugins["proxy-cache"] then %} @@ -639,6 +635,22 @@ http { proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *}; {% end %} + # opentelemetry_set_ngx_var starts + {% if opentelemetry_set_ngx_var then %} + set $opentelemetry_context_traceparent ''; + set $opentelemetry_trace_id ''; + set $opentelemetry_span_id ''; + {% end %} + # opentelemetry_set_ngx_var ends + + # zipkin_set_ngx_var starts + {% if zipkin_set_ngx_var then %} + set $zipkin_context_traceparent ''; + set $zipkin_trace_id ''; + set $zipkin_span_id ''; + {% end %} + # zipkin_set_ngx_var ends + # http server configuration snippet starts {% if http_server_configuration_snippet then %} {* http_server_configuration_snippet *} diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua index 8ba08c7fa974..0eaebae56c43 100644 --- a/apisix/cli/ops.lua +++ b/apisix/cli/ops.lua @@ -21,7 +21,6 @@ local file = require("apisix.cli.file") local schema = require("apisix.cli.schema") local ngx_tpl = require("apisix.cli.ngx_tpl") local cli_ip = require("apisix.cli.ip") -local snippet = require("apisix.cli.snippet") local profile = 
require("apisix.core.profile") local template = require("resty.template") local argparse = require("argparse") @@ -533,11 +532,6 @@ Please modify "admin_key" in conf/config.yaml . proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout end - local conf_server, err = snippet.generate_conf_server(env, yaml_conf) - if err then - util.die(err, "\n") - end - if yaml_conf.deployment and yaml_conf.deployment.role then local role = yaml_conf.deployment.role env.deployment_role = role @@ -548,6 +542,16 @@ Please modify "admin_key" in conf/config.yaml . end end + local opentelemetry_set_ngx_var + if enabled_plugins["opentelemetry"] and yaml_conf.plugin_attr["opentelemetry"] then + opentelemetry_set_ngx_var = yaml_conf.plugin_attr["opentelemetry"].set_ngx_var + end + + local zipkin_set_ngx_var + if enabled_plugins["zipkin"] and yaml_conf.plugin_attr["zipkin"] then + zipkin_set_ngx_var = yaml_conf.plugin_attr["zipkin"].set_ngx_var + end + -- Using template.render local sys_conf = { lua_path = env.pkg_path_org, @@ -568,7 +572,8 @@ Please modify "admin_key" in conf/config.yaml . control_server_addr = control_server_addr, prometheus_server_addr = prometheus_server_addr, proxy_mirror_timeouts = proxy_mirror_timeouts, - conf_server = conf_server, + opentelemetry_set_ngx_var = opentelemetry_set_ngx_var, + zipkin_set_ngx_var = zipkin_set_ngx_var } if not yaml_conf.apisix then @@ -814,20 +819,11 @@ local function start(env, ...) -- start a new APISIX instance - local conf_server_sock_path = env.apisix_home .. 
"/conf/config_listen.sock" - if pl_path.exists(conf_server_sock_path) then - -- remove stale sock (if exists) so that APISIX can start - local ok, err = os_remove(conf_server_sock_path) - if not ok then - util.die("failed to remove stale conf server sock file, error: ", err) - end - end - local parser = argparse() parser:argument("_", "Placeholder") parser:option("-c --config", "location of customized config.yaml") -- TODO: more logs for APISIX cli could be added using this feature - parser:flag("--verbose", "show init_etcd debug information") + parser:flag("-v --verbose", "show init_etcd debug information") local args = parser:parse() local customized_yaml = args["config"] diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua index 3684232f1a7f..6f6450b46e0c 100644 --- a/apisix/cli/schema.lua +++ b/apisix/cli/schema.lua @@ -62,11 +62,6 @@ local etcd_schema = { minimum = 1, description = "etcd connection timeout in seconds", }, - use_grpc = { - type = "boolean", - -- TODO: set true by default in v3.2 - default = false, - }, }, required = {"prefix", "host"} } @@ -388,60 +383,23 @@ local deployment_schema = { config_provider = { enum = {"etcd"} }, - conf_server = { - properties = { - listen = { - type = "string", - default = "0.0.0.0:9280", - }, - cert = { type = "string" }, - cert_key = { type = "string" }, - client_ca_cert = { type = "string" }, - }, - required = {"cert", "cert_key"} - }, - }, - required = {"config_provider", "conf_server"} - }, - certs = { - properties = { - cert = { type = "string" }, - cert_key = { type = "string" }, - trusted_ca_cert = { type = "string" }, - }, - dependencies = { - cert = { - required = {"cert_key"}, - }, }, - default = {}, + required = {"config_provider"} }, }, required = {"etcd", "role_control_plane"} }, data_plane = { properties = { + etcd = etcd_schema, role_data_plane = { properties = { config_provider = { - enum = {"control_plane", "yaml", "xds"} + enum = {"etcd", "yaml", "xds"} }, }, required = {"config_provider"} 
}, - certs = { - properties = { - cert = { type = "string" }, - cert_key = { type = "string" }, - trusted_ca_cert = { type = "string" }, - }, - dependencies = { - cert = { - required = {"cert_key"}, - }, - }, - default = {}, - }, }, required = {"role_data_plane"} } diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua deleted file mode 100644 index 95069a0ab263..000000000000 --- a/apisix/cli/snippet.lua +++ /dev/null @@ -1,206 +0,0 @@ --- --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. 
--- -local template = require("resty.template") -local pl_path = require("pl.path") -local ipairs = ipairs - - --- this module provide methods to generate snippets which will be used in the nginx.conf template -local _M = {} -local conf_server_tpl = [[ -upstream apisix_conf_backend { - server 0.0.0.0:80; - balancer_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.balancer() - } - keepalive 320; - keepalive_requests 1000; - keepalive_timeout 60s; -} - -{% if trusted_ca_cert then %} -lua_ssl_trusted_certificate {* trusted_ca_cert *}; -{% end %} - -server { - {% if control_plane then %} - {% if directive_prefix == "grpc" then %} - listen {* control_plane.listen *} ssl http2; - {% else %} - listen {* control_plane.listen *} ssl; - {% end %} - ssl_certificate {* control_plane.cert *}; - ssl_certificate_key {* control_plane.cert_key *}; - - {% if control_plane.client_ca_cert then %} - ssl_verify_client on; - ssl_client_certificate {* control_plane.client_ca_cert *}; - {% end %} - - {% else %} - {% if directive_prefix == "grpc" then %} - listen unix:{* home *}/conf/config_listen.sock http2; - {% else %} - listen unix:{* home *}/conf/config_listen.sock; - {% end %} - {% end %} - - access_log off; - - set $upstream_host ''; - - access_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.access() - } - - location / { - {% if enable_https then %} - {* directive_prefix *}_pass {* scheme_name *}s://apisix_conf_backend; - {* directive_prefix *}_ssl_protocols TLSv1.2 TLSv1.3; - {* directive_prefix *}_ssl_server_name on; - - {% if etcd_tls_verify then %} - {* directive_prefix *}_ssl_verify on; - {* directive_prefix *}_ssl_trusted_certificate {* ssl_trusted_certificate *}; - {% end %} - - {% if sni then %} - {* directive_prefix *}_ssl_name {* sni *}; - {% else %} - {* directive_prefix *}_ssl_name $upstream_host; - {% end %} - - {% if client_cert then %} - {* directive_prefix *}_ssl_certificate {* client_cert *}; - {* 
directive_prefix *}_ssl_certificate_key {* client_cert_key *}; - {% end %} - - {% else %} - {* directive_prefix *}_pass {* scheme_name *}://apisix_conf_backend; - {% end %} - - {% if scheme_name == "http" then %} - proxy_http_version 1.1; - proxy_set_header Connection ""; - {% end %} - - {* directive_prefix *}_set_header Host $upstream_host; - {* directive_prefix *}_next_upstream error timeout non_idempotent - http_500 http_502 http_503 http_504; - } - - log_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.log() - } -} -]] - - -local function is_grpc_used(env, etcd) - local is_grpc_available = env.use_apisix_base - return is_grpc_available and etcd.use_grpc -end - - -function _M.generate_conf_server(env, conf) - if not (conf.deployment and ( - conf.deployment.role == "traditional" or - conf.deployment.role == "control_plane")) - then - return nil, nil - end - - -- we use proxy even the role is traditional so that we can test the proxy in daily dev - local etcd = conf.deployment.etcd - local servers = etcd.host - local enable_https = false - local prefix = "https://" - if servers[1]:find(prefix, 1, true) then - enable_https = true - end - - for i, s in ipairs(servers) do - if (s:find(prefix, 1, true) ~= nil) ~= enable_https then - return nil, "all nodes in the etcd cluster should enable/disable TLS together" - end - - local _, to = s:find("://", 1, true) - if not to then - return nil, "bad etcd endpoint format" - end - end - - local control_plane - if conf.deployment.role == "control_plane" then - control_plane = conf.deployment.role_control_plane.conf_server - control_plane.cert = pl_path.abspath(control_plane.cert) - control_plane.cert_key = pl_path.abspath(control_plane.cert_key) - - if control_plane.client_ca_cert then - control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert) - end - end - - local trusted_ca_cert - if conf.deployment.certs then - if conf.deployment.certs.trusted_ca_cert then - trusted_ca_cert 
= pl_path.abspath(conf.deployment.certs.trusted_ca_cert) - end - end - - local conf_render = template.compile(conf_server_tpl) - local tls = etcd.tls - local client_cert - local client_cert_key - local ssl_trusted_certificate - local etcd_tls_verify - local use_grpc = is_grpc_used(env, etcd) - if tls then - if tls.cert then - client_cert = pl_path.abspath(tls.cert) - client_cert_key = pl_path.abspath(tls.key) - end - - etcd_tls_verify = tls.verify - if enable_https and etcd_tls_verify then - if not conf.apisix.ssl.ssl_trusted_certificate then - return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled" - end - ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate) - end - end - - return conf_render({ - sni = tls and tls.sni, - home = env.apisix_home or ".", - control_plane = control_plane, - enable_https = enable_https, - client_cert = client_cert, - client_cert_key = client_cert_key, - trusted_ca_cert = trusted_ca_cert, - etcd_tls_verify = etcd_tls_verify, - ssl_trusted_certificate = ssl_trusted_certificate, - scheme_name = use_grpc and "grpc" or "http", - directive_prefix = use_grpc and "grpc" or "proxy", - }) -end - - -return _M diff --git a/apisix/constants.lua b/apisix/constants.lua index 72209aa4d905..0b3ec160b53d 100644 --- a/apisix/constants.lua +++ b/apisix/constants.lua @@ -37,6 +37,7 @@ return { }, STREAM_ETCD_DIRECTORY = { ["/upstreams"] = true, + ["/services"] = true, ["/plugins"] = true, ["/ssls"] = true, ["/stream_routes"] = true, diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index e3e40672c95f..357f24fa1e6e 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -362,40 +362,6 @@ local function readdir(etcd_cli, key, formatter) end -local function grpc_waitdir(self, etcd_cli, key, modified_index, timeout) - local watching_stream = self.watching_stream - if not watching_stream then - local attr = {} - attr.start_revision = modified_index - local opts = {} - 
opts.timeout = timeout - - local st, err = etcd_cli:create_grpc_watch_stream(key, attr, opts) - if not st then - log.error("create watch stream failed: ", err) - return nil, err - end - - log.info("create watch stream for key: ", key, ", modified_index: ", modified_index) - - self.watching_stream = st - watching_stream = st - end - - return etcd_cli:read_grpc_watch_stream(watching_stream) -end - - -local function flush_watching_streams(self) - local etcd_cli = self.etcd_cli - if not etcd_cli.use_grpc then - return - end - - self.watching_stream = nil -end - - local function http_waitdir(self, etcd_cli, key, modified_index, timeout) if not watch_ctx.idx[key] then watch_ctx.idx[key] = 1 @@ -470,12 +436,7 @@ local function waitdir(self) return nil, "not inited" end - local res, err - if etcd_cli.use_grpc then - res, err = grpc_waitdir(self, etcd_cli, key, modified_index, timeout) - else - res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout) - end + local res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout) if not res then -- log.error("failed to get key from etcd: ", err) @@ -620,13 +581,9 @@ local function sync_data(self) return nil, "missing 'key' arguments" end - if not self.etcd_cli.use_grpc then - init_watch_ctx(self.key) - end + init_watch_ctx(self.key) if self.need_reload then - flush_watching_streams(self) - local res, err = readdir(self.etcd_cli, self.key) if not res then return false, err @@ -916,7 +873,6 @@ local function _automatic_fetch(premature, self) end if not exiting() and self.running then - flush_watching_streams(self) ngx_timer_at(0, _automatic_fetch, self) end end @@ -1118,10 +1074,6 @@ function _M.init() return true end - if local_conf.etcd.use_grpc then - return true - end - -- don't go through proxy during start because the proxy is not available local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() if not etcd_cli then @@ -1147,21 +1099,6 @@ function _M.init_worker() return true end - if not 
local_conf.etcd.use_grpc then - return true - end - - -- don't go through proxy during start because the proxy is not available - local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() - if not etcd_cli then - return nil, "failed to start a etcd instance: " .. err - end - - local res, err = readdir(etcd_cli, prefix, create_formatter(prefix)) - if not res then - return nil, err - end - return true end diff --git a/apisix/core/config_util.lua b/apisix/core/config_util.lua index 7e57ed402fd8..7313e0116ad2 100644 --- a/apisix/core/config_util.lua +++ b/apisix/core/config_util.lua @@ -114,7 +114,7 @@ function _M.fire_all_clean_handlers(item) clean_handler.f(item) end - item.clean_handlers = nil + item.clean_handlers = {} end diff --git a/apisix/core/ctx.lua b/apisix/core/ctx.lua index 5128061d58fe..6d77b43811ca 100644 --- a/apisix/core/ctx.lua +++ b/apisix/core/ctx.lua @@ -260,7 +260,9 @@ do elseif core_str.has_prefix(key, "post_arg_") then -- only match default post form - if request.header(nil, "Content-Type") == "application/x-www-form-urlencoded" then + local content_type = request.header(nil, "Content-Type") + if content_type ~= nil and core_str.has_prefix(content_type, + "application/x-www-form-urlencoded") then local arg_key = sub_str(key, 10) local args = request.get_post_args()[arg_key] if args then diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua index b52517cd40b5..5cd1038581dd 100644 --- a/apisix/core/etcd.lua +++ b/apisix/core/etcd.lua @@ -28,25 +28,15 @@ local clone_tab = require("table.clone") local health_check = require("resty.etcd.health_check") local pl_path = require("pl.path") local ipairs = ipairs -local pcall = pcall local setmetatable = setmetatable local string = string local tonumber = tonumber -local ngx_config_prefix = ngx.config.prefix() -local ngx_socket_tcp = ngx.socket.tcp local ngx_get_phase = ngx.get_phase -local is_http = ngx.config.subsystem == "http" local _M = {} -local function has_mtls_support() - local s = 
ngx_socket_tcp() - return s.tlshandshake ~= nil -end - - local function _new(etcd_conf) local prefix = etcd_conf.prefix etcd_conf.http_host = etcd_conf.host @@ -72,17 +62,6 @@ local function _new(etcd_conf) end end - if etcd_conf.use_grpc then - if ngx_get_phase() == "init" then - etcd_conf.use_grpc = false - else - local ok = pcall(require, "resty.grpc") - if not ok then - etcd_conf.use_grpc = false - end - end - end - local etcd_cli, err = etcd.new(etcd_conf) if not etcd_cli then return nil, nil, err @@ -129,64 +108,7 @@ local function new() etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate end - local proxy_by_conf_server = false - - if local_conf.deployment then - if local_conf.deployment.role == "traditional" - -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in - -- daily development. However, a stream proxy can't be the CP. - -- Hence, generate a HTTP conf server to proxy etcd requests in stream proxy is - -- unnecessary and inefficient. - and is_http - then - local sock_prefix = ngx_config_prefix - etcd_conf.unix_socket_proxy = - "unix:" .. sock_prefix .. "/conf/config_listen.sock" - etcd_conf.host = {"http://127.0.0.1:2379"} - proxy_by_conf_server = true - - elseif local_conf.deployment.role == "control_plane" then - local addr = local_conf.deployment.role_control_plane.conf_server.listen - etcd_conf.host = {"https://" .. 
addr} - etcd_conf.tls = { - verify = false, - } - - if has_mtls_support() and local_conf.deployment.certs.cert then - local cert = local_conf.deployment.certs.cert - local cert_key = local_conf.deployment.certs.cert_key - etcd_conf.tls.cert = cert - etcd_conf.tls.key = cert_key - end - - proxy_by_conf_server = true - - elseif local_conf.deployment.role == "data_plane" then - if has_mtls_support() and local_conf.deployment.certs.cert then - local cert = local_conf.deployment.certs.cert - local cert_key = local_conf.deployment.certs.cert_key - - if not etcd_conf.tls then - etcd_conf.tls = {} - end - - etcd_conf.tls.cert = cert - etcd_conf.tls.key = cert_key - end - end - - if local_conf.deployment.certs and local_conf.deployment.certs.trusted_ca_cert then - etcd_conf.trusted_ca = local_conf.deployment.certs.trusted_ca_cert - end - end - - -- if an unhealthy etcd node is selected in a single admin read/write etcd operation, - -- the retry mechanism for health check can select another healthy etcd node - -- to complete the read/write etcd operation. - if proxy_by_conf_server then - -- health check is done in conf server - health_check.disable() - elseif not health_check.conf then + if not health_check.conf then health_check.init({ max_fails = 1, retry = true, @@ -349,10 +271,6 @@ do return nil, nil, err end - if tmp_etcd_cli.use_grpc then - etcd_cli_init_phase = tmp_etcd_cli - end - return tmp_etcd_cli, prefix end diff --git a/apisix/core/response.lua b/apisix/core/response.lua index cfbac1467341..04430abd5266 100644 --- a/apisix/core/response.lua +++ b/apisix/core/response.lua @@ -70,7 +70,9 @@ function resp_exit(code, ...) error("failed to encode data: " .. err, -2) else idx = idx + 1 - t[idx] = body .. "\n" + t[idx] = body + idx = idx + 1 + t[idx] = "\n" end elseif v ~= nil then @@ -80,7 +82,7 @@ function resp_exit(code, ...) 
end if idx > 0 then - ngx_print(concat_tab(t, "", 1, idx)) + ngx_print(t) end if code then diff --git a/apisix/core/version.lua b/apisix/core/version.lua index 7ba204811a82..ff16402d38b4 100644 --- a/apisix/core/version.lua +++ b/apisix/core/version.lua @@ -20,5 +20,5 @@ -- @module core.version return { - VERSION = "3.4.0" + VERSION = "3.6.0" } diff --git a/apisix/discovery/consul/init.lua b/apisix/discovery/consul/init.lua index ae1e4c64cc9c..32e306709e95 100644 --- a/apisix/discovery/consul/init.lua +++ b/apisix/discovery/consul/init.lua @@ -32,6 +32,7 @@ local ngx_timer_every = ngx.timer.every local log = core.log local json_delay_encode = core.json.delay_encode local ngx_worker_id = ngx.worker.id +local exiting = ngx.worker.exiting local thread_spawn = ngx.thread.spawn local thread_wait = ngx.thread.wait local thread_kill = ngx.thread.kill @@ -197,21 +198,20 @@ local function get_opts(consul_server, is_catalog) port = consul_server.port, connect_timeout = consul_server.connect_timeout, read_timeout = consul_server.read_timeout, + default_args = { + token = consul_server.token, + } } if not consul_server.keepalive then return opts end + opts.default_args.wait = consul_server.wait_timeout --blocked wait!=0; unblocked by wait=0 + if is_catalog then - opts.default_args = { - wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0 - index = consul_server.catalog_index, - } + opts.default_args.index = consul_server.catalog_index else - opts.default_args = { - wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0 - index = consul_server.health_index, - } + opts.default_args.index = consul_server.health_index end return opts @@ -277,7 +277,7 @@ end local function check_keepalive(consul_server, retry_delay) - if consul_server.keepalive then + if consul_server.keepalive and not exiting() then local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay) if not ok then log.error("create ngx_timer_at got error: ", err) @@ 
-396,6 +396,9 @@ function _M.connect(premature, consul_server, retry_delay) port = consul_server.port, connect_timeout = consul_server.connect_timeout, read_timeout = consul_server.read_timeout, + default_args = { + token = consul_server.token + } }) local catalog_success, catalog_res, catalog_err = pcall(function() return consul_client:get(consul_server.consul_watch_catalog_url) @@ -545,6 +548,7 @@ local function format_consul_params(consul_conf) core.table.insert(consul_server_list, { host = host, port = port, + token = consul_conf.token, connect_timeout = consul_conf.timeout.connect, read_timeout = consul_conf.timeout.read, wait_timeout = consul_conf.timeout.wait, diff --git a/apisix/discovery/consul/schema.lua b/apisix/discovery/consul/schema.lua index 3e998b015ce1..d7cf2954abf3 100644 --- a/apisix/discovery/consul/schema.lua +++ b/apisix/discovery/consul/schema.lua @@ -24,6 +24,7 @@ return { type = "string", } }, + token = {type = "string", default = ""}, fetch_interval = {type = "integer", minimum = 1, default = 3}, keepalive = { type = "boolean", diff --git a/apisix/discovery/consul_kv/init.lua b/apisix/discovery/consul_kv/init.lua index 2dad772ace75..6d616e059190 100644 --- a/apisix/discovery/consul_kv/init.lua +++ b/apisix/discovery/consul_kv/init.lua @@ -320,18 +320,14 @@ end local function format_consul_params(consul_conf) local consul_server_list = core.table.new(0, #consul_conf.servers) - local args + local args = { + token = consul_conf.token, + recurse = true + } - if consul_conf.keepalive == false then - args = { - recurse = true, - } - elseif consul_conf.keepalive then - args = { - recurse = true, - wait = consul_conf.timeout.wait, --blocked wait!=0; unblocked by wait=0 - index = 0, - } + if consul_conf.keepalive then + args.wait = consul_conf.timeout.wait --blocked wait!=0; unblocked by wait=0 + args.index = 0 end for _, v in pairs(consul_conf.servers) do diff --git a/apisix/discovery/consul_kv/schema.lua b/apisix/discovery/consul_kv/schema.lua 
index a2ebb5d07919..4c02b2c80dd0 100644 --- a/apisix/discovery/consul_kv/schema.lua +++ b/apisix/discovery/consul_kv/schema.lua @@ -24,6 +24,7 @@ return { type = "string", } }, + token = {type = "string", default = ""}, fetch_interval = {type = "integer", minimum = 1, default = 3}, keepalive = { type = "boolean", diff --git a/apisix/discovery/dns/init.lua b/apisix/discovery/dns/init.lua index 609ad5ea163f..601de0ebc9ee 100644 --- a/apisix/discovery/dns/init.lua +++ b/apisix/discovery/dns/init.lua @@ -64,14 +64,14 @@ end function _M.init_worker() local local_conf = config_local.local_conf() local servers = local_conf.discovery.dns.servers - + local resolv_conf = local_conf.discovery.dns.resolv_conf local default_order = {"last", "SRV", "A", "AAAA", "CNAME"} local order = core.table.try_read_attr(local_conf, "discovery", "dns", "order") order = order or default_order local opts = { hosts = {}, - resolvConf = {}, + resolvConf = resolv_conf, nameservers = servers, order = order, } diff --git a/apisix/discovery/dns/schema.lua b/apisix/discovery/dns/schema.lua index 989938ab1fa3..03c7934ae4cf 100644 --- a/apisix/discovery/dns/schema.lua +++ b/apisix/discovery/dns/schema.lua @@ -24,6 +24,9 @@ return { type = "string", }, }, + resolv_conf = { + type = "string", + }, order = { type = "array", minItems = 1, @@ -34,5 +37,12 @@ return { }, }, }, - required = {"servers"} + oneOf = { + { + required = {"servers"}, + }, + { + required = {"resolv_conf"}, + } + } } diff --git a/apisix/http/route.lua b/apisix/http/route.lua index d475646b56c6..dbf11abf5e28 100644 --- a/apisix/http/route.lua +++ b/apisix/http/route.lua @@ -103,8 +103,8 @@ function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter) end -function _M.match_uri(uri_router, match_opts, api_ctx) - core.table.clear(match_opts) +function _M.match_uri(uri_router, api_ctx) + local match_opts = core.tablepool.fetch("route_match_opts", 0, 4) match_opts.method = api_ctx.var.request_method match_opts.host = 
api_ctx.var.host match_opts.remote_addr = api_ctx.var.remote_addr @@ -112,6 +112,7 @@ function _M.match_uri(uri_router, match_opts, api_ctx) match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4) local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts) + core.tablepool.release("route_match_opts", match_opts) return ok end diff --git a/apisix/http/router/radixtree_host_uri.lua b/apisix/http/router/radixtree_host_uri.lua index 532576e53d4a..680a04fbe815 100644 --- a/apisix/http/router/radixtree_host_uri.lua +++ b/apisix/http/router/radixtree_host_uri.lua @@ -142,8 +142,6 @@ local function create_radixtree_router(routes) return true end - - local match_opts = {} function _M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -162,7 +160,7 @@ end function _M.matching(api_ctx) core.log.info("route match mode: radixtree_host_uri") - core.table.clear(match_opts) + local match_opts = core.tablepool.fetch("route_match_opts", 0, 16) match_opts.method = api_ctx.var.request_method match_opts.remote_addr = api_ctx.var.remote_addr match_opts.vars = api_ctx.var @@ -181,11 +179,13 @@ function _M.matching(api_ctx) api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse() api_ctx.real_curr_req_matched_host = nil end + core.tablepool.release("route_match_opts", match_opts) return true end end local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts) + core.tablepool.release("route_match_opts", match_opts) return ok end diff --git a/apisix/http/router/radixtree_uri.lua b/apisix/http/router/radixtree_uri.lua index 6e546364ac14..7c1b5c0c147a 100644 --- a/apisix/http/router/radixtree_uri.lua +++ b/apisix/http/router/radixtree_uri.lua @@ -27,7 +27,6 @@ local _M = {version = 0.2} local uri_routes = {} local uri_router - local match_opts = {} function _M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -51,8 +50,7 
@@ end function _M.matching(api_ctx) core.log.info("route match mode: radixtree_uri") - - return base_router.match_uri(uri_router, match_opts, api_ctx) + return base_router.match_uri(uri_router, api_ctx) end diff --git a/apisix/http/router/radixtree_uri_with_parameter.lua b/apisix/http/router/radixtree_uri_with_parameter.lua index 4bf7f3ebee5f..3f10f4fcac49 100644 --- a/apisix/http/router/radixtree_uri_with_parameter.lua +++ b/apisix/http/router/radixtree_uri_with_parameter.lua @@ -27,7 +27,6 @@ local _M = {} local uri_routes = {} local uri_router - local match_opts = {} function _M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -51,8 +50,7 @@ end function _M.matching(api_ctx) core.log.info("route match mode: radixtree_uri_with_parameter") - - return base_router.match_uri(uri_router, match_opts, api_ctx) + return base_router.match_uri(uri_router, api_ctx) end diff --git a/apisix/http/service.lua b/apisix/http/service.lua index 83bcb9b9d341..97b224d622c8 100644 --- a/apisix/http/service.lua +++ b/apisix/http/service.lua @@ -61,7 +61,7 @@ function _M.init_worker() filter = filter, }) if not services then - error("failed to create etcd instance for fetching upstream: " .. err) + error("failed to create etcd instance for fetching /services: " .. 
err) return end end diff --git a/apisix/init.lua b/apisix/init.lua index 86b68cf62208..4cfd179d25a6 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -170,7 +170,7 @@ end function _M.http_exit_worker() - -- TODO: we can support stream plugin later - currently there is not `destory` method + -- TODO: we can support stream plugin later - currently there is not `destroy` method -- in stream plugins plugin.exit_worker() require("apisix.plugins.ext-plugin.init").exit_worker() @@ -1021,6 +1021,7 @@ function _M.stream_init_worker() plugin.init_worker() xrpc.init_worker() router.stream_init_worker() + require("apisix.http.service").init_worker() apisix_upstream.init_worker() local we = require("resty.worker.events") @@ -1078,6 +1079,34 @@ function _M.stream_preread_phase() api_ctx.matched_upstream = upstream + elseif matched_route.value.service_id then + local service = service_fetch(matched_route.value.service_id) + if not service then + core.log.error("failed to fetch service configuration by ", + "id: ", matched_route.value.service_id) + return core.response.exit(404) + end + + matched_route = plugin.merge_service_stream_route(service, matched_route) + api_ctx.matched_route = matched_route + api_ctx.conf_type = "stream_route&service" + api_ctx.conf_version = matched_route.modifiedIndex .. "&" .. service.modifiedIndex + api_ctx.conf_id = matched_route.value.id .. "&" .. 
service.value.id + api_ctx.service_id = service.value.id + api_ctx.service_name = service.value.name + api_ctx.matched_upstream = matched_route.value.upstream + if matched_route.value.upstream_id and not matched_route.value.upstream then + local upstream = apisix_upstream.get_by_id(matched_route.value.upstream_id) + if not upstream then + if is_http then + return core.response.exit(502) + end + + return ngx_exit(1) + end + + api_ctx.matched_upstream = upstream + end else if matched_route.has_domain then local err diff --git a/apisix/inspect/dbg.lua b/apisix/inspect/dbg.lua index 7f4e7b114424..2fd78782faac 100644 --- a/apisix/inspect/dbg.lua +++ b/apisix/inspect/dbg.lua @@ -98,6 +98,9 @@ local function hook(_, arg) if #hooks == 0 then core.log.warn("inspect: all hooks removed") debug.sethook() + if jit then + jit.on() + end end end end diff --git a/apisix/plugin.lua b/apisix/plugin.lua index bde2b89a5393..fa1d814b290a 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -43,6 +43,9 @@ local stream_local_plugins_hash = core.table.new(0, 32) local merged_route = core.lrucache.new({ ttl = 300, count = 512 }) +local merged_stream_route = core.lrucache.new({ + ttl = 300, count = 512 +}) local expr_lrucache = core.lrucache.new({ ttl = 300, count = 512 }) @@ -637,6 +640,49 @@ function _M.merge_service_route(service_conf, route_conf) end +local function merge_service_stream_route(service_conf, route_conf) + -- because many fields in Service are not supported by stream route, + -- so we copy the stream route as base object + local new_conf = core.table.deepcopy(route_conf) + if service_conf.value.plugins then + for name, conf in pairs(service_conf.value.plugins) do + if not new_conf.value.plugins then + new_conf.value.plugins = {} + end + + if not new_conf.value.plugins[name] then + new_conf.value.plugins[name] = conf + end + end + end + + new_conf.value.service_id = nil + + if not new_conf.value.upstream and service_conf.value.upstream then + new_conf.value.upstream = 
service_conf.value.upstream + end + + if not new_conf.value.upstream_id and service_conf.value.upstream_id then + new_conf.value.upstream_id = service_conf.value.upstream_id + end + + return new_conf +end + + +function _M.merge_service_stream_route(service_conf, route_conf) + core.log.info("service conf: ", core.json.delay_encode(service_conf, true)) + core.log.info(" stream route conf: ", core.json.delay_encode(route_conf, true)) + + local version = route_conf.modifiedIndex .. "#" .. service_conf.modifiedIndex + local route_service_key = route_conf.value.id .. "#" + .. version + return merged_stream_route(route_service_key, version, + merge_service_stream_route, + service_conf, route_conf) +end + + local function merge_consumer_route(route_conf, consumer_conf, consumer_group_conf) if not consumer_conf.plugins or core.table.nkeys(consumer_conf.plugins) == 0 diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua index f2c02727c0ce..99fe96cb06e7 100644 --- a/apisix/plugins/authz-keycloak.lua +++ b/apisix/plugins/authz-keycloak.lua @@ -20,6 +20,7 @@ local sub_str = string.sub local type = type local ngx = ngx local plugin_name = "authz-keycloak" +local fetch_secrets = require("apisix.secret").fetch_secrets local log = core.log local pairs = pairs @@ -757,6 +758,8 @@ local function generate_token_using_password_grant(conf,ctx) end function _M.access(conf, ctx) + -- resolve secrets + conf = fetch_secrets(conf) local headers = core.request.headers(ctx) local need_grant_token = conf.password_grant_token_generation_incoming_uri and ctx.var.request_uri == conf.password_grant_token_generation_incoming_uri and diff --git a/apisix/plugins/chaitin-waf.lua b/apisix/plugins/chaitin-waf.lua index afb4c108b3fb..cc870a47f47e 100644 --- a/apisix/plugins/chaitin-waf.lua +++ b/apisix/plugins/chaitin-waf.lua @@ -95,7 +95,7 @@ local metadata_schema = { properties = { host = { type = "string", - pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:]+$" + pattern = 
"^\\*?[0-9a-zA-Z-._\\[\\]:/]+$" }, port = { type = "integer", diff --git a/apisix/plugins/cors.lua b/apisix/plugins/cors.lua index 4f0bfa5d37aa..f4a59ce5e301 100644 --- a/apisix/plugins/cors.lua +++ b/apisix/plugins/cors.lua @@ -98,7 +98,7 @@ local schema = { type = "array", description = "you can use regex to allow specific origins when no credentials," .. - "for example use [.*\\.test.com] to allow a.test.com and b.test.com", + "for example use [.*\\.test.com$] to allow a.test.com and b.test.com", items = { type = "string", minLength = 1, diff --git a/apisix/plugins/ext-plugin/init.lua b/apisix/plugins/ext-plugin/init.lua index b6fcf6fea82c..7d47bb96efbb 100644 --- a/apisix/plugins/ext-plugin/init.lua +++ b/apisix/plugins/ext-plugin/init.lua @@ -65,6 +65,7 @@ local ipairs = ipairs local pairs = pairs local tostring = tostring local type = type +local ngx = ngx local events_list @@ -655,6 +656,13 @@ local rpc_handlers = { end end + local body_len = rewrite:BodyLength() + if body_len > 0 then + local body = rewrite:BodyAsString() + ngx.req.read_body() + ngx.req.set_body_data(body) + end + local len = rewrite:RespHeadersLength() if len > 0 then local rewrite_resp_headers = {} diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua index 927e4ddbd8aa..0bd39f20d2cb 100644 --- a/apisix/plugins/openid-connect.lua +++ b/apisix/plugins/openid-connect.lua @@ -156,6 +156,10 @@ local schema = { description = "Comma separated list of hosts that should not be proxied.", } }, + }, + authorization_params = { + description = "Extra authorization params to the authorize endpoint", + type = "object" } }, encrypt_fields = {"client_secret"}, diff --git a/apisix/plugins/opentelemetry.lua b/apisix/plugins/opentelemetry.lua index f8013e6f7675..0c84fad49da2 100644 --- a/apisix/plugins/opentelemetry.lua +++ b/apisix/plugins/opentelemetry.lua @@ -47,6 +47,7 @@ local type = type local pairs = pairs local ipairs = ipairs local unpack = unpack +local string_format = 
string.format local lrucache = core.lrucache.new({ type = 'plugin', count = 128, ttl = 24 * 60 * 60, @@ -112,6 +113,11 @@ local attr_schema = { }, default = {}, }, + set_ngx_var = { + type = "boolean", + description = "set nginx variables", + default = false, + }, }, } @@ -332,6 +338,17 @@ function _M.rewrite(conf, api_ctx) kind = span_kind.server, attributes = attributes, }) + + if plugin_info.set_ngx_var then + local span_context = ctx:span():context() + ngx_var.opentelemetry_context_traceparent = string_format("00-%s-%s-%02x", + span_context.trace_id, + span_context.span_id, + span_context.trace_flags) + ngx_var.opentelemetry_trace_id = span_context.trace_id + ngx_var.opentelemetry_span_id = span_context.span_id + end + api_ctx.otel_context_token = ctx:attach() -- inject trace context into the headers of upstream HTTP request diff --git a/apisix/plugins/proxy-cache/init.lua b/apisix/plugins/proxy-cache/init.lua index 333c20e20e1b..918f755994ea 100644 --- a/apisix/plugins/proxy-cache/init.lua +++ b/apisix/plugins/proxy-cache/init.lua @@ -25,6 +25,7 @@ local plugin_name = "proxy-cache" local STRATEGY_DISK = "disk" local STRATEGY_MEMORY = "memory" +local DEFAULT_CACHE_ZONE = "disk_cache_one" local schema = { type = "object", @@ -33,7 +34,7 @@ local schema = { type = "string", minLength = 1, maxLength = 100, - default = "disk_cache_one", + default = DEFAULT_CACHE_ZONE, }, cache_strategy = { type = "string", @@ -129,14 +130,23 @@ function _M.check_schema(conf) local found = false local local_conf = core.config.local_conf() if local_conf.apisix.proxy_cache then + local err = "cache_zone " .. conf.cache_zone .. 
" not found" for _, cache in ipairs(local_conf.apisix.proxy_cache.zones) do + -- cache_zone passed in plugin config matched one of the proxy_cache zones if cache.name == conf.cache_zone then - found = true + -- check for the mismatch between cache_strategy and corresponding cache zone + if (conf.cache_strategy == STRATEGY_MEMORY and cache.disk_path) or + (conf.cache_strategy == STRATEGY_DISK and not cache.disk_path) then + err = "invalid or empty cache_zone for cache_strategy: "..conf.cache_strategy + else + found = true + end + break end end if found == false then - return false, "cache_zone " .. conf.cache_zone .. " not found" + return false, err end end diff --git a/apisix/plugins/proxy-cache/memory.lua b/apisix/plugins/proxy-cache/memory.lua index 0112db63b568..9d5c665a8d92 100644 --- a/apisix/plugins/proxy-cache/memory.lua +++ b/apisix/plugins/proxy-cache/memory.lua @@ -32,6 +32,10 @@ end function _M:set(key, obj, ttl) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + local obj_json = core.json.encode(obj) if not obj_json then return nil, "could not encode object" @@ -43,6 +47,10 @@ end function _M:get(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + -- If the key does not exist or has expired, then res_json will be nil. 
local res_json, err = self.dict:get(key) if not res_json then @@ -63,6 +71,9 @@ end function _M:purge(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end self.dict:delete(key) end diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua index 1d621426a137..f546225c8c95 100644 --- a/apisix/plugins/traffic-split.lua +++ b/apisix/plugins/traffic-split.lua @@ -173,6 +173,7 @@ local function set_upstream(upstream_info, ctx) key = upstream_info.key, nodes = new_nodes, timeout = upstream_info.timeout, + scheme = upstream_info.scheme } local ok, err = upstream.check_schema(up_conf) @@ -190,7 +191,9 @@ local function set_upstream(upstream_info, ctx) end core.log.info("upstream_key: ", upstream_key) upstream.set(ctx, upstream_key, ctx.conf_version, up_conf) - + if upstream_info.scheme == "https" then + upstream.set_scheme(ctx, up_conf) + end return end diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua index 0c0c4748daff..efebd5115035 100644 --- a/apisix/plugins/zipkin.lua +++ b/apisix/plugins/zipkin.lua @@ -20,13 +20,17 @@ local zipkin_codec = require("apisix.plugins.zipkin.codec") local new_random_sampler = require("apisix.plugins.zipkin.random_sampler").new local new_reporter = require("apisix.plugins.zipkin.reporter").new local ngx = ngx +local ngx_var = ngx.var local ngx_re = require("ngx.re") local pairs = pairs local tonumber = tonumber +local to_hex = require "resty.string".to_hex local plugin_name = "zipkin" local ZIPKIN_SPAN_VER_1 = 1 local ZIPKIN_SPAN_VER_2 = 2 +local plugin = require("apisix.plugin") +local string_format = string.format local lrucache = core.lrucache.new({ @@ -69,6 +73,8 @@ function _M.check_schema(conf) return core.schema.check(schema, conf) end +local plugin_info = plugin.plugin_attr(plugin_name) or {} + local function create_tracer(conf,ctx) conf.route_id = ctx.route_id @@ -205,9 +211,23 @@ function _M.rewrite(plugin_conf, ctx) ctx.opentracing_sample = 
tracer.sampler:sample(per_req_sample_ratio or conf.sample_ratio) if not ctx.opentracing_sample then request_span:set_baggage_item("x-b3-sampled","0") + else + request_span:set_baggage_item("x-b3-sampled","1") + end + + if plugin_info.set_ngx_var then + local span_context = request_span:context() + ngx_var.zipkin_context_traceparent = string_format("00-%s-%s-%02x", + to_hex(span_context.trace_id), + to_hex(span_context.span_id), + span_context:get_baggage_item("x-b3-sampled")) + ngx_var.zipkin_trace_id = span_context.trace_id + ngx_var.zipkin_span_id = span_context.span_id + end + + if not ctx.opentracing_sample then return end - request_span:set_baggage_item("x-b3-sampled","1") local request_span = ctx.opentracing.request_span if conf.span_version == ZIPKIN_SPAN_VER_1 then diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua index 01e0649e0a91..e3e9a05aca26 100644 --- a/apisix/schema_def.lua +++ b/apisix/schema_def.lua @@ -283,6 +283,7 @@ local health_checker = { {required = {"active"}}, {required = {"active", "passive"}}, }, + additionalProperties = false, } @@ -401,16 +402,10 @@ local upstream_schema = { }, }, dependencies = { - client_cert = { - required = {"client_key"}, - ["not"] = {required = {"client_cert_id"}} - }, - client_key = { - required = {"client_cert"}, - ["not"] = {required = {"client_cert_id"}} - }, + client_cert = {required = {"client_key"}}, + client_key = {required = {"client_cert"}}, client_cert_id = { - ["not"] = {required = {"client_client", "client_key"}} + ["not"] = {required = {"client_cert", "client_key"}} } } }, @@ -501,7 +496,8 @@ local upstream_schema = { oneOf = { {required = {"nodes"}}, {required = {"service_name", "discovery_type"}}, - } + }, + additionalProperties = false } -- TODO: add more nginx variable support @@ -662,6 +658,7 @@ _M.route = { {required = {"script", "plugin_config_id"}}, } }, + additionalProperties = false, } @@ -689,6 +686,7 @@ _M.service = { uniqueItems = true, }, }, + additionalProperties = false, } @@ 
-707,6 +705,7 @@ _M.consumer = { desc = desc_def, }, required = {"username"}, + additionalProperties = false, } @@ -779,10 +778,6 @@ _M.ssl = { }, required = {"ca"}, }, - exptime = { - type = "integer", - minimum = 1588262400, -- 2020/5/1 0:0:0 - }, labels = labels_def, status = { description = "ssl status, 1 to enable, 0 to disable", @@ -799,8 +794,6 @@ _M.ssl = { enum = {"TLSv1.1", "TLSv1.2", "TLSv1.3"} }, }, - validity_end = timestamp_def, - validity_start = timestamp_def, create_time = timestamp_def, update_time = timestamp_def }, @@ -817,7 +810,8 @@ _M.ssl = { {required = {"snis", "key", "cert"}} } }, - ["else"] = {required = {"key", "cert"}} + ["else"] = {required = {"key", "cert"}}, + additionalProperties = false, } @@ -834,6 +828,7 @@ _M.proto = { } }, required = {"content"}, + additionalProperties = false, } @@ -846,6 +841,7 @@ _M.global_rule = { update_time = timestamp_def }, required = {"id", "plugins"}, + additionalProperties = false, } @@ -879,6 +875,7 @@ local xrpc_protocol_schema = { dependencies = { name = {"conf"}, }, + additionalProperties = false, }, }, @@ -911,9 +908,11 @@ _M.stream_route = { }, upstream = upstream_schema, upstream_id = id_schema, + service_id = id_schema, plugins = plugins_schema, protocol = xrpc_protocol_schema, - } + }, + additionalProperties = false, } @@ -929,6 +928,7 @@ _M.plugins = { stream = { type = "boolean" }, + additionalProperties = false, }, required = {"name"} } @@ -938,6 +938,9 @@ _M.plugins = { _M.plugin_config = { type = "object", properties = { + name = { + type = "string", + }, id = id_schema, desc = desc_def, plugins = plugins_schema, @@ -946,6 +949,7 @@ _M.plugin_config = { update_time = timestamp_def }, required = {"id", "plugins"}, + additionalProperties = false, } @@ -960,6 +964,7 @@ _M.consumer_group = { update_time = timestamp_def }, required = {"id", "plugins"}, + additionalProperties = false, } diff --git a/apisix/stream/router/ip_port.lua b/apisix/stream/router/ip_port.lua index 
977bcb2d3a4e..284cc456edbc 100644 --- a/apisix/stream/router/ip_port.lua +++ b/apisix/stream/router/ip_port.lua @@ -110,6 +110,8 @@ do for _, route in ipairs(items) do local hit = match_addrs(route, vars) if hit then + route.value.remote_addr_matcher = nil + route.value.server_addr_matcher = nil ctx.matched_route = route return true end @@ -175,6 +177,8 @@ do for _, route in ipairs(other_routes) do local hit = match_addrs(route, api_ctx.var) if hit then + route.value.remote_addr_matcher = nil + route.value.server_addr_matcher = nil api_ctx.matched_route = route return true end diff --git a/apisix/stream/xrpc/protocols/dubbo/init.lua b/apisix/stream/xrpc/protocols/dubbo/init.lua new file mode 100644 index 000000000000..19160d6c544e --- /dev/null +++ b/apisix/stream/xrpc/protocols/dubbo/init.lua @@ -0,0 +1,231 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local sdk = require("apisix.stream.xrpc.sdk") +local xrpc_socket = require("resty.apisix.stream.xrpc.socket") +local math_random = math.random +local ngx = ngx +local OK = ngx.OK +local str_format = string.format +local DECLINED = ngx.DECLINED +local DONE = ngx.DONE +local bit = require("bit") +local ffi = require("ffi") +local ffi_str = ffi.string + + +-- dubbo protocol spec: https://cn.dubbo.apache.org/zh-cn/overview/reference/protocols/tcp/ +local header_len = 16 +local _M = {} + + +function _M.init_downstream(session) + session.req_id_seq = 0 + session.resp_id_seq = 0 + session.cmd_labels = { session.route.id, "" } + return xrpc_socket.downstream.socket() +end + + +local function parse_dubbo_header(header) + for i = 1, header_len do + local currentByte = header:byte(i) + if not currentByte then + return nil + end + end + + local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2)) + local message_flag = header:byte(3) + local status = header:byte(4) + local request_id = 0 + for i = 5, 12 do + request_id = request_id * 256 + header:byte(i) + end + + local byte13Val = header:byte(13) * 256 * 256 * 256 + local byte14Val = header:byte(14) * 256 * 256 + local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16) + + local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0 + local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0 + local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0 + + return { + magic_number = magic_number, + message_flag = message_flag, + is_request = is_request, + is_two_way = is_two_way, + is_event = is_event, + status = status, + request_id = request_id, + data_length = data_length + } +end + + +local function read_data(sk, is_req) + local header_data, err = sk:read(header_len) + if not header_data then + return nil, err, false + end + + local header_str = ffi_str(header_data, header_len) + 
local header_info = parse_dubbo_header(header_str) + if not header_info then + return nil, "header insufficient", false + end + + local is_valid_magic_number = header_info.magic_number == "dabb" + if not is_valid_magic_number then + return nil, str_format("unknown magic number: \"%s\"", header_info.magic_number), false + end + + local body_data, err = sk:read(header_info.data_length) + if not body_data then + core.log.error("failed to read dubbo request body") + return nil, err, false + end + + local ctx = ngx.ctx + ctx.dubbo_serialization_id = bit.band(header_info.message_flag, 0x1F) + + if is_req then + ctx.dubbo_req_body_data = body_data + else + ctx.dubbo_rsp_body_data = body_data + end + + return true, nil, false +end + + +local function read_req(sk) + return read_data(sk, true) +end + + +local function read_reply(sk) + return read_data(sk, false) +end + + +local function handle_reply(session, sk) + local ok, err = read_reply(sk) + if not ok then + return nil, err + end + + local ctx = sdk.get_req_ctx(session, 10) + + return ctx +end + + +function _M.from_downstream(session, downstream) + local read_pipeline = false + session.req_id_seq = session.req_id_seq + 1 + local ctx = sdk.get_req_ctx(session, session.req_id_seq) + session._downstream_ctx = ctx + while true do + local ok, err, pipelined = read_req(downstream) + if not ok then + if err ~= "timeout" and err ~= "closed" then + core.log.error("failed to read request: ", err) + end + + if read_pipeline and err == "timeout" then + break + end + + return DECLINED + end + + if not pipelined then + break + end + + if not read_pipeline then + read_pipeline = true + -- set minimal read timeout to read pipelined data + downstream:settimeouts(0, 0, 1) + end + end + + if read_pipeline then + -- set timeout back + downstream:settimeouts(0, 0, 0) + end + + return OK, ctx +end + + +function _M.connect_upstream(session, ctx) + local conf = session.upstream_conf + local nodes = conf.nodes + if #nodes == 0 then + 
core.log.error("failed to connect: no nodes") + return DECLINED + end + + local node = nodes[math_random(#nodes)] + local sk = sdk.connect_upstream(node, conf) + if not sk then + return DECLINED + end + + core.log.debug("dubbo_connect_upstream end") + + return OK, sk +end + +function _M.disconnect_upstream(session, upstream) + sdk.disconnect_upstream(upstream, session.upstream_conf) +end + + +function _M.to_upstream(session, ctx, downstream, upstream) + local ok, _ = upstream:move(downstream) + if not ok then + return DECLINED + end + + return OK +end + + +function _M.from_upstream(session, downstream, upstream) + local ctx,err = handle_reply(session, upstream) + if err then + return DECLINED + end + + local ok, _ = downstream:move(upstream) + if not ok then + return DECLINED + end + + return DONE, ctx +end + + +function _M.log(_, _) +end + + +return _M diff --git a/apisix/stream/xrpc/protocols/dubbo/schema.lua b/apisix/stream/xrpc/protocols/dubbo/schema.lua new file mode 100644 index 000000000000..3a9d73325498 --- /dev/null +++ b/apisix/stream/xrpc/protocols/dubbo/schema.lua @@ -0,0 +1,32 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") + + +local schema = { + type = "object", +} + +local _M = {} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +return _M diff --git a/apisix/upstream.lua b/apisix/upstream.lua index 416bbea7cfa4..d8e3f3a98750 100644 --- a/apisix/upstream.lua +++ b/apisix/upstream.lua @@ -83,7 +83,7 @@ _M.set = set_directly local function release_checker(healthcheck_parent) local checker = healthcheck_parent.checker core.log.info("try to release checker: ", tostring(checker)) - checker:clear() + checker:delayed_clear(3) checker:stop() end @@ -175,7 +175,7 @@ local function set_upstream_scheme(ctx, upstream) ctx.var["upstream_scheme"] = ctx.upstream_scheme end - +_M.set_scheme = set_upstream_scheme local scheme_to_port = { http = 80, diff --git a/apisix/wasm.lua b/apisix/wasm.lua index a27641504a2b..c8b863aeac9d 100644 --- a/apisix/wasm.lua +++ b/apisix/wasm.lua @@ -15,6 +15,7 @@ -- limitations under the License. -- local core = require("apisix.core") +local type = type local support_wasm, wasm = pcall(require, "resty.proxy-wasm") local ngx_var = ngx.var @@ -23,8 +24,10 @@ local schema = { type = "object", properties = { conf = { - type = "string", - minLength = 1, + oneOf = { + { type = "object", minProperties = 1}, + { type = "string", minLength = 1}, + } }, }, required = {"conf"} @@ -51,7 +54,13 @@ local function fetch_plugin_ctx(conf, ctx, plugin) local plugin_ctx = ctxs[key] local err if not plugin_ctx then - plugin_ctx, err = wasm.on_configure(plugin, conf.conf) + if type(conf.conf) == "table" then + plugin_ctx, err = wasm.on_configure(plugin, core.json.encode(conf.conf)) + elseif type(conf.conf) == "string" then + plugin_ctx, err = wasm.on_configure(plugin, conf.conf) + else + return nil, "invalid conf type" + end if not plugin_ctx then return nil, err end diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh index 6b6483a4f065..cf506ef54e55 100755 --- a/ci/centos7-ci.sh +++ b/ci/centos7-ci.sh @@ -33,7 
+33,7 @@ install_dependencies() { # install openresty to make apisix's rpm test work yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo - yum install -y openresty-1.21.4.1 openresty-debug-1.21.4.1 openresty-openssl111-debug-devel pcre pcre-devel + yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel # install luarocks ./utils/linux-install-luarocks.sh @@ -58,14 +58,9 @@ install_dependencies() { cd t/grpc_server_example CGO_ENABLED=0 go build - ./grpc_server_example \ - -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \ - -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \ - > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)& - cd ../../ - # wait for grpc_server_example to fully start - sleep 3 + + start_grpc_server_example # installing grpcurl install_grpcurl @@ -73,9 +68,6 @@ install_dependencies() { # install nodejs install_nodejs - # install rust - install_rust - # grpc-web server && client cd t/plugin/grpc-web ./setup.sh diff --git a/ci/common.sh b/ci/common.sh index 2840b7d8a711..0aa9f9e85bda 100644 --- a/ci/common.sh +++ b/ci/common.sh @@ -100,14 +100,6 @@ install_nodejs () { npm config set registry https://registry.npmjs.org/ } -install_rust () { - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y - source "$HOME/.cargo/env" - # 1.69.0 version required to compile lua-resty-ldap - rustup install 1.69.0 - rustup default 1.69.0 -} - set_coredns() { # test a domain name is configured as upstream echo "127.0.0.1 test.com" | sudo tee -a /etc/hosts @@ -148,6 +140,9 @@ set_coredns() { pushd t/coredns || exit 1 ../../build-cache/coredns -dns.port=1053 & popd || exit 1 + + touch build-cache/test_resolve.conf + echo "nameserver 127.0.0.1:1053" > build-cache/test_resolve.conf } GRPC_SERVER_EXAMPLE_VER=20210819 @@ -156,3 +151,24 @@ 
linux_get_dependencies () { apt update apt install -y cpanminus build-essential libncurses5-dev libreadline-dev libssl-dev perl libpcre3 libpcre3-dev libldap2-dev } + +function start_grpc_server_example() { + ./t/grpc_server_example/grpc_server_example \ + -grpc-address :10051 -grpcs-address :10052 -grpcs-mtls-address :10053 -grpc-http-address :10054 \ + -crt ./t/certs/apisix.crt -key ./t/certs/apisix.key -ca ./t/certs/mtls_ca.crt \ + > grpc_server_example.log 2>&1 & + + for (( i = 0; i <= 10; i++ )); do + sleep 0.5 + GRPC_PROC=`ps -ef | grep grpc_server_example | grep -v grep || echo "none"` + if [[ $GRPC_PROC == "none" || "$i" -eq 10 ]]; then + echo "failed to start grpc_server_example" + ss -antp | grep 1005 || echo "no proc listen port 1005x" + cat grpc_server_example.log + + exit 1 + fi + + ss -lntp | grep 10051 | grep grpc_server && break + done +} diff --git a/ci/linux_apisix_current_luarocks_runner.sh b/ci/linux_apisix_current_luarocks_runner.sh index a8836f43b691..96aac508f762 100755 --- a/ci/linux_apisix_current_luarocks_runner.sh +++ b/ci/linux_apisix_current_luarocks_runner.sh @@ -34,9 +34,6 @@ script() { sudo rm -rf /usr/local/share/lua/5.1/apisix - # install rust - install_rust - # install APISIX with local version luarocks install rockspec/apisix-master-0.rockspec --only-deps > build.log 2>&1 || (cat build.log && exit 1) luarocks make rockspec/apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1) diff --git a/ci/linux_apisix_master_luarocks_runner.sh b/ci/linux_apisix_master_luarocks_runner.sh index 3e99baf34116..afc487ddd160 100755 --- a/ci/linux_apisix_master_luarocks_runner.sh +++ b/ci/linux_apisix_master_luarocks_runner.sh @@ -38,9 +38,6 @@ script() { mkdir tmp && cd tmp cp -r ../utils ./ - # install rust - install_rust - # install APISIX by luarocks luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1) cp ../bin/apisix /usr/local/bin/apisix diff --git a/ci/linux_openresty_common_runner.sh 
b/ci/linux_openresty_common_runner.sh index 743dfac7d980..466fe8b69651 100755 --- a/ci/linux_openresty_common_runner.sh +++ b/ci/linux_openresty_common_runner.sh @@ -33,9 +33,6 @@ do_install() { ./ci/linux-install-etcd-client.sh - # install rust - install_rust - create_lua_deps # sudo apt-get install tree -y @@ -75,20 +72,7 @@ script() { set_coredns - ./t/grpc_server_example/grpc_server_example \ - -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \ - -crt ./t/certs/apisix.crt -key ./t/certs/apisix.key -ca ./t/certs/mtls_ca.crt \ - & - - # ensure grpc server example is already started - for (( i = 0; i <= 100; i++ )); do - if [[ "$i" -eq 100 ]]; then - echo "failed to start grpc_server_example in time" - exit 1 - fi - nc -zv 127.0.0.1 50051 && break - sleep 1 - done + start_grpc_server_example # APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r $TEST_FILE_SUB_DIR | tee /tmp/test.result diff --git a/ci/linux_openresty_runner.sh b/ci/linux_openresty_runner.sh index 877248913368..2e39224efc59 100755 --- a/ci/linux_openresty_runner.sh +++ b/ci/linux_openresty_runner.sh @@ -18,5 +18,4 @@ export OPENRESTY_VERSION=source -#export TEST_CI_USE_GRPC=true . 
./ci/linux_openresty_common_runner.sh diff --git a/ci/pod/docker-compose.first.yml b/ci/pod/docker-compose.first.yml index 62ef7a328c16..aee79a8387c7 100644 --- a/ci/pod/docker-compose.first.yml +++ b/ci/pod/docker-compose.first.yml @@ -46,6 +46,15 @@ services: networks: consul_net: + consul_3: + image: hashicorp/consul:1.16.2 + restart: unless-stopped + ports: + - "8502:8500" + command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-client", "0.0.0.0", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks", "-ui", "-hcl", "acl = {\nenabled = true\ndefault_policy = \"deny\"\nenable_token_persistence = true\ntokens = {\nagent = \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"\n}}" ] + networks: + consul_net: + ## Nacos cluster nacos_auth: hostname: nacos1 diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml index 4ea069b8c5f2..748b28b868f6 100644 --- a/ci/pod/docker-compose.plugin.yml +++ b/ci/pod/docker-compose.plugin.yml @@ -341,6 +341,13 @@ services: - '8124:8123' networks: clickhouse_net: + otel-collector: + image: otel/opentelemetry-collector-contrib + volumes: + - ./ci/pod/otelcol-contrib:/etc/otelcol-contrib:rw + ports: + - '4318:4318' + networks: apisix_net: diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod index b9e81701913d..3e2f6155748e 100644 --- a/ci/pod/openfunction/function-example/test-body/go.mod +++ b/ci/pod/openfunction/function-example/test-body/go.mod @@ -25,7 +25,7 @@ require ( google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect google.golang.org/grpc v1.40.0 // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect ) diff --git a/ci/pod/openfunction/function-example/test-body/go.sum 
b/ci/pod/openfunction/function-example/test-body/go.sum index 1fb1db392365..35f77fd70a02 100644 --- a/ci/pod/openfunction/function-example/test-body/go.sum +++ b/ci/pod/openfunction/function-example/test-body/go.sum @@ -1695,8 +1695,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/ci/pod/otelcol-contrib/config.yaml b/ci/pod/otelcol-contrib/config.yaml new file mode 100644 index 000000000000..438f04c8b9fe --- /dev/null +++ b/ci/pod/otelcol-contrib/config.yaml @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +receivers: + otlp: + protocols: + grpc: + http: +exporters: + file: + path: /etc/otelcol-contrib/data-otlp.json +service: + pipelines: + traces: + receivers: [otlp] + exporters: [file] diff --git a/ci/redhat-ci.sh b/ci/redhat-ci.sh index 4b307e64811a..ff867fb71622 100755 --- a/ci/redhat-ci.sh +++ b/ci/redhat-ci.sh @@ -32,7 +32,7 @@ install_dependencies() { # install openresty to make apisix's rpm test work yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo - yum install -y openresty-1.21.4.1 openresty-debug-1.21.4.1 openresty-openssl111-debug-devel pcre pcre-devel xz + yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel xz # install luarocks ./utils/linux-install-luarocks.sh @@ -58,14 +58,10 @@ install_dependencies() { pushd t/grpc_server_example CGO_ENABLED=0 go build - ./grpc_server_example \ - -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \ - -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \ - > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)& - popd - # wait for grpc_server_example to fully start - sleep 3 + + yum install -y iproute procps + start_grpc_server_example # installing grpcurl install_grpcurl @@ -73,9 +69,6 @@ install_dependencies() { # install nodejs install_nodejs - # install rust - install_rust - # grpc-web server && client pushd t/plugin/grpc-web ./setup.sh diff --git a/conf/config-default.yaml b/conf/config-default.yaml 
index 359e2c2d806e..38d67823e5a9 100755 --- a/conf/config-default.yaml +++ b/conf/config-default.yaml @@ -76,7 +76,6 @@ apisix: # http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream` proxy_mode: http # stream_proxy: # TCP/UDP L4 proxy - # only: true # Enable L4 proxy only without L7 proxy. # tcp: # - addr: 9100 # Set the TCP proxy listening ports. # tls: true @@ -109,8 +108,9 @@ apisix: # Disabled by default because it renders Perfect Forward Secrecy (FPS) # useless. See https://github.com/mozilla/server-side-tls/issues/135. - key_encrypt_salt: # Set the encryption key for AES-128-CBC. It should be a - - edd1c9f0985e76a2 # hexadecimal string of length 16. + key_encrypt_salt: # This field is only used to encrypt the private key of SSL. + - edd1c9f0985e76a2 # Set the encryption key for AES-128-CBC. It should be a + # hexadecimal string of length 16. # If not set, APISIX saves the original data into etcd. # CAUTION: If you would like to update the key, add the new key as the # first item in the array and keep the older keys below the newly added @@ -212,6 +212,7 @@ nginx_config: # Config for render the template to generate n http: enable_access_log: true # Enable HTTP proxy access logging. access_log: logs/access.log # Location of the access log. + access_log_buffer: 16384 # buffer size of access log. access_log_format: "$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\"" # Customize log format: http://nginx.org/en/docs/varindex.html access_log_format_escape: default # Escape default or json characters in variables. @@ -278,6 +279,7 @@ nginx_config: # Config for render the template to generate n # dns: # servers: # - "127.0.0.1:8600" # Replace with the address of your DNS server. 
+# resolv_conf: /etc/resolv.conf # Replace with the path to the local DNS resolv config. Configure either "servers" or "resolv_conf". # order: # Resolve DNS records this order. # - last # Try the latest successful type for a hostname. # - SRV @@ -299,7 +301,9 @@ nginx_config: # Config for render the template to generate n # - "http://${username}:${password}@${host1}:${port1}" # prefix: "/nacos/v1/" # fetch_interval: 30 # Default 30s -# weight: 100 # Default 100 +# `weight` is the `default_weight` that will be attached to each discovered node that +# doesn't have a weight explicitly provided in nacos results +# weight: 100 # Default 100. # timeout: # connect: 2000 # Default 2000ms # send: 2000 # Default 2000ms @@ -566,6 +570,7 @@ plugin_attr: # Plugin attributes inactive_timeout: 1 # Set the timeout for spans to wait in the export queue before being sent, # if the queue is not full. max_export_batch_size: 16 # Set the maximum number of spans to include in each batch sent to the + set_ngx_var: false # export opentelemetry variables to nginx variables # OpenTelemetry collector. prometheus: # Plugin: prometheus export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint. @@ -610,35 +615,19 @@ plugin_attr: # Plugin attributes hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" # Set the path to the Lua file that defines # hooks. Only administrators should have # write access to this file for security. + zipkin: # Plugin: zipkin + set_ngx_var: false # export zipkin variables to nginx variables deployment: # Deployment configurations role: traditional # Set deployment mode: traditional, control_plane, or data_plane. role_traditional: config_provider: etcd # Set the configuration center. - # role_data_plane: # Set data plane details if role is data_plane. - # config_provider: control_plane # Set the configuration center: control_plane, or yaml. - # control_plane: # Set control plane details if config_provider is control_plane. 
- # host: # Set the address of control plane. - # - https://${control_plane_IP}:9280 - # prefix: /apisix # Set etcd prefix. - # timeout: 30 # Set timeout in seconds. - # certs: - # cert: /path/to/client.crt # Set path to the client certificate. - # cert_key: /path/to/client.key # Set path to the client key. - # trusted_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate. - - # role_control_plane: # Set control plane details if role is control_plane. - # config_provider: etcd # Set the configuration center. - # conf_server: - # listen: 0.0.0.0:9280 # Set the address of the conf server. - # cert: /path/to/server.crt # Set path to the server certificate. - # cert_key: /path/to/server.key # Set path to the server key. - # client_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate. - # certs: - # cert: /path/to/client.crt # Set path to the client certificate. - # cert_key: /path/to/client.key # Set path to the client key. - # trusted_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate. + #role_data_plane: # Set data plane details if role is data_plane. + # config_provider: etcd # Set the configuration center: etcd, xds, or yaml. + + #role_control_plane: # Set control plane details if role is control_plane. + # config_provider: etcd # Set the configuration center. admin: # Admin API admin_key_required: true # Enable Admin API authentication by default for security. @@ -673,9 +662,7 @@ deployment: # Deployment configurations host: # Set etcd address(es) in the same etcd cluster. - "http://127.0.0.1:2379" # If TLS is enabled for etcd, use https://127.0.0.1:2379. prefix: /apisix # Set etcd prefix. - use_grpc: false # Use gRPC (experimental) for etcd configuration sync. timeout: 30 # Set timeout in seconds. - # Set a higher timeout (e.g. an hour) if `use_grpc` is true. # resync_delay: 5 # Set resync time in seconds after a sync failure. # The actual resync time would be resync_delay plus 50% random jitter. 
# health_check_timeout: 10 # Set timeout in seconds for etcd health check. diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md index b462e01582b8..79e3f3d48a51 100644 --- a/docs/en/latest/FAQ.md +++ b/docs/en/latest/FAQ.md @@ -105,7 +105,7 @@ Mainland China users can use `luarocks.cn` as the LuaRocks server. You can use t make deps ENV_LUAROCKS_SERVER=https://luarocks.cn ``` -If this does not solve your problem, you can try getting a detailed log by using the `--verbose` flag to diagnose the problem. +If this does not solve your problem, you can try getting a detailed log by using the `--verbose` or `-v` flag to diagnose the problem. ## How do I build the APISIX-Base environment? diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md index e34468eacc4b..77c2141336c8 100644 --- a/docs/en/latest/admin-api.md +++ b/docs/en/latest/admin-api.md @@ -325,8 +325,6 @@ ID's as a text string must be of a length between 1 and 64 characters and they s | timeout | False | Auxiliary | Sets the timeout (in seconds) for connecting to, and sending and receiving messages between the Upstream and the Route. This will overwrite the `timeout` value configured in your [Upstream](#upstream). | {"connect": 3, "send": 3, "read": 3} | | enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | | | status | False | Auxiliary | Enables the current Route. Set to `1` (enabled) by default. | `1` to enable, `0` to disable | -| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | Example configuration: @@ -630,8 +628,6 @@ Service resource request address: /apisix/admin/services/{id} | labels | False | Match Rules | Attributes of the Service specified as key-value pairs. 
| {"version":"v2","build":"16","env":"production"} | | enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | | | hosts | False | Match Rules | Matches with any one of the multiple `host`s specified in the form of a non-empty list. | ["foo.com", "*.bar.com"] | -| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | Example configuration: @@ -815,8 +811,6 @@ Consumer resource request address: /apisix/admin/consumers/{username} | plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | | desc | False | Auxiliary | Description of usage scenarios. | customer xxxx | | labels | False | Match Rules | Attributes of the Consumer specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | -| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | Example Configuration: @@ -893,32 +887,30 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) In addition to the equalization algorithm selections, Upstream also supports passive health check and retry for the upstream. 
See the table below for more details: -| Name | Optional | Description | Example | -| --------------------------- | ------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------------------------------------------------------------------------------------------------------------------------------------------ | -| type | optional | Load balancing algorithm to be used, and the default value is `roundrobin`. | | -| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` | -| service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). 
| `a-bootiful-client` | -| discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` | -| hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | -| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. If the specified `hash_on` and `key` fail to fetch the values, it will default to `remote_addr`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` | -| checks | optional | Configures the parameters for the [health check](./tutorials/health-check.md). | | -| retries | optional | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | | -| retry_timeout | optional | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | | -| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` | -| name | optional | Identifier for the Upstream. | | -| desc | optional | Description of usage scenarios. 
| | -| pass_host | optional | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | | -| upstream_host | optional | Specifies the host of the Upstream request. This is only valid if the `pass_host` is set to `rewrite`. | | -| scheme | optional | The scheme used when communicating with the Upstream. For an L7 proxy, this value can be one of `http`, `https`, `grpc`, `grpcs`. For an L4 proxy, this value could be one of `tcp`, `udp`, `tls`. Defaults to `http`. | | -| labels | optional | Attributes of the Upstream specified as `key-value` pairs. | {"version":"v2","build":"16","env":"production"} | -| create_time | optional | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | optional | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | -| tls.client_cert | optional, can't be used with `tls.client_cert_id` | Sets the client certificate while connecting to a TLS Upstream. | | -| tls.client_key | optional, can't be used with `tls.client_cert_id` | Sets the client private key while connecting to a TLS Upstream. | | -| tls.client_cert_id | optional, can't be used with `tls.client_cert` and `tls.client_key` | Set the referenced [SSL](#ssl) id. | | -| keepalive_pool.size | optional | Sets `keepalive` directive dynamically. | | -| keepalive_pool.idle_timeout | optional | Sets `keepalive_timeout` directive dynamically. | | -| keepalive_pool.requests | optional | Sets `keepalive_requests` directive dynamically. 
| | +| Parameter | Required | Type | Description | Example | +|-----------------------------|------------------------------------------------------------------|-------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| type | False | Enumeration | Load balancing algorithm to be used, and the default value is `roundrobin`. | | +| nodes | True, can't be used with `service_name` | Node | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` | +| service_name | True, can't be used with `nodes` | String | Service name used for [service discovery](discovery.md). | `a-bootiful-client` | +| discovery_type | True, if `service_name` is used | String | The type of service [discovery](discovery.md). 
| `eureka` | +| hash_on | False | Auxiliary | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | +| key | False | Match Rules | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` | +| checks | False | Health Checker | Configures the parameters for the [health check](./tutorials/health-check.md). | | +| retries | False | Integer | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | | +| retry_timeout | False | Integer | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | | +| timeout | False | Timeout | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` | +| name | False | Auxiliary | Identifier for the Upstream. | | +| desc | False | Auxiliary | Description of usage scenarios. | | +| pass_host | False | Enumeration | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. 
`pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | | +| upstream_host | False | Auxiliary | Specifies the host of the Upstream request. This is only valid if the `pass_host` is set to `rewrite`. | | +| scheme | False | Auxiliary | The scheme used when communicating with the Upstream. For an L7 proxy, this value can be one of `http`, `https`, `grpc`, `grpcs`. For an L4 proxy, this value could be one of `tcp`, `udp`, `tls`. Defaults to `http`. | | +| labels | False | Match Rules | Attributes of the Upstream specified as `key-value` pairs. | {"version":"v2","build":"16","env":"production"} | +| tls.client_cert | False, can't be used with `tls.client_cert_id` | HTTPS certificate | Sets the client certificate while connecting to a TLS Upstream. | | +| tls.client_key | False, can't be used with `tls.client_cert_id` | HTTPS certificate private key | Sets the client private key while connecting to a TLS Upstream. | | +| tls.client_cert_id | False, can't be used with `tls.client_cert` and `tls.client_key` | SSL | Set the referenced [SSL](#ssl) id. | | +| keepalive_pool.size | False | Auxiliary | Sets `keepalive` directive dynamically. | | +| keepalive_pool.idle_timeout | False | Auxiliary | Sets `keepalive_timeout` directive dynamically. | | +| keepalive_pool.requests | False | Auxiliary | Sets `keepalive_requests` directive dynamically. | | An Upstream can be one of the following `types`: @@ -935,7 +927,6 @@ The following should be considered when setting the `hash_on` value: - When set to `cookie`, a `key` is required. This key is equal to "cookie\_`key`". The cookie name is case-sensitive. - When set to `consumer`, the `key` is optional and the key is set to the `consumer_name` captured from the authentication Plugin. - When set to `vars_combinations`, the `key` is required. 
The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`. -- When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`. The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment): @@ -1211,8 +1202,6 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) | client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match URI, if matched, this request bypasses the client certificate checking, i.e. skip the MTLS. | ["/hello[0-9]+", "/foobar"] | | snis | True, only if `type` is `server` | Match Rules | A non-empty array of HTTPS SNI | | | labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | -| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | | type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. | | status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable | | ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. 
| `["TLSv1.2", "TLSv2.3"]` | @@ -1254,8 +1243,6 @@ Global Rule resource request address: /apisix/admin/global_rules/{id} | Parameter | Required | Description | Example | | ----------- | -------- | ------------------------------------------------------------------------------------------------------------------ | ---------- | | plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | -| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | ## Consumer group @@ -1283,8 +1270,6 @@ Consumer group resource request address: /apisix/admin/consumer_groups/{id} | plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | | desc | False | Description of usage scenarios. | customer xxxx | | labels | False | Attributes of the Consumer group specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | -| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | ## Plugin config @@ -1312,8 +1297,6 @@ Plugin Config resource request address: /apisix/admin/plugin_configs/{id} | plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | | desc | False | Description of usage scenarios. | customer xxxx | | labels | False | Attributes of the Plugin config specified as key-value pairs. 
| {"version":"v2","build":"16","env":"production"} | -| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | ## Plugin Metadata @@ -1362,6 +1345,18 @@ Plugin resource request address: /apisix/admin/plugins/{plugin_name} | ------ | ----------------------------------- | ------------ | ---------------------------------------------- | | GET | /apisix/admin/plugins/list | NULL | Fetches a list of all Plugins. | | GET | /apisix/admin/plugins/{plugin_name} | NULL | Fetches the specified Plugin by `plugin_name`. | +| GET | /apisix/admin/plugins?all=true | NULL | Get all properties of all plugins. | +| GET | /apisix/admin/plugins?all=true&subsystem=stream| NULL | Gets properties of all Stream plugins.| +| GET | /apisix/admin/plugins?all=true&subsystem=http | NULL | Gets properties of all HTTP plugins. | +| PUT | /apisix/admin/plugins/reload | NULL | Reloads the plugin according to the changes made in code | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=stream | NULL | Gets properties of a specified plugin if it is supported in Stream/L4 subsystem. | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=http | NULL | Gets properties of a specified plugin if it is supported in HTTP/L7 subsystem. | + +:::caution + +The interface of getting properties of all plugins via `/apisix/admin/plugins?all=true` will be deprecated soon. + +::: ### Request Body Parameters @@ -1424,6 +1419,7 @@ Stream Route resource request address: /apisix/admin/stream_routes/{id} | ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- | | upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). 
| | | upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | | +| service_id | False | String | Id of the [Service](terminology/service.md) service. | | | remote_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with client IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" | | server_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" | | server_port | False | Integer | Filters Upstream forwards by matching with APISIX Server port. | 9090 | @@ -1517,8 +1513,55 @@ Proto resource request address: /apisix/admin/protos/{id} ### Request Body Parameters -| Parameter | Required | Type | Description | Example | -| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- | -| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) | -| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | -| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | +| Parameter | Required | Type | Description | Example | +|-----------|----------|---------|--------------------------------------| ----------------------------- | +| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) | + +## Schema validation + +Check the validity of a configuration against its entity schema. This allows you to test your input before submitting a request to the entity endpoints of the Admin API. + +Note that this only performs the schema validation checks, checking that the input configuration is well-formed. 
Requests to the entity endpoint using the given configuration may still fail due to other reasons, such as invalid foreign key relationships or uniqueness check failures against the contents of the data store. + +### Schema validation + +Schema validation request address: /apisix/admin/schema/validate/{resource} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| POST | /apisix/admin/schema/validate/{resource} | {..resource conf..} | Validate the resource configuration against corresponding schema. | + +### Response Parameters + +* 200: validate ok. +* 400: validate failed, with error as response body in JSON format. + +Example: + +```bash +curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X POST -i -d '{ + "uri": 1980, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } +}' +HTTP/1.1 400 Bad Request +Date: Mon, 21 Aug 2023 07:37:13 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.4.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 3600 + +{"error_msg":"property \"uri\" validation failed: wrong type: expected string, got number"} +``` diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md index 01d4ac331240..e4804eac4ed4 100644 --- a/docs/en/latest/building-apisix.md +++ b/docs/en/latest/building-apisix.md @@ -37,7 +37,9 @@ If you are looking to quickly get started with APISIX, check out the other [inst :::note -If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools).
+To build an APISIX docker image from source code, see [build image from source code](https://apisix.apache.org/docs/docker/build/#build-an-image-from-customizedpatched-source-code). + +To build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools) instead. ::: @@ -52,7 +54,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend Save the APISIX version to an environment variable to be used next: ```shell -APISIX_VERSION='3.4.0' +APISIX_VERSION='3.6.0' ``` Clone the APISIX source code of this version into a new directory `apisix-APISIX_VERSION`: @@ -63,7 +65,7 @@ git clone --depth 1 --branch ${APISIX_VERSION} https://github.com/apache/apisix. Alternatively, you can also download the source package from the [Downloads](https://apisix.apache.org/downloads/) page. Note that source packages here are not distributed with test cases. -Next, navigate to the directory, install dependencies, and build APISIX. You should have [Rust](https://www.rust-lang.org) installed in your environment first before running `make deps`: +Next, navigate to the directory, install dependencies, and build APISIX. ```shell cd apisix-${APISIX_VERSION} diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json index ee172c33290c..e1c8391f275b 100644 --- a/docs/en/latest/config.json +++ b/docs/en/latest/config.json @@ -1,5 +1,5 @@ { - "version": "3.4.0", + "version": "3.6.0", "sidebar": [ { "type": "category", diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md index 9f75a1d99c06..bc195121c946 100644 --- a/docs/en/latest/deployment-modes.md +++ b/docs/en/latest/deployment-modes.md @@ -39,10 +39,6 @@ Each of these deployment modes are explained in detail below. In the traditional deployment mode, one instance of APISIX will be both the `data_plane` and the `control_plane`. 
-![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png) - -There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. Both the data and the control planes connect to this conf server via HTTP. - An example configuration of the traditional deployment mode is shown below: ```yaml title="conf/config.yaml" @@ -73,16 +69,9 @@ The instance of APISIX deployed as the traditional role will: In the decoupled deployment mode the `data_plane` and `control_plane` instances of APISIX are deployed separately, i.e., one instance of APISIX is configured to be a *data plane* and the other to be a *control plane*. -![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png) - The instance of APISIX deployed as the data plane will: -1. Fetch the configuration from the *control plane*. The default port is `9280`. -2. Performs a health check on all configured control plane addresses before starting the service. - 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown. - 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service. - 3. If all control planes are normal, APISIX service is started normally. -3. Once the service is started, it will handle the user requests. +Once the service is started, it will handle the user requests. 
The example below shows the configuration of an APISIX instance as *data plane* in the decoupled mode: @@ -90,23 +79,13 @@ The example below shows the configuration of an APISIX instance as *data plane* deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://${Control_Plane_IP}:9280 - prefix: /apisix - timeout: 30 - certs: - cert: /path/to/client.crt - cert_key: /path/to/client.key - trusted_ca_cert: /path/to/ca.crt + config_provider: etcd #END ``` The instance of APISIX deployed as the control plane will: 1. Listen on port `9180` and handle Admin API requests. -2. Provide the conf server which will listen on port `9280`. Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS. The example below shows the configuration of an APISIX instance as *control plane* in the decoupled mode: @@ -115,48 +94,14 @@ deployment: role: control_plane role_control_plane: config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/server.crt - cert_key: /path/to/server.key - client_ca_cert: /path/to/ca.crt etcd: host: - https://${etcd_IP}:${etcd_Port} prefix: /apisix timeout: 30 - certs: - cert: /path/to/client.crt - cert_key: /path/to/client.key - trusted_ca_cert: /path/to/ca.crt #END ``` -:::tip - -As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below: - -```yaml title="conf/config.yaml" -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/server.crt - cert_key: /path/to/server.key - etcd: - host: - - https://${etcd_IP}:${etcd_Port} - prefix: /apisix - timeout: 30 - certs: - trusted_ca_cert: /path/to/ca.crt -#END -``` - -::: - ## Standalone Turning on the APISIX node in Standalone mode will no longer use the 
default etcd as the configuration center. diff --git a/docs/en/latest/discovery/consul.md b/docs/en/latest/discovery/consul.md index b4eab61e982d..85e6b9ba2c8b 100644 --- a/docs/en/latest/discovery/consul.md +++ b/docs/en/latest/discovery/consul.md @@ -37,6 +37,7 @@ discovery: servers: # make sure service name is unique in these consul servers - "http://127.0.0.1:8500" # `http://127.0.0.1:8500` and `http://127.0.0.1:8600` are different clusters - "http://127.0.0.1:8600" # `consul` service is default skip service + token: "..." # if your consul cluster has enabled acl access control, you need to specify the token skip_services: # if you need to skip special services - "service_a" timeout: diff --git a/docs/en/latest/discovery/consul_kv.md b/docs/en/latest/discovery/consul_kv.md index bfb434417033..e0a2602c074b 100644 --- a/docs/en/latest/discovery/consul_kv.md +++ b/docs/en/latest/discovery/consul_kv.md @@ -40,6 +40,7 @@ discovery: servers: - "http://127.0.0.1:8500" - "http://127.0.0.1:8600" + token: "..." 
# if your consul cluster has enabled acl access control, you need to specify the token prefix: "upstreams" skip_keys: # if you need to skip special keys - "upstreams/unused_api/" diff --git a/docs/en/latest/discovery/nacos.md b/docs/en/latest/discovery/nacos.md index 35fee254b0dd..9a7084577d30 100644 --- a/docs/en/latest/discovery/nacos.md +++ b/docs/en/latest/discovery/nacos.md @@ -38,6 +38,8 @@ discovery: - "http://${username}:${password}@${host1}:${port1}" prefix: "/nacos/v1/" fetch_interval: 30 # default 30 sec + # `weight` is the `default_weight` that will be attached to each discovered node that + # doesn't have a weight explicitly provided in nacos results weight: 100 # default 100 timeout: connect: 2000 # default 2000 ms diff --git a/docs/en/latest/external-plugin.md b/docs/en/latest/external-plugin.md index 8094f1062ba8..7c81e9d1e40f 100644 --- a/docs/en/latest/external-plugin.md +++ b/docs/en/latest/external-plugin.md @@ -23,9 +23,9 @@ title: External Plugin ## What are external plugin and plugin runner -APISIX supports writing plugins in Lua. This type of plugins will be executed -inside APISIX. Sometimes you want to develop plugin in other languages, so APISIX -provides sidecars that loading your plugins and run them when the requests hit +APISIX supports writing plugins in Lua. This type of plugin will be executed +inside APISIX. Sometimes you want to develop plugins in other languages, so APISIX +provides sidecars that load your plugins and run them when the requests hit APISIX. These sidecars are called plugin runners and your plugins are called external plugins. @@ -49,7 +49,7 @@ plugins. Like other plugins, they can be enabled and reconfigured on the fly. ## How is it implemented -If you are instested in the implementation of Plugin Runner, please refer to [The Implementation of Plugin Runner](./internal/plugin-runner.md). 
+If you are interested in the implementation of Plugin Runner, please refer to [The Implementation of Plugin Runner](./internal/plugin-runner.md). ## Supported plugin runners @@ -60,7 +60,7 @@ If you are instested in the implementation of Plugin Runner, please refer to [Th ## Configuration for plugin runner in APISIX -To run plugin runner in the prod, add the section below to `config.yaml`: +To run the plugin runner in the prod, add the section below to `config.yaml`: ```yaml ext-plugin: @@ -99,7 +99,7 @@ path will be generated dynamically. ### When managing by APISIX, the runner can't access my environment variable -Since `v2.7`, APISIX can pass environment to the runner. +Since `v2.7`, APISIX can pass environment variables to the runner. However, Nginx will hide all environment variables by default. So you need to declare your variable first in the `conf/config.yaml`: @@ -115,7 +115,7 @@ nginx_config: Since `v2.7`, APISIX will stop the runner with SIGTERM when it is running on OpenResty 1.19+. -However, APISIX needs to wait the runner to quit so that we can ensure the resource +However, APISIX needs to wait for the runner to quit so that we can ensure the resource for the process group is freed. Therefore, we send SIGTERM first. And then after 1 second, if the runner is still diff --git a/docs/en/latest/internal/testing-framework.md b/docs/en/latest/internal/testing-framework.md index db84a23e0663..7fcdf01e4d37 100644 --- a/docs/en/latest/internal/testing-framework.md +++ b/docs/en/latest/internal/testing-framework.md @@ -285,7 +285,7 @@ hash_on: header chash_key: "custom-one" ``` -The default log level is `info`, but you can get the debug level log with `-- log_level: debug`. +The default log level is `info`, but you can get the debug level log with `--- log_level: debug`. 
## Upstream diff --git a/docs/en/latest/plugins/authz-keycloak.md b/docs/en/latest/plugins/authz-keycloak.md index d656e7095ea3..21ac21b80edd 100644 --- a/docs/en/latest/plugins/authz-keycloak.md +++ b/docs/en/latest/plugins/authz-keycloak.md @@ -48,7 +48,7 @@ Refer to [Authorization Services Guide](https://www.keycloak.org/docs/latest/aut | token_endpoint | string | False | | https://host.domain/auth/realms/foo/protocol/openid-connect/token | An OAuth2-compliant token endpoint that supports the `urn:ietf:params:oauth:grant-type:uma-ticket` grant type. If provided, overrides the value from discovery. | | resource_registration_endpoint | string | False | | https://host.domain/auth/realms/foo/authz/protection/resource_set | A UMA-compliant resource registration endpoint. If provided, overrides the value from discovery. | | client_id | string | True | | | The identifier of the resource server to which the client is seeking access. | -| client_secret | string | False | | | The client secret, if required. | +| client_secret | string | False | | | The client secret, if required. You can use APISIX secret to store and reference this value. APISIX currently supports storing secrets in two ways. [Environment Variables and HashiCorp Vault](../terminology/secret.md) | | grant_type | string | False | "urn:ietf:params:oauth:grant-type:uma-ticket" | ["urn:ietf:params:oauth:grant-type:uma-ticket"] | | | policy_enforcement_mode | string | False | "ENFORCING" | ["ENFORCING", "PERMISSIVE"] | | | permissions | array[string] | False | | | An array of strings, each representing a set of one or more resources and scopes the client is seeking access. 
| diff --git a/docs/en/latest/plugins/cors.md b/docs/en/latest/plugins/cors.md index 7d46c7a5a675..dad8279656aa 100644 --- a/docs/en/latest/plugins/cors.md +++ b/docs/en/latest/plugins/cors.md @@ -40,7 +40,7 @@ The `cors` Plugins lets you enable [CORS](https://developer.mozilla.org/en-US/do | expose_headers | string | False | "*" | Headers in the response allowed when accessing a cross-origin resource. Use `,` to add multiple headers. If `allow_credential` is set to `false`, you can enable CORS for all response headers by using `*`. If `allow_credential` is set to `true`, you can forcefully allow CORS on all response headers by using `**` but it will pose some security issues. | | max_age | integer | False | 5 | Maximum time in seconds the result is cached. If the time is within this limit, the browser will check the cached result. Set to `-1` to disable caching. Note that the maximum value is browser dependent. See [Access-Control-Max-Age](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#Directives) for more details. | | allow_credential | boolean | False | false | When set to `true`, allows requests to include credentials like cookies. According to CORS specification, if you set this to `true`, you cannot use '*' to allow all for the other attributes. | -| allow_origins_by_regex | array | False | nil | Regex to match with origin for enabling CORS. For example, `[".*\.test.com"]` can match all subdomain of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `allow_origins` is. | +| allow_origins_by_regex | array | False | nil | Regex to match origins that allow CORS. For example, `[".*\.test.com$"]` can match all subdomains of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `allow_origins` is. | | allow_origins_by_metadata | array | False | nil | Origins to enable CORS referenced from `allow_origins` set in the Plugin metadata. 
For example, if `"allow_origins": {"EXAMPLE": "https://example.com"}` is set in the Plugin metadata, then `["EXAMPLE"]` can be used to allow CORS on the origin `https://example.com`. | :::info IMPORTANT diff --git a/docs/en/latest/plugins/degraphql.md b/docs/en/latest/plugins/degraphql.md index b0eaaf83bf05..7407a435c531 100644 --- a/docs/en/latest/plugins/degraphql.md +++ b/docs/en/latest/plugins/degraphql.md @@ -97,7 +97,7 @@ Now we can use RESTful API to query the same data that is proxy by APISIX. First, we need to create a route in APISIX, and enable the degreaph plugin on the route, we need to define the GraphQL query in the plugin's config. ```bash -curl --location --request PUT 'http://localhost:9080/apisix/admin/routes/1' \ +curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \ --header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ --header 'Content-Type: application/json' \ --data-raw '{ @@ -210,7 +210,7 @@ we can execute it on `http://localhost:8080/playground`, and get the data as bel We convert the GraphQL query to JSON string like `"query($name: String!, $githubAccount: String!) 
{\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}"`, so we create a route like this: ```bash -curl --location --request PUT 'http://localhost:9080/apisix/admin/routes/1' \ +curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \ --header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ --header 'Content-Type: application/json' \ --data-raw '{ diff --git a/docs/en/latest/plugins/grpc-transcode.md b/docs/en/latest/plugins/grpc-transcode.md index 56680946dff5..9d0fdb46f77c 100644 --- a/docs/en/latest/plugins/grpc-transcode.md +++ b/docs/en/latest/plugins/grpc-transcode.md @@ -238,7 +238,7 @@ If the gRPC service returns an error, there may be a `grpc-status-details-bin` f Upload the proto file: ```shell -curl http://127.0.0.1:9080/apisix/admin/protos/1 \ +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -260,7 +260,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \ Enable the `grpc-transcode` plugin,and set the option `show_status_in_body` to `true`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], @@ -308,7 +308,7 @@ Server: APISIX web server Note that there is an undecoded field in the return body. If you need to decode the field, you need to add the `message type` of the field in the uploaded proto file. ```shell -curl http://127.0.0.1:9080/apisix/admin/protos/1 \ +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -335,7 +335,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \ Also configure the option `status_detail_type` to `helloworld.ErrorDetail`. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/en/latest/plugins/kafka-logger.md b/docs/en/latest/plugins/kafka-logger.md index 5d62fc758f8f..229256baeb6e 100644 --- a/docs/en/latest/plugins/kafka-logger.md +++ b/docs/en/latest/plugins/kafka-logger.md @@ -47,7 +47,7 @@ It might take some time to receive the log data. It will be automatically sent a | brokers.sasl_config.password | string | True | | | The password of sasl_config. If sasl_config exists, it's required. | | kafka_topic | string | True | | | Target topic to push the logs for organisation. | | producer_type | string | False | async | ["async", "sync"] | Message sending mode of the producer. | -| required_acks | integer | False | 1 | [0, 1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. | +| required_acks | integer | False | 1 | [1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. `required_acks` cannot be 0. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. | | key | string | False | | | Key used for allocating partitions for messages. | | timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. | | name | string | False | "kafka logger" | | Unique identifier for the batch processor. 
| diff --git a/docs/en/latest/plugins/limit-conn.md b/docs/en/latest/plugins/limit-conn.md index 0f463873cab7..af0e59bc0e9f 100644 --- a/docs/en/latest/plugins/limit-conn.md +++ b/docs/en/latest/plugins/limit-conn.md @@ -28,7 +28,7 @@ description: This document contains information about the Apache APISIX limit-co ## Description -The `limit-con` Plugin limits the number of concurrent requests to your services. +The `limit-conn` Plugin limits the number of concurrent requests to your services. ## Attributes diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index 24164a756cd5..46a775a00226 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -43,7 +43,7 @@ The `limit-count` Plugin limits the number of requests to your service by a give | policy | string | False | "local" | ["local", "redis", "redis-cluster"] | Rate-limiting policies to use for retrieving and increment the limit count. When set to `local` the counters will be locally stored in memory on the node. When set to `redis` counters are stored on a Redis server and will be shared across the nodes. It is done usually for global speed limiting, and setting to `redis-cluster` uses a Redis cluster instead of a single instance. | | allow_degradation | boolean | False | false | | When set to `true` enables Plugin degradation when the Plugin is temporarily unavailable (for example, a Redis timeout) and allows requests to continue. | | show_limit_quota_header | boolean | False | true | | When set to `true`, adds `X-RateLimit-Limit` (total number of requests) and `X-RateLimit-Remaining` (remaining number of requests) to the response header. | -| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the counter. | +| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the same counter. 
Do not configure it with a value that was previously used in this attribute, as the plugin would not allow it. | | redis_host | string | required when `policy` is `redis` | | | Address of the Redis server. Used when the `policy` attribute is set to `redis`. | | redis_port | integer | False | 6379 | [1,...] | Port of the Redis server. Used when the `policy` attribute is set to `redis`. | | redis_username | string | False | | | Username for Redis authentication if Redis ACL is used (for Redis version >= 6.0). If you use the legacy authentication method `requirepass` to configure Redis password, configure only the `redis_password`. Used when the `policy` is set to `redis`. | diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md index 493370240362..0130d192113d 100644 --- a/docs/en/latest/plugins/openid-connect.md +++ b/docs/en/latest/plugins/openid-connect.md @@ -67,6 +67,7 @@ description: OpenID Connect allows the client to obtain user information from th | proxy_opts.http_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `http_proxy`. | | proxy_opts.https_proxy_authorization | string | False | | Basic [base64 username:password] | As `http_proxy_authorization` but for use with `https_proxy` (since with HTTPS the authorisation is done when connecting, this one cannot be overridden by passing the `Proxy-Authorization` request header). | | proxy_opts.no_proxy | string | False | | | Comma separated list of hosts that should not be proxied. | +| authorization_params | object | False | | | Additional parameters to send in the request to the authorization endpoint. | NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
diff --git a/docs/en/latest/plugins/opentelemetry.md b/docs/en/latest/plugins/opentelemetry.md index eca682a061aa..55171d539f74 100644 --- a/docs/en/latest/plugins/opentelemetry.md +++ b/docs/en/latest/plugins/opentelemetry.md @@ -89,6 +89,29 @@ plugin_attr: max_export_batch_size: 2 ``` +## Variables + +The following nginx variables are set by OpenTelemetry: + +- `opentelemetry_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` +- `opentelemetry_trace_id` - Trace Id of the current span +- `opentelemetry_span_id` - Span Id of the current span + +To use these variables, you have to add them to your configuration file (`conf/config.yaml`): + +```yaml title="./conf/config.yaml" +http: + enable_access_log: true + access_log: "/dev/stdout" + access_log_format: '{"time": "$time_iso8601","opentelemetry_context_traceparent": "$opentelemetry_context_traceparent","opentelemetry_trace_id": "$opentelemetry_trace_id","opentelemetry_span_id": "$opentelemetry_span_id","remote_addr": "$remote_addr","uri": "$uri"}' + access_log_format_escape: json +plugins: + - opentelemetry +plugin_attr: + opentelemetry: + set_ngx_var: true +``` + ## Enable Plugin To enable the Plugin, you have to add it to your configuration file (`conf/config.yaml`): diff --git a/docs/en/latest/plugins/proxy-cache.md b/docs/en/latest/plugins/proxy-cache.md index 885fe2334300..8b31baa46ef7 100644 --- a/docs/en/latest/plugins/proxy-cache.md +++ b/docs/en/latest/plugins/proxy-cache.md @@ -62,7 +62,7 @@ You can add your cache configuration in you APISIX configuration file (`conf/con ```yaml title="conf/config.yaml" apisix: proxy_cache: - cache_ttl: 10s # 如果上游未指定缓存时间,则为默认磁盘缓存时间 + cache_ttl: 10s # default cache TTL for caching on disk zones: - name: disk_cache_one memory_size: 50m diff --git a/docs/en/latest/plugins/response-rewrite.md b/docs/en/latest/plugins/response-rewrite.md index
392d367254f2..9f1312e0bed1 100644 --- a/docs/en/latest/plugins/response-rewrite.md +++ b/docs/en/latest/plugins/response-rewrite.md @@ -83,7 +83,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 "set": { "X-Server-id": 3, "X-Server-status": "on", - "X-Server-balancer_addr": "$balancer_ip:$balancer_port" + "X-Server-balancer-addr": "$balancer_ip:$balancer_port" } }, "vars":[ @@ -107,7 +107,7 @@ Besides `set` operation, you can also `add` or `remove` response header like: ```json "headers": { "add": [ - "X-Server-balancer_addr: $balancer_ip:$balancer_port" + "X-Server-balancer-addr: $balancer_ip:$balancer_port" ], "remove": [ "X-TO-BE-REMOVED" @@ -137,7 +137,7 @@ Transfer-Encoding: chunked Connection: keep-alive X-Server-id: 3 X-Server-status: on -X-Server-balancer_addr: 127.0.0.1:80 +X-Server-balancer-addr: 127.0.0.1:80 {"code":"ok","message":"new json body"} ``` @@ -170,7 +170,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 "set": { "X-Server-id":3, "X-Server-status":"on", - "X-Server-balancer_addr":"$balancer_ip:$balancer_port" + "X-Server-balancer-addr":"$balancer_ip:$balancer_port" } }, "filters":[ diff --git a/docs/en/latest/plugins/ua-restriction.md b/docs/en/latest/plugins/ua-restriction.md index 070f08ba6a85..8438553dc962 100644 --- a/docs/en/latest/plugins/ua-restriction.md +++ b/docs/en/latest/plugins/ua-restriction.md @@ -30,7 +30,7 @@ description: This document contains information about the Apache APISIX ua-restr The `ua-restriction` Plugin allows you to restrict access to a Route or Service based on the `User-Agent` header with an `allowlist` and a `denylist`. -A common scenario is to set crawler rules. `User-Agent` is the identity of the client when sending requests to the server, and the user can whitelist or blacklist some crawler request headers in the `ua-restriction` Plugin. +A common scenario is to set crawler rules. 
`User-Agent` is the identity of the client when sending requests to the server, and the user can allow or deny some crawler request headers in the `ua-restriction` Plugin. ## Attributes diff --git a/docs/en/latest/plugins/zipkin.md b/docs/en/latest/plugins/zipkin.md index 2a772e608f0d..16d89bec8e30 100644 --- a/docs/en/latest/plugins/zipkin.md +++ b/docs/en/latest/plugins/zipkin.md @@ -235,3 +235,32 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 } }' ``` + +## Variables + +The following nginx variables are set by zipkin: + +- `zipkin_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` +- `zipkin_trace_id` - Trace Id of the current span +- `zipkin_span_id` - Span Id of the current span + +To use these variables, you have to add them to your configuration file (`conf/config.yaml`): + +```yaml title="./conf/config.yaml" +http: + enable_access_log: true + access_log: "/dev/stdout" + access_log_format: '{"time": "$time_iso8601","zipkin_context_traceparent": "$zipkin_context_traceparent","zipkin_trace_id": "$zipkin_trace_id","zipkin_span_id": "$zipkin_span_id","remote_addr": "$remote_addr","uri": "$uri"}' + access_log_format_escape: json +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +``` + +You can also include a trace_id when printing logs: + +```print error log +log.error(ngx.ERR,ngx_var.zipkin_trace_id,"error message") +``` diff --git a/docs/en/latest/profile.md b/docs/en/latest/profile.md index af226c09a55c..5c4bca67928b 100644 --- a/docs/en/latest/profile.md +++ b/docs/en/latest/profile.md @@ -1,5 +1,11 @@ --- -title: Configuration file switching based on environment variables +title: Configuration based on environments +keywords: + - Apache APISIX + - API Gateway + - Configuration + - Environment +description: This document describes how you can change APISIX configuration based on
environments. --- -The reason the configuration is extracted from the code is to better adapt to changes. Usually our applications have different -operating environments such as development environment and production environment. Certain configurations of these applications -will definitely be different, such as the address of the configuration center. +Extracting configuration from the code makes APISIX adaptable to changes in the operating environments. For example, APISIX can be deployed in a development environment for testing and then moved to a production environment. The configuration for APISIX in these environments would be different. -If the configuration of all environments is placed in the same file, it is very difficult to manage. After receiving new -requirements, we need to change the parameters in the configuration file to the development environment when developing the -development environment. You have to change it back. It's very easy to make mistakes. +APISIX supports managing multiple configurations through environment variables in two different ways: -The solution to the above problem is to distinguish the current running environment through environment variables, and switch -between different configuration files through environment variables. The corresponding environment variable in APISIX is: `APISIX_PROFILE` +1. Using environment variables in the configuration file +2. Using an environment variable to switch between multiple configuration profiles -When `APISIX_PROFILE` is not set, the following three configuration files are used by default: +## Using environment variables in the configuration file -* conf/config.yaml -* conf/apisix.yaml -* conf/debug.yaml +This is useful when you want to change some configurations based on the environment. + +To use environment variables, you can use the syntax `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=}}`. 
You can also set a default value to fall back to if no environment variables are set by adding it to the configuration as `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=VALUE}}`. The example below shows how you can modify your configuration file to use environment variables to set the listening ports of APISIX: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=}} +``` + +When you run APISIX, you can set these environment variables dynamically: + +```shell +export APISIX_NODE_LISTEN=8132 +export DEPLOYMENT_ADMIN_ADMIN_LISTEN=9232 +``` + +Now when you start APISIX, it will listen on port `8132` and expose the Admin API on port `9232`. + +To use default values if no environment variables are set, you can add it to your configuration file as shown below: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=9080}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=9180}} +``` + +Now if you don't specify these environment variables when running APISIX, it will fall back to the default values and expose the Admin API on port `9180` and listen on port `9080`. + +## Using the `APISIX_PROFILE` environment variable -If the value of `APISIX_PROFILE` is set to `prod`, the following three configuration files are used: +If you have multiple configuration changes for multiple environments, it might be better to have a different configuration file for each. + +Although this might increase the number of configuration files, you would be able to manage each independently and can even do version management. + +APISIX uses the `APISIX_PROFILE` environment variable to switch between environments, i.e. to switch between different sets of configuration files. If the value of `APISIX_PROFILE` is `env`, then APISIX will look for the configuration files `conf/config-env.yaml`, `conf/apisix-env.yaml`, and `conf/debug-env.yaml`. 
+ +For example for the production environment, you can have: * conf/config-prod.yaml * conf/apisix-prod.yaml * conf/debug-prod.yaml -Although this way will increase the number of configuration files, it can be managed independently, and then version management -tools such as git can be configured, and version management can be better achieved. +And for the development environment: + +* conf/config-dev.yaml +* conf/apisix-dev.yaml +* conf/debug-dev.yaml + +And if no environment is specified, APISIX can use the default configuration files: + +* conf/config.yaml +* conf/apisix.yaml +* conf/debug.yaml + +To use a particular configuration, you can specify it in the environment variable: + +```shell +export APISIX_PROFILE=prod +``` + +APISIX will now use the `-prod.yaml` configuration files. diff --git a/docs/en/latest/terminology/consumer-group.md b/docs/en/latest/terminology/consumer-group.md index 1cb06c3d769b..2f91657805ee 100644 --- a/docs/en/latest/terminology/consumer-group.md +++ b/docs/en/latest/terminology/consumer-group.md @@ -35,7 +35,9 @@ instead of managing each consumer individually. ## Example -The example below illustrates how to create a Consumer Group and bind it to a Consumer: +The example below illustrates how to create a Consumer Group and bind it to a Consumer. 
+ +Create a Consumer Group which shares the same rate limiting quota: ```shell curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ @@ -46,12 +48,14 @@ curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ "count": 200, "time_window": 60, "rejected_code": 503, - "group": "$consumer_group_id" + "group": "grp_company_a" } } }' ``` +Create a Consumer within the Consumer Group: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/terminology/consumer.md b/docs/en/latest/terminology/consumer.md index 0b2a1ad3e827..591396c4f336 100644 --- a/docs/en/latest/terminology/consumer.md +++ b/docs/en/latest/terminology/consumer.md @@ -60,6 +60,8 @@ The process of identifying a Consumer in APISIX is described below: Consumers are useful when you have different consumers requesting the same API and you need to execute different Plugin and Upstream configurations based on the consumer. These need to be used in conjunction with the user authentication system. +Authentcation plugins that can be configured with a Consumer include `basic-auth`, `hmac-auth`, `jwt-auth`, `key-auth`, `ldap-auth`, and `wolf-rbac`. + Refer to the documentation for the [key-auth](../plugins/key-auth.md) authentication Plugin to further understand the concept of a Consumer. :::note diff --git a/docs/en/latest/terminology/plugin-config.md b/docs/en/latest/terminology/plugin-config.md index 59b46b08933e..4ed78e6d5776 100644 --- a/docs/en/latest/terminology/plugin-config.md +++ b/docs/en/latest/terminology/plugin-config.md @@ -30,7 +30,7 @@ description: Plugin Config in Apache APISIX. Plugin Configs are used to extract commonly used [Plugin](./plugin.md) configurations and can be bound directly to a [Route](./route.md). -While configuring the same plugin, only one copy of the configuration is valid. 
The order of precedence is always `Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service`. +While configuring the same plugin, only one copy of the configuration is valid. Please read the [plugin execution order](../terminology/plugin.md#plugins-execution-order) and [plugin merging order](../terminology/plugin.md#plugins-merging-precedence). ## Example diff --git a/docs/en/latest/tutorials/add-multiple-api-versions.md b/docs/en/latest/tutorials/add-multiple-api-versions.md index e48c0c581433..f125a542f09c 100644 --- a/docs/en/latest/tutorials/add-multiple-api-versions.md +++ b/docs/en/latest/tutorials/add-multiple-api-versions.md @@ -105,7 +105,7 @@ docker compose up -d You first need to [Route](https://apisix.apache.org/docs/apisix/terminology/route/) your HTTP requests from the gateway to an [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) (your API). With APISIX, you can create a route by sending an HTTP request to the gateway. ```shell -curl http://apisix:9080/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PUT -d ' +curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PUT -d ' { "name": "Direct Route to Old API", "methods": ["GET"], @@ -142,7 +142,7 @@ In the previous step, we created a route that wrapped an upstream inside its con Let's create the shared upstream by running below curl cmd: ```shell -curl http://apisix:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: xyz' -X PUT -d ' +curl http://apisix:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: xyz' -X PUT -d ' { "name": "Old API", "type": "roundrobin", @@ -161,7 +161,7 @@ In the scope of this tutorial, we will use _URI path-based versioning_ because i Before introducing the new version, we also need to rewrite the query that comes to the API gateway before forwarding it to the upstream. Because both the old and new versions should point to the same upstream and the upstream exposes endpoint `/hello`, not `/v1/hello`. 
Let’s create a plugin configuration to rewrite the path: ```shell -curl http://apisix:9080/apisix/admin/plugin_configs/1 -H 'X-API-KEY: xyz' -X PUT -d ' +curl http://apisix:9180/apisix/admin/plugin_configs/1 -H 'X-API-KEY: xyz' -X PUT -d ' { "plugins": { "proxy-rewrite": { @@ -176,7 +176,7 @@ We can now create the second versioned route that references the existing upstr > Note that we can create routes for different API versions. ```shell -curl http://apisix:9080/apisix/admin/routes/2 -H 'X-API-KEY: xyz' -X PUT -d ' +curl http://apisix:9180/apisix/admin/routes/2 -H 'X-API-KEY: xyz' -X PUT -d ' { "name": "Versioned Route to Old API", "methods": ["GET"], @@ -209,7 +209,7 @@ Hello world We have versioned our API, but our API consumers probably still use the legacy non-versioned API. We want them to migrate, but we cannot just delete the legacy route as our users are unaware of it. Fortunately, the `301 HTTP` status code is our friend: we can let users know that the resource has moved from `http://apisix.org/hello` to `http://apisix.org/v1/hello`. It requires configuring the [redirect plugin](https://apisix.apache.org/docs/apisix/plugins/redirect/) on the initial route: ```shell -curl http://apisix:9080/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PATCH -d ' +curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PATCH -d ' { "plugins": { "redirect": { diff --git a/docs/en/latest/tutorials/protect-api.md b/docs/en/latest/tutorials/protect-api.md index 22caa1b16080..38c0bd6240f6 100644 --- a/docs/en/latest/tutorials/protect-api.md +++ b/docs/en/latest/tutorials/protect-api.md @@ -37,7 +37,7 @@ This represents the configuration of the plugins that are executed during the HT :::note -If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or Consumer are all bound to the same for plugins, only one plugin configuration will take effect. 
The priority of plugin configurations is: Consumer > Route > Plugin Config > Service. At the same time, there are 6 stages involved in the plugin execution process, namely `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter` and `log`. +If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or [Consumer](../terminology/consumer.md) are all bound to the same for plugins, only one plugin configuration will take effect. The priority of plugin configurations is described in [plugin execution order](../terminology/plugin.md#plugins-execution-order). At the same time, there are various stages involved in the plugin execution process. See [plugin execution lifecycle](../terminology/plugin.md#plugins-execution-order). ::: diff --git a/docs/zh/latest/CHANGELOG.md b/docs/zh/latest/CHANGELOG.md index be16cb06949a..a3ce74cca888 100644 --- a/docs/zh/latest/CHANGELOG.md +++ b/docs/zh/latest/CHANGELOG.md @@ -23,6 +23,8 @@ title: CHANGELOG ## Table of Contents +- [3.6.0](#360) +- [3.5.0](#350) - [3.4.0](#340) - [3.3.0](#330) - [3.2.1](#321) @@ -71,6 +73,73 @@ title: CHANGELOG - [0.7.0](#070) - [0.6.0](#060) +## 3.6.0 + +### Change + +- :warning: 移除 `etcd.use_grpc`,不再支持使用 gRPC 协议与 etcd 进行通信:[#10015](https://github.com/apache/apisix/pull/10015) +- :warning: 移除 conf server,数据平面不再支持与控制平面进行通信,需要从 `config_provider: control_plane` 调整为 `config_provider: etcd`:[#10012](https://github.com/apache/apisix/pull/10012) +- :warning: 严格验证核心资源的输入:[#10233](https://github.com/apache/apisix/pull/10233) + +### Core + +- :sunrise: 支持配置访问日志的缓冲区大小:[#10225](https://github.com/apache/apisix/pull/10225) +- :sunrise: 支持在 DNS 发现服务中允许配置 `resolv_conf` 来使用本地 DNS 解析器:[#9770](https://github.com/apache/apisix/pull/9770) +- :sunrise: 安装不再依赖 Rust:[#10121](https://github.com/apache/apisix/pull/10121) +- :sunrise: 在 xRPC 中添加 Dubbo 协议支持:[#9660](https://github.com/apache/apisix/pull/9660) + +### Plugins + +- :sunrise: 在 `traffic-split` 
插件中支持 HTTPS:[#9115](https://github.com/apache/apisix/pull/9115) +- :sunrise: 在 `ext-plugin` 插件中支持重写请求体:[#9990](https://github.com/apache/apisix/pull/9990) +- :sunrise: 在 `opentelemetry` 插件中支持设置 NGINX 变量:[#8871](https://github.com/apache/apisix/pull/8871) +- :sunrise: 在 `chaitin-waf` 插件中支持 UNIX sock 主机模式:[#10161](https://github.com/apache/apisix/pull/10161) + +### Bugfixes + +- 修复 GraphQL POST 请求路由匹配异常:[#10198](https://github.com/apache/apisix/pull/10198) +- 修复 `apisix.yaml` 中多行字符串数组的错误:[#10193](https://github.com/apache/apisix/pull/10193) +- 修复在 proxy-cache 插件中缺少 cache_zone 时提供错误而不是 nil panic:[#10138](https://github.com/apache/apisix/pull/10138) + +## 3.5.0 + +### Change + +- :warning: request-id 插件移除雪花算法:[#9715](https://github.com/apache/apisix/pull/9715) +- :warning: 不再兼容 OpenResty 1.19 版本,需要将其升级到 1.21+ 版本:[#9913](https://github.com/apache/apisix/pull/9913) +- :warning: 删除配置项 `apisix.stream_proxy.only`,L4/L7 代理需要通过配置项 `apesix.proxy_mode` 来启用:[#9607](https://github.com/apache/apisix/pull/9607) +- :warning: admin-api 的 `/apisix/admin/plugins?all=true` 接口标记为弃用:[#9580](https://github.com/apache/apisix/pull/9580) +- :warning: ua-restriction 插件不允许同时启用黑名单和白名单:[#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: 支持根据 host 级别动态设置 TLS 协议版本:[#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: 支持强制删除资源:[#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: 支持从 yaml 中提取环境变量:[#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: admin-api 新增 schema validate API 校验资源配置:[#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: 新增 chaitin-waf 插件:[#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: file-logger 支持设置 var 变量:[#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: mock 插件支持添加响应头:[#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: proxy-rewrite 插件支持正则匹配 URL 编码:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: google-cloud-logging 插件支持 
client_email 配置:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: opa 插件支持向上游发送 OPA server 返回的头:[#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: openid-connect 插件支持配置代理服务器:[#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- 修复 log-rotate 插件使用自定义名称时,max_kept 配置不起作用:[#9749](https://github.com/apache/apisix/pull/9749) +- 修复 limit_conn 在 stream 模式下非法使用 http 变量:[#9816](https://github.com/apache/apisix/pull/9816) +- 修复 loki-logger 插件在获取 log_labels 时会索引空值:[#9850](https://github.com/apache/apisix/pull/9850) +- 修复使用 limit-count 插件时,当请求被拒绝后,X-RateLimit-Reset 不应设置为 0:[#9978](https://github.com/apache/apisix/pull/9978) +- 修复 nacos 插件在运行时索引一个空值:[#9960](https://github.com/apache/apisix/pull/9960) +- 修复 etcd 在同步数据时,如果密钥有特殊字符,则同步异常:[#9967](https://github.com/apache/apisix/pull/9967) +- 修复 tencent-cloud-cls 插件 DNS 解析失败:[#9843](https://github.com/apache/apisix/pull/9843) +- 修复执行 reload 或 quit 命令时 worker 未退出:[#9909](https://github.com/apache/apisix/pull/9909) +- 修复在 traffic-split 插件中 upstream_id 有效性验证:[#10008](https://github.com/apache/apisix/pull/10008) + ## 3.4.0 ### Core diff --git a/docs/zh/latest/FAQ.md b/docs/zh/latest/FAQ.md index 923cf22f5cf4..944f96195420 100644 --- a/docs/zh/latest/FAQ.md +++ b/docs/zh/latest/FAQ.md @@ -109,7 +109,7 @@ luarocks config rocks_servers make deps ENV_LUAROCKS_SERVER=https://luarocks.cn ``` -如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 参数获取详细的日志来诊断问题。 +如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 或 `-v` 参数获取详细的日志来诊断问题。 ## 如何构建 APISIX-Base 环境? 
diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md index e1fd063e8a61..899fb4c44deb 100644 --- a/docs/zh/latest/admin-api.md +++ b/docs/zh/latest/admin-api.md @@ -326,8 +326,6 @@ Route 也称之为路由,可以通过定义一些规则来匹配客户端的 | timeout | 否 | 辅助 | 为 Route 设置 Upstream 连接、发送消息和接收消息的超时时间(单位为秒)。该配置将会覆盖在 Upstream 中配置的 [timeout](#upstream) 选项。 | {"connect": 3, "send": 3, "read": 3} | | enable_websocket | 否 | 辅助 | 当设置为 `true` 时,启用 `websocket`(boolean), 默认值为 `false`。 | | | status | 否 | 辅助 | 当设置为 `1` 时,启用该路由,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用。 | -| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | :::note 注意 @@ -637,8 +635,6 @@ Service 是某类 API 的抽象(也可以理解为一组 Route 的抽象)。 | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | | enable_websocket | 否 | 辅助 | `websocket`(boolean) 配置,默认值为 `false`。 | | | hosts | 否 | 匹配规则 | 非空列表形态的 `host`,表示允许有多个不同 `host`,匹配其中任意一个即可。| ["foo.com", "\*.bar.com"] | -| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | Service 对象 JSON 配置示例: @@ -822,8 +818,6 @@ Consumer 资源请求地址:/apisix/admin/consumers/{username} | plugins | 否 | Plugin | 该 Consumer 对应的插件配置,它的优先级是最高的:Consumer > Route > Plugin Config > Service。对于具体插件配置,请参考 [Plugins](#plugin)。 | | | desc | 否 | 辅助 | consumer 描述。 | | | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | -| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | Consumer 对象 JSON 配置示例: @@ -919,8 +913,6 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 | upstream_host | 否 | 辅助 | 指定上游请求的 host,只在 `pass_host` 配置为 `rewrite` 时有效。 | | | scheme | 否 | 辅助 | 跟上游通信时使用的 scheme。对于 7 层代理,可选值为 [`http`, `https`, `grpc`, `grpcs`]。对于 4 层代理,可选值为 [`tcp`, `udp`, `tls`]。默认值为 `http`,详细信息请参考下文。 | | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | 
{"version":"v2","build":"16","env":"production"} | -| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | | tls.client_cert | 否,不能和 `tls.client_cert_id` 一起使用 | https 证书 | 设置跟上游通信时的客户端证书,详细信息请参考下文。 | | | tls.client_key | 否,不能和 `tls.client_cert_id` 一起使用 | https 证书私钥 | 设置跟上游通信时的客户端私钥,详细信息请参考下文。 | | | tls.client_cert_id | 否,不能和 `tls.client_cert`、`tls.client_key` 一起使用 | SSL | 设置引用的 SSL id,详见 [SSL](#ssl)。 | | @@ -942,7 +934,6 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 - 设为 `header` 时,`key` 为必传参数,其值为自定义的 Header name,即 "http\_`key`"。 - 设为 `cookie` 时,`key` 为必传参数,其值为自定义的 cookie name,即 "cookie\_`key`"。请注意 cookie name 是**区分大小写字母**的。例如:`cookie_x_foo` 与 `cookie_X_Foo` 表示不同的 `cookie`。 - 设为 `consumer` 时,`key` 不需要设置。此时哈希算法采用的 `key` 为认证通过的 `consumer_name`。 -- 如果指定的 `hash_on` 和 `key` 获取不到值时,使用默认值:`remote_addr`。 以下特性需要 APISIX 运行于 [APISIX-Base](./FAQ.md#如何构建-APISIX-Base-环境?): @@ -1211,8 +1202,6 @@ SSL 资源请求地址:/apisix/admin/ssls/{id} | client.skip_mtls_uri_regex | 否 | PCRE 正则表达式数组 | 用来匹配请求的 URI,如果匹配,则该请求将绕过客户端证书的检查,也就是跳过 MTLS。 | ["/hello[0-9]+", "/foobar"] | | snis | 是 | 匹配规则 | 非空数组形式,可以匹配多个 SNI。 | | | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | -| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | | type | 否 | 辅助 | 标识证书的类型,默认值为 `server`。 | `client` 表示证书是客户端证书,APISIX 访问上游时使用;`server` 表示证书是服务端证书,APISIX 验证客户端请求时使用。 | | status | 否 | 辅助 | 当设置为 `1` 时,启用此 SSL,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用 | | ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | | @@ -1254,8 +1243,6 @@ Global Rule 资源请求地址:/apisix/admin/global_rules/{id} | 名称 | 必选项 | 类型 | 描述 | 示例值 | | ----------- | ------ | ------ | ------------------------------------------------- | ---------- | | plugins | 是 | Plugin | 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | | -| create_time | 否 | 辅助 | 
epoch 时间戳,单位为秒,如果不指定则自动创建。 | 1602883670 | -| update_time | 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 | 1602883670 | ## Consumer Group @@ -1283,8 +1270,6 @@ Consumer Group 资源请求地址:/apisix/admin/consumer_groups/{id} |plugins | 是 |Plugin| 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | | |desc | 否 | 辅助 | 标识描述、使用场景等。 | Consumer 测试。| |labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}| -|create_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670| -|update_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670| ## Plugin Config @@ -1312,8 +1297,6 @@ Plugin Config 资源请求地址:/apisix/admin/plugin_configs/{id} |plugins | 是 |Plugin| 更多信息请参考 [Plugin](terminology/plugin.md)。|| |desc | 否 | 辅助 | 标识描述、使用场景等。 |customer xxxx| |labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}| -|create_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670| -|update_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670| ## Plugin Metadata @@ -1376,6 +1359,16 @@ Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} | GET       | /apisix/admin/plugins/{plugin_name} | 无 | 获取资源。 | | GET | /apisix/admin/plugins?all=true | 无 | 获取所有插件的所有属性。 | | GET | /apisix/admin/plugins?all=true&subsystem=stream| 无 | 获取所有 Stream 插件的属性。| +| GET | /apisix/admin/plugins?all=true&subsystem=http| 无 | 获取所有 HTTP 插件的属性。| +| PUT | /apisix/admin/plugins/reload | 无 | 根据代码中所做的更改重新加载插件。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=stream | 无 | 获取指定 Stream 插件的属性。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=http | 无 | 获取指定 HTTP 插件的属性。 | + +:::caution + +获取所有插件属性的接口 `/apisix/admin/plugins?all=true` 将很快被弃用。 + +::: ### 使用示例 {#plugin-example} @@ -1435,6 +1428,7 @@ Plugin 资源请求地址:/apisix/admin/stream_routes/{id} | ---------------- | ------| -------- | ------------------------------------------------------------------------------| ------ | | upstream | 否 | Upstream | Upstream 配置,详细信息请参考 [Upstream](terminology/upstream.md)。 | | | upstream_id | 否 | Upstream | 需要使用的 Upstream 
id,详细信息请 [Upstream](terminology/upstream.md)。 | | +| service_id | 否 | String | 需要使用的 [Service](terminology/service.md) id. | | | remote_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果客户端 IP 匹配,则转发到上游 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" | | server_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果 APISIX 服务器的 IP 与 `server_addr` 匹配,则转发到上游。 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" | | server_port | 否 | 整数 | 过滤选项:如果 APISIX 服务器的端口 与 `server_port` 匹配,则转发到上游。 | 9090 | diff --git a/docs/zh/latest/building-apisix.md b/docs/zh/latest/building-apisix.md index 95672c82b1c7..abeac2033c68 100644 --- a/docs/zh/latest/building-apisix.md +++ b/docs/zh/latest/building-apisix.md @@ -53,7 +53,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend 然后,创建一个目录并设置环境变量 `APISIX_VERSION`: ```shell -APISIX_VERSION='3.4.0' +APISIX_VERSION='3.6.0' mkdir apisix-${APISIX_VERSION} ``` diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json index 7ac2791cad6c..1ff81e6b64d1 100644 --- a/docs/zh/latest/config.json +++ b/docs/zh/latest/config.json @@ -1,9 +1,16 @@ { - "version": "3.4.0", + "version": "3.6.0", "sidebar": [ { - "type": "doc", - "id": "getting-started" + "type": "category", + "label": "Getting Started", + "items": [ + "getting-started/README", + "getting-started/configure-routes", + "getting-started/load-balancing", + "getting-started/key-authentication", + "getting-started/rate-limiting" + ] }, { "type": "doc", @@ -216,6 +223,10 @@ "type": "doc", "id": "building-apisix" }, + { + "type": "doc", + "id": "support-fips-in-apisix" + }, { "type": "doc", "id": "external-plugin" diff --git a/docs/zh/latest/getting-started.md b/docs/zh/latest/getting-started.md deleted file mode 100644 index 969e30956fc9..000000000000 --- a/docs/zh/latest/getting-started.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: 快速入门指南 -keywords: - - APISIX - - APISIX 入门指南 - - APISIX docker 安装教程 -description: 本文档将引导你了解如何开始使用 Apache APISIX。 ---- - - - -import Tabs from '@theme/Tabs'; -import 
TabItem from '@theme/TabItem'; - -本文将为你介绍 Apache APISIX 的概念、功能以及如何使用 APISIX。 - -通过本文你可以了解到以下内容: - -- Apache APISIX 是什么? -- APISIX 的架构及主要概念。 -- 如何使用 Docker 安装并运行 APISIX。 -- 如何使用 Admin API 创建第一个路由并配置上游。 -- 如何使用 APISIX Dashboard。 -- 如何寻求帮助。 - -## Apache APISIX 是什么? - -Apache APISIX 是由 API7.ai(支流科技)捐赠给 Apache 软件基金会的云原生 API 网关,它兼具动态、实时、高性能等特点,提供了负载均衡、动态上游、灰度发布(金丝雀发布)、服务熔断、身份认证、可观测性等丰富的流量管理功能。我们可以使用 Apache APISIX 来处理传统的南北向流量,也可以处理服务间的东西向流量。同时,它也支持作为 [K8s Ingress Controller](https://github.com/apache/apisix-ingress-controller) 来使用。 - -### 主要特性 - -- 多平台支持:APISIX 提供了多平台解决方案,它不但支持裸机运行,也支持在 Kubernetes 中使用,还支持与 AWS Lambda、Azure Function、Lua 函数和 Apache OpenWhisk 等云服务集成。 -- 全动态能力:APISIX 支持热加载,这意味着你不需要重启服务就可以更新 APISIX 的配置。请访问[为什么 Apache APISIX 选择 Nginx + Lua 这个技术栈?](https://apisix.apache.org/zh/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/)以了解实现原理。 -- 精细化路由:APISIX 支持使用 [NGINX 内置变量](https://nginx.org/en/docs/varindex.html)做为路由的匹配条件,你可以自定义匹配函数来过滤请求,匹配路由。 -- 运维友好:APISIX 支持与以下工具和平台集成:[HashiCorp Vault](./terminology/secret.md#使用-vault-管理密钥)、[Zipkin](./plugins/zipkin.md)、[Apache SkyWalking](./plugins/skywalking.md)、[Consul](../../en/latest/discovery/consul_kv.md)、[Nacos](./discovery/nacos.md)、[Eureka](./discovery.md)。通过 [APISIX Dashboard](/docs/dashboard/USER_GUIDE),运维人员可以通过友好且直观的 UI 配置 APISIX。 -- 多语言插件支持:APISIX 支持多种开发语言进行插件开发,开发人员可以选择擅长语言的 SDK 开发自定义插件。 - -## 主要概念 - -下图为 Apache APISIX 的架构: - -![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png) - -下表是本文涉及到的 APISIX 的主要概念和组件: - -| 概念/组件 | 描述 | -|-------------|--------------------------------------------------------------------------------------------------| -| Route | 通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的插件,最后把请求转发给到指定的上游应用。 | -| Upstream | 上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。 | -| Admin API | 用户可以通过 Admin API 控制 APISIX 实例。 | - -## 前提条件 - -在开始使用 APISIX 之前,请确保你已经安装以下应用: - -- [Docker](https://www.docker.com/) 和 [Docker 
Compose](https://docs.docker.com/compose/)。 -- [curl](https://curl.se/docs/manpage.html) 用于测试 API。你也可以使用 [Hoppscotch](https://hoppscotch.io/) 之类的工具。 -- 本文使用的上游服务是 [httpbin.org](https://httpbin.org),你可以使用它进行测试。这是一个返回服务,它将返回我们在请求中传递的参数。 - -**请求内容:** - -请求 URL 由以下参数构成: - -- Protocol:即网络传输协议,在示例中,我们使用的是 `HTTP` 协议。 -- Port:即端口,示例中使用的 `80` 端口。 -- Host:即主机地址,示例中使用的是 `httpbin.org`。 -- Path:即路径,示例中的路径是 `/get`。 -- Query Parameters:即查询字符串,这里有两个字符串,分别是 `foo1` 和 `foo2`。 - -运行以下命令,发送请求: - -```bash -curl --location --request GET "http://httpbin.org/get?foo1=bar1&foo2=bar2" -``` - -**响应内容:** - -```json -{ - "args": { - "foo1": "bar1", - "foo2": "bar2" - }, - "headers": { - "Accept": "*/*", - "Host": "httpbin.org", - "User-Agent": "curl/7.29.0", - "X-Amzn-Trace-Id": "Root=1-6088fe84-24f39487166cce1f0e41efc9" - }, - "origin": "58.152.81.42", - "url": "http://httpbin.org/get?foo1=bar1&foo2=bar2" -} -``` - -## 安装 APISIX - -APISIX 可以借助 quickstart 脚本快速安装并启动。 - -```sh -curl -sL https://run.api7.ai/apisix/quickstart | sh -``` - -该命令在本地安装并运行了基于 Docker 的 APISIX 和 etcd 容器,其中 APISIX 采用 etcd 保存和同步配置信息。APISIX 和 etcd 容器使用 [**host**](https://docs.docker.com/network/host/) 的 Docker 网络模式,因此可以从本地直接访问。 - -如果一切顺利,将输出如下信息。 - -```text -✔ APISIX is ready! -``` - -:::note - -你也可以参考 [APISIX 安装指南](./installation-guide.md)了解不同的安装方法。 - -::: - -:::info IMPORTANT - -请确保其他系统进程没有占用 **9080、9180、9443 和 2379** 端口。 - -::: - -你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常。 - -```sh -curl "http://127.0.0.1:9080" --head | grep Server -``` - -如果一切顺利,将输出如下信息。 - -```text -Server: APISIX/Version -``` - -`Version` 是指您已经安装的 APISIX 的版本。例如,`APISIX/3.3.0`。 - -现在,你已经成功安装并运行了 APISIX! 
- -## 创建路由 - -APISIX 提供了强大的 [Admin API](./admin-api.md) 和 [Dashboard](https://github.com/apache/apisix-dashboard) 供用户使用。在下述示例中,我们将使用 Admin API 创建一个 [Route](./terminology/route.md) 并与 [Upstream](./terminology/upstream.md) 绑定,当一个请求到达 APISIX 时,APISIX 会将请求转发到指定的上游服务中。 - -以下示例代码中,我们将为路由配置匹配规则,以便 APISIX 可以将请求转发到对应的上游服务: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d ' -{ - "methods": ["GET"], - "host": "example.com", - "uri": "/anything/*", - "upstream": { - "type": "roundrobin", - "nodes": { - "httpbin.org:80": 1 - } - } -}' -``` - -该配置意味着,当请求满足下述的**所有**规则时,请求将被转发到上游服务(`httpbin.org:80`): - -- 请求的 HTTP 方法为 `GET`。 -- 请求头包含 `host` 字段,且它的值为 `example.com`。 -- 请求路径匹配 `/anything/*`,`*` 意味着任意的子路径,例如 `/anything/foo?arg=10`。 - -当路由创建完成后,可以通过以下命令访问上游服务: - -```bash -curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com" -``` - -该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 - -## 使用上游服务创建路由 - -你可以通过以下命令创建一个上游,并在路由中使用它,而不是直接将其配置在路由中: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -X PUT -d ' -{ - "type": "roundrobin", - "nodes": { - "httpbin.org:80": 1 - } -}' -``` - -该上游配置与上一节配置在路由中的上游相同。同样使用了 `roundrobin` 作为负载均衡机制,并设置了 `httpbin.org:80` 为上游服务。为了将该上游绑定到路由,此处需要把 `upstream_id` 设置为 `"1"`。更多字段信息,请参考 [Admin API](./admin-api.md)。 - -上游服务创建完成后,可以通过以下命令绑定到指定路由: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d ' -{ - "methods": ["GET"], - "host": "example.com", - "uri": "/anything/*", - "upstream_id": "1" -}' -``` - -我们已经创建了路由与上游服务,现在可以通过以下命令访问上游服务: - -```bash -curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com" -``` - -该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 - -## 使用 APISIX Dashboard - -你还可以使用 APISIX Dashboard 创建和配置类似于上述步骤中所创建的路由。 - -如果你已经完成上述操作步骤,就可以通过 [`localhost:9000`](http://localhost:9000/) 访问 APISIX Dashboard。 - -单击侧边栏中的 [`Route`](http://localhost:9000/routes/list),可以查看已经配置的路由列表。你也可以看到在上述步骤中使用 Admin API 创建的路由。 - -你也可以通过单击 
[`Create`](http://localhost:9000/routes/create) 按钮并按照提示创建新路由: - -![Creating a Route with APISIX Dashboard](../../assets/images/create-a-route.png) - -新创建的路由将被添加到路由列表中: - -![Creating a Route with APISIX Dashboard](../../assets/images/list-of-routes.png) - -想要了解更多关于 APISIX Dashboard 的信息,请参考 [APISIX Dashboard 文档](/docs/dashboard/USER_GUIDE)。 - -## 总结 - -完成上述步骤后,APISIX 就可以正常运行了。如果想利用 APISIX 实现身份验证、安全性、限流限速和可观测性等功能,可通过添加插件实现。各类插件的详细信息请参考[插件市场](/plugins)。 - -如果你在使用当中遇到困难,可以通过 [APISIX 社区频道](/docs/general/join)或者在 GitHub 上[提交一个 issue](/docs/general/submit-issue) 寻求帮助。 diff --git a/docs/zh/latest/getting-started/README.md b/docs/zh/latest/getting-started/README.md new file mode 100644 index 000000000000..7575132a3dcc --- /dev/null +++ b/docs/zh/latest/getting-started/README.md @@ -0,0 +1,71 @@ +--- +title: 入门指南 +description: 本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 是 Apache 软件基金会下的[顶级项目](https://projects.apache.org/project.html?apisix),由 API7.ai 开发并捐赠。它是一个具有动态、实时、高性能等特点的云原生 API 网关。 + +你可以使用 APISIX 网关作为所有业务的流量入口,它提供了动态路由、动态上游、动态证书、A/B 测试、灰度发布(金丝雀发布)、蓝绿部署、限速、防攻击、收集指标、监控报警、可观测、服务治理等功能。 + +本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 + +## 前置条件 + +快速启动脚本需要以下条件: + +* 已安装 [Docker](https://docs.docker.com/get-docker/),用于部署 **etcd** 和 **APISIX**。 +* 已安装 [curl](https://curl.se/),用于验证 APISIX 是否安装成功。 + +## 安装 APISIX + +:::caution + +为了提供更好的体验,管理 API 默认无需授权,请在生产环境中打开授权开关。 + +::: +APISIX 可以借助 quickstart 脚本快速安装并启动: + +```shell +curl -sL https://run.api7.ai/apisix/quickstart | sh +``` + +该命令启动 _apisix-quickstart_ 和 _etcd_ 两个容器,APISIX 使用 etcd 保存和同步配置。APISIX 和 etcd 容器使用 Docker 的 [**host**](https://docs.docker.com/network/host/) 网络模式,因此可以从本地直接访问。 + +如果一切顺利,将输出如下信息: + +```text +✔ APISIX is ready! 
+``` + +## 验证 + +你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常: + +```shell +curl "http://127.0.0.1:9080" --head | grep Server +``` + +如果一切顺利,将输出如下信息: + +```text +Server: APISIX/Version +``` + +这里的 `Version` 是指你已经安装的 APISIX 版本,比如 `APISIX/3.3.0`。 + +现在,你已经成功安装并运行了 APISIX! + +## 下一步 + +如果你已经成功地安装了 APISIX 并且正常运行,那么你可以继续进行下面的教程。 + +* [配置路由](configure-routes.md) +* [负载均衡](load-balancing.md) +* [限速](rate-limiting.md) +* [密钥验证](key-authentication.md) diff --git a/docs/zh/latest/getting-started/configure-routes.md b/docs/zh/latest/getting-started/configure-routes.md new file mode 100644 index 000000000000..6f6df3aeee56 --- /dev/null +++ b/docs/zh/latest/getting-started/configure-routes.md @@ -0,0 +1,71 @@ +--- +title: 配置路由 +slug: /getting-started/configure-routes +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 使用 _routes_ 来提供灵活的网关管理功能,在一个请求中,_routes_ 包含了访问路径和上游目标等信息。 + +本教程将引导你创建一个 route 并验证它,你可以参考以下步骤: + +1. 创建一个指向 [httpbin.org](http://httpbin.org)的 _upstream_。 +2. 使用 _cURL_ 发送一个请求,了解 APISIX 的代理和转发请求机制。 + +## Route 是什么 + +Route(也称之为路由)是访问上游目标的路径,在 [Apache APISIX](https://api7.ai/apisix) 中,Route 首先通过预定的规则来匹配客户端请求,然后加载和执行相应的插件,最后将请求转发至特定的 Upstream。 + +在 APISIX 中,一个最简单的 Route 仅由匹配路径和 Upstream 地址两个信息组成。 + +## Upstream 是什么 + +Upstream(也称之为上游)是一组具备相同功能的节点集合,它是对虚拟主机的抽象。Upstream 可以通过预先配置的规则对多个服务节点进行负载均衡。 + +## 前置条件 + +1. 
参考[入门指南](./README.md)完成 APISIX 的安装。 + +## 创建路由 + +你可以创建一个路由,将客户端的请求转发至 [httpbin.org](http://httpbin.org)(这个网站能测试 HTTP 请求和响应的各种信息)。 + +通过下面的命令,你将创建一个路由,把请求`http://127.0.0.1:9080/ip` 转发至 [httpbin.org/ip](http://httpbin.org/ip): + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-ip", + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果配置成功,将会返回 `HTTP/1.1 201 OK`。 + +## 验证 + +```shell +curl "http://127.0.0.1:9080/ip" +``` + +你将会得到类似下面的返回: + +```text +{ + "origin": "183.94.122.205" +} +``` + +## 下一步 + +本教程创建的路由仅对应一个上游目标。在下个教程中,你将会学习如何配置多个上游目标的负载均衡。 diff --git a/docs/zh/latest/getting-started/key-authentication.md b/docs/zh/latest/getting-started/key-authentication.md new file mode 100644 index 000000000000..ddcce9029db0 --- /dev/null +++ b/docs/zh/latest/getting-started/key-authentication.md @@ -0,0 +1,183 @@ +--- +title: 密钥验证 +slug: /getting-started/key-authentication +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +API 网关主要作用是连接 API 消费者和提供者。出于安全考虑,在访问内部资源之前,应先对消费者进行身份验证和授权。 + +![身份验证](https://static.apiseven.com/uploads/2023/02/08/8mRaK3v1_consumer.png) + +APISIX 拥有灵活的插件扩展系统,目前有很多可用于用户身份验证和授权的插件。例如: + +- [Key Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/key-auth/) +- [Basic Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/basic-auth/) +- [JSON Web Token (JWT) Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/jwt-auth/) +- [Keycloak](https://apisix.apache.org/zh/docs/apisix/plugins/authz-keycloak/) +- [Casdoor](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casdoor/) +- [Wolf RBAC](https://apisix.apache.org/zh/docs/apisix/plugins/wolf-rbac/) +- [OpenID Connect](https://apisix.apache.org/zh/docs/apisix/plugins/openid-connect/) +- [Central Authentication Service (CAS)](https://apisix.apache.org/zh/docs/apisix/plugins/cas-auth/) +- 
[HMAC](https://apisix.apache.org/zh/docs/apisix/plugins/hmac-auth/) +- [Casbin](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casbin/) +- [LDAP](https://apisix.apache.org/zh/docs/apisix/plugins/ldap-auth/) +- [Open Policy Agent (OPA)](https://apisix.apache.org/zh/docs/apisix/plugins/opa/) +- [Forward Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/forward-auth/) + +本教程中,你将创建一个带有 _密钥验证_ 插件的 _消费者_,并学习如何启用和停用身份验证插件。 + +## Consumer 是什么 + +Consumer(也称之为消费者)是指使用 API 的应用或开发人员。 + +在 APISIX 中,消费者需要一个全局唯一的 _名称_,并从上面的列表中选择一个身份验证 _插件_。 + +## Key Authentication 是什么 + +Key Authentication(也称之为密钥验证)是一个相对比较简单但是应用广泛的身份验证方法,它的设计思路如下: + +1. 管理员为路由添加一个身份验证密钥(API 密钥)。 +2. API 消费者在发送请求时,在查询字符串或者请求头中添加密钥。 + +## 启用 Key Authentication + +### 前置条件 + +1. 参考[快入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +### 创建消费者 + +创建一个名为 `tom` 的消费者,并启用 `key-auth` 插件,密钥设置为 `secret-key`。所有携带密钥 `secret-key` 的请求都会被识别为消费者 `tom`。 + +:::caution + +生产环境请使用复杂的密钥。 + +::: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT -d ' +{ + "username": "tom", + "plugins": { + "key-auth": { + "key": "secret-key" + } + } +}' +``` + +如果消费者创建成功,你将得到返回 `HTTP/1.1 201 OK`。 + +### 启用 Authentication + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `key-auth` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": {} + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 Created`。 + +### 验证 + +我们可以在以下场景中进行验证: + +#### 1. 发送不带任何密钥的请求 + +发送一个不带请求头 `apikey` 的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +如果你已经启用了密钥身份验证,你将会得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:36 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 2. 
发送携带错误密钥的请求 + +发送一个携带错误密钥(比如 `wrong-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: wrong-key' +``` + +如果密钥错误,你也将得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:27 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 3. 发送携带正确密钥的请求 + +发送一个携带正确密钥(`secret-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: secret-key' +``` + +你将会得到返回 `HTTP/1.1 200 OK`。 + +```text +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 44 +Connection: keep-alive +Date: Thu, 09 Feb 2023 03:27:57 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.1.0 +``` + +### 禁用 Authentication + +将参数设置 `_meta.disable` 为 `true`,即可禁用密钥验证插件。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": { + "_meta": { + "disable": true + } + } + } +}' +``` + +你可以发送一个不带任何密钥的请求来验证: + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +因为你已经禁用了密钥验证插件,所以你将会得到返回 `HTTP/1.1 200 OK`。 + +## 下一步 + +你已经学习了如何为路由配置密钥验证。在下个教程中,你将学习如何配置限速。 diff --git a/docs/zh/latest/getting-started/load-balancing.md b/docs/zh/latest/getting-started/load-balancing.md new file mode 100644 index 000000000000..2923f8f516ea --- /dev/null +++ b/docs/zh/latest/getting-started/load-balancing.md @@ -0,0 +1,99 @@ +--- +title: 负载均衡 +slug: /getting-started/load-balancing +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +负载均衡管理客户端和服务端之间的流量。它决定由哪个服务来处理特定的请求,从而提高性能、可扩展性和可靠性。在设计需要处理大量流量的系统时,负载均衡是一个关键的考虑因素。 + +Apache APISIX 支持加权负载均衡算法,传入的流量按照预定顺序轮流分配给一组服务器的其中一个。 + +在本教程中,你将创建一个具有两个上游服务的路由,并且启用负载均衡来测试在两个服务之间的切换情况。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 
了解 APISIX 中[路由及上游](./configure-routes.md#route-是什么)的概念。 + +## 启用负载均衡 + +创建一个具有两个上游服务的路由,访问 `/headers` 将被转发到 [httpbin.org](https://httpbin.org/headers) 和 [mock.api7.ai](https://mock.api7.ai/headers) 这两个上游服务,并且会返回请求头。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-headers", + "uri": "/headers", + "upstream" : { + "type": "roundrobin", + "nodes": { + "httpbin.org:443": 1, + "mock.api7.ai:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } +}' +``` + +如果路由创建成功,你将会收到返回 `HTTP/1.1 201 OK`。 + +:::info + +1. 将 `pass_host` 字段设置为 `node`,将传递请求头给上游。 +2. 将 `scheme` 字段设置为 `https`,向上游发送请求时将启用 TLS。 + +::: + +## 验证 + +这两个服务返回不同的数据。 + +`httpbin.org` 返回: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.58.0", + "X-Amzn-Trace-Id": "Root=1-63e34b15-19f666602f22591b525e1e80", + "X-Forwarded-Host": "localhost" + } +} +``` + +`mock.api7.ai` 返回: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + "user-agent": "curl/7.58.0", + "content-type": "application/json", + "x-application-owner": "API7.ai" + } +} +``` + +我们生成 100 个请求来测试负载均衡的效果: + +```shell +hc=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/headers" -sL | grep "httpbin" | wc -l); echo httpbin.org: $hc, mock.api7.ai: $((100 - $hc)) +``` + +结果显示,请求几乎平均分配给这两个上游服务: + +```text +httpbin.org: 51, mock.api7.ai: 49 +``` + +## 下一步 + +你已经学习了如何配置负载均衡。在下个教程中,你将学习如何配置身份验证。 diff --git a/docs/zh/latest/getting-started/rate-limiting.md b/docs/zh/latest/getting-started/rate-limiting.md new file mode 100644 index 000000000000..22f635fd5ca4 --- /dev/null +++ b/docs/zh/latest/getting-started/rate-limiting.md @@ -0,0 +1,101 @@ +--- +title: 限速 +slug: /getting-started/rate-limiting +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +APISIX 是一个统一的控制中心,它管理 API 和微服务的进出流量。除了客户端发来的合理的请求,还可能存在网络爬虫产生的不必要的流量,此外,网络攻击(比如 DDos)也可能产生非法请求。 + +APISIX 提供限速功能,通过限制在规定时间内发送到上游服务的请求数量来保护 APIs 和微服务。请求的计数在内存中完成,具有低延迟和高性能的特点。 + +
+
+Routes Diagram +
+
+ +在本教程中,你将启用 `limit-count` 插件来限制传入流量的速率。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +## 启用 Rate Limiting + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `limit-count` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 10, + "rejected_code": 503 + } + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 OK`。上述配置将传入流量的速率限制为每 10 秒最多 2 个请求。 + +### 验证 + +我们同时生成 100 个请求来测试限速插件的效果。 + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +请求结果同预期一致:在这 100 个请求中,有 2 个请求发送成功(状态码为 `200`),其他请求均被拒绝(状态码为 `503`)。 + +```text +"200": 2, "503": 98 +``` + +## 禁用 Rate Limiting + +将参数设置 `_meta.disable` 为 `true`,即可禁用限速插件。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "_meta": { + "disable": true + } + } + } +}' +``` + +### 验证 + +我们再次同时生成 100 个请求来测试限速插件是否已被禁用: + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +结果显示所有的请求均成功: + +```text +"200": 100, "503": 0 +``` + +## 更多 + +你可以使用 APISIX 的变量来配置限速插件的规则,比如 `$host` 和 `$uri`。此外,APISIX 也支持使用 Redis 集群进行限速配置,即通过 Redis 来进行计数。 + +## 下一步 + +恭喜你!你已经学习了如何配置限速插件,这也意味着你已经完成了所有的入门教程。 + +你可以继续学习其他文档来定制 APISIX,以满足你的生产环境需要。 diff --git a/docs/zh/latest/installation-guide.md b/docs/zh/latest/installation-guide.md index d4d44b2bcef0..9bb1254dd4db 100644 --- a/docs/zh/latest/installation-guide.md +++ b/docs/zh/latest/installation-guide.md @@ -31,7 +31,7 @@ import TabItem from '@theme/TabItem'; 本文将介绍如何在你的环境中安装并运行 APISIX。 -关于如何快速运行 Apache APISIX,请参考[入门指南](./getting-started.md)。 +关于如何快速运行 Apache APISIX,请参考[入门指南](./getting-started/README.md)。 ## 安装 APISIX @@ 
-337,4 +337,4 @@ systemctl stop apisix 如果你是通过其他方法安装的 APISIX,可以参考[配置文件模板](https://github.com/api7/apisix-build-tools/blob/master/usr/lib/systemd/system/apisix.service)进行修改,并将其添加在 `/usr/lib/systemd/system/apisix.service` 路径下。 -如需了解 APISIX 后续使用,请参考[入门指南](./getting-started.md)获取更多信息。 +如需了解 APISIX 后续使用,请参考[入门指南](./getting-started/README.md)获取更多信息。 diff --git a/docs/zh/latest/plugins/cors.md b/docs/zh/latest/plugins/cors.md index e9c60d465a38..0ced5a57dd45 100644 --- a/docs/zh/latest/plugins/cors.md +++ b/docs/zh/latest/plugins/cors.md @@ -40,7 +40,7 @@ description: 本文介绍了 Apache APISIX cors 插件的基本信息及使用 | expose_headers | string | 否 | "*" | 允许跨域访问时响应方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许任意 Header。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许任意 Header,但请注意这样存在安全隐患。 | | max_age | integer | 否 | 5 | 浏览器缓存 CORS 结果的最大时间,单位为秒。在这个时间范围内,浏览器会复用上一次的检查结果,`-1` 表示不缓存。请注意各个浏览器允许的最大时间不同,详情请参考 [Access-Control-Max-Age - MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#directives)。 | | allow_credential | boolean | 否 | false | 是否允许跨域访问的请求方携带凭据(如 Cookie 等)。根据 CORS 规范,如果设置该选项为 `true`,那么将不能在其他属性中使用 `*`。 | -| allow_origins_by_regex | array | 否 | nil | 使用正则表达式数组来匹配允许跨域访问的 Origin,如 `[".*\.test.com"]` 可以匹配任何 `test.com` 的子域名 `*`。如果 `allow_origins_by_regex` 属性已经指定,则会忽略 `allow_origins` 属性。 | +| allow_origins_by_regex | array | 否 | nil | 使用正则表达式数组来匹配允许跨域访问的 Origin,如 `[".*\.test.com$"]` 可以匹配任何 `test.com` 的子域名。如果 `allow_origins_by_regex` 属性已经指定,则会忽略 `allow_origins` 属性。 | | allow_origins_by_metadata | array | 否 | nil | 通过引用插件元数据的 `allow_origins` 配置允许跨域访问的 Origin。比如当插件元数据为 `"allow_origins": {"EXAMPLE": "https://example.com"}` 时,配置 `["EXAMPLE"]` 将允许 Origin `https://example.com` 的访问。 | :::info IMPORTANT diff --git a/docs/zh/latest/plugins/grpc-transcode.md b/docs/zh/latest/plugins/grpc-transcode.md index 4c03f4cf9735..44445ab859e1 100644 --- a/docs/zh/latest/plugins/grpc-transcode.md +++ 
b/docs/zh/latest/plugins/grpc-transcode.md @@ -239,7 +239,7 @@ Trailer: grpc-message 上传 proto 文件: ```shell -curl http://127.0.0.1:9080/apisix/admin/protos/1 \ +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -261,7 +261,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \ 启用 `grpc-transcode` 插件,并设置选项 `show_status_in_body` 为 `true`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], @@ -309,7 +309,7 @@ Server: APISIX web server 注意返回体中还存在未解码的字段,如果需要解码该字段,需要在上传的 proto 文件中加上该字段对应的 `message type`。 ```shell -curl http://127.0.0.1:9080/apisix/admin/protos/1 \ +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -336,7 +336,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \ 同时配置选项 `status_detail_type` 为 `helloworld.ErrorDetail`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/zh/latest/plugins/kafka-logger.md b/docs/zh/latest/plugins/kafka-logger.md index a704bfbf8ea4..49801f67b4f3 100644 --- a/docs/zh/latest/plugins/kafka-logger.md +++ b/docs/zh/latest/plugins/kafka-logger.md @@ -45,7 +45,7 @@ description: API 网关 Apache APISIX 的 kafka-logger 插件用于将日志作 | brokers.sasl_config.password | string | 是 | | | Kafka broker 中 sasl 配置中的 password,如果 sasl_config 存在,则必须填写 | | kafka_topic | string | 是 | | | 需要推送的 topic。 | | producer_type | string | 否 | async | ["async", "sync"] | 生产者发送消息的模式。 | -| required_acks | integer | 否 | 1 | [0, 1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 
文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。 | +| required_acks | integer | 否 | 1 | [1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。required_acks 还不支持为 0。 | | key | string | 否 | | | 用于消息分区而分配的密钥。 | | timeout | integer | 否 | 3 | [1,...] | 发送数据的超时时间。 | | name | string | 否 | "kafka logger" | | batch processor 的唯一标识。 | diff --git a/docs/zh/latest/plugins/limit-count.md b/docs/zh/latest/plugins/limit-count.md index 467666b8adbd..dbacdfa00a20 100644 --- a/docs/zh/latest/plugins/limit-count.md +++ b/docs/zh/latest/plugins/limit-count.md @@ -44,7 +44,7 @@ description: 本文介绍了 Apache APISIX limit-count 插件的相关操作, | policy | string | 否 | "local" | ["local", "redis", "redis-cluster"] | 用于检索和增加限制计数的策略。当设置为 `local` 时,计数器被以内存方式保存在节点本地;当设置为 `redis` 时,计数器保存在 Redis 服务节点上,从而可以跨节点共享结果,通常用它来完成全局限速;当设置为 `redis-cluster` 时,使用 Redis 集群而不是单个实例。| | allow_degradation | boolean | 否 | false | | 当插件功能临时不可用时(例如 Redis 超时),当设置为 `true` 时,则表示可以允许插件降级并进行继续请求的操作。 | | show_limit_quota_header | boolean | 否 | true | | 当设置为 `true` 时,在响应头中显示 `X-RateLimit-Limit`(限制的总请求数)和 `X-RateLimit-Remaining`(剩余还可以发送的请求数)字段。 | -| group | string | 否 | | 非空 | 配置相同 group 的路由将共享相同的限流计数器。 | +| group | string | 否 | | 非空 | 配置相同 group 的路由将共享相同的限流计数器。请勿使用先前使用过的值进行配置,插件将报错。 | | redis_host | string | 否 | | | 当使用 `redis` 限速策略时,Redis 服务节点的地址。**当 `policy` 属性设置为 `redis` 时必选。**| | redis_port | integer | 否 | 6379 | [1,...] 
| 当使用 `redis` 限速策略时,Redis 服务节点的端口。| | redis_username | string | 否 | | | 若使用 Redis ACL 进行身份验证(适用于 Redis 版本 >=6.0),则需要提供 Redis 用户名。若使用 Redis legacy 方式 `requirepass` 进行身份验证,则只需将密码配置在 `redis_password`。当 `policy` 设置为 `redis` 时使用。| diff --git a/docs/zh/latest/plugins/openid-connect.md b/docs/zh/latest/plugins/openid-connect.md index aececc4986e0..9355f951ebec 100644 --- a/docs/zh/latest/plugins/openid-connect.md +++ b/docs/zh/latest/plugins/openid-connect.md @@ -67,6 +67,7 @@ description: OpenID Connect(OIDC)是基于 OAuth 2.0 的身份认证协议 | proxy_opts.http_proxy_authorization | string | 否 | | Basic [base64 username:password] | `http_proxy` 默认的 `Proxy-Authorization` 请求头参数值。 | | proxy_opts.https_proxy_authorization | string | 否 | | Basic [base64 username:password] | 与`http_proxy_authorization`相同,但与`https_proxy`一起使用(因为使用 HTTPS 时,授权是在连接时完成的,因此不能通过传递 Proxy-Authorization 请求头来覆盖此授权)。 | | proxy_opts.no_proxy | string | 否 | | | 不应被代理的主机的逗号分隔列表。 | +| authorization_params | object | false | | | 在请求中发送到授权端点的附加参数 | 注意:schema 中还定义了 `encrypt_fields = {"client_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 diff --git a/docs/zh/latest/plugins/opentelemetry.md b/docs/zh/latest/plugins/opentelemetry.md index e4794c8e2ca8..474aafb012d3 100644 --- a/docs/zh/latest/plugins/opentelemetry.md +++ b/docs/zh/latest/plugins/opentelemetry.md @@ -88,6 +88,29 @@ plugin_attr: max_export_batch_size: 2 ``` +## 如何使用变量 + +以下`nginx`变量是由`opentelemetry` 设置的。 + +- `opentelemetry_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), 例如:`00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` +- `opentelemetry_trace_id` - 当前 span 的 trace_id +- `opentelemetry_span_id` - 当前 span 的 span_id + +如何使用?你需要在配置文件(`./conf/config.yaml`)设置如下: + +```yaml title="./conf/config.yaml" +http: + enable_access_log: true + access_log: "/dev/stdout" + access_log_format: '{"time": "$time_iso8601","opentelemetry_context_traceparent": 
"$opentelemetry_context_traceparent","opentelemetry_trace_id": "$opentelemetry_trace_id","opentelemetry_span_id": "$opentelemetry_span_id","remote_addr": "$remote_addr","uri": "$uri"}' + access_log_format_escape: json +plugins: + - opentelemetry +plugin_attr: + opentelemetry: + set_ngx_var: true +``` + ## 如何启用 `opentelemetry` 插件默认为禁用状态,你需要在配置文件(`./conf/config.yaml`)中开启该插件: diff --git a/docs/zh/latest/plugins/response-rewrite.md b/docs/zh/latest/plugins/response-rewrite.md index a59c34f54806..be409411b346 100644 --- a/docs/zh/latest/plugins/response-rewrite.md +++ b/docs/zh/latest/plugins/response-rewrite.md @@ -84,7 +84,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ "set": { "X-Server-id": 3, "X-Server-status": "on", - "X-Server-balancer_addr": "$balancer_ip:$balancer_port" + "X-Server-balancer-addr": "$balancer_ip:$balancer_port" } }, "vars":[ @@ -108,7 +108,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ ```json "headers": { "add": [ - "X-Server-balancer_addr: $balancer_ip:$balancer_port" + "X-Server-balancer-addr: $balancer_ip:$balancer_port" ], "remove": [ "X-TO-BE-REMOVED" @@ -138,7 +138,7 @@ Transfer-Encoding: chunked Connection: keep-alive X-Server-id: 3 X-Server-status: on -X-Server-balancer_addr: 127.0.0.1:80 +X-Server-balancer-addr: 127.0.0.1:80 {"code":"ok","message":"new json body"} ``` @@ -169,7 +169,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 "set": { "X-Server-id":3, "X-Server-status":"on", - "X-Server-balancer_addr":"$balancer_ip:$balancer_port" + "X-Server-balancer-addr":"$balancer_ip:$balancer_port" } }, "filters":[ diff --git a/docs/zh/latest/plugins/zipkin.md b/docs/zh/latest/plugins/zipkin.md index 6f6e0d2b8148..c43321f5f472 100644 --- a/docs/zh/latest/plugins/zipkin.md +++ b/docs/zh/latest/plugins/zipkin.md @@ -235,3 +235,32 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 } }' ``` + +## 如何使用变量 + +以下`nginx`变量是由`zipkin` 设置的。 + +- 
`zipkin_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), 例如:`00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` +- `zipkin_trace_id` - 当前 span 的 trace_id +- `zipkin_span_id` - 当前 span 的 span_id + +如何使用?你需要在配置文件(`./conf/config.yaml`)设置如下: + +```yaml title="./conf/config.yaml" +http: + enable_access_log: true + access_log: "/dev/stdout" + access_log_format: '{"time": "$time_iso8601","zipkin_context_traceparent": "$zipkin_context_traceparent","zipkin_trace_id": "$zipkin_trace_id","zipkin_span_id": "$zipkin_span_id","remote_addr": "$remote_addr","uri": "$uri"}' + access_log_format_escape: json +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +``` + +你也可以在打印日志的时候带上 `trace_id` + +```print error log +log.error(ngx.ERR,ngx_var.zipkin_trace_id,"error message") +``` diff --git a/docs/zh/latest/ssl-protocol.md b/docs/zh/latest/ssl-protocol.md index ee886a621c08..563b2095df90 100644 --- a/docs/zh/latest/ssl-protocol.md +++ b/docs/zh/latest/ssl-protocol.md @@ -21,7 +21,7 @@ title: SSL 协议 # --> -`APISIX` 支持,还支持动态的为每一个 SNI 指定不同的 TLS 协议版本。 +`APISIX` 支持 TLS 协议,还支持动态的为每一个 SNI 指定不同的 TLS 协议版本。 **为了安全考虑,APISIX 默认使用的加密套件不支持 TLSv1.1 以及更低的版本。** **如果你需要启用 TLSv1.1 协议,请在 config.yaml 的配置项 apisix.ssl.ssl_ciphers 增加 TLSv1.1 协议所支持的加密套件。** @@ -171,7 +171,7 @@ $ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I * SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 ``` -使用 TLSv1.3 访问 test2.com 失败: +使用 TLSv1.1 访问 test2.com 失败: ```shell curl --tls-max 1.1 --tlsv1.1 https://test2.com:9443 -v -k -I diff --git a/docs/zh/latest/support-fips-in-apisix.md b/docs/zh/latest/support-fips-in-apisix.md new file mode 100644 index 000000000000..92facc21f3cc --- /dev/null +++ b/docs/zh/latest/support-fips-in-apisix.md @@ -0,0 +1,60 @@ +--- +id: support-fips-in-apisix +title: 通过 OpenSSL 3.0 使 APISIX 支持 FIPS 模式 +keywords: + - API 网关 + - Apache APISIX + - 贡献代码 + - 构建 APISIX + - OpenSSL 3.0 FIPS +description: 本文将介绍如何在 
Apache APISIX 中使用 OpenSSL 3.0 来编译 apisix-base,即可启用 FIPS 模式。 +--- + + + +目前,OpenSSL 3.0 [支持了](https://www.openssl.org/blog/blog/2022/08/24/FIPS-validation-certificate-issued/) [FIPS](https://en.wikipedia.org/wiki/FIPS_140-2) 模式。为了在 APISIX 中支持 FIPS 模式,你应该使用 OpenSSL 3.0 来编译 apisix-base。 + +## 编译 + +如果你需要使用 OpenSSL 3.0 来编译 apisix-base,请以 root 用户角色来执行以下命令: + +```bash +cd $(mktemp -d) +OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/usr/local} +apt install -y build-essential +git clone https://github.com/openssl/openssl +cd openssl +./Configure --prefix=$OPENSSL3_PREFIX/openssl-3.0 enable-fips +make install +echo $OPENSSL3_PREFIX/openssl-3.0/lib64 > /etc/ld.so.conf.d/openssl3.conf +ldconfig +$OPENSSL3_PREFIX/openssl-3.0/bin/openssl fipsinstall -out $OPENSSL3_PREFIX/openssl-3.0/ssl/fipsmodule.cnf -module $OPENSSL3_PREFIX/openssl-3.0/lib64/ossl-modules/fips.so +sed -i 's@# .include fipsmodule.cnf@.include '"$OPENSSL3_PREFIX"'/openssl-3.0/ssl/fipsmodule.cnf@g; s/# \(fips = fips_sect\)/\1\nbase = base_sect\n\n[base_sect]\nactivate=1\n/g' $OPENSSL3_PREFIX/openssl-3.0/ssl/openssl.cnf +cd .. 
+ +export cc_opt="-I$OPENSSL3_PREFIX/openssl-3.0/include" +export ld_opt="-L$OPENSSL3_PREFIX/openssl-3.0/lib64 -Wl,-rpath,$OPENSSL3_PREFIX/openssl-3.0/lib64" + +wget https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-base.sh +chmod +x build-apisix-base.sh +./build-apisix-base.sh latest +``` + +apisix-base 将安装在 `/usr/local/openresty-debug`。 diff --git a/docs/zh/latest/terminology/consumer-group.md b/docs/zh/latest/terminology/consumer-group.md index d9390949b579..94f870503c49 100644 --- a/docs/zh/latest/terminology/consumer-group.md +++ b/docs/zh/latest/terminology/consumer-group.md @@ -34,6 +34,8 @@ description: 本文介绍了 Apache APISIX Consumer Group 对象的概念及使 以下示例展示了如何创建消费者组并将其绑定到消费者中。 +创建一个共享相同限流配额的消费者组: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' @@ -43,12 +45,14 @@ curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ "count": 200, "time_window": 60, "rejected_code": 503, - "group": "$consumer_group_id" + "group": "grp_company_a" } } }' ``` +在消费者组中创建消费者: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/terminology/consumer.md b/docs/zh/latest/terminology/consumer.md index cb869a221bdf..7892b1b87bdf 100644 --- a/docs/zh/latest/terminology/consumer.md +++ b/docs/zh/latest/terminology/consumer.md @@ -56,6 +56,10 @@ Consumer 是某类服务的消费者,需要与用户认证配合才可以使 2. 获取 consumer_name:通过授权认证,即可自然获取到对应的 Consumer name,它是 Consumer 对象的唯一识别标识; 3. 
获取 Consumer 上绑定的 Plugin 或 Upstream 信息:完成对不同 Consumer 做不同配置的效果。 +当有不同的使用者请求相同的 API,并且需要根据使用者执行不同的插件和上游配置时,使用 Consumer 是非常合适的。需要与用户身份验证系统结合使用。 + +目前,可以与 Consumer 配置的身份验证插件包括 `basic-auth` 、`hmac-auth`、`jwt-auth`、`key-auth`、`ldap-auth` 和 `wolf-rbac`。 + 你可以参考 [key-auth](../plugins/key-auth.md) 认证授权插件的调用逻辑,进一步理解 Consumer 概念和使用。 :::note 注意 diff --git a/rockspec/apisix-3.5.0-0.rockspec b/rockspec/apisix-3.5.0-0.rockspec new file mode 100644 index 000000000000..1c0ad6d8c1d3 --- /dev/null +++ b/rockspec/apisix-3.5.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "3.5.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.5.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.4", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 3.0.0", + "api7-lua-resty-jwt = 0.2.5", + "lua-resty-hmac-ffi = 0.06-1", + "lua-resty-cookie = 0.2.0-1", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.5.0-1", + "lua-resty-openidc = 1.7.6-3", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20230607-1", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.22-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.05-1", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.13.1", + "ext-plugin-proto = 0.6.0", + "casbin = 1.41.8-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0", + "lua-resty-t1k = 1.1.0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-3.6.0-0.rockspec b/rockspec/apisix-3.6.0-0.rockspec new file mode 100644 index 000000000000..71b4c35f8a5a --- /dev/null +++ b/rockspec/apisix-3.6.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "3.6.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.6.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.5", + "api7-lua-resty-http = 0.2.2-0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 3.0.0", + "api7-lua-resty-jwt = 0.2.5", + "lua-resty-hmac-ffi = 0.06-1", + "lua-resty-cookie = 0.2.0-1", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.5.0-1", + "lua-resty-openidc = 1.7.6-3", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.4", + "nginx-lua-prometheus = 0.20230607-1", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.22-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.05-1", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.13.1", + "ext-plugin-proto = 0.6.1", + "casbin = 1.41.8-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0", + "lua-resty-t1k = 1.1.0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec index 00c67f7bc135..d4e4b71a0db5 100644 --- a/rockspec/apisix-master-0.rockspec +++ b/rockspec/apisix-master-0.rockspec @@ -34,12 +34,13 @@ dependencies = { "lua-resty-ctxdump = 0.1-0", "api7-lua-resty-dns-client = 7.0.1", "lua-resty-template = 2.0", - "lua-resty-etcd = 1.10.4", - "api7-lua-resty-http = 0.2.0", + "lua-resty-etcd = 1.10.5", + "api7-lua-resty-http = 0.2.2-0", "lua-resty-balancer = 0.04", "lua-resty-ngxvar = 0.5.2", "lua-resty-jit-uuid = 0.0.7", - "lua-resty-healthcheck-api7 = 3.0.0", + "lua-resty-worker-events = 1.0.0", + "lua-resty-healthcheck-api7 = 3.2.0", "api7-lua-resty-jwt = 0.2.5", "lua-resty-hmac-ffi = 0.06-1", "lua-resty-cookie = 0.2.0-1", @@ -49,7 +50,7 @@ dependencies = { "lua-protobuf = 0.5.0-1", "lua-resty-openidc = 1.7.6-3", "luafilesystem = 1.7.0-2", - "api7-lua-tinyyaml = 0.4.2", + "api7-lua-tinyyaml = 0.4.4", "nginx-lua-prometheus = 0.20230607-1", "jsonschema = 0.9.8", "lua-resty-ipmatcher = 0.6.1", @@ -67,7 +68,7 @@ dependencies = { "luasec = 0.9-1", "lua-resty-consul = 0.3-2", "penlight = 1.13.1", - "ext-plugin-proto = 0.6.0", + "ext-plugin-proto = 0.6.1", "casbin = 1.41.8-1", "inspect == 3.1.1", "lualdap = 1.2.6-1", diff --git a/t/APISIX.pm b/t/APISIX.pm index 0738f3ecffd7..a8c49348f563 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -33,13 +33,6 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); $ENV{TEST_NGINX_FAST_SHUTDOWN} ||= 1; -Test::Nginx::Socket::set_http_config_filter(sub { - my $config = shift; - my $snippet = 
`$apisix_home/t/bin/gen_snippet.lua conf_server`; - $config .= $snippet; - return $config; -}); - sub read_file($) { my $infile = shift; open my $in, "$apisix_home/$infile" @@ -531,7 +524,7 @@ _EOC_ } apisix.http_init(args) - -- set apisix_lua_home into constans module + -- set apisix_lua_home into constants module -- it may be used by plugins to determine the work path of apisix local constants = require("apisix.constants") constants.apisix_lua_home = "$apisix_home" @@ -878,17 +871,6 @@ deployment: _EOC_ if ($yaml_config !~ m/deployment:/) { - # TODO: remove this temporary option once we have using gRPC by default - if ($ENV{TEST_CI_USE_GRPC}) { - $default_deployment .= <<_EOC_; - etcd: - host: - - "http://127.0.0.1:2379" - prefix: /apisix - use_grpc: true -_EOC_ - } - $yaml_config = $default_deployment . $yaml_config; } diff --git a/t/admin/consumer-group-force-delete.t b/t/admin/consumer-group-force-delete.t index 4b2fb2d09a67..d5e96c6f0c55 100644 --- a/t/admin/consumer-group-force-delete.t +++ b/t/admin/consumer-group-force-delete.t @@ -51,7 +51,7 @@ __DATA__ "count": 200, "time_window": 60, "rejected_code": 503, - "group": "$consumer_group_id" + "group": "consumer_group_1" } } }]] diff --git a/t/admin/consumers.t b/t/admin/consumers.t index 23f3cffcc2b1..916f2a960bb9 100644 --- a/t/admin/consumers.t +++ b/t/admin/consumers.t @@ -335,26 +335,6 @@ GET /t } --- request GET /t ---- response_body -passed - - - -=== TEST 11: delete test consumer(pony) ---- config - location /t { - content_by_lua_block { - ngx.sleep(0.3) - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/consumers/pony', - ngx.HTTP_DELETE - ) - - ngx.status = code - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ diff --git a/t/admin/protos-force-delete.t b/t/admin/protos-force-delete.t index 909128924bfe..db0e5d8ae5ea 100644 --- 
a/t/admin/protos-force-delete.t +++ b/t/admin/protos-force-delete.t @@ -90,7 +90,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] diff --git a/t/admin/routes4.t b/t/admin/routes4.t index ba9ff44142e3..0bab4506b520 100644 --- a/t/admin/routes4.t +++ b/t/admin/routes4.t @@ -790,21 +790,6 @@ passed ngx.say(body) } } ---- response_body -passed - - - -=== TEST 23: delete test route(id : 1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE - ) - ngx.say("[delete] code: ", code, " message: ", message) - } - } ---- response_body -[delete] code: 200 message: passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ diff --git a/t/admin/schema-validate.t b/t/admin/schema-validate.t new file mode 100644 index 000000000000..46f51021edfd --- /dev/null +++ b/t/admin/schema-validate.t @@ -0,0 +1,400 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("warn"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate ok +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "/httpbin/*", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 2: validate failed, wrong uri type +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": 666, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg": {"property \"uri\" validation failed: wrong type: expected string, got number"}} + + + +=== TEST 3: validate failed, length limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uri\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 4: validate failed, array type expected +--- config +location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": "foobar", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 5: validate failed, array size limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": [], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 6: validate failed, array unique items +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": ["/foo", "/foo"], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expected unique items but items 1 and 2 are equal"} + + + +=== TEST 7: validate failed, uri or uris is mandatory +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then 
+ ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 8: validate failed, enum check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "status": 3, + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"status\" validation failed: matches none of the enum values"} + + + +=== TEST 9: validate failed, wrong combination +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "script": "xxxxxxxxxxxxxxxxxxxxx", + "plugin_config_id": "foo" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 10: validate failed, id_schema check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "plugin_config_id": "@@@@@@@@@@@@@@@@", + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"plugin_config_id\" validation failed: object matches none of the required"} + + + +=== TEST 11: upstream ok +--- config +location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":{ + "nghttp2.org":100 + }, + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 12: upstream failed, wrong nodes format +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":[ + "nghttp2.org" + ], + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} diff --git a/t/admin/services.t b/t/admin/services.t index b2383ddc934b..90a5e9271a0f 100644 --- a/t/admin/services.t +++ b/t/admin/services.t @@ -1171,54 +1171,53 @@ GET /t local code, body = t('/apisix/admin/services/1', ngx.HTTP_PUT, [[{ - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - "update_time": 1602893670 - } - }]], - [[{ - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - "update_time": 1602893670 - } + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/services/1" - }]] - ) - + "type": "roundrobin" + }, + "create_time": 1602883670, + "update_time": 1602893670 + }]]) ngx.status = code ngx.say(body) } } --- request GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ -=== TEST 35: delete test service(id: 1) +=== TEST 35: create service and the built-in resource with create_time and update_time(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', 
ngx.HTTP_DELETE) - ngx.say("[delete] code: ", code, " message: ", message) + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + }, + "create_time": 1602883670, + "update_time": 1602893670 + } + }]]) + ngx.status = code + ngx.say(body) } } --- request GET /t ---- response_body -[delete] code: 200 message: passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ diff --git a/t/admin/ssl.t b/t/admin/ssl.t index 2e69e0b4671f..b03eb494f854 100644 --- a/t/admin/ssl.t +++ b/t/admin/ssl.t @@ -268,45 +268,7 @@ passed -=== TEST 9: store exptime ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin") - - local ssl_cert = t.read_file("t/certs/apisix.crt") - local ssl_key = t.read_file("t/certs/apisix.key") - local data = { - cert = ssl_cert, key = ssl_key, - sni = "bar.com", - exptime = 1588262400 + 60 * 60 * 24 * 365, - } - - local code, body = t.test('/apisix/admin/ssls/1', - ngx.HTTP_PUT, - core.json.encode(data), - [[{ - "value": { - "sni": "bar.com", - "exptime": 1619798400 - }, - "key": "/apisix/ssls/1" - }]] - ) - - ngx.status = code - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 10: string id +=== TEST 9: string id --- config location /t { content_by_lua_block { @@ -334,7 +296,7 @@ passed -=== TEST 11: string id(delete) +=== TEST 10: string id(delete) --- config location /t { content_by_lua_block { @@ -361,7 +323,7 @@ passed -=== TEST 12: invalid id +=== TEST 11: invalid id --- config location /t { content_by_lua_block { @@ -388,7 +350,7 @@ GET /t -=== TEST 13: set ssl with multicerts(id: 1) +=== TEST 12: set ssl with multicerts(id: 1) --- config location /t { content_by_lua_block { @@ -429,7 +391,7 @@ passed -=== TEST 14: mismatched certs and keys +=== TEST 13: mismatched certs and keys --- config location /t { 
content_by_lua_block { @@ -467,7 +429,7 @@ GET /t -=== TEST 15: set ssl(with labels) +=== TEST 14: set ssl(with labels) --- config location /t { content_by_lua_block { @@ -505,7 +467,7 @@ passed -=== TEST 16: invalid format of label value: set ssl +=== TEST 15: invalid format of label value: set ssl --- config location /t { content_by_lua_block { @@ -542,7 +504,7 @@ GET /t -=== TEST 17: create ssl with manage fields(id: 1) +=== TEST 16: create ssl with manage fields(id: 1) --- config location /t { content_by_lua_block { @@ -554,11 +516,7 @@ GET /t local data = { cert = ssl_cert, key = ssl_key, - sni = "test.com", - create_time = 1602883670, - update_time = 1602893670, - validity_start = 1602873670, - validity_end = 1603893670 + sni = "test.com" } local code, body = t.test('/apisix/admin/ssls/1', @@ -566,11 +524,7 @@ GET /t core.json.encode(data), [[{ "value": { - "sni": "test.com", - "create_time": 1602883670, - "update_time": 1602893670, - "validity_start": 1602873670, - "validity_end": 1603893670 + "sni": "test.com" }, "key": "/apisix/ssls/1" }]] @@ -587,7 +541,7 @@ passed -=== TEST 18: delete test ssl(id: 1) +=== TEST 17: delete test ssl(id: 1) --- config location /t { content_by_lua_block { @@ -603,7 +557,7 @@ GET /t -=== TEST 19: create/patch ssl +=== TEST 18: create/patch ssl --- config location /t { content_by_lua_block { @@ -668,7 +622,7 @@ passed -=== TEST 20: missing sni information +=== TEST 19: missing sni information --- config location /t { content_by_lua_block { @@ -699,7 +653,7 @@ GET /t -=== TEST 21: type client, missing sni information +=== TEST 20: type client, missing sni information --- config location /t { content_by_lua_block { diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t index 70c7a8ede858..b02a759796a6 100644 --- a/t/admin/upstream-array-nodes.t +++ b/t/admin/upstream-array-nodes.t @@ -251,7 +251,7 @@ no valid upstream node -=== TEST 9: additional properties is valid +=== TEST 9: additional properties is 
invalid --- config location /t { content_by_lua_block { @@ -277,8 +277,9 @@ no valid upstream node } --- request GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ diff --git a/t/admin/upstream.t b/t/admin/upstream.t index 7725e32b6070..b92a2f403f82 100644 --- a/t/admin/upstream.t +++ b/t/admin/upstream.t @@ -330,7 +330,7 @@ GET /t -=== TEST 11: additional properties is valid +=== TEST 11: additional properties is invalid --- config location /t { content_by_lua_block { @@ -354,8 +354,9 @@ GET /t } --- request GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ diff --git a/t/admin/upstream4.t b/t/admin/upstream4.t index b657edc6e731..411252486ca0 100644 --- a/t/admin/upstream4.t +++ b/t/admin/upstream4.t @@ -489,26 +489,13 @@ passed ngx.say(body) } } ---- response_body -passed - - - -=== TEST 15: delete test upstream ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/up_create_update_time', ngx.HTTP_DELETE) - ngx.say("[delete] code: ", code, " message: ", message) - } - } ---- response_body -[delete] code: 200 message: passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ -=== TEST 16: patch upstream with sub_path, the data is number +=== TEST 15: patch upstream with sub_path, the data is number --- config location /t { content_by_lua_block { @@ -551,7 +538,7 @@ passed -=== TEST 17: set upstream(id: 1) +=== TEST 16: set upstream(id: 1) --- config location /t { content_by_lua_block { @@ -575,7 +562,7 @@ passed -=== TEST 18: set service(id: 1) +=== TEST 17: set service(id: 1) --- config location /t { content_by_lua_block { @@ -598,7 +585,7 @@ passed -=== TEST 19: set route(id: 1) +=== TEST 18: set 
route(id: 1) --- config location /t { content_by_lua_block { @@ -622,7 +609,7 @@ passed -=== TEST 20: delete upstream(id: 1) +=== TEST 19: delete upstream(id: 1) --- config location /t { content_by_lua_block { @@ -637,7 +624,7 @@ passed -=== TEST 21: delete route(id: 1) +=== TEST 20: delete route(id: 1) --- config location /t { content_by_lua_block { @@ -652,7 +639,7 @@ passed -=== TEST 22: delete service(id: 1) +=== TEST 21: delete service(id: 1) --- config location /t { content_by_lua_block { @@ -667,7 +654,7 @@ passed -=== TEST 23: delete upstream(id: 1) +=== TEST 22: delete upstream(id: 1) --- config location /t { content_by_lua_block { diff --git a/t/bin/gen_snippet.lua b/t/bin/gen_snippet.lua deleted file mode 100755 index b2ab349be348..000000000000 --- a/t/bin/gen_snippet.lua +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env luajit --- --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. 
--- --- this script generates Nginx configuration in the test --- so we can test some features with test-nginx -local pkg_cpath_org = package.cpath -local pkg_path_org = package.path -local pkg_cpath = "deps/lib64/lua/5.1/?.so;deps/lib/lua/5.1/?.so;" -local pkg_path = "deps/share/lua/5.1/?.lua;" --- modify the load path to load our dependencies -package.cpath = pkg_cpath .. pkg_cpath_org -package.path = pkg_path .. pkg_path_org - - -local file = require("apisix.cli.file") -local schema = require("apisix.cli.schema") -local snippet = require("apisix.cli.snippet") -local util = require("apisix.cli.util") -local yaml_conf, err = file.read_yaml_conf("t/servroot") -if not yaml_conf then - error(err) -end - -if yaml_conf.deployment.role == "data_plane" and - yaml_conf.deployment.config_provider == "yaml" - or yaml_conf.deployment.config_provider == "xds" then - return -end - -local ok, err = schema.validate(yaml_conf) -if not ok then - error(err) -end - -local or_info, err = util.execute_cmd("openresty -V 2>&1") -if not or_info then - error("failed to exec cmd \'openresty -V 2>&1\', err: " .. 
err) -end - -local use_apisix_base = true -if not or_info:find("apisix-nginx-module", 1, true) then - use_apisix_base = false -end - -local res, err -if arg[1] == "conf_server" then - res, err = snippet.generate_conf_server( - { - apisix_home = "t/servroot/", - use_apisix_base = use_apisix_base, - }, - yaml_conf) -end - -if not res then - error(err or "none") -end -print(res) diff --git a/t/chaos/killetcd/killetcd.go b/t/chaos/killetcd/killetcd.go index 4f92cd9ccb38..069b66581263 100644 --- a/t/chaos/killetcd/killetcd.go +++ b/t/chaos/killetcd/killetcd.go @@ -140,17 +140,12 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { // fail to set route since etcd is all killed // while get route could still succeed ginkgo.It("get stats after kill etcd", func() { - timeStart := time.Now() utils.SetRoute(e, httpexpect.Status5xx) utils.GetRoute(eDataPanel, http.StatusOK) utils.TestPrometheusEtcdMetric(ePrometheus, 0) bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsAfter = bandwidthAfter / durationAfter - - errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) - gomega.Expect(err).To(gomega.BeNil()) - gomega.Ω(errorLog).Should(gomega.ContainSubstring("invalid response code: 502")) }) ginkgo.It("ingress bandwidth per second not change much", func() { diff --git a/t/chaos/utils/Dockerfile b/t/chaos/utils/Dockerfile index 9ab2a0509112..5d5ba6dac932 100644 --- a/t/chaos/utils/Dockerfile +++ b/t/chaos/utils/Dockerfile @@ -17,7 +17,7 @@ ARG ENABLE_PROXY=false -FROM openresty/openresty:1.21.4.1-alpine-fat AS production-stage +FROM openresty/openresty:1.21.4.2-alpine-fat AS production-stage ARG ENABLE_PROXY ARG APISIX_PATH @@ -36,11 +36,6 @@ RUN set -x \ sudo \ && cd apisix \ && git config --global url.https://github.com/.insteadOf git://github.com/ \ - && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y \ - && source "$HOME/.cargo/env" \ - && export 
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \ - # next line is for rust cdylib compile on musl - && export RUSTFLAGS="-C target-feature=-crt-static" \ && make deps \ && cp -v bin/apisix /usr/bin/ \ && mv ../apisix /usr/local/apisix \ diff --git a/t/cli/test_access_log.sh b/t/cli/test_access_log.sh index 58faba74e527..01dda603a285 100755 --- a/t/cli/test_access_log.sh +++ b/t/cli/test_access_log.sh @@ -91,12 +91,6 @@ if [ $count_test_access_log -eq 1 ]; then exit 1 fi -count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -ne 5 ]; then - echo "failed: nginx.conf file doesn't find access_log off; when disable access log" - exit 1 -fi - make run sleep 0.1 curl http://127.0.0.1:9080/hi diff --git a/t/cli/test_cmd.sh b/t/cli/test_cmd.sh index c02b4dbeed64..81864aeb9a18 100755 --- a/t/cli/test_cmd.sh +++ b/t/cli/test_cmd.sh @@ -21,34 +21,6 @@ git checkout conf/config.yaml -# remove stale conf server sock -touch conf/config_listen.sock -./bin/apisix start -sleep 0.5 -./bin/apisix stop -sleep 0.5 - -if [ -e conf/config_listen.sock ]; then - echo "failed: should remove stale conf server sock" - exit 1 -fi - -# don't remove stale conf server sock when APISIX is running -./bin/apisix start -sleep 0.5 -./bin/apisix start -sleep 0.5 - -if [ ! -e conf/config_listen.sock ]; then - echo "failed: should not remove stale conf server sock" - exit 1 -fi - -./bin/apisix stop -sleep 0.5 - -echo "passed: stale conf server sock removed" - # check restart with old nginx.pid exist echo "-1" > logs/nginx.pid out=$(./bin/apisix start 2>&1 || true) @@ -115,7 +87,7 @@ deployment: # check if .customized_config_path has been created if [ ! 
-e conf/.customized_config_path ]; then rm conf/customized_config.yaml - echo ".config_path file should exits" + echo ".customized_config_path should exits" exit 1 fi @@ -130,9 +102,9 @@ fi make stop # check if .customized_config_path has been removed -if [ -e conf/.config_path ]; then +if [ -e conf/.customized_config_path ]; then rm conf/customized_config_path.yaml - echo ".config_path file should be removed" + echo ".customized_config_path should be removed" exit 1 fi diff --git a/t/cli/test_deployment_control_plane.sh b/t/cli/test_deployment_control_plane.sh index 58858f41fd1c..ed3a062a8117 100755 --- a/t/cli/test_deployment_control_plane.sh +++ b/t/cli/test_deployment_control_plane.sh @@ -19,57 +19,6 @@ . ./t/cli/common.sh -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - cert: t/certs/mtls_server.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep 'property "cert_key" is required'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -apisix: - enable_admin: false -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - admin: - https_admin: "abc" - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep 'property "https_admin" validation failed: wrong type: expected boolean, got string'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - # The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns echo ' apisix: @@ -78,23 +27,16 @@ deployment: role: control_plane role_control_plane: config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key etcd: prefix: "/apisix" host: - http://127.0.0.1:2379 - certs: - trusted_ca_cert: t/certs/mtls_ca.crt ' > conf/config.yaml make run sleep 1 code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop if [ ! $code -eq 200 ]; then echo "failed: control_plane should enable Admin API" @@ -103,40 +45,6 @@ fi echo "passed: control_plane should enable Admin API" -# use https -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') - -if [ ! 
$code -eq 200 ]; then - make stop - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd in control plane" - curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { diff --git a/t/cli/test_deployment_data_plane.sh b/t/cli/test_deployment_data_plane.sh index ef5ef61e37c6..b7edb0e2411b 100755 --- a/t/cli/test_deployment_data_plane.sh +++ b/t/cli/test_deployment_data_plane.sh @@ -27,14 +27,14 @@ echo ' deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://127.0.0.1:12379 - prefix: "/apisix" - timeout: 30 - tls: - verify: false + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 + tls: + verify: false ' > conf/config.yaml make run @@ -64,12 +64,12 @@ echo ' deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://127.0.0.1:12379 - prefix: "/apisix" - timeout: 30 + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 ' > conf/config.yaml out=$(make run 2>&1 || true) diff --git a/t/cli/test_deployment_mtls.sh b/t/cli/test_deployment_mtls.sh deleted file mode 100755 index 5fa4c6984a21..000000000000 --- a/t/cli/test_deployment_mtls.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -# use mTLS -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! 
$code -eq 200 ]; then - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd in control plane" - -echo ' -deployment: - role: data_plane - role_data_plane: - config_provider: control_plane - control_plane: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - timeout: 30 - tls: - verify: false - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -rm logs/error.log -make run -sleep 1 - -make stop - -if grep '\[error\] .\+ https://admin.apisix.dev:22379' logs/error.log; then - echo "failed: work well with control plane in data plane" - exit 1 -fi - -echo "passed: work well with control plane in data plane" diff --git a/t/cli/test_deployment_traditional.sh b/t/cli/test_deployment_traditional.sh index 2699c3d2aecd..24996eb3b947 100755 --- a/t/cli/test_deployment_traditional.sh +++ b/t/cli/test_deployment_traditional.sh @@ -100,57 +100,6 @@ fi echo "passed: could connect to etcd" -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - https://127.0.0.1:2379 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep 'all nodes in the etcd cluster should enable/disable TLS together'; then - echo "failed: should validate etcd host" - exit 1 -fi - -echo "passed: validate etcd host" - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns - -# etcd mTLS verify -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false - ' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! 
$code -eq 200 ]; then - echo "failed: could not work when mTLS is enabled" - exit 1 -fi - -echo "passed: etcd enables mTLS successfully" - echo ' deployment: role: traditional diff --git a/t/cli/test_etcd_grpc.sh b/t/cli/test_etcd_grpc.sh deleted file mode 100755 index 47266c3e4e97..000000000000 --- a/t/cli/test_etcd_grpc.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. 
./t/cli/common.sh - -exit_if_not_customed_nginx - -# check etcd while enable auth -git checkout conf/config.yaml - -export ETCDCTL_API=3 -etcdctl version -etcdctl --endpoints=127.0.0.1:2379 user add "root:apache-api6" -etcdctl --endpoints=127.0.0.1:2379 role add root -etcdctl --endpoints=127.0.0.1:2379 user grant-role root root -etcdctl --endpoints=127.0.0.1:2379 user get root -etcdctl --endpoints=127.0.0.1:2379 auth enable -etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - use_grpc: true - user: root - password: apache-api6 -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! $code -eq 200 ]; then - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd auth enabled" - -etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 auth disable -etcdctl --endpoints=127.0.0.1:2379 role delete root -etcdctl --endpoints=127.0.0.1:2379 user delete root - -# check connect to etcd with ipv6 address in cli -git checkout conf/config.yaml - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://[::1]:2379 - prefix: /apisix - use_grpc: true - timeout: 30 -' > conf/config.yaml - -rm logs/error.log || true -make run -sleep 0.1 - -if grep "update endpoint: http://\[::1\]:2379 to unhealthy" logs/error.log; then - echo "failed: connect to etcd via ipv6 address failed" - exit 1 -fi - -if grep "host or service not provided, or not known" logs/error.log; then - echo "failed: luasocket resolve ipv6 addresses failed" - exit 1 -fi - -make stop - -echo "passed: connect to etcd via ipv6 address successfully" diff --git a/t/cli/test_etcd_grpc_healthcheck.sh 
b/t/cli/test_etcd_grpc_healthcheck.sh deleted file mode 100755 index 3a977e0265f1..000000000000 --- a/t/cli/test_etcd_grpc_healthcheck.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. 
./t/cli/common.sh - -exit_if_not_customed_nginx - -custom_clean_up() { - clean_up - - # stop etcd docker container - docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml down -} - -trap custom_clean_up EXIT - -export ETCD_ENABLE_GRPC_GATEWAY=false - -# create 3 node etcd cluster in docker -ETCD_NAME_0=etcd0 -ETCD_NAME_1=etcd1 -ETCD_NAME_2=etcd2 -HEALTH_CHECK_RETRY_TIMEOUT=10 - -if [ -f "logs/error.log" ]; then - rm logs/error.log -fi -touch logs/error.log - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "http://0.0.0.0:23790" - - "http://0.0.0.0:23791" - - "http://0.0.0.0:23792" - health_check_timeout: '"$HEALTH_CHECK_RETRY_TIMEOUT"' - timeout: 2 -' > conf/config.yaml - -docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d - -# case 1: Check apisix not got effected when one etcd node disconnected -make init && make run - -docker stop ${ETCD_NAME_0} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! $code -eq 200 ]; then - echo "failed: apisix got effect when one etcd node out of a cluster disconnected" - exit 1 -fi -docker start ${ETCD_NAME_0} - -docker stop ${ETCD_NAME_1} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! 
$code -eq 200 ]; then - echo "failed: apisix got effect when one etcd node out of a cluster disconnected" - exit 1 -fi -docker start ${ETCD_NAME_1} - -make stop - -echo "passed: apisix not got effected when one etcd node disconnected" - -# case 2: Check when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected -make init && make run - -docker stop ${ETCD_NAME_0} && docker stop ${ETCD_NAME_1} && docker stop ${ETCD_NAME_2} - -sleep_till=$(date +%s -d "$DATE + $HEALTH_CHECK_RETRY_TIMEOUT second") - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ $code -eq 200 ]; then - echo "failed: apisix not got effect when all etcd nodes disconnected" - exit 1 -fi - -docker start ${ETCD_NAME_0} && docker start ${ETCD_NAME_1} && docker start ${ETCD_NAME_2} - -# case 3: sleep till etcd health check try to check again -current_time=$(date +%s) -sleep_seconds=$(( $sleep_till - $current_time + 3)) -if [ "$sleep_seconds" -gt 0 ]; then - sleep $sleep_seconds -fi - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! 
$code -eq 200 ]; then - echo "failed: apisix could not recover when etcd node recover" - docker ps - cat logs/error.log - exit 1 -fi - -make stop - -echo "passed: when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected" - -# case 4: stop one etcd node (result: start successful) -docker stop ${ETCD_NAME_0} - -out=$(make init 2>&1) -if echo "$out" | grep "23790" | grep "connection refused"; then - echo "passed: APISIX successfully to start, stop only one etcd node" -else - echo "failed: stop only one etcd node APISIX should start normally" - exit 1 -fi - -# case 5: stop two etcd nodes (result: start failure) -docker stop ${ETCD_NAME_1} - -out=$(make init 2>&1 || true) -if echo "$out" | grep "23791" | grep "connection refused"; then - echo "passed: APISIX failed to start, etcd cluster must have two or more healthy nodes" -else - echo "failed: two etcd nodes have been stopped, APISIX should fail to start" - exit 1 -fi - -# case 6: stop all etcd nodes (result: start failure) -docker stop ${ETCD_NAME_2} - -out=$(make init 2>&1 || true) -if echo "$out" | grep "23792" | grep "connection refused"; then - echo "passed: APISIX failed to start, all etcd nodes have stopped" -else - echo "failed: all etcd nodes have stopped, APISIX should not be able to start" - exit 1 -fi diff --git a/t/cli/test_etcd_grpc_mtls.sh b/t/cli/test_etcd_grpc_mtls.sh deleted file mode 100755 index 90c151a62d7a..000000000000 --- a/t/cli/test_etcd_grpc_mtls.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns - -# etcd mTLS verify -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "bad certificate"; then - echo "failed: apisix should not echo \"bad certificate\"" - exit 1 -fi - -echo "passed: certificate verify success expectedly" - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if ! 
echo "$out" | grep "bad certificate"; then - echo "failed: apisix should echo \"bad certificate\"" - exit 1 -fi - -echo "passed: certificate verify fail expectedly" - -# etcd mTLS verify with CA -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -if echo "$out" | grep "ouch"; then - echo "failed: apisix should not fail" - exit 1 -fi - -echo "passed: certificate verify with CA success expectedly" - -# etcd mTLS in stream subsystem -echo ' -apisix: - proxy_mode: http&stream - stream_proxy: - tcp: - - addr: 9100 - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -if echo "$out" | grep "ouch"; then - echo "failed: apisix should not fail" - exit 1 -fi - -rm logs/error.log || true -make run -sleep 1 -make stop - -if grep "\[error\]" logs/error.log; then - echo "failed: veirfy etcd certificate during sync should not fail" -fi - -echo "passed: certificate verify in stream subsystem successfully" - -# use host in etcd.host as sni by default -git checkout conf/config.yaml -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - 
role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.2:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -rm logs/error.log || true -make init -make run -sleep 1 -make stop - -if ! grep -E "cannot validate certificate for 127.0.0.2 because it doesn't contain any IP SANs" logs/error.log; then - echo "failed: should got certificate host mismatch when use host in etcd.host as sni" - exit 1 -fi - - -echo "passed: use host in etcd.host as sni by default" diff --git a/t/cli/test_etcd_grpc_tls.sh b/t/cli/test_etcd_grpc_tls.sh deleted file mode 100755 index 9e429e41b373..000000000000 --- a/t/cli/test_etcd_grpc_tls.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. 
./t/cli/common.sh - -exit_if_not_customed_nginx - -# Check etcd tls verify failure -git checkout conf/config.yaml - -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" - ' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should echo \"certificate verify failed\"" - exit 1 -fi - -echo "passed: Show certificate verify failed info successfully" - - -# Check etcd tls without verification -git checkout conf/config.yaml - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" - tls: - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || true) -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -echo "passed: Certificate verification successfully" diff --git a/t/cli/test_etcd_mtls.sh b/t/cli/test_etcd_mtls.sh index 5d0152ff64f1..a05dcdb784ca 100755 --- a/t/cli/test_etcd_mtls.sh +++ b/t/cli/test_etcd_mtls.sh @@ -168,7 +168,7 @@ make run sleep 1 make stop -if ! grep -E 'upstream SSL certificate does not match \"127.0.0.1\" while SSL handshaking to upstream' logs/error.log; then +if ! grep -F 'certificate host mismatch' logs/error.log; then echo "failed: should got certificate host mismatch when use host in etcd.host as sni" exit 1 fi diff --git a/t/cli/test_opentelemetry_set_ngx_var.sh b/t/cli/test_opentelemetry_set_ngx_var.sh new file mode 100755 index 000000000000..8db6ea420db2 --- /dev/null +++ b/t/cli/test_opentelemetry_set_ngx_var.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +plugins: + - opentelemetry +plugin_attr: + opentelemetry: + set_ngx_var: true +' > conf/config.yaml + +make init + +if ! grep "set \$opentelemetry_context_traceparent '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_context_traceparent not found in nginx.conf" + exit 1 +fi + +if ! grep "set \$opentelemetry_trace_id '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_trace_id not found in nginx.conf" + exit 1 +fi + +if ! grep "set \$opentelemetry_span_id '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_span_id not found in nginx.conf" + exit 1 +fi + + +echo "passed: opentelemetry_set_ngx_var configuration is validated" diff --git a/t/cli/test_route_match_with_graphql.sh b/t/cli/test_route_match_with_graphql.sh new file mode 100755 index 000000000000..c67027748146 --- /dev/null +++ b/t/cli/test_route_match_with_graphql.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml + +apisix: + router: + http: radixtree_uri + +nginx_config: + worker_processes: 1 + +' > conf/config.yaml + +echo ' +routes: + - uri: "/hello" + hosts: + - test.com + vars: + - - "graphql_name" + - "==" + - "createAccount" + priority: 30 + id: "graphql1" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test.com + plugins: + echo: + body: "test server" + priority: 20 + id: "graphql2" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test2.com + plugins: + echo: + body: "test2" + priority: 20 + id: "graphql3" + upstream_id: "invalid" + +upstreams: + - nodes: + 127.0.0.1:1999: 1 + id: "invalid" +#END +' > conf/apisix.yaml + +make run + +dd if=/dev/urandom of=tmp_data.json bs=300K count=1 + +for i in {1..100}; do + curl -s http://127.0.0.1:9080/hello -H "Host: test.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request1.txt & + curl -s http://127.0.0.1:9080/hello -H "Host: test2.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request2.txt & + + wait + + if diff /tmp/graphql_request1.txt /tmp/graphql_request2.txt > /dev/null; then + make stop + echo "failed: route match error in GraphQL requests, route should not be the same" + exit 1 + fi +done + +make stop + +rm tmp_data.json /tmp/graphql_request1.txt 
/tmp/graphql_request2.txt + +echo "passed: GraphQL requests can be correctly matched to the route" diff --git a/t/cli/test_zipkin_set_ngx_var.sh b/t/cli/test_zipkin_set_ngx_var.sh new file mode 100755 index 000000000000..3ddd0215524f --- /dev/null +++ b/t/cli/test_zipkin_set_ngx_var.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +' > conf/config.yaml + +make init + +if ! grep "set \$zipkin_context_traceparent '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_context_traceparent not found in nginx.conf" + exit 1 +fi + +if ! grep "set \$zipkin_trace_id '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_trace_id not found in nginx.conf" + exit 1 +fi + +if ! 
grep "set \$zipkin_span_id '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_span_id not found in nginx.conf" + exit 1 +fi + + +echo "passed: zipkin_set_ngx_var configuration is validated" diff --git a/t/control/discovery.t b/t/control/discovery.t index c548b3697563..7bf81b15ee94 100644 --- a/t/control/discovery.t +++ b/t/control/discovery.t @@ -77,7 +77,7 @@ GET /t --- error_code: 200 --- response_body {} -{"fetch_interval":3,"keepalive":true,"prefix":"upstreams","servers":["http://127.0.0.1:8500","http://127.0.0.1:8600"],"timeout":{"connect":2000,"read":2000,"wait":60},"weight":1} +{"fetch_interval":3,"keepalive":true,"prefix":"upstreams","servers":["http://127.0.0.1:8500","http://127.0.0.1:8600"],"timeout":{"connect":2000,"read":2000,"wait":60},"token":"","weight":1} --- error_log connect consul diff --git a/t/control/healthcheck.t b/t/control/healthcheck.t index 5d40e970739b..9673ab917ba5 100644 --- a/t/control/healthcheck.t +++ b/t/control/healthcheck.t @@ -51,7 +51,7 @@ routes: upstreams: - nodes: "127.0.0.1:1980": 1 - "127.0.0.2:1988": 1 + "127.0.0.2:1988": 0 type: roundrobin id: 1 checks: @@ -120,11 +120,11 @@ upstreams: --- grep_error_log eval qr/unhealthy TCP increment \(.+\) for '[^']+'/ --- grep_error_log_out -unhealthy TCP increment (1/2) for '(127.0.0.2:1988)' -unhealthy TCP increment (2/2) for '(127.0.0.2:1988)' +unhealthy TCP increment (1/2) for '127.0.0.2(127.0.0.2:1988)' +unhealthy TCP increment (2/2) for '127.0.0.2(127.0.0.2:1988)' --- response_body -[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] 
-[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t index 2bdddcdbdb52..d81cf5615a2b 100644 --- a/t/core/config_etcd.t +++ b/t/core/config_etcd.t @@ -84,9 +84,9 @@ end --- request GET /t --- grep_error_log chop -peer closed connection in SSL handshake while SSL handshaking to upstream +peer closed connection in SSL handshake --- grep_error_log_out eval -qr/(peer closed connection in SSL handshake while SSL handshaking to upstream){1,}/ +qr/(peer closed connection in SSL handshake){1,}/ @@ -118,42 +118,7 @@ qr/(closed){1,}/ -=== TEST 4: originate TLS connection to etcd cluster and verify TLS certificate (default behavior) ---- yaml_config -apisix: - node_listen: 1984 - ssl: - ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "https://127.0.0.1:12379" ---- extra_init_by_lua -local health_check = require("resty.etcd.health_check") -health_check.get_target_status = function() - return true -end ---- config - location /t { - content_by_lua_block { - ngx.sleep(4) - ngx.say("ok") - } - } ---- timeout: 5 ---- 
request -GET /t ---- grep_error_log chop -10:certificate has expired ---- grep_error_log_out eval -qr/(10:certificate has expired){1,}/ - - - -=== TEST 5: set route(id: 1) to etcd cluster with TLS +=== TEST 4: set route(id: 1) to etcd cluster with TLS --- yaml_config apisix: node_listen: 1984 @@ -197,7 +162,7 @@ passed -=== TEST 6: get route(id: 1) from etcd cluster with TLS +=== TEST 5: get route(id: 1) from etcd cluster with TLS --- yaml_config apisix: node_listen: 1984 @@ -233,7 +198,7 @@ passed -=== TEST 7: ensure only one auth request per subsystem for all the etcd sync +=== TEST 6: ensure only one auth request per subsystem for all the etcd sync --- yaml_config apisix: node_listen: 1984 @@ -269,7 +234,7 @@ etcd auth failed -=== TEST 8: ensure add prefix automatically for _M.getkey +=== TEST 7: ensure add prefix automatically for _M.getkey --- config location /t { content_by_lua_block { @@ -300,7 +265,7 @@ passed -=== TEST 9: Test ETCD health check mode switch during APISIX startup +=== TEST 8: Test ETCD health check mode switch during APISIX startup --- config location /t { content_by_lua_block { @@ -319,7 +284,7 @@ qr/healthy check use round robin -=== TEST 10: last_err can be nil when the reconnection is successful +=== TEST 9: last_err can be nil when the reconnection is successful --- config location /t { content_by_lua_block { @@ -349,7 +314,7 @@ passed -=== TEST 11: reloaded data may be in res.body.node (special kvs structure) +=== TEST 10: reloaded data may be in res.body.node (special kvs structure) --- yaml_config deployment: role: traditional @@ -396,7 +361,7 @@ qr/readdir key: fake res: \{("value":"bar","key":"foo"|"key":"foo","value":"bar" -=== TEST 12: reloaded data may be in res.body.node (admin_api_version is v2) +=== TEST 11: reloaded data may be in res.body.node (admin_api_version is v2) --- yaml_config deployment: role: traditional @@ -446,7 +411,7 @@ qr/readdir key: fake res: \{.*"nodes":\[\{.*"value":\["bar"\].*\}\].*\}/ -=== TEST 13: test 
route with special character "-" +=== TEST 12: test route with special character "-" --- yaml_config deployment: role: traditional diff --git a/t/core/ctx2.t b/t/core/ctx2.t index a99844ffd0d2..7782ac9125cd 100644 --- a/t/core/ctx2.t +++ b/t/core/ctx2.t @@ -292,7 +292,20 @@ find ctx.req_post_args.test: true -=== TEST 13: missed (post_arg_test is missing) +=== TEST 13: hit with charset +--- request +POST /hello +test=test +--- more_headers +Content-Type: application/x-www-form-urlencoded;charset=utf-8 +--- response_body +hello world +--- error_log +find ctx.req_post_args.test: true + + + +=== TEST 14: missed (post_arg_test is missing) --- request POST /hello --- more_headers @@ -303,7 +316,7 @@ Content-Type: application/x-www-form-urlencoded -=== TEST 14: missed (post_arg_test is mismatch) +=== TEST 15: missed (post_arg_test is mismatch) --- request POST /hello test=tesy @@ -315,7 +328,7 @@ Content-Type: application/x-www-form-urlencoded -=== TEST 15: register custom variable +=== TEST 16: register custom variable --- config location /t { content_by_lua_block { @@ -351,7 +364,7 @@ Content-Type: application/x-www-form-urlencoded -=== TEST 16: hit +=== TEST 17: hit --- config location /t { content_by_lua_block { @@ -375,7 +388,7 @@ find ctx.var.a6_labels_zone: Singapore -=== TEST 17: register custom variable with no cacheable +=== TEST 18: register custom variable with no cacheable --- config location /t { content_by_lua_block { @@ -412,7 +425,7 @@ find ctx.var.a6_labels_zone: Singapore -=== TEST 18: hit +=== TEST 19: hit --- config location /t { content_by_lua_block { diff --git a/t/core/etcd-grpc-auth-fail.t b/t/core/etcd-grpc-auth-fail.t deleted file mode 100644 index b11f51ae24bf..000000000000 --- a/t/core/etcd-grpc-auth-fail.t +++ /dev/null @@ -1,106 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -BEGIN { - $ENV{"ETCD_ENABLE_AUTH"} = "false"; - delete $ENV{"FLUSH_ETCD"}; -} - -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - - -repeat_each(1); -no_long_string(); -no_root_location(); -log_level("info"); - -# Authentication is enabled at etcd and credentials are set -system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user user list'); -# Grant the user access to the specified directory -system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); -system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix/'); -system('etcdctl 
--endpoints="http://127.0.0.1:2379" auth enable'); - -run_tests; - -# Authentication is disabled at etcd -system('etcdctl --endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); -__DATA__ - -=== TEST 1: Set and Get a value pass ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- request -GET /t ---- error_log eval -qr /(insufficient credentials code: 401|etcdserver: user name is empty)/ - - - -=== TEST 2: etcd grants permissions with a different prefix than the one used by apisix, etcd will forbidden ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "http://127.0.0.1:2379" - use_grpc: false - prefix: "/apisix" - user: apisix - password: abc123 ---- request -GET /t ---- error_log eval -qr /etcd forbidden code: 403/ diff --git a/t/core/etcd-grpc-auth.t b/t/core/etcd-grpc-auth.t deleted file mode 100644 index 12e2ce28079a..000000000000 --- a/t/core/etcd-grpc-auth.t +++ /dev/null @@ -1,108 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -BEGIN { - $ENV{"ETCD_ENABLE_AUTH"} = "true"; - delete $ENV{"FLUSH_ETCD"}; -} - -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - - -repeat_each(1); -no_long_string(); -no_root_location(); -log_level("info"); - -# Authentication is enabled at etcd and credentials are set -system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user user list'); -# Grant the user access to the specified directory -system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); -system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" auth enable'); - -run_tests; - -# Authentication is disabled at etcd -system('etcdctl 
--endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); - - -__DATA__ - -=== TEST 1: Set and Get a value pass with authentication ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - core.etcd.set(key, val) - local res, err = core.etcd.get(key) - ngx.say(res.body.node.value) - core.etcd.delete(val) - } - } ---- request -GET /t ---- response_body -test_value - - - -=== TEST 2: etcd grants permissions with the same prefix as apisix uses, etcd is normal ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 ---- request -GET /t diff --git a/t/core/etcd-grpc-mtls.t b/t/core/etcd-grpc-mtls.t deleted file mode 100644 index 4e9ca306edf2..000000000000 --- a/t/core/etcd-grpc-mtls.t +++ /dev/null @@ -1,292 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -run_tests; - -__DATA__ - -=== TEST 1: run etcd in init_worker phase ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - - local out = "" - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.body.node.value - - local res, err = etcd.delete("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.status - - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. 
res.status - ngx.log(ngx.WARN, out) ---- config - location /t { - return 200; - } ---- request -GET /t ---- grep_error_log eval -qr/init_worker_by_lua:\d+: [^,]+/ ---- grep_error_log_out -init_worker_by_lua:31: ab200404 - - - -=== TEST 2: run etcd in init phase (stream) ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- stream_extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - - local out = "" - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.body.node.value - - local res, err = etcd.delete("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.status - - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. 
res.status - ngx.log(ngx.WARN, out) ---- stream_server_config - content_by_lua_block { - ngx.say("ok") - } ---- stream_enable ---- grep_error_log eval -qr/init_worker_by_lua:\d+: \S+/ ---- grep_error_log_out -init_worker_by_lua:31: ab200404, - - - -=== TEST 3: sync ---- extra_yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- request -GET /t ---- response_body -prev_index updated ---- error_log -waitdir key - - - -=== TEST 4: sync (stream) ---- extra_yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- stream_server_config - content_by_lua_block { - local core = require("apisix.core") - - local sr, _ = core.config.new("/stream_routes", { - automatic = true, - item_schema = core.schema.stream_routes, - }) - - ngx.sleep(0.6) - local idx = sr.prev_index - - assert(core.etcd.set("/stream_routes/1", - { - plugins = { - } - })) - - ngx.sleep(2) - local new_idx = sr.prev_index - if new_idx > idx then - 
ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } ---- stream_enable ---- stream_response -prev_index updated ---- error_log -waitdir key - - - -=== TEST 5: ssl_trusted_certificate ---- yaml_config -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key ---- extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - ngx.log(ngx.WARN, res.body.node.value) ---- config - location /t { - return 200; - } ---- request -GET /t ---- error_log -init_worker_by_lua:14: ab diff --git a/t/core/etcd.t b/t/core/etcd.t index c0715de7b06b..670ada081ab2 100644 --- a/t/core/etcd.t +++ b/t/core/etcd.t @@ -396,8 +396,8 @@ ab --- request GET /t --- grep_error_log eval -qr/init_by_lua:\d+: \S+/ ---- grep_error_log_out -init_by_lua:12: ab -init_by_lua:19: 200 -init_by_lua:26: 404 +qr/init_by_lua.*: \S+/ +--- grep_error_log_out eval +qr{init_by_lua.* ab +init_by_lua.* 200 +init_by_lua.* 404} diff --git a/t/core/grpc.t b/t/core/grpc.t index 3be331a54245..bd52d9e13409 100644 --- a/t/core/grpc.t +++ b/t/core/grpc.t @@ -44,7 +44,7 @@ __DATA__ local core = require "apisix.core" local gcli = core.grpc assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) - local conn = assert(gcli.connect("127.0.0.1:50051")) + local conn = assert(gcli.connect("127.0.0.1:10051")) local res, err = conn:call("helloworld.Greeter", "SayHello", { name = "apisix" }) conn:close() @@ -68,7 +68,7 @@ Hello apisix local core = require "apisix.core" local gcli = core.grpc assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) - local conn = 
assert(gcli.connect("127.0.0.1:50051")) + local conn = assert(gcli.connect("127.0.0.1:10051")) local st, err = conn:new_server_stream("helloworld.Greeter", "SayHelloServerStream", { name = "apisix" }) if not st then @@ -100,7 +100,7 @@ Hello apisix local core = require "apisix.core" local gcli = core.grpc assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) - local conn = assert(gcli.connect("127.0.0.1:50051")) + local conn = assert(gcli.connect("127.0.0.1:10051")) local st, err = conn:new_client_stream("helloworld.Greeter", "SayHelloClientStream", { name = "apisix" }) if not st then @@ -139,7 +139,7 @@ Hello apisix!Hello apisix!Hello apisix!Hello apisix! local core = require "apisix.core" local gcli = core.grpc assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) - local conn = assert(gcli.connect("127.0.0.1:50051")) + local conn = assert(gcli.connect("127.0.0.1:10051")) local st, err = conn:new_bidirectional_stream("helloworld.Greeter", "SayHelloBidirectionalStream", { name = "apisix" }) if not st then diff --git a/t/core/schema.t b/t/core/schema.t index b32297c9ad29..2cf2793f35ab 100644 --- a/t/core/schema.t +++ b/t/core/schema.t @@ -121,6 +121,7 @@ passed: 30 ngx.say("passed") } } +--- timeout: 15 --- request GET /t --- response_body diff --git a/t/core/schema_def.t b/t/core/schema_def.t index b6a7bba05b0c..da3bb51f8b26 100644 --- a/t/core/schema_def.t +++ b/t/core/schema_def.t @@ -139,3 +139,101 @@ qr/ok: false err: property "(id|plugins)" is required/ GET /t --- response_body passed + + + +=== TEST 4: sanity check upstream_schema +--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1, + 
client_cert = ssl_cert, + client_key = ssl_key + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1 + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert = ssl_cert, + client_key = ssl_key + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert = ssl_cert + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1, + client_key = ssl_key + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + ngx.say("passed") + } + } +--- response_body +passed diff --git a/t/deployment/conf_server.t b/t/deployment/conf_server.t deleted file mode 100644 index 2e15ed8bdc61..000000000000 --- a/t/deployment/conf_server.t +++ /dev/null @@ -1,467 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -worker_connections(256); - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: sync in https ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- response_body -prev_index updated ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - admin: - admin_key: ~ - etcd: - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - tls: - verify: false - - - -=== TEST 2: mix ip & domain ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.2:2379 - - 
http://localhost:2379 - - http://[::1]:2379 ---- error_log -dns resolve localhost, result: ---- response_body -foo - - - -=== TEST 3: resolve domain, result changed ---- extra_init_by_lua - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - local counter = 0 - resolver.parse_domain = function (domain) - if domain == "localhost" then - counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - # use localhost so the connection is OK in the situation that the DNS - # resolve is not done in APISIX - - http://localhost:2379 ---- response_body -foo ---- error_log -localhost is resolved to: 127.0.0.3 -localhost is resolved to: 127.0.0.2 - - - -=== TEST 4: update balancer if the DNS result changed ---- extra_init_by_lua - local etcd = require("apisix.core.etcd") - etcd.get_etcd_syncer = function () - return etcd.new() - end - - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - package.loaded.counter = 0 - resolver.parse_domain = function (domain) - if domain == "x.com" then - local counter = package.loaded.counter - package.loaded.counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end - - local picker = require("apisix.balancer.least_conn") - package.loaded.n_picker = 0 - local old_f = picker.new - picker.new = function (nodes, upstream) - package.loaded.n_picker = package.loaded.n_picker + 1 - return old_f(nodes, upstream) - end ---- config - location /t { - 
content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - local counter = package.loaded.counter - local n_picker = package.loaded.n_picker - if counter == n_picker then - ngx.say("OK") - else - ngx.say(counter, " ", n_picker) - end - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://x.com:2379 ---- response_body -foo -OK ---- error_log -x.com is resolved to: 127.0.0.3 -x.com is resolved to: 127.0.0.2 - - - -=== TEST 5: retry ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://[::1]:1979 - - http://localhost:2379 ---- error_log -connect() failed ---- response_body -foo - - - -=== TEST 6: check default SNI ---- http_config -server { - listen 12345 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://localhost:12345 - tls: - verify: false ---- error_log 
-Receive SNI: localhost - - - -=== TEST 7: check configured SNI ---- http_config -server { - listen 12345 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://127.0.0.1:12345 - tls: - verify: false - sni: "x.com" ---- error_log -Receive SNI: x.com - - - -=== TEST 8: check Host header ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:12345 - - http://localhost:12345 ---- error_log -Receive Host: localhost -Receive Host: 127.0.0.1 - - - -=== TEST 9: check Host header after retry ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = 
assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://localhost:12345 ---- error_log -Receive Host: localhost - - - -=== TEST 10: default timeout ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - local etcd_cli = require("resty.etcd") - local f = etcd_cli.new - local timeout - etcd_cli.new = function(conf) - timeout = conf.timeout - return f(conf) - end - etcd.new() - ngx.say(timeout) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 ---- response_body -30 - - - -=== TEST 11: ipv6 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://[::1]:2379 diff --git a/t/deployment/conf_server2.t b/t/deployment/conf_server2.t deleted file mode 100644 index 886b0cb420fd..000000000000 --- a/t/deployment/conf_server2.t +++ /dev/null @@ -1,161 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: health check, ensure unhealthy endpoint is skipped ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://localhost:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = true - for i = 1, 7 do - assert(etcd.set("/apisix/test", "foo")) - end - package.loaded.start_to_fail = nil - ngx.say('OK') - } - } ---- response_body -OK ---- error_log -report failure, endpoint: localhost:12345 -endpoint localhost:12345 is unhealthy, skipped - - - -=== TEST 2: health check, all endpoints are unhealthy ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://localhost:12345 - - http://127.0.0.1:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = 
true - for i = 1, 6 do - etcd.set("/apisix/test", "foo") - end - package.loaded.start_to_fail = nil - local _, err = etcd.set("/apisix/test", "foo") - ngx.say(err) - } - } ---- response_body -invalid response code: 503 ---- error_log -endpoint localhost:12345 is unhealthy, skipped -endpoint 127.0.0.1:12345 is unhealthy, skipped - - - -=== TEST 3: health check, all endpoints recover from unhealthy ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - health_check_timeout: 1 - prefix: "/apisix" - host: - - http://localhost:12345 - - http://127.0.0.1:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = true - for i = 1, 6 do - etcd.set("/apisix/test", "foo") - end - package.loaded.start_to_fail = nil - ngx.sleep(1.2) - local res, err = etcd.set("/apisix/test", "foo") - ngx.say(err or res.body.node.value) - } - } ---- response_body -foo ---- error_log -endpoint localhost:12345 is unhealthy, skipped -endpoint 127.0.0.1:12345 is unhealthy, skipped diff --git a/t/deployment/grpc/conf_server.t b/t/deployment/grpc/conf_server.t deleted file mode 100644 index 5ea0bbe214b3..000000000000 --- a/t/deployment/grpc/conf_server.t +++ /dev/null @@ -1,458 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: sync in https ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- response_body -prev_index updated ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - admin: - admin_key: ~ - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - tls: - verify: false - - - -=== TEST 2: mix ip & domain ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = 
assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.2:2379 - - http://localhost:2379 - - http://[::1]:2379 ---- response_body -foo - - - -=== TEST 3: check default SNI ---- http_config -server { - listen 12345 http2 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://localhost:12345 - timeout: 1 - tls: - verify: false ---- error_log -Receive SNI: localhost - - - -=== TEST 4: check configured SNI ---- http_config -server { - listen 12345 http2 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - 
https://127.0.0.1:12345 - timeout: 1 - tls: - verify: false - sni: "x.com" ---- error_log -Receive SNI: x.com - - - -=== TEST 5: ipv6 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://[::1]:2379 - - - -=== TEST 6: resolve domain, result changed ---- extra_init_by_lua - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - local counter = 0 - resolver.parse_domain = function (domain) - if domain == "localhost" then - counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - # use localhost so the connection is OK in the situation that the DNS - # resolve is not done in APISIX - - http://localhost:2379 ---- response_body -foo ---- error_log -localhost is resolved to: 127.0.0.3 -localhost is resolved to: 127.0.0.2 - - - -=== TEST 7: update balancer if the DNS result changed ---- extra_init_by_lua - local etcd = require("apisix.core.etcd") - etcd.get_etcd_syncer = function () - return etcd.new() - end - - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - package.loaded.counter = 0 - resolver.parse_domain = function (domain) - if domain == "x.com" then - local counter = package.loaded.counter - package.loaded.counter = 
counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end - - local picker = require("apisix.balancer.least_conn") - package.loaded.n_picker = 0 - local old_f = picker.new - picker.new = function (nodes, upstream) - package.loaded.n_picker = package.loaded.n_picker + 1 - return old_f(nodes, upstream) - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - local counter = package.loaded.counter - local n_picker = package.loaded.n_picker - if counter == n_picker then - ngx.say("OK") - else - ngx.say(counter, " ", n_picker) - end - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://x.com:2379 ---- response_body -foo -OK ---- error_log -x.com is resolved to: 127.0.0.3 -x.com is resolved to: 127.0.0.2 - - - -=== TEST 8: retry ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://[::1]:1979 - - http://localhost:2379 ---- error_log -connect() failed ---- response_body -foo - - - -=== TEST 9: check Host header ---- http_config -server { - listen 12345 http2; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - 
assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:12345 - - http://localhost:12345 ---- error_log -Receive Host: localhost -Receive Host: 127.0.0.1 - - - -=== TEST 10: check Host header after retry ---- http_config -server { - listen 12345 http2; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://localhost:12345 ---- error_log -Receive Host: localhost diff --git a/t/deployment/grpc/mtls.t b/t/deployment/grpc/mtls.t deleted file mode 100644 index 1aca2576dde5..000000000000 --- a/t/deployment/grpc/mtls.t +++ /dev/null @@ -1,118 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: mTLS for control plane ---- exec -grpcurl -import-path ./t/lib -proto etcd.proto -d '{}' -cert t/certs/mtls_client.crt -key t/certs/mtls_client.key -insecure localhost:12345 etcdserverpb.Maintenance.Status ---- response_body eval -qr/"version":/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 2: no client certificate ---- exec -curl -k https://localhost:12345/version ---- response_body eval -qr/No required SSL certificate was sent/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - 
http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 3: wrong client certificate ---- exec -curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version ---- response_body eval -qr/The SSL certificate error/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/deployment/mtls.t b/t/deployment/mtls.t deleted file mode 100644 index 2d8ecb7e6839..000000000000 --- a/t/deployment/mtls.t +++ /dev/null @@ -1,115 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: mTLS for control plane ---- exec -curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:12345/version ---- response_body eval -qr/"etcdserver":/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 2: no client certificate ---- exec -curl -k https://localhost:12345/version ---- response_body eval -qr/No required SSL certificate was sent/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 3: wrong client certificate ---- exec -curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version ---- response_body eval -qr/The SSL certificate error/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: 
t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/discovery/consul.t b/t/discovery/consul.t index 57a6ab5b4fbd..9ec87202118e 100644 --- a/t/discovery/consul.t +++ b/t/discovery/consul.t @@ -112,6 +112,40 @@ discovery: max_fails: 1 _EOC_ +our $yaml_config_with_acl = <<_EOC_; +apisix: + node_listen: 1984 + enable_control: true + control: + ip: 127.0.0.1 + port: 9090 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8502" + token: "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a" + skip_services: + - "service_c" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + run_tests(); @@ -579,8 +613,8 @@ upstreams: --- request GET /thc --- response_body -[{"ip":"127.0.0.1","port":30513,"status":"healthy"},{"ip":"127.0.0.1","port":30514,"status":"healthy"}] -[{"ip":"127.0.0.1","port":30513,"status":"healthy"},{"ip":"127.0.0.1","port":30514,"status":"healthy"}] +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30513,"status":"healthy"},{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30514,"status":"healthy"}] +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30513,"status":"healthy"},{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30514,"status":"healthy"}] --- ignore_error_log @@ -657,3 +691,93 @@ location /sleep { qr/server 1\n/, ] --- ignore_error_log + + + +=== TEST 14: bootstrap acl +--- config +location /v1/acl { + proxy_pass http://127.0.0.1:8502; +} +--- request eval +"PUT /v1/acl/bootstrap\n" . 
"{\"BootstrapSecret\": \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"}" +--- error_code_like: ^(?:200|403)$ + + + +=== TEST 15: test register and unregister nodes with acl +--- yaml_config eval: $::yaml_config_with_acl +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service-a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8502; + proxy_set_header X-Consul-Token "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- pipelined_requests eval +[ + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service-a1\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service-a2\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "PUT /v1/agent/service/deregister/service-a1", + "PUT /v1/agent/service/deregister/service-a2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service-a1\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service-a2\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "PUT /v1/agent/service/deregister/service-a1", + "PUT /v1/agent/service/deregister/service-a2", +] +--- response_body_like eval +[ + qr//, + qr//, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr//, + qr//, + qr//, + qr//, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + + qr//, + qr// +] +--- ignore_error_log diff --git a/t/discovery/consul_kv.t b/t/discovery/consul_kv.t index 9363f768d209..0034997c5a39 100644 --- a/t/discovery/consul_kv.t +++ b/t/discovery/consul_kv.t @@ -109,6 +109,37 @@ discovery: max_fails: 1 _EOC_ +our $yaml_config_with_acl = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8502" + token: "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a" + prefix: "upstreams" + skip_keys: + - "upstreams/unused_api/" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + run_tests(); @@ -450,8 +481,8 @@ upstreams: --- request GET /thc --- response_body -[{"ip":"127.0.0.1","port":30511,"status":"healthy"},{"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] -[{"ip":"127.0.0.1","port":30511,"status":"healthy"},{"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30511,"status":"healthy"},{"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] 
+[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30511,"status":"healthy"},{"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] --- ignore_error_log @@ -576,3 +607,92 @@ qr/retry connecting consul after \d seconds/ --- grep_error_log_out retry connecting consul after 1 seconds retry connecting consul after 4 seconds + + + +=== TEST 13: bootstrap acl +--- config +location /v1/acl { + proxy_pass http://127.0.0.1:8502; +} +--- request eval +"PUT /v1/acl/bootstrap\n" . "{\"BootstrapSecret\": \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"}" +--- error_code_like: ^(?:200|403)$ + + + +=== TEST 14: test register and unregister nodes +--- yaml_config eval: $::yaml_config_with_acl +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8502/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8502; + proxy_set_header X-Consul-Token "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30511", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30512", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30514\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30513", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30514", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30512\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + +] +--- response_body_like eval +[ + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/ +] +--- ignore_error_log diff --git a/t/discovery/dns/mix.t b/t/discovery/dns/mix.t index 6453b5c37a01..e8499429842e 100644 --- a/t/discovery/dns/mix.t +++ b/t/discovery/dns/mix.t @@ -128,3 +128,4 @@ connect to 127.0.0.1:1053 connect to 127.0.0.1:1053 connect to 127.0.0.1:1053 connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 diff --git a/t/discovery/dns/sanity.t b/t/discovery/dns/sanity.t index c26eceeba04b..4e0eaa7ae194 100644 --- a/t/discovery/dns/sanity.t +++ b/t/discovery/dns/sanity.t @@ -434,3 +434,30 @@ upstreams: --- must_die --- error_log matches none of the enum values + + + +=== TEST 20: use resolv.conf +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + resolv_conf: build-cache/test_resolve.conf +--- apisix_yaml +upstreams: + - service_name: "sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":1,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":1)\}/ +--- response_body +hello world diff --git a/t/grpc_server_example/main.go b/t/grpc_server_example/main.go index 6eb4a9b32ae0..f6253df3a729 100644 --- a/t/grpc_server_example/main.go +++ b/t/grpc_server_example/main.go @@ -53,8 +53,8 @@ import ( ) var ( - grpcAddr = ":50051" - grpcsAddr = ":50052" + grpcAddr = 
":10051" + grpcsAddr = ":10052" grpcsMtlsAddr string grpcHTTPAddr string diff --git a/t/lib/ext-plugin.lua b/t/lib/ext-plugin.lua index b19a79838591..0ebf7192b804 100644 --- a/t/lib/ext-plugin.lua +++ b/t/lib/ext-plugin.lua @@ -470,6 +470,19 @@ function _M.go(case) local action = http_req_call_rewrite.End(builder) build_action(action, http_req_call_action.Rewrite) + elseif case.rewrite_request_body == true then + local len = 4 + http_req_call_rewrite.StartBodyVector(builder, len) + builder:PrependByte(string.byte("\n")) + builder:PrependByte(string.byte("c")) + builder:PrependByte(string.byte("b")) + builder:PrependByte(string.byte("a")) + local b = builder:EndVector(len) + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddBody(builder, b) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + else http_req_call_resp.Start(builder) end diff --git a/t/node/grpc-proxy-mtls.t b/t/node/grpc-proxy-mtls.t index b238431e252c..b4d31b9d6698 100644 --- a/t/node/grpc-proxy-mtls.t +++ b/t/node/grpc-proxy-mtls.t @@ -59,7 +59,7 @@ routes: client_cert: "-----BEGIN 
CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n", client_key: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n" nodes: - "127.0.0.1:50053": 1 + "127.0.0.1:10053": 1 type: roundrobin #END --- exec @@ -88,7 +88,7 @@ routes: client_cert: "-----BEGIN 
CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n", client_key: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n" nodes: - "127.0.0.1:50053": 1 + "127.0.0.1:10053": 1 type: roundrobin #END --- exec diff --git a/t/node/grpc-proxy-stream.t b/t/node/grpc-proxy-stream.t index 1f10b9aad592..2e3da185d2d7 100644 --- a/t/node/grpc-proxy-stream.t +++ b/t/node/grpc-proxy-stream.t @@ -47,7 +47,7 @@ routes: upstream: scheme: grpc nodes: 
- "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -85,7 +85,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -111,7 +111,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec diff --git a/t/node/grpc-proxy-unary.t b/t/node/grpc-proxy-unary.t index f1a063c54a15..62870dc70bd5 100644 --- a/t/node/grpc-proxy-unary.t +++ b/t/node/grpc-proxy-unary.t @@ -47,7 +47,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -73,7 +73,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -99,7 +99,7 @@ routes: upstream: scheme: grpcs nodes: - "127.0.0.1:50052": 1 + "127.0.0.1:10052": 1 type: roundrobin #END --- exec @@ -131,7 +131,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec diff --git a/t/node/grpc-proxy.t b/t/node/grpc-proxy.t index 01ba461f09b4..c4f0dd036f2d 100644 --- a/t/node/grpc-proxy.t +++ b/t/node/grpc-proxy.t @@ -207,7 +207,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -239,7 +239,7 @@ routes: scheme: grpc pass_host: node nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec @@ -251,7 +251,7 @@ grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plai --- grep_error_log eval qr/grpc header: "(:authority|host): [^"]+"/ --- grep_error_log_out eval -qr/grpc header: "(:authority|host): 127.0.0.1:50051"/ +qr/grpc header: "(:authority|host): 127.0.0.1:10051"/ @@ -272,7 +272,7 @@ routes: pass_host: rewrite upstream_host: hello.world nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec diff --git a/t/node/healthcheck-discovery.t 
b/t/node/healthcheck-discovery.t index c8cf99b6c1dc..e8e21b04240f 100644 --- a/t/node/healthcheck-discovery.t +++ b/t/node/healthcheck-discovery.t @@ -94,7 +94,7 @@ routes: local httpc = http.new() local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) - ngx.sleep(0.5) + ngx.sleep(1.5) ngx.say(res.status) } diff --git a/t/node/healthcheck-leak-bugfix.t b/t/node/healthcheck-leak-bugfix.t index d3ada8c171f7..1caf5d348abf 100644 --- a/t/node/healthcheck-leak-bugfix.t +++ b/t/node/healthcheck-leak-bugfix.t @@ -31,8 +31,8 @@ __DATA__ local new = healthcheck.new healthcheck.new = function(...) local obj = new(...) - local clear = obj.clear - obj.clear = function(...) + local clear = obj.delayed_clear + obj.delayed_clear = function(...) ngx.log(ngx.WARN, "clear checker") return clear(...) end diff --git a/t/node/healthcheck.t b/t/node/healthcheck.t index 0ae4fbdb9d85..3053a0bb713f 100644 --- a/t/node/healthcheck.t +++ b/t/node/healthcheck.t @@ -486,6 +486,7 @@ qr{\[error\].*while connecting to upstream.*} qr{.*http://127.0.0.1:1960/server_port.* .*http://127.0.0.1:1961/server_port.* .*http://127.0.0.1:1961/server_port.* +.*http://127.0.0.1:1960/server_port.* .*http://127.0.0.1:1961/server_port.* .*http://127.0.0.1:1961/server_port.*} --- timeout: 10 diff --git a/t/node/priority-balancer/health-checker.t b/t/node/priority-balancer/health-checker.t index fb65e3fd2355..7ad685ac86bb 100644 --- a/t/node/priority-balancer/health-checker.t +++ b/t/node/priority-balancer/health-checker.t @@ -102,8 +102,8 @@ upstreams: GET /t --- error_log connect() failed -unhealthy TCP increment (2/2) for '(127.0.0.1:1979) -unhealthy TCP increment (2/2) for '(127.0.0.2:1979) +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1979) +unhealthy TCP increment (2/2) for '127.0.0.2(127.0.0.2:1979) --- grep_error_log eval qr/proxy request to \S+/ --- grep_error_log_out @@ -177,7 +177,7 @@ passed GET /t --- error_log connect() failed -unhealthy TCP increment (2/2) for 
'(127.0.0.1:1979) +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1979) --- grep_error_log eval qr/proxy request to \S+/ --- grep_error_log_out diff --git a/t/node/upstream-keepalive-pool.t b/t/node/upstream-keepalive-pool.t index a8a625ad9f08..4fc4a1ae1ceb 100644 --- a/t/node/upstream-keepalive-pool.t +++ b/t/node/upstream-keepalive-pool.t @@ -751,7 +751,7 @@ $/ scheme = "", type = "roundrobin", nodes = { - ["127.0.0.1:50054"] = 1, + ["127.0.0.1:10054"] = 1, }, keepalive_pool = { size = 4 diff --git a/t/node/upstream-websocket.t b/t/node/upstream-websocket.t index a374b638f6f5..a19a202deb00 100644 --- a/t/node/upstream-websocket.t +++ b/t/node/upstream-websocket.t @@ -270,7 +270,10 @@ passed local client = require "resty.websocket.client" local wb = client:new() local uri = "wss://127.0.0.1:1994/websocket_handshake" - local ok, err = wb:connect(uri) + local opts = { + server_name = "127.0.0.1" + } + local ok, err = wb:connect(uri, opts) if not ok then ngx.say("failed to connect: " .. err) return diff --git a/t/plugin/authz-keycloak4.t b/t/plugin/authz-keycloak4.t new file mode 100644 index 000000000000..60aea47f05c4 --- /dev/null +++ b/t/plugin/authz-keycloak4.t @@ -0,0 +1,245 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; + $ENV{CLIENT_SECRET} = "d1ec69e9-55d2-4109-a3ea-befa071579d5"; +} + +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/foo client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5 +--- response_body +Success! Data written to: kv/apisix/foo + + + +=== TEST 2: set client_secret as a reference to secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$secret://vault/test1/foo/client_secret", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 3: set client_secret as a reference to env variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$env://CLIENT_SECRET", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 4: set invalid client_secret as a reference to env variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$env://INVALID_CLIENT_SECRET", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/Invalid client secret/ +--- grep_error_log_out +Invalid client secret +Invalid client secret diff --git a/t/plugin/batch-requests-grpc.t b/t/plugin/batch-requests-grpc.t index 4acd5005434c..28b9d39d4016 100644 --- a/t/plugin/batch-requests-grpc.t +++ b/t/plugin/batch-requests-grpc.t @@ -128,7 +128,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] diff --git a/t/plugin/chaitin-waf.t b/t/plugin/chaitin-waf.t index e2ee42f93bad..cd568df13630 100644 --- a/t/plugin/chaitin-waf.t +++ b/t/plugin/chaitin-waf.t @@ -27,6 +27,7 @@ add_block_preprocessor(sub { server { listen 8088; listen 8089; + listen unix:/tmp/safeline-snserver.sock; content_by_lua_block { require("lib.chaitin_waf_server").pass() } @@ -136,6 +137,10 @@ __DATA__ { "host": "127.0.0.1", "port": 8089 + }, + { + "host": "unix:/tmp/safeline-snserver.sock", + "port": 8000 } ] }]] diff --git a/t/plugin/clickhouse-logger.t b/t/plugin/clickhouse-logger.t index 43ce54cd8a6d..4efcf11e3d70 100644 --- a/t/plugin/clickhouse-logger.t +++ b/t/plugin/clickhouse-logger.t @@ -187,7 +187,9 @@ passed "database": "default", "logtable": "test", "endpoint_addrs": ["http://127.0.0.1:8123", - "http://127.0.0.1:8124"] + "http://127.0.0.1:8124"], + "batch_max_size":1, + "inactive_timeout":1 } }, "upstream": { @@ -229,7 +231,38 @@ echo "select * from default.test" | curl 'http://localhost:8124/' --data-binary -=== TEST 8: use single clickhouse server +=== TEST 8: to show that different endpoints will be chosen randomly +--- config + location /t { + content_by_lua_block 
{ + local code_count = {} + local t = require("lib.test_admin").test + for i = 1, 12 do + local code, body = t('/opentracing', ngx.HTTP_GET) + if code ~= 200 then + ngx.say("code: ", code, " body: ", body) + end + code_count[code] = (code_count[code] or 0) + 1 + end + + local code_arr = {} + for code, count in pairs(code_count) do + table.insert(code_arr, {code = code, count = count}) + end + + ngx.say(require("toolkit.json").encode(code_arr)) + ngx.exit(200) + } + } +--- response_body +[{"code":200,"count":12}] +--- error_log +sending a batch logs to http://127.0.0.1:8123 +sending a batch logs to http://127.0.0.1:8124 + + + +=== TEST 9: use single clickhouse server --- config location /t { content_by_lua_block { @@ -267,7 +300,7 @@ passed -=== TEST 9: hit route +=== TEST 10: hit route --- request GET /opentracing --- error_code: 200 @@ -275,7 +308,7 @@ GET /opentracing -=== TEST 10: get log +=== TEST 11: get log --- exec echo "select * from default.test" | curl 'http://localhost:8123/' --data-binary @- --- response_body_like diff --git a/t/plugin/cors.t b/t/plugin/cors.t index f05385ddd05f..157832b8737e 100644 --- a/t/plugin/cors.t +++ b/t/plugin/cors.t @@ -723,7 +723,7 @@ qr/failed to check the configuration of plugin cors err: you can not/ "expose_headers": "ex-headr1,ex-headr2", "max_age": 50, "allow_credential": true, - "allow_origins_by_regex":[".*\\.test.com"] + "allow_origins_by_regex":[".*\\.test.com$"] } }, "upstream": { @@ -802,7 +802,7 @@ Access-Control-Allow-Credentials: "expose_headers": "ex-headr1,ex-headr2", "max_age": 50, "allow_credential": true, - "allow_origins_by_regex":[".*\\.test.com",".*\\.example.org"] + "allow_origins_by_regex":[".*\\.test.com$",".*\\.example.org$"] } }, "upstream": { diff --git a/t/plugin/cors2.t b/t/plugin/cors2.t index 1bc223bd3e8b..a2b06305bd97 100644 --- a/t/plugin/cors2.t +++ b/t/plugin/cors2.t @@ -107,7 +107,7 @@ done "allow_headers": "request-h", "expose_headers": "expose-h", "max_age": 10, - 
"allow_origins_by_regex":[".*\\.domain.com"] + "allow_origins_by_regex":[".*\\.domain.com$"] } }, "upstream": { diff --git a/t/plugin/ext-plugin/http-req-call.t b/t/plugin/ext-plugin/http-req-call.t index 6fc8240a7612..782dfa05c678 100644 --- a/t/plugin/ext-plugin/http-req-call.t +++ b/t/plugin/ext-plugin/http-req-call.t @@ -750,3 +750,60 @@ cat X-Resp: foo X-Req: bar X-Same: one, two + + + +=== TEST 27: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 28: test rewrite request body +--- request +GET /echo +--- response_body chomp +cat +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_request_body = true}) + } + } +--- response_body +abc diff --git a/t/plugin/ext-plugin/sanity.t b/t/plugin/ext-plugin/sanity.t index 1a3732402d71..873a540a8215 100644 --- a/t/plugin/ext-plugin/sanity.t +++ b/t/plugin/ext-plugin/sanity.t @@ -14,7 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -use t::APISIX 'no_plan'; +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} repeat_each(1); no_long_string(); diff --git a/t/plugin/ext-plugin/sanity2.t b/t/plugin/ext-plugin/sanity2.t index 206e7b090c2d..67260055862e 100644 --- a/t/plugin/ext-plugin/sanity2.t +++ b/t/plugin/ext-plugin/sanity2.t @@ -14,7 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # -use t::APISIX 'no_plan'; +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} repeat_each(1); no_long_string(); diff --git a/t/plugin/grpc-transcode.t b/t/plugin/grpc-transcode.t index 53f676a0ca86..e261bf7bd554 100644 --- a/t/plugin/grpc-transcode.t +++ b/t/plugin/grpc-transcode.t @@ -157,7 +157,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -225,7 +225,7 @@ qr/\{"message":"Hello world"\}/ "scheme": "asf", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -361,7 +361,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -418,7 +418,7 @@ qr/\{"result":"#2251799813685261"\}/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -466,7 +466,7 @@ qr/\{"message":"Hello apisix"\}/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -581,7 +581,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] 
@@ -734,7 +734,7 @@ failed to encode request data to protobuf "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] diff --git a/t/plugin/grpc-transcode2.t b/t/plugin/grpc-transcode2.t index da91d3ceb265..66baf9a2bac7 100644 --- a/t/plugin/grpc-transcode2.t +++ b/t/plugin/grpc-transcode2.t @@ -87,7 +87,7 @@ __DATA__ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -171,7 +171,7 @@ Content-Type: application/json "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -262,7 +262,7 @@ failed to encode request data to protobuf "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -311,7 +311,7 @@ Content-Type: application/json "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -338,7 +338,7 @@ Content-Type: application/json "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -444,7 +444,7 @@ passed "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -533,7 +533,7 @@ qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003"/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -617,7 +617,7 @@ qr/request log: \{.*body":\"\{\\"result\\":3}/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -644,7 +644,7 @@ qr/request log: \{.*body":\"\{\\"result\\":3}/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -737,7 +737,7 @@ set protobuf option: enum_as_name "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -763,7 +763,7 @@ set protobuf option: enum_as_name 
"scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] diff --git a/t/plugin/grpc-transcode3.t b/t/plugin/grpc-transcode3.t index 0cdbe6e16bdd..bd4164d3b5ae 100644 --- a/t/plugin/grpc-transcode3.t +++ b/t/plugin/grpc-transcode3.t @@ -92,7 +92,7 @@ __DATA__ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -172,7 +172,7 @@ Content-Type: application/json "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -243,7 +243,7 @@ qr/error/ "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -316,7 +316,7 @@ grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20 "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -389,7 +389,7 @@ grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20 "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] @@ -473,7 +473,7 @@ transform response error: failed to call pb.decode to decode details in grpc-sta "scheme": "grpc", "type": "roundrobin", "nodes": { - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 } } }]] diff --git a/t/plugin/inspect.t b/t/plugin/inspect.t index 0bbbfd7ff937..56f56d83ae4b 100644 --- a/t/plugin/inspect.t +++ b/t/plugin/inspect.t @@ -527,3 +527,31 @@ upvar2=yes --- error_log inspect: remove hook: t/lib/test_inspect.lua#27 inspect: all hooks removed + + + +=== TEST 14: jit should be recovered after all hooks are done +--- config + location /t { + content_by_lua_block { + local test = require("lib.test_inspect") + + local t1 = test.hot1() + + write_hooks([[ + local test = require("lib.test_inspect") + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 47, test.hot1, function(info) + return true + end) + ]]) + + ngx.sleep(1.5) 
+ + local t2 = test.hot1() + assert(t2-t1 < t1*0.8, "hot1 consumes at least double times than before") + } + } +--- error_log +inspect: remove hook: t/lib/test_inspect.lua#47 +inspect: all hooks removed diff --git a/t/plugin/limit-conn3.t b/t/plugin/limit-conn3.t new file mode 100644 index 000000000000..a2fe36955800 --- /dev/null +++ b/t/plugin/limit-conn3.t @@ -0,0 +1,126 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the check leak tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: create route with limit-conn plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "$remote_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn", + "host": "www.test.com" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: create ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: use HTTP version 2 to request +--- exec +curl --http2 --parallel -k https://www.test.com:1994/limit_conn https://www.test.com:1994/limit_conn --resolve 
www.test.com:1994:127.0.0.1 +--- response_body_like +503 Service Temporarily Unavailable.*.hello world diff --git a/t/plugin/limit-req3.t b/t/plugin/limit-req3.t new file mode 100644 index 000000000000..684eb9d1b405 --- /dev/null +++ b/t/plugin/limit-req3.t @@ -0,0 +1,114 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: create route with limit-req plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "$remote_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "host": "www.test.com" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: use HTTP version 2 to request +--- exec +curl --http2 --parallel -k https://www.test.com:1994/hello https://www.test.com:1994/hello --resolve www.test.com:1994:127.0.0.1 +--- response_body_like +503 Service Temporarily Unavailable.*.hello world diff --git a/t/plugin/openid-connect4.t b/t/plugin/openid-connect4.t new file mode 100644 index 
000000000000..35f33acbe25e --- /dev/null +++ b/t/plugin/openid-connect4.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Set up new route access the auth server with header test +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "authorization_params":{ + "test":"abc" + }, + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "proxy_opts": { + "http_proxy": "http://127.0.0.1:8080", 
+ "http_proxy_authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQK" + }, + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 2: Check the uri of the authorization endpoint for passed headers +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'test=abc') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- error_log +use http proxy diff --git a/t/plugin/opentelemetry.t b/t/plugin/opentelemetry.t index 5adc158e9e0a..f9e64853d93d 100644 --- a/t/plugin/opentelemetry.t +++ b/t/plugin/opentelemetry.t @@ -16,7 +16,6 @@ # use t::APISIX 'no_plan'; - add_block_preprocessor(sub { my ($block) = @_; @@ -26,38 +25,23 @@ plugins: - opentelemetry plugin_attr: opentelemetry: + trace_id_source: x-request-id batch_span_processor: max_export_batch_size: 1 inactive_timeout: 0.5 + collector: + address: 127.0.0.1:4318 + request_timeout: 3 + request_headers: + foo: bar _EOC_ $block->set_value("extra_yaml_config", $extra_yaml_config); } - - - if (!$block->extra_init_by_lua) { - my $extra_init_by_lua = <<_EOC_; --- mock exporter http client -local client = require("opentelemetry.trace.exporter.http_client") -client.do_request = function() - ngx.log(ngx.INFO, "opentelemetry export span") - return "ok" -end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); - } - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - if (!defined 
$block->response_body) { $block->set_value("response_body", "passed\n"); } - $block; }); - repeat_each(1); no_long_string(); no_root_location(); @@ -98,110 +82,29 @@ __DATA__ ngx.say(body) } } +--- request +GET /t === TEST 2: trigger opentelemetry --- request GET /opentracing +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 3: use default always_off sampler ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - -=== TEST 4: not trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +=== TEST 3: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ -=== TEST 5: use trace_id_ratio sampler, default fraction = 0 ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "trace_id_ratio" - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - - - -=== TEST 6: not trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out - - - -=== TEST 7: use trace_id_ratio sampler, fraction = 1.0 +=== TEST 4: use 
trace_id_ratio sampler, fraction = 1.0 --- config location /t { content_by_lua_block { @@ -235,120 +138,29 @@ qr/opentelemetry export span/ ngx.say(body) } } - - - -=== TEST 8: trigger opentelemetry --- request -GET /opentracing ---- response_body -opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 9: use parent_base sampler, default root sampler = always_off ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "parent_base" - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) +GET /t - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - -=== TEST 10: not trigger opentelemetry +=== TEST 5: trigger opentelemetry --- request GET /opentracing +--- wait: 2 --- response_body opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out - -=== TEST 11: use parent_base sampler, root sampler = always_on ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "parent_base", - "options": { - "root": { - "name": "always_on" - } - } - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } +=== TEST 6: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ -=== TEST 12: trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry 
export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 13: use parent_base sampler, root sampler = trace_id_ratio with default fraction = 0 +=== TEST 7: use parent_base sampler, root sampler = trace_id_ratio with default fraction = 0 --- config location /t { content_by_lua_block { @@ -384,36 +196,31 @@ opentelemetry export span ngx.say(body) } } - - - -=== TEST 14: not trigger opentelemetry --- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +GET /t -=== TEST 15: trigger opentelemetry, trace_flag = 1 +=== TEST 8: trigger opentelemetry, trace_flag = 1 --- request GET /opentracing --- more_headers traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 16: use parent_base sampler, root sampler = trace_id_ratio with fraction = 1 +=== TEST 9: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ + + + +=== TEST 10: use parent_base sampler, root sampler = trace_id_ratio with fraction = 1 --- config location /t { content_by_lua_block { @@ -452,36 +259,31 @@ opentelemetry export span ngx.say(body) } } +--- request +GET /t -=== TEST 17: trigger opentelemetry +=== TEST 11: trigger opentelemetry, trace_flag = 1 --- request GET /opentracing +--- more_headers +traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 18: not trigger opentelemetry, trace_flag = 0 ---- request -GET /opentracing ---- more_headers -traceparent: 00-00000000000000000000000000000001-0000000000000001-00 ---- response_body 
-opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +=== TEST 12: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ -=== TEST 19: set additional_attributes +=== TEST 13: set additional_attributes --- config location /t { content_by_lua_block { @@ -531,122 +333,38 @@ qr/opentelemetry export span/ ngx.say(body) } } +--- request +GET /t -=== TEST 20: trigger opentelemetry, test trace_id_source=x-request-id, custom resource, additional_attributes ---- extra_yaml_config -plugins: - - opentelemetry -plugin_attr: - opentelemetry: - trace_id_source: x-request-id - resource: - service.name: test - test_key: test_val - batch_span_processor: - max_export_batch_size: 1 - inactive_timeout: 0.5 ---- extra_init_by_lua - local core = require("apisix.core") - local otlp = require("opentelemetry.trace.exporter.otlp") - local span_kind = require("opentelemetry.trace.span_kind") - otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local span = spans[1] - if span:context().trace_id ~= "01010101010101010101010101010101" then - ngx.log(ngx.ERR, "unexpected trace id: ", span:context().trace_id) - return - end - - local current_span_kind = span:plain().kind - if current_span_kind ~= span_kind.server then - ngx.log(ngx.ERR, "expected span.kind to be server but got ", current_span_kind) - return - end - - if span.name ~= "/opentracing?foo=bar&a=b" then - ngx.log(ngx.ERR, "expect span name: /opentracing?foo=bar&a=b, but got ", span.name) - return - end - - local expected_resource_attrs = { - test_key = "test_val", - } - expected_resource_attrs["service.name"] = "test" - expected_resource_attrs["telemetry.sdk.language"] = "lua" - expected_resource_attrs["telemetry.sdk.name"] = "opentelemetry-lua" - expected_resource_attrs["telemetry.sdk.version"] = "0.1.1" - 
expected_resource_attrs["hostname"] = core.utils.gethostname() - local actual_resource_attrs = span.tracer.provider.resource:attributes() - if #actual_resource_attrs ~= 6 then - ngx.log(ngx.ERR, "expect len(actual_resource) = 6, but got ", #actual_resource_attrs) - return - end - for _, attr in ipairs(actual_resource_attrs) do - local expected_val = expected_resource_attrs[attr.key] - if not expected_val then - ngx.log(ngx.ERR, "unexpected resource attr key: ", attr.key) - return - end - if attr.value.string_value ~= expected_val then - ngx.log(ngx.ERR, "unexpected resource attr val: ", attr.value.string_value) - return - end - end - - local expected_attributes = { - service = "service_name", - route = "route_name", - http_user_agent = "test_nginx", - arg_foo = "bar", - cookie_token = "auth_token", - remote_addr = "127.0.0.1", - } - if #span.attributes ~= 6 then - ngx.log(ngx.ERR, "expect len(span.attributes) = 6, but got ", #span.attributes) - return - end - for _, attr in ipairs(span.attributes) do - local expected_val = expected_attributes[attr.key] - if not expected_val then - ngx.log(ngx.ERR, "unexpected attr key: ", attr.key) - return - end - if attr.value.string_value ~= expected_val then - ngx.log(ngx.ERR, "unexpected attr val: ", attr.value.string_value) - return - end - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end +=== TEST 14: trigger opentelemetry --- request GET /opentracing?foo=bar&a=b --- more_headers X-Request-Id: 01010101010101010101010101010101 User-Agent: test_nginx Cookie: token=auth_token; +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 21: create route for /specific_status +=== TEST 15: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/opentracing\?foo=bar.*/ + + + +=== TEST 16: create route for /specific_status --- config location /t { 
content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/2', + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ "name": "route_name", @@ -673,54 +391,21 @@ opentelemetry export span ngx.say(body) } } +--- request +GET /t -=== TEST 22: 500 status, test span.status ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local span = spans[1] - if span.status.code ~= 2 then - ngx.log(ngx.ERR, "unexpected status.code: ", span.status.code) - end - if span.status.message ~= "upstream response status: 500" then - ngx.log(ngx.ERR, "unexpected status.message: ", span.status.message) - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end +=== TEST 17: test response empty body --- request -GET /specific_status ---- more_headers -X-Test-Upstream-Status: 500 ---- error_code: 500 +HEAD /specific_status --- response_body -upstream status: 500 ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span +--- wait: 2 -=== TEST 23: test response empty body ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - otlp.export_spans = function(self, spans) - ngx.log(ngx.INFO, "opentelemetry export span") - end ---- request -HEAD /specific_status ---- response_body ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span +=== TEST 18: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/specific_status.*/ diff --git a/t/plugin/opentelemetry2.t b/t/plugin/opentelemetry2.t index 052cdbc12b66..3344ce97bb61 100644 --- a/t/plugin/opentelemetry2.t +++ b/t/plugin/opentelemetry2.t @@ -28,6 +28,7 @@ plugins: - opentelemetry plugin_attr: opentelemetry: + 
trace_id_source: x-request-id batch_span_processor: max_export_batch_size: 1 inactive_timeout: 0.5 @@ -35,30 +36,6 @@ _EOC_ $block->set_value("extra_yaml_config", $extra_yaml_config); } - - if (!$block->extra_init_by_lua) { - my $extra_init_by_lua = <<_EOC_; --- mock exporter http client -local client = require("opentelemetry.trace.exporter.http_client") -client.do_request = function() - ngx.log(ngx.INFO, "opentelemetry export span") - return "ok" -end -local ctx_new = require("opentelemetry.context").new -require("opentelemetry.context").new = function (...) - local ctx = ctx_new(...) - local current = ctx.current - ctx.current = function (...) - ngx.log(ngx.INFO, "opentelemetry context current") - return current(...) - end - return ctx -end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); - } - if (!$block->request) { $block->set_value("request", "GET /t"); } @@ -130,96 +107,12 @@ passed --- request GET /hello --- error_code: 401 ---- wait: 1 ---- grep_error_log eval -qr/(opentelemetry export span|opentelemetry context current|plugin body_filter phase)/ ---- grep_error_log_out -plugin body_filter phase -plugin body_filter phase -opentelemetry context current -opentelemetry export span +--- wait: 2 -=== TEST 3: set additional_attributes with match ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "name": "route_name", - "plugins": { - "opentelemetry": { - "sampler": { - "name": "always_on" - }, - "additional_header_prefix_attributes": [ - "x-my-header-*" - ] - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/attributes" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 4: opentelemetry expands headers ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - 
otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local attributes_names = {} - local attributes = {} - local span = spans[1] - for _, attribute in ipairs(span.attributes) do - if attribute.key == "hostname" then - -- remove any randomness - goto skip - end - table.insert(attributes_names, attribute.key) - attributes[attribute.key] = attribute.value.string_value or "" - ::skip:: - end - table.sort(attributes_names) - for _, attribute in ipairs(attributes_names) do - ngx.log(ngx.INFO, "attribute " .. attribute .. ": \"" .. attributes[attribute] .. "\"") - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end ---- request -GET /attributes ---- more_headers -x-my-header-name: william -x-my-header-nick: bill ---- wait: 1 ---- error_code: 404 ---- grep_error_log eval -qr/attribute .+?:.[^,]*/ ---- grep_error_log_out -attribute route: "route_name" -attribute service: "" -attribute x-my-header-name: "william" -attribute x-my-header-nick: "bill" +=== TEST 3: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/hello.*/ diff --git a/t/plugin/opentelemetry3.t b/t/plugin/opentelemetry3.t new file mode 100644 index 000000000000..6171d12f6276 --- /dev/null +++ b/t/plugin/opentelemetry3.t @@ -0,0 +1,170 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - http-logger + - opentelemetry +plugin_attr: + opentelemetry: + set_ngx_var: true + batch_span_processor: + max_export_batch_size: 1 + inactive_timeout: 0.5 +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $upstream_server_config = $block->upstream_server_config // <<_EOC_; + set \$opentelemetry_context_traceparent ""; + set \$opentelemetry_trace_id ""; + set \$opentelemetry_span_id ""; + access_log logs/error.log opentelemetry_log; +_EOC_ + + $block->set_value("upstream_server_config", $upstream_server_config); + + my $http_config = $block->http_config // <<_EOC_; + log_format opentelemetry_log '{"time": "\$time_iso8601","opentelemetry_context_traceparent": "\$opentelemetry_context_traceparent","opentelemetry_trace_id": "\$opentelemetry_trace_id","opentelemetry_span_id": "\$opentelemetry_span_id","remote_addr": "\$remote_addr","uri": "\$uri"}'; +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->extra_init_by_lua) { + my $extra_init_by_lua = <<_EOC_; +-- mock exporter http client +local client = require("opentelemetry.trace.exporter.http_client") +client.do_request = function() + ngx.log(ngx.INFO, "opentelemetry export span") + return "ok" +end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + } + + + if (!$block->request) { + 
$block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "opentelemetry_context_traceparent": "$opentelemetry_context_traceparent", + "opentelemetry_trace_id": "$opentelemetry_trace_id", + "opentelemetry_span_id": "$opentelemetry_span_id" + } + }]] + ) + if code >= 300 then + ngx.status = code + return body + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line" + }, + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >=300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: trigger opentelemetry with open set variables +--- request +GET /hello +--- response_body +hello world +--- wait: 1 +--- grep_error_log eval +qr/opentelemetry export span/ +--- grep_error_log_out +opentelemetry export span +--- error_log eval +qr/request log: \{.*"opentelemetry_context_traceparent":"00-\w{32}-\w{16}-01".*\}/ + + + +=== TEST 3: trigger opentelemetry with disable set variables +--- yaml_config +plugin_attr: + opentelemetry: + set_ngx_var: false +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{.*"opentelemetry_context_traceparent":"".*\}/ diff --git a/t/plugin/opentelemetry-bugfix-pb-state.t b/t/plugin/opentelemetry4-bugfix-pb-state.t similarity index 86% rename from t/plugin/opentelemetry-bugfix-pb-state.t rename to 
t/plugin/opentelemetry4-bugfix-pb-state.t index b6f2e1052e24..5607e912edc4 100644 --- a/t/plugin/opentelemetry-bugfix-pb-state.t +++ b/t/plugin/opentelemetry4-bugfix-pb-state.t @@ -16,55 +16,34 @@ # use t::APISIX 'no_plan'; - add_block_preprocessor(sub { my ($block) = @_; if (!$block->extra_yaml_config) { my $extra_yaml_config = <<_EOC_; plugins: - - example-plugin - - key-auth - opentelemetry plugin_attr: opentelemetry: + trace_id_source: x-request-id batch_span_processor: max_export_batch_size: 1 inactive_timeout: 0.5 + collector: + address: 127.0.0.1:4318 + request_timeout: 3 + request_headers: + foo: bar _EOC_ $block->set_value("extra_yaml_config", $extra_yaml_config); } - - if (!$block->extra_init_by_lua) { - my $extra_init_by_lua = <<_EOC_; --- mock exporter http client -local client = require("opentelemetry.trace.exporter.http_client") -client.do_request = function() - ngx.log(ngx.INFO, "opentelemetry export span") - return "ok" -end -local ctx_new = require("opentelemetry.context").new -require("opentelemetry.context").new = function (...) - local ctx = ctx_new(...) - local current = ctx.current - ctx.current = function (...) - ngx.log(ngx.INFO, "opentelemetry context current") - return current(...) 
- end - return ctx -end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); - } - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - $block; }); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); run_tests; @@ -105,6 +84,8 @@ __DATA__ ngx.say(body) } } +--- request +GET /t --- response_body passed @@ -179,6 +160,8 @@ passed ngx.status = res.status } } +--- request +GET /t --- wait: 1 --- error_code: 200 --- no_error_log diff --git a/t/plugin/proxy-cache/memory.t b/t/plugin/proxy-cache/memory.t index 5984dbd47776..e617f8a1b0fc 100644 --- a/t/plugin/proxy-cache/memory.t +++ b/t/plugin/proxy-cache/memory.t @@ -661,3 +661,46 @@ GET /hello --- more_headers Cache-Control: only-if-cached --- error_code: 504 + + + +=== TEST 36: configure plugin without memory_cache zone for cache_strategy = memory +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_bypass": ["$arg_bypass"], + "cache_control": true, + "cache_method": ["GET"], + "cache_ttl": 10, + "cache_http_status": [200] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like +.*err: invalid or empty cache_zone for cache_strategy: memory.* +--- error_code: 400 diff --git a/t/plugin/proxy-mirror3.t b/t/plugin/proxy-mirror3.t index 65a23dc823d9..967ef0c11680 100644 --- a/t/plugin/proxy-mirror3.t +++ b/t/plugin/proxy-mirror3.t @@ -63,7 +63,7 @@ routes: upstream: scheme: grpc nodes: - "127.0.0.1:50051": 1 + "127.0.0.1:10051": 1 type: roundrobin #END --- exec diff --git a/t/plugin/traffic-split2.t b/t/plugin/traffic-split2.t index 5100337e48d9..746d7441bac5 
100644 --- a/t/plugin/traffic-split2.t +++ b/t/plugin/traffic-split2.t @@ -799,3 +799,66 @@ GET /t --- error_code: 500 --- error_log failed to find upstream by id: invalid-id + + + +=== TEST 21: use upstream with https scheme +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "arg_scheme", "==", "https" } } + } }, + weighted_upstreams = { + { + upstream = { + type = "roundrobin", + pass_host = "node", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + scheme = "https" + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: hit route +--- request +GET /hello?scheme=https +--- error_code: 200 diff --git a/t/plugin/traffic-split5.t b/t/plugin/traffic-split5.t index b627d38418ba..96e284d5bf06 100644 --- a/t/plugin/traffic-split5.t +++ b/t/plugin/traffic-split5.t @@ -460,3 +460,78 @@ GET /server_port?name=jack --- error_log eval qr/event timer add: \d+: 12345000:\d+/ --- error_code: 502 + + + +=== TEST 9: set upstream for post_arg_id test case +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "post_arg_id", "==", "1" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1970"] = 1 + } + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1974"] = 1 + } + } + } + local code, body 
= t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: post_arg_id = 1 without content-type charset +--- request +POST /hello +id=1 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- response_body +1970 + + + +=== TEST 11: post_arg_id = 1 with content-type charset +--- request +POST /hello +id=1 +--- more_headers +Content-Type: application/x-www-form-urlencoded;charset=UTF-8 +--- response_body +1970 diff --git a/t/plugin/zipkin3.t b/t/plugin/zipkin3.t new file mode 100644 index 000000000000..f3aef6b5d8fe --- /dev/null +++ b/t/plugin/zipkin3.t @@ -0,0 +1,129 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $upstream_server_config = $block->upstream_server_config // <<_EOC_; + set \$zipkin_context_traceparent ""; + set \$zipkin_trace_id ""; + set \$zipkin_span_id ""; +_EOC_ + + $block->set_value("upstream_server_config", $upstream_server_config); + + my $extra_init_by_lua = <<_EOC_; + local zipkin = require("apisix.plugins.zipkin") + local orig_func = zipkin.access + zipkin.access = function (...) + local traceparent = ngx.var.zipkin_context_traceparent + if traceparent == nil or traceparent == '' then + ngx.log(ngx.ERR,"ngx_var.zipkin_context_traceparent is empty") + else + ngx.log(ngx.ERR,"ngx_var.zipkin_context_traceparent:",ngx.var.zipkin_context_traceparent) + end + + local orig = orig_func(...) 
+ return orig + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: trigger zipkin with open set variables +--- request +GET /echo +--- error_log eval +qr/ngx_var.zipkin_context_traceparent:00-\w{32}-\w{16}-01*/ + + + +=== TEST 3: trigger zipkin with disable set variables +--- yaml_config +plugin_attr: + zipkin: + set_ngx_var: false +--- request +GET /echo +--- error_log +ngx_var.zipkin_context_traceparent is empty diff --git a/t/secret/vault.t b/t/secret/vault.t index a31183e37a2c..b3db13179447 100644 --- a/t/secret/vault.t +++ b/t/secret/vault.t @@ -210,7 +210,7 @@ value --- request GET /t --- response_body_like -failed to decode result, res: {\"errors\":\[\"permission denied\"\]}\n +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n @@ -235,4 +235,4 @@ failed to decode result, res: {\"errors\":\[\"permission denied\"\]}\n --- request GET /t --- response_body_like -failed to decode result, res: {\"errors\":\[\"permission denied\"\]}\n +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n diff --git a/t/stream-node/sanity-with-service.t b/t/stream-node/sanity-with-service.t new file mode 100644 index 000000000000..799a96aa8434 --- /dev/null +++ b/t/stream-node/sanity-with-service.t @@ -0,0 +1,294 @@ +# +# Licensed to the Apache 
Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream route(id: 1) -> service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 3: set stream route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.2", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not 
hit route +--- stream_enable +--- stream_response + + + +=== TEST 5: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set service upstream (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + + code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: set stream route (id: 1) with service (id: 1) which uses upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "service_id": 1 + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 9: set stream route (id: 1) which uses upstream_id and remote address with IP CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1/26", + "service_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit route +--- stream_request eval +mmm +--- 
stream_response +hello world + + + +=== TEST 11: reject bad CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": ":/8", + "service_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid remote_addr: :/8"} + + + +=== TEST 12: skip upstream http host check in stream subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1, + "127.0.0.2:1995": 1 + }, + "pass_host": "node", + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit route +--- stream_request eval +mmm +--- stream_response +hello world diff --git a/t/wasm/fault-injection.t b/t/wasm/fault-injection.t index e1cf2a43f9da..f690e9ea294c 100644 --- a/t/wasm/fault-injection.t +++ b/t/wasm/fault-injection.t @@ -180,3 +180,101 @@ GET /hello --- error_code: 401 --- response_body_like eval qr/401 Authorization Required<\/title>/ + + + +=== TEST 7: fault injection +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "body": "HIT\n" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +HIT + + + +=== TEST 9: fault 
injection, with 0 percentage +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "percentage": 0 + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- ret_code: 401 +--- response_body +passed + + + +=== TEST 10: hit +--- request +GET /hello +--- response_body +hello world diff --git a/t/wasm/global-rule.t b/t/wasm/global-rule.t index 05f34e3f5b1d..f9fd322e38f2 100644 --- a/t/wasm/global-rule.t +++ b/t/wasm/global-rule.t @@ -57,13 +57,6 @@ __DATA__ local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_PUT, [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, "plugins": { "wasm_log": { "conf": "blahblah" diff --git a/t/wasm/request-body.t b/t/wasm/request-body.t index e45db39b56a0..156df2527ae8 100644 --- a/t/wasm/request-body.t +++ b/t/wasm/request-body.t @@ -200,3 +200,52 @@ hello qr/request get body: \w+/ --- grep_error_log_out request get body: ell + + + +=== TEST 8: invalid conf type no set conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "setting": {"processReqBody":true, "start":1, "size":3} + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/property.*conf.*is required/ + + + +=== TEST 9: hit +--- request +POST /hello +hello +--- grep_error_log eval 
+qr/request get body: \w+/ +--- grep_error_log_out +request get body: ell diff --git a/t/wasm/route.t b/t/wasm/route.t index 6e949dd6b0d8..717e54ebc951 100644 --- a/t/wasm/route.t +++ b/t/wasm/route.t @@ -51,48 +51,147 @@ run_tests(); __DATA__ -=== TEST 1: check schema +=== TEST 1: scheme check with empty json body --- config location /t { content_by_lua_block { - local json = require("toolkit.json") local t = require("lib.test_admin").test - for _, case in ipairs({ - {input = { - }}, - {input = { - conf = {} - }}, - {input = { - conf = "" - }}, - }) do - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - { - id = "1", - uri = "/echo", - upstream = { - type = "roundrobin", - nodes = {} - }, - plugins = { - wasm_log = case.input + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 } - } - ) - ngx.say(json.decode(body).error_msg) + }, + "plugins": { + "wasm_log": {} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return end + + ngx.say(body) } } ---- response_body -failed to check the configuration of plugin wasm_log err: property "conf" is required -failed to check the configuration of plugin wasm_log err: property "conf" validation failed: wrong type: expected string, got table -failed to check the configuration of plugin wasm_log err: property "conf" validation failed: string too short, expected at least 1, got 0 +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 2: scheme check with conf type number +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": 123} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return 
+ end + + ngx.say(body) + } + } +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 3: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": {}}} + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ -=== TEST 2: sanity + +=== TEST 4: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": ""}} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ + + + +=== TEST 5: sanity --- config location /t { content_by_lua_block { @@ -132,7 +231,7 @@ passed -=== TEST 3: hit +=== TEST 6: hit --- request GET /hello --- grep_error_log eval @@ -143,7 +242,7 @@ run plugin ctx 1 with conf zzz in http ctx 2 -=== TEST 4: run wasm plugin in rewrite phase (prior to the one run in access phase) +=== TEST 7: run wasm plugin in rewrite phase (prior to the one run in access phase) --- extra_yaml_config wasm: plugins: @@ -164,7 +263,7 @@ run plugin ctx 1 with conf blahblah in http ctx 2 -=== TEST 5: plugin from service +=== TEST 8: plugin from service --- config location /t { content_by_lua_block { @@ -231,7 +330,7 @@ passed -=== TEST 6: hit +=== TEST 9: hit --- config location 
/t { content_by_lua_block { @@ -262,7 +361,7 @@ run plugin ctx 3 with conf blahblah in http ctx 4 -=== TEST 7: plugin from plugin_config +=== TEST 10: plugin from plugin_config --- config location /t { content_by_lua_block { @@ -335,7 +434,7 @@ passed -=== TEST 8: hit +=== TEST 11: hit --- config location /t { content_by_lua_block { diff --git a/t/xrpc/dubbo.t b/t/xrpc/dubbo.t new file mode 100644 index 000000000000..290eadb3c6fa --- /dev/null +++ b/t/xrpc/dubbo.t @@ -0,0 +1,168 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: dubbo +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "dubbo" + }, + upstream = { + nodes = { + ["127.0.0.1:20880"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: use dubbo_backend_provider server. 
request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#hello,response=dubbo success & 200 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x94\x48\x04\x62\x6f\x64\x79\x0e\x64\x75\x62\x62\x6f\x20\x73\x75\x63\x63\x65\x73\x73\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x32\x30\x30\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 3: heart beat. request=\xe2|11..,response=\x22|00... +--- request eval +"GET /t +\xda\xbb\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- response_body eval +"\xda\xbb\x22\x14\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 4: no response. 
Different from test2 \x82=10000010, the second bit=0 of the third byte means no need to return +--- request eval +"GET /t +\xda\xbb\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 5: failed response. 
request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#fail,response=503 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xee\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x04\x66\x61\x69\x6c\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x2d\x94\x48\x04\x62\x6f\x64\x79\x0b\x64\x75\x62\x62\x6f\x20\x66\x61\x69\x6c\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x35\x30\x33\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 6: invalid magic(dabc<>dabb) for heart beat. 
+--- request eval +"GET /t +\xda\xbc\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- error_log +unknown magic number +--- stream_conf_enable diff --git a/utils/install-dependencies.sh b/utils/install-dependencies.sh index 220407808985..85cfa7483081 100755 --- a/utils/install-dependencies.sh +++ b/utils/install-dependencies.sh @@ -30,11 +30,6 @@ function detect_aur_helper() { fi } -function install_rust() { - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y - source "$HOME/.cargo/env" -} - function install_dependencies_with_aur() { detect_aur_helper $AUR_HELPER -S openresty --noconfirm @@ -68,8 +63,6 @@ function install_dependencies_with_yum() { # shellcheck disable=SC2086 sudo yum install -y openresty $common_dep fi - - install_rust } # Install dependencies on ubuntu and debian @@ -92,8 +85,6 @@ function install_dependencies_with_apt() { # install OpenResty and some compilation tools sudo apt-get install -y git openresty curl openresty-openssl111-dev make gcc libpcre3 libpcre3-dev libldap2-dev unzip - - install_rust } # Install dependencies on mac osx