diff --git a/.asf.yaml b/.asf.yaml
index fa106d0cab42..69f3f2c8d43e 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -53,6 +53,14 @@ github:
dismiss_stale_reviews: true
require_code_owner_reviews: true
required_approving_review_count: 2
+ release/3.6:
+ required_pull_request_reviews:
+ require_code_owner_reviews: true
+ required_approving_review_count: 2
+ release/3.5:
+ required_pull_request_reviews:
+ require_code_owner_reviews: true
+ required_approving_review_count: 2
release/3.4:
required_pull_request_reviews:
require_code_owner_reviews: true
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index c90a8e90082d..95e72fe578b9 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -31,7 +31,7 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
@@ -42,7 +42,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
@@ -136,7 +136,7 @@ jobs:
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
- if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin'
+ if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last')
run: |
sudo apt install -y maven
cd t/lib/dubbo-backend
diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml
index dc08b0fd384e..2be0c39cbb94 100644
--- a/.github/workflows/centos7-ci.yml
+++ b/.github/workflows/centos7-ci.yml
@@ -30,12 +30,12 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml
index 2bf3518dd2ab..7b47664c55e9 100644
--- a/.github/workflows/chaos.yml
+++ b/.github/workflows/chaos.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 35
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml
index dd77dcd1537c..d521a9d7a103 100644
--- a/.github/workflows/cli-master.yml
+++ b/.github/workflows/cli-master.yml
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml
index 7aa6554095b9..7c50e3fc58d2 100644
--- a/.github/workflows/cli.yml
+++ b/.github/workflows/cli.yml
@@ -38,7 +38,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/close-unresponded.yml b/.github/workflows/close-unresponded.yml
index 52e81228eba2..9508af7ded1c 100644
--- a/.github/workflows/close-unresponded.yml
+++ b/.github/workflows/close-unresponded.yml
@@ -20,7 +20,7 @@ jobs:
- name: Prune Stale
uses: actions/stale@v8
with:
- days-before-issue-stale: 14
+ days-before-issue-stale: 60
days-before-issue-close: 3
stale-issue-message: >
Due to lack of the reporter's response this issue has been labeled with "no response".
@@ -35,4 +35,5 @@ jobs:
# Issues with these labels will never be considered stale.
only-labels: 'wait for update'
stale-issue-label: 'no response'
+ exempt-issue-labels: "don't close"
ascending: true
diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml
index 07a1807f811e..9e2befc9ac1f 100644
--- a/.github/workflows/code-lint.yml
+++ b/.github/workflows/code-lint.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: Install
run: |
. ./ci/common.sh
@@ -37,7 +37,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: Shellcheck code
run: |
diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml
index 99340f991e2c..f644f59ce03e 100644
--- a/.github/workflows/doc-lint.yml
+++ b/.github/workflows/doc-lint.yml
@@ -5,11 +5,13 @@ on:
paths:
- "docs/**"
- "**/*.md"
+ - ".github/workflows/doc-lint.yml"
pull_request:
branches: [master, "release/**"]
paths:
- "docs/**"
- "**/*.md"
+ - ".github/workflows/doc-lint.yml"
permissions:
contents: read
@@ -20,9 +22,9 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: 🚀 Use Node.js
- uses: actions/setup-node@v3.8.0
+ uses: actions/setup-node@v4.0.0
with:
node-version: "12.x"
- run: npm install -g markdownlint-cli@0.25.0
@@ -47,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
submodules: recursive
- name: Check Chinese copywriting
diff --git a/.github/workflows/fips.yml b/.github/workflows/fips.yml
index aeaf121f1fe3..2115b0a7b8c8 100644
--- a/.github/workflows/fips.yml
+++ b/.github/workflows/fips.yml
@@ -30,7 +30,7 @@ jobs:
# The RSA and SHA tests are fully covered by jwt-auth and hmac-auth plugin tests, while other plugins only repeat such tests.
- t/plugin/jwt-auth2.t t/plugin/jwt-auth.t t/plugin/hmac-auth.t
# all SSL related core tests are covered by below two lists.
- - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/deployment/conf_server.t t/misc/patch.t
+ - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/misc/patch.t
- t/node/grpc-proxy-unary.t t/node/upstream-keepalive-pool.t t/node/upstream-websocket.t t/node/client-mtls.t t/node/upstream-mtls.t t/pubsub/kafka.t t/router/radixtree-sni2.t t/router/multi-ssl-certs.t t/router/radixtree-sni.t t/stream-node/mtls.t t/stream-node/tls.t t/stream-node/upstream-tls.t t/stream-node/sni.t
- t/fips
@@ -42,7 +42,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml
index ec3701532d77..4d313ebfeb1a 100644
--- a/.github/workflows/fuzzing-ci.yaml
+++ b/.github/workflows/fuzzing-ci.yaml
@@ -27,7 +27,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/gm-cron.yaml b/.github/workflows/gm-cron.yaml
index 669a21798be3..f0327540498e 100644
--- a/.github/workflows/gm-cron.yaml
+++ b/.github/workflows/gm-cron.yaml
@@ -20,7 +20,7 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
@@ -33,7 +33,7 @@ jobs:
# scripts or a separate action?
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/gm.yml b/.github/workflows/gm.yml
index 003e567bace8..297c746caebd 100644
--- a/.github/workflows/gm.yml
+++ b/.github/workflows/gm.yml
@@ -39,7 +39,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml
index b8d33af7c956..dc8857739b85 100644
--- a/.github/workflows/kubernetes-ci.yml
+++ b/.github/workflows/kubernetes-ci.yml
@@ -37,7 +37,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml
index 2122e0db8fbc..830f1a1802e3 100644
--- a/.github/workflows/license-checker.yml
+++ b/.github/workflows/license-checker.yml
@@ -30,8 +30,8 @@ jobs:
timeout-minutes: 3
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: Check License Header
- uses: apache/skywalking-eyes@v0.4.0
+ uses: apache/skywalking-eyes@v0.5.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml
old mode 100755
new mode 100644
index 106a9d582c49..20b2f16ec94e
--- a/.github/workflows/link-check.yml
+++ b/.github/workflows/link-check.yml
@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Get script
run: |
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 10f852db30a3..56cd00c02c8b 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code.
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: spell check
run: |
pip install codespell==2.1.0
@@ -30,10 +30,10 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: Setup Nodejs env
- uses: actions/setup-node@v3.8.0
+ uses: actions/setup-node@v4.0.0
with:
node-version: '12'
diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml
index c054303ccd74..17cf28691a4a 100644
--- a/.github/workflows/performance.yml
+++ b/.github/workflows/performance.yml
@@ -18,7 +18,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/redhat-ci.yaml b/.github/workflows/redhat-ci.yaml
index cf03ae002186..9bd8d39e35aa 100644
--- a/.github/workflows/redhat-ci.yaml
+++ b/.github/workflows/redhat-ci.yaml
@@ -26,12 +26,12 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml
index a2b606667fad..85df2c0816f7 100644
--- a/.github/workflows/semantic.yml
+++ b/.github/workflows/semantic.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/action-semantic-pull-request
diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml
index a646d86ce1fe..9e1c9fa2963e 100644
--- a/.github/workflows/tars-ci.yml
+++ b/.github/workflows/tars-ci.yml
@@ -37,7 +37,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/update-labels.yml b/.github/workflows/update-labels.yml
index 262604f23cf0..80919aefb841 100644
--- a/.github/workflows/update-labels.yml
+++ b/.github/workflows/update-labels.yml
@@ -1,19 +1,21 @@
-name: Update label when user responds
+name: Update labels when user responds in issue and pr
permissions:
issues: write
+ pull-requests: write
on:
issue_comment:
types: [created]
+ pull_request_review_comment:
+ types: [created]
jobs:
- run-check:
- if: ${{ !github.event.issue.pull_request }} # don't execute for PR comments
+ issue_commented:
+ if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded')
runs-on: ubuntu-latest
steps:
- name: update labels when user responds
uses: actions/github-script@v6
- if: ${{ github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded') }}
with:
script: |
github.rest.issues.addLabels({
@@ -28,3 +30,33 @@ jobs:
repo: context.repo.repo,
name: "wait for update"
})
+
+ pr_commented:
+ if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.issue.labels.*.name, 'user responded')
+ runs-on: ubuntu-latest
+ steps:
+ - name: update label when user responds
+ uses: actions/github-script@v6
+ with:
+ script: |
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ["user responded"]
+ })
+
+ pr_review_commented:
+ if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.pull_request.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded')
+ runs-on: ubuntu-latest
+ steps:
+ - name: update label when user responds
+ uses: actions/github-script@v6
+ with:
+ script: |
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ["user responded"]
+ })
diff --git a/.licenserc.yaml b/.licenserc.yaml
index 315fa71bcf42..8b423f25cdd1 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -23,6 +23,8 @@ header:
paths-ignore:
- '.gitignore'
+ - '.gitattributes'
+ - '.gitmodules'
- 'LICENSE'
- 'NOTICE'
- '**/*.json'
@@ -46,7 +48,11 @@ header:
# Exclude plugin-specific configuration files
- 't/plugin/authz-casbin'
- 't/coredns'
+ - 't/fuzzing/requirements.txt'
+ - 't/perf/requirements.txt'
- 'autodocs/'
- 'docs/**/*.md'
+ - '.ignore_words'
+ - '.luacheckrc'
comment: on-failure
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 78f9cc93a4dd..ce5f1d01726b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,8 @@ title: Changelog
## Table of Contents
+- [3.6.0](#360)
+- [3.5.0](#350)
- [3.4.0](#340)
- [3.3.0](#330)
- [3.2.1](#321)
@@ -71,6 +73,73 @@ title: Changelog
- [0.7.0](#070)
- [0.6.0](#060)
+## 3.6.0
+
+### Change
+
+- :warning: Remove gRPC support between APISIX and etcd and remove `etcd.use_grpc` configuration option: [#10015](https://github.com/apache/apisix/pull/10015)
+- :warning: Remove conf server. The data plane no longer supports direct communication with the control plane, and the configuration should be adjusted from `config_provider: control_plane` to `config_provider: etcd`: [#10012](https://github.com/apache/apisix/pull/10012)
+- :warning: Enforce strict schema validation on the properties of the core APISIX resources: [#10233](https://github.com/apache/apisix/pull/10233)
+
+### Core
+
+- :sunrise: Support configuring the buffer size of the access log: [#10225](https://github.com/apache/apisix/pull/10225)
+- :sunrise: Support the use of local DNS resolvers in service discovery by configuring `resolv_conf`: [#9770](https://github.com/apache/apisix/pull/9770)
+- :sunrise: Remove Rust dependency for installation: [#10121](https://github.com/apache/apisix/pull/10121)
+- :sunrise: Support Dubbo protocol in xRPC: [#9660](https://github.com/apache/apisix/pull/9660)
+
+### Plugins
+
+- :sunrise: Support https in traffic-split plugin: [#9115](https://github.com/apache/apisix/pull/9115)
+- :sunrise: Support rewrite request body in external plugin: [#9990](https://github.com/apache/apisix/pull/9990)
+- :sunrise: Support set nginx variables in opentelemetry plugin: [#8871](https://github.com/apache/apisix/pull/8871)
+- :sunrise: Support unix sock host pattern in the chaitin-waf plugin: [#10161](https://github.com/apache/apisix/pull/10161)
+
+### Bugfixes
+
+- Fix GraphQL POST request route matching exception: [#10198](https://github.com/apache/apisix/pull/10198)
+- Fix error on array of multiline string in `apisix.yaml`: [#10193](https://github.com/apache/apisix/pull/10193)
+- Add error handlers for invalid `cache_zone` configuration in the `proxy-cache` plugin: [#10138](https://github.com/apache/apisix/pull/10138)
+
+## 3.5.0
+
+### Change
+
+- :warning: Remove snowflake algorithm in the request-id plugin: [#9715](https://github.com/apache/apisix/pull/9715)
+- :warning: No longer compatible with OpenResty 1.19, it needs to be upgraded to 1.21+: [#9913](https://github.com/apache/apisix/pull/9913)
+- :warning: Remove the configuration item `apisix.stream_proxy.only`, the L4/L7 proxy needs to be enabled through the configuration item `apisix.proxy_mode`: [#9607](https://github.com/apache/apisix/pull/9607)
+- :warning: The admin-api `/apisix/admin/plugins?all=true` marked as deprecated: [#9580](https://github.com/apache/apisix/pull/9580)
+- :warning: allowlist and denylist can't be enabled at the same time in ua-restriction plugin: [#9841](https://github.com/apache/apisix/pull/9841)
+
+### Core
+
+- :sunrise: Support host level dynamic setting of tls protocol version: [#9903](https://github.com/apache/apisix/pull/9903)
+- :sunrise: Support force delete resource: [#9810](https://github.com/apache/apisix/pull/9810)
+- :sunrise: Support pulling env vars from yaml keys: [#9855](https://github.com/apache/apisix/pull/9855)
+- :sunrise: Add schema validate API in admin-api: [#10065](https://github.com/apache/apisix/pull/10065)
+
+### Plugins
+
+- :sunrise: Add chaitin-waf plugin: [#9838](https://github.com/apache/apisix/pull/9838)
+- :sunrise: Support vars for file-logger plugin: [#9712](https://github.com/apache/apisix/pull/9712)
+- :sunrise: Support adding response headers for mock plugin: [#9720](https://github.com/apache/apisix/pull/9720)
+- :sunrise: Support regex_uri with unsafe_uri for proxy-rewrite plugin: [#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: Support set client_email field for google-cloud-logging plugin: [#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: Support sending headers upstream returned by OPA server for opa plugin: [#9710](https://github.com/apache/apisix/pull/9710)
+- :sunrise: Support configuring proxy server for openid-connect plugin: [#9948](https://github.com/apache/apisix/pull/9948)
+
+### Bugfixes
+
+- Fix(log-rotate): the max_kept configuration doesn't work when using custom name: [#9749](https://github.com/apache/apisix/pull/9749)
+- Fix(limit_conn): do not use the http variable in stream mode: [#9816](https://github.com/apache/apisix/pull/9816)
+- Fix(loki-logger): getting an error with log_labels: [#9850](https://github.com/apache/apisix/pull/9850)
+- Fix(limit-count): X-RateLimit-Reset shouldn't be set to 0 after request be rejected: [#9978](https://github.com/apache/apisix/pull/9978)
+- Fix(nacos): attempt to index upvalue 'applications' (a nil value): [#9960](https://github.com/apache/apisix/pull/9960)
+- Fix(etcd): can't sync etcd data if key has special character: [#9967](https://github.com/apache/apisix/pull/9967)
+- Fix(tencent-cloud-cls): dns parsing failure: [#9843](https://github.com/apache/apisix/pull/9843)
+- Fix(reload): worker not exited when executing quit or reload command: [#9909](https://github.com/apache/apisix/pull/9909)
+- Fix(traffic-split): upstream_id validity verification: [#10008](https://github.com/apache/apisix/pull/10008)
+
## 3.4.0
### Core
diff --git a/Makefile b/Makefile
index c6979cd6f906..4031e314300c 100644
--- a/Makefile
+++ b/Makefile
@@ -26,6 +26,7 @@ VERSION ?= master
project_name ?= apache-apisix
project_release_name ?= $(project_name)-$(VERSION)-src
+OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json
# Hyperconverged Infrastructure
ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]')
@@ -68,6 +69,8 @@ endif
ifeq ($(ENV_OS_NAME), darwin)
ifeq ($(ENV_OS_ARCH), arm64)
ENV_HOMEBREW_PREFIX := /opt/homebrew
+ ENV_INST_BINDIR := $(ENV_INST_PREFIX)/local/bin
+ ENV_INST_LUADIR := $(shell which lua | xargs realpath | sed 's/bin\/lua//g')
endif
# OSX archive `._` cache file
@@ -147,14 +150,6 @@ help:
fi
@echo
-### check-rust : check if Rust is installed in the environment
-.PHONY: check-rust
-check-rust:
- @if ! [ $(shell command -v rustc) ]; then \
- echo "ERROR: Rust is not installed. Please install Rust before continuing." >&2; \
- exit 1; \
- fi;
-
### deps : Installing dependencies
.PHONY: deps
@@ -382,6 +377,9 @@ install: runtime
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis
$(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/
+ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo
+ $(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/
+
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils
$(ENV_INSTALL) apisix/utils/*.lua $(ENV_INST_LUADIR)/apisix/utils/
@@ -450,6 +448,8 @@ compress-tar:
.PHONY: ci-env-up
ci-env-up:
@$(call func_echo_status, "$@ -> [ Start ]")
+ touch $(OTEL_CONFIG)
+ chmod 777 $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) up -d
@$(call func_echo_success_status, "$@ -> [ Done ]")
@@ -474,5 +474,6 @@ ci-env-rebuild:
.PHONY: ci-env-down
ci-env-down:
@$(call func_echo_status, "$@ -> [ Start ]")
+ rm $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) down
@$(call func_echo_success_status, "$@ -> [ Done ]")
diff --git a/README.md b/README.md
index 9d304c5fed92..46e630743d61 100644
--- a/README.md
+++ b/README.md
@@ -217,6 +217,7 @@ A wide variety of companies and organizations use APISIX API Gateway for researc
- HONOR
- Horizon Robotics
- iQIYI
+- Lenovo
- NASA JPL
- Nayuki
- OPPO
@@ -226,6 +227,7 @@ A wide variety of companies and organizations use APISIX API Gateway for researc
- Travelsky
- vivo
- Sina Weibo
+- WeCity
- WPS
- XPENG
- Zoom
diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua
index 0d4ef932362f..333c798e6ada 100644
--- a/apisix/admin/init.lua
+++ b/apisix/admin/init.lua
@@ -376,6 +376,41 @@ local function reload_plugins(data, event, source, pid)
end
+local function schema_validate()
+ local uri_segs = core.utils.split_uri(ngx.var.uri)
+ core.log.info("uri: ", core.json.delay_encode(uri_segs))
+
+ local seg_res = uri_segs[6]
+ local resource = resources[seg_res]
+ if not resource then
+ core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
+ end
+
+ local req_body, err = core.request.get_body(MAX_REQ_BODY)
+ if err then
+ core.log.error("failed to read request body: ", err)
+ core.response.exit(400, {error_msg = "invalid request body: " .. err})
+ end
+
+ if req_body then
+ local data, err = core.json.decode(req_body)
+ if err then
+ core.log.error("invalid request body: ", req_body, " err: ", err)
+ core.response.exit(400, {error_msg = "invalid request body: " .. err,
+ req_body = req_body})
+ end
+
+ req_body = data
+ end
+
+ local ok, err = core.schema.check(resource.schema, req_body)
+ if ok then
+ core.response.exit(200)
+ end
+ core.response.exit(400, {error_msg = err})
+end
+
+
local uri_route = {
{
paths = [[/apisix/admin]],
@@ -392,6 +427,11 @@ local uri_route = {
methods = {"GET"},
handler = get_plugins_list,
},
+ {
+ paths = [[/apisix/admin/schema/validate/*]],
+ methods = {"POST"},
+ handler = schema_validate,
+ },
{
paths = reload_event,
methods = {"PUT"},
diff --git a/apisix/admin/resource.lua b/apisix/admin/resource.lua
index 35fe3bba2476..b03f1b069ea6 100644
--- a/apisix/admin/resource.lua
+++ b/apisix/admin/resource.lua
@@ -19,6 +19,7 @@ local utils = require("apisix.admin.utils")
local apisix_ssl = require("apisix.ssl")
local setmetatable = setmetatable
local tostring = tostring
+local ipairs = ipairs
local type = type
@@ -49,7 +50,38 @@ local function split_typ_and_id(id, sub_path)
end
-function _M:check_conf(id, conf, need_id, typ)
+local function check_forbidden_properties(conf, forbidden_properties)
+ local not_allow_properties = "the property is forbidden: "
+
+ if conf then
+ for _, v in ipairs(forbidden_properties) do
+ if conf[v] then
+ return not_allow_properties .. " " .. v
+ end
+ end
+
+ if conf.upstream then
+ for _, v in ipairs(forbidden_properties) do
+ if conf.upstream[v] then
+ return not_allow_properties .. " upstream." .. v
+ end
+ end
+ end
+
+ if conf.plugins then
+ for _, v in ipairs(forbidden_properties) do
+ if conf.plugins[v] then
+ return not_allow_properties .. " plugins." .. v
+ end
+ end
+ end
+ end
+
+ return nil
+end
+
+
+function _M:check_conf(id, conf, need_id, typ, allow_time)
if self.name == "secrets" then
id = typ .. "/" .. id
end
@@ -76,6 +108,15 @@ function _M:check_conf(id, conf, need_id, typ)
conf.id = id
end
+ -- check create time and update time
+ if not allow_time then
+ local forbidden_properties = {"create_time", "update_time"}
+ local err = check_forbidden_properties(conf, forbidden_properties)
+ if err then
+ return nil, {error_msg = err}
+ end
+ end
+
core.log.info("conf : ", core.json.delay_encode(conf))
-- check the resource own rules
@@ -355,7 +396,7 @@ function _M:patch(id, conf, sub_path, args)
core.log.info("new conf: ", core.json.delay_encode(node_value, true))
- local ok, err = self:check_conf(id, node_value, true, typ)
+ local ok, err = self:check_conf(id, node_value, true, typ, true)
if not ok then
return 400, err
end
diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua
index dc14bda44ec6..4218b77f22dd 100644
--- a/apisix/admin/services.lua
+++ b/apisix/admin/services.lua
@@ -16,6 +16,7 @@
--
local core = require("apisix.core")
local get_routes = require("apisix.router").http_routes
+local get_stream_routes = require("apisix.router").stream_routes
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
@@ -99,6 +100,21 @@ local function delete_checker(id)
end
end
+ local stream_routes, stream_routes_ver = get_stream_routes()
+ core.log.info("stream_routes: ", core.json.delay_encode(stream_routes, true))
+ core.log.info("stream_routes_ver: ", stream_routes_ver)
+ if stream_routes_ver and stream_routes then
+ for _, route in ipairs(stream_routes) do
+ if type(route) == "table" and route.value
+ and route.value.service_id
+ and tostring(route.value.service_id) == id then
+ return 400, {error_msg = "can not delete this service directly,"
+ .. " stream_route [" .. route.value.id
+ .. "] is still using it now"}
+ end
+ end
+ end
+
return nil, nil
end
diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua
index c16a9a7938c3..6e1c6e6385c3 100644
--- a/apisix/admin/stream_routes.lua
+++ b/apisix/admin/stream_routes.lua
@@ -42,6 +42,23 @@ local function check_conf(id, conf, need_id, schema)
end
end
+ local service_id = conf.service_id
+ if service_id then
+ local key = "/services/" .. service_id
+ local res, err = core.etcd.get(key)
+ if not res then
+ return nil, {error_msg = "failed to fetch service info by "
+ .. "service id [" .. service_id .. "]: "
+ .. err}
+ end
+
+ if res.status ~= 200 then
+ return nil, {error_msg = "failed to fetch service info by "
+ .. "service id [" .. service_id .. "], "
+ .. "response code: " .. res.status}
+ end
+ end
+
local ok, err = stream_route_checker(conf, true)
if not ok then
return nil, {error_msg = err}
diff --git a/apisix/balancer.lua b/apisix/balancer.lua
index f836533171e7..0fe2e6539922 100644
--- a/apisix/balancer.lua
+++ b/apisix/balancer.lua
@@ -79,7 +79,7 @@ local function fetch_health_nodes(upstream, checker)
if ok then
up_nodes = transform_node(up_nodes, node)
elseif err then
- core.log.error("failed to get health check target status, addr: ",
+ core.log.warn("failed to get health check target status, addr: ",
node.host, ":", port or node.port, ", host: ", host, ", err: ", err)
end
end
diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua
index 51cac2a508e6..b67248095d92 100644
--- a/apisix/cli/etcd.lua
+++ b/apisix/cli/etcd.lua
@@ -280,103 +280,7 @@ local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
-local function grpc_request(url, yaml_conf, key)
- local cmd
-
- local auth = ""
- if yaml_conf.etcd.user then
- local user = yaml_conf.etcd.user
- local password = yaml_conf.etcd.password
- auth = str_format("--user=%s:%s", user, password)
- end
-
- if str_sub(url, 1, 8) == "https://" then
- local host = url:sub(9)
-
- local verify = true
- local certificate, pkey, cafile
- if yaml_conf.etcd.tls then
- local cfg = yaml_conf.etcd.tls
-
- if cfg.verify == false then
- verify = false
- end
-
- certificate = cfg.cert
- pkey = cfg.key
-
- local apisix_ssl = yaml_conf.apisix.ssl
- if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
- cafile = apisix_ssl.ssl_trusted_certificate
- end
- end
-
- cmd = str_format(
- "etcdctl --insecure-transport=false %s %s %s %s " ..
- "%s --endpoints=%s put %s init_dir",
- verify and "" or "--insecure-skip-tls-verify",
- certificate and "--cert " .. certificate or "",
- pkey and "--key " .. pkey or "",
- cafile and "--cacert " .. cafile or "",
- auth, host, key)
- else
- local host = url:sub(#("http://") + 1)
-
- cmd = str_format(
- "etcdctl %s --endpoints=%s put %s init_dir",
- auth, host, key)
- end
-
- local res, err = util.execute_cmd(cmd)
- return res, err
-end
-
-
-local function prepare_dirs_via_grpc(yaml_conf, args, index, host)
- local is_success = true
-
- local errmsg
- local dirs = {}
- for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
- dirs[name] = true
- end
- for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
- dirs[name] = true
- end
-
- for dir_name in pairs(dirs) do
- local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
- local res, err
- local retry_time = 0
- while retry_time < 2 do
- res, err = grpc_request(host, yaml_conf, key)
- retry_time = retry_time + 1
- if res then
- break
- end
- print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
- host, err, retry_time))
- end
-
- if not res then
- errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", host, err)
- util.die(errmsg)
- end
-
- if args and args["verbose"] then
- print(res)
- end
- end
-
- return is_success
-end
-
-
-local function prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count)
- if use_grpc then
- return prepare_dirs_via_grpc(yaml_conf, args, index, host)
- end
-
+local function prepare_dirs(yaml_conf, args, index, host, host_count)
return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
@@ -400,8 +304,6 @@ function _M.init(env, args)
util.die("failed to read `etcd` field from yaml file when init etcd")
end
- local etcd_conf = yaml_conf.etcd
-
-- convert old single etcd config to multiple etcd config
if type(yaml_conf.etcd.host) == "string" then
yaml_conf.etcd.host = {yaml_conf.etcd.host}
@@ -477,22 +379,9 @@ function _M.init(env, args)
util.die("the etcd cluster needs at least 50% and above healthy nodes\n")
end
- if etcd_conf.use_grpc and not env.use_apisix_base then
- io_stderr:write("'use_grpc: true' in the etcd configuration " ..
- "is not supported by vanilla OpenResty\n")
- end
-
- local use_grpc = etcd_conf.use_grpc and env.use_apisix_base
- if use_grpc then
- local ok, err = util.execute_cmd("command -v etcdctl")
- if not ok then
- util.die("can't find etcdctl: ", err, "\n")
- end
- end
-
local etcd_ok = false
for index, host in ipairs(etcd_healthy_hosts) do
- if prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) then
+ if prepare_dirs(yaml_conf, args, index, host, host_count) then
etcd_ok = true
break
end
diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua
index 149c4e913c35..94e790db65e3 100644
--- a/apisix/cli/file.lua
+++ b/apisix/cli/file.lua
@@ -292,26 +292,14 @@ function _M.read_yaml_conf(apisix_home)
default_conf.apisix.enable_admin = true
elseif default_conf.deployment.role == "data_plane" then
+ default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_data_plane.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
elseif default_conf.deployment.role_data_plane.config_provider == "xds" then
default_conf.deployment.config_provider = "xds"
- else
- default_conf.etcd = default_conf.deployment.role_data_plane.control_plane
end
default_conf.apisix.enable_admin = false
end
-
- if default_conf.etcd and default_conf.deployment.certs then
- -- copy certs configuration to keep backward compatible
- local certs = default_conf.deployment.certs
- local etcd = default_conf.etcd
- if not etcd.tls then
- etcd.tls = {}
- end
- etcd.tls.cert = certs.cert
- etcd.tls.key = certs.cert_key
- end
end
if default_conf.deployment.config_provider == "yaml" then
diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua
index ab8407b572ec..3e1aadd9b543 100644
--- a/apisix/cli/ngx_tpl.lua
+++ b/apisix/cli/ngx_tpl.lua
@@ -115,10 +115,6 @@ http {
}
}
{% end %}
-
- {% if conf_server then %}
- {* conf_server *}
- {% end %}
}
{% end %}
@@ -369,8 +365,12 @@ http {
log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}';
uninitialized_variable_warn off;
+ {% if http.access_log_buffer then %}
+ access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3;
+ {% else %}
access_log {* http.access_log *} main buffer=16384 flush=3;
{% end %}
+ {% end %}
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
@@ -469,7 +469,7 @@ http {
}
apisix.http_init(args)
- -- set apisix_lua_home into constans module
+ -- set apisix_lua_home into constants module
-- it may be used by plugins to determine the work path of apisix
local constants = require("apisix.constants")
constants.apisix_lua_home = "{*apisix_lua_home*}"
@@ -576,10 +576,6 @@ http {
}
{% end %}
- {% if conf_server then %}
- {* conf_server *}
- {% end %}
-
{% if deployment_role ~= "control_plane" then %}
{% if enabled_plugins["proxy-cache"] then %}
@@ -639,6 +635,22 @@ http {
proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
+ # opentelemetry_set_ngx_var starts
+ {% if opentelemetry_set_ngx_var then %}
+ set $opentelemetry_context_traceparent '';
+ set $opentelemetry_trace_id '';
+ set $opentelemetry_span_id '';
+ {% end %}
+ # opentelemetry_set_ngx_var ends
+
+ # zipkin_set_ngx_var starts
+ {% if zipkin_set_ngx_var then %}
+ set $zipkin_context_traceparent '';
+ set $zipkin_trace_id '';
+ set $zipkin_span_id '';
+ {% end %}
+ # zipkin_set_ngx_var ends
+
# http server configuration snippet starts
{% if http_server_configuration_snippet then %}
{* http_server_configuration_snippet *}
diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua
index 8ba08c7fa974..0eaebae56c43 100644
--- a/apisix/cli/ops.lua
+++ b/apisix/cli/ops.lua
@@ -21,7 +21,6 @@ local file = require("apisix.cli.file")
local schema = require("apisix.cli.schema")
local ngx_tpl = require("apisix.cli.ngx_tpl")
local cli_ip = require("apisix.cli.ip")
-local snippet = require("apisix.cli.snippet")
local profile = require("apisix.core.profile")
local template = require("resty.template")
local argparse = require("argparse")
@@ -533,11 +532,6 @@ Please modify "admin_key" in conf/config.yaml .
proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout
end
- local conf_server, err = snippet.generate_conf_server(env, yaml_conf)
- if err then
- util.die(err, "\n")
- end
-
if yaml_conf.deployment and yaml_conf.deployment.role then
local role = yaml_conf.deployment.role
env.deployment_role = role
@@ -548,6 +542,16 @@ Please modify "admin_key" in conf/config.yaml .
end
end
+ local opentelemetry_set_ngx_var
+ if enabled_plugins["opentelemetry"] and yaml_conf.plugin_attr["opentelemetry"] then
+ opentelemetry_set_ngx_var = yaml_conf.plugin_attr["opentelemetry"].set_ngx_var
+ end
+
+ local zipkin_set_ngx_var
+ if enabled_plugins["zipkin"] and yaml_conf.plugin_attr["zipkin"] then
+ zipkin_set_ngx_var = yaml_conf.plugin_attr["zipkin"].set_ngx_var
+ end
+
-- Using template.render
local sys_conf = {
lua_path = env.pkg_path_org,
@@ -568,7 +572,8 @@ Please modify "admin_key" in conf/config.yaml .
control_server_addr = control_server_addr,
prometheus_server_addr = prometheus_server_addr,
proxy_mirror_timeouts = proxy_mirror_timeouts,
- conf_server = conf_server,
+ opentelemetry_set_ngx_var = opentelemetry_set_ngx_var,
+ zipkin_set_ngx_var = zipkin_set_ngx_var
}
if not yaml_conf.apisix then
@@ -814,20 +819,11 @@ local function start(env, ...)
-- start a new APISIX instance
- local conf_server_sock_path = env.apisix_home .. "/conf/config_listen.sock"
- if pl_path.exists(conf_server_sock_path) then
- -- remove stale sock (if exists) so that APISIX can start
- local ok, err = os_remove(conf_server_sock_path)
- if not ok then
- util.die("failed to remove stale conf server sock file, error: ", err)
- end
- end
-
local parser = argparse()
parser:argument("_", "Placeholder")
parser:option("-c --config", "location of customized config.yaml")
-- TODO: more logs for APISIX cli could be added using this feature
- parser:flag("--verbose", "show init_etcd debug information")
+ parser:flag("-v --verbose", "show init_etcd debug information")
local args = parser:parse()
local customized_yaml = args["config"]
diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua
index 3684232f1a7f..6f6450b46e0c 100644
--- a/apisix/cli/schema.lua
+++ b/apisix/cli/schema.lua
@@ -62,11 +62,6 @@ local etcd_schema = {
minimum = 1,
description = "etcd connection timeout in seconds",
},
- use_grpc = {
- type = "boolean",
- -- TODO: set true by default in v3.2
- default = false,
- },
},
required = {"prefix", "host"}
}
@@ -388,60 +383,23 @@ local deployment_schema = {
config_provider = {
enum = {"etcd"}
},
- conf_server = {
- properties = {
- listen = {
- type = "string",
- default = "0.0.0.0:9280",
- },
- cert = { type = "string" },
- cert_key = { type = "string" },
- client_ca_cert = { type = "string" },
- },
- required = {"cert", "cert_key"}
- },
- },
- required = {"config_provider", "conf_server"}
- },
- certs = {
- properties = {
- cert = { type = "string" },
- cert_key = { type = "string" },
- trusted_ca_cert = { type = "string" },
- },
- dependencies = {
- cert = {
- required = {"cert_key"},
- },
},
- default = {},
+ required = {"config_provider"}
},
},
required = {"etcd", "role_control_plane"}
},
data_plane = {
properties = {
+ etcd = etcd_schema,
role_data_plane = {
properties = {
config_provider = {
- enum = {"control_plane", "yaml", "xds"}
+ enum = {"etcd", "yaml", "xds"}
},
},
required = {"config_provider"}
},
- certs = {
- properties = {
- cert = { type = "string" },
- cert_key = { type = "string" },
- trusted_ca_cert = { type = "string" },
- },
- dependencies = {
- cert = {
- required = {"cert_key"},
- },
- },
- default = {},
- },
},
required = {"role_data_plane"}
}
diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua
deleted file mode 100644
index 95069a0ab263..000000000000
--- a/apisix/cli/snippet.lua
+++ /dev/null
@@ -1,206 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-local template = require("resty.template")
-local pl_path = require("pl.path")
-local ipairs = ipairs
-
-
--- this module provide methods to generate snippets which will be used in the nginx.conf template
-local _M = {}
-local conf_server_tpl = [[
-upstream apisix_conf_backend {
- server 0.0.0.0:80;
- balancer_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.balancer()
- }
- keepalive 320;
- keepalive_requests 1000;
- keepalive_timeout 60s;
-}
-
-{% if trusted_ca_cert then %}
-lua_ssl_trusted_certificate {* trusted_ca_cert *};
-{% end %}
-
-server {
- {% if control_plane then %}
- {% if directive_prefix == "grpc" then %}
- listen {* control_plane.listen *} ssl http2;
- {% else %}
- listen {* control_plane.listen *} ssl;
- {% end %}
- ssl_certificate {* control_plane.cert *};
- ssl_certificate_key {* control_plane.cert_key *};
-
- {% if control_plane.client_ca_cert then %}
- ssl_verify_client on;
- ssl_client_certificate {* control_plane.client_ca_cert *};
- {% end %}
-
- {% else %}
- {% if directive_prefix == "grpc" then %}
- listen unix:{* home *}/conf/config_listen.sock http2;
- {% else %}
- listen unix:{* home *}/conf/config_listen.sock;
- {% end %}
- {% end %}
-
- access_log off;
-
- set $upstream_host '';
-
- access_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.access()
- }
-
- location / {
- {% if enable_https then %}
- {* directive_prefix *}_pass {* scheme_name *}s://apisix_conf_backend;
- {* directive_prefix *}_ssl_protocols TLSv1.2 TLSv1.3;
- {* directive_prefix *}_ssl_server_name on;
-
- {% if etcd_tls_verify then %}
- {* directive_prefix *}_ssl_verify on;
- {* directive_prefix *}_ssl_trusted_certificate {* ssl_trusted_certificate *};
- {% end %}
-
- {% if sni then %}
- {* directive_prefix *}_ssl_name {* sni *};
- {% else %}
- {* directive_prefix *}_ssl_name $upstream_host;
- {% end %}
-
- {% if client_cert then %}
- {* directive_prefix *}_ssl_certificate {* client_cert *};
- {* directive_prefix *}_ssl_certificate_key {* client_cert_key *};
- {% end %}
-
- {% else %}
- {* directive_prefix *}_pass {* scheme_name *}://apisix_conf_backend;
- {% end %}
-
- {% if scheme_name == "http" then %}
- proxy_http_version 1.1;
- proxy_set_header Connection "";
- {% end %}
-
- {* directive_prefix *}_set_header Host $upstream_host;
- {* directive_prefix *}_next_upstream error timeout non_idempotent
- http_500 http_502 http_503 http_504;
- }
-
- log_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.log()
- }
-}
-]]
-
-
-local function is_grpc_used(env, etcd)
- local is_grpc_available = env.use_apisix_base
- return is_grpc_available and etcd.use_grpc
-end
-
-
-function _M.generate_conf_server(env, conf)
- if not (conf.deployment and (
- conf.deployment.role == "traditional" or
- conf.deployment.role == "control_plane"))
- then
- return nil, nil
- end
-
- -- we use proxy even the role is traditional so that we can test the proxy in daily dev
- local etcd = conf.deployment.etcd
- local servers = etcd.host
- local enable_https = false
- local prefix = "https://"
- if servers[1]:find(prefix, 1, true) then
- enable_https = true
- end
-
- for i, s in ipairs(servers) do
- if (s:find(prefix, 1, true) ~= nil) ~= enable_https then
- return nil, "all nodes in the etcd cluster should enable/disable TLS together"
- end
-
- local _, to = s:find("://", 1, true)
- if not to then
- return nil, "bad etcd endpoint format"
- end
- end
-
- local control_plane
- if conf.deployment.role == "control_plane" then
- control_plane = conf.deployment.role_control_plane.conf_server
- control_plane.cert = pl_path.abspath(control_plane.cert)
- control_plane.cert_key = pl_path.abspath(control_plane.cert_key)
-
- if control_plane.client_ca_cert then
- control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert)
- end
- end
-
- local trusted_ca_cert
- if conf.deployment.certs then
- if conf.deployment.certs.trusted_ca_cert then
- trusted_ca_cert = pl_path.abspath(conf.deployment.certs.trusted_ca_cert)
- end
- end
-
- local conf_render = template.compile(conf_server_tpl)
- local tls = etcd.tls
- local client_cert
- local client_cert_key
- local ssl_trusted_certificate
- local etcd_tls_verify
- local use_grpc = is_grpc_used(env, etcd)
- if tls then
- if tls.cert then
- client_cert = pl_path.abspath(tls.cert)
- client_cert_key = pl_path.abspath(tls.key)
- end
-
- etcd_tls_verify = tls.verify
- if enable_https and etcd_tls_verify then
- if not conf.apisix.ssl.ssl_trusted_certificate then
- return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled"
- end
- ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate)
- end
- end
-
- return conf_render({
- sni = tls and tls.sni,
- home = env.apisix_home or ".",
- control_plane = control_plane,
- enable_https = enable_https,
- client_cert = client_cert,
- client_cert_key = client_cert_key,
- trusted_ca_cert = trusted_ca_cert,
- etcd_tls_verify = etcd_tls_verify,
- ssl_trusted_certificate = ssl_trusted_certificate,
- scheme_name = use_grpc and "grpc" or "http",
- directive_prefix = use_grpc and "grpc" or "proxy",
- })
-end
-
-
-return _M
diff --git a/apisix/constants.lua b/apisix/constants.lua
index 72209aa4d905..0b3ec160b53d 100644
--- a/apisix/constants.lua
+++ b/apisix/constants.lua
@@ -37,6 +37,7 @@ return {
},
STREAM_ETCD_DIRECTORY = {
["/upstreams"] = true,
+ ["/services"] = true,
["/plugins"] = true,
["/ssls"] = true,
["/stream_routes"] = true,
diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua
index e3e40672c95f..357f24fa1e6e 100644
--- a/apisix/core/config_etcd.lua
+++ b/apisix/core/config_etcd.lua
@@ -362,40 +362,6 @@ local function readdir(etcd_cli, key, formatter)
end
-local function grpc_waitdir(self, etcd_cli, key, modified_index, timeout)
- local watching_stream = self.watching_stream
- if not watching_stream then
- local attr = {}
- attr.start_revision = modified_index
- local opts = {}
- opts.timeout = timeout
-
- local st, err = etcd_cli:create_grpc_watch_stream(key, attr, opts)
- if not st then
- log.error("create watch stream failed: ", err)
- return nil, err
- end
-
- log.info("create watch stream for key: ", key, ", modified_index: ", modified_index)
-
- self.watching_stream = st
- watching_stream = st
- end
-
- return etcd_cli:read_grpc_watch_stream(watching_stream)
-end
-
-
-local function flush_watching_streams(self)
- local etcd_cli = self.etcd_cli
- if not etcd_cli.use_grpc then
- return
- end
-
- self.watching_stream = nil
-end
-
-
local function http_waitdir(self, etcd_cli, key, modified_index, timeout)
if not watch_ctx.idx[key] then
watch_ctx.idx[key] = 1
@@ -470,12 +436,7 @@ local function waitdir(self)
return nil, "not inited"
end
- local res, err
- if etcd_cli.use_grpc then
- res, err = grpc_waitdir(self, etcd_cli, key, modified_index, timeout)
- else
- res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout)
- end
+ local res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout)
if not res then
-- log.error("failed to get key from etcd: ", err)
@@ -620,13 +581,9 @@ local function sync_data(self)
return nil, "missing 'key' arguments"
end
- if not self.etcd_cli.use_grpc then
- init_watch_ctx(self.key)
- end
+ init_watch_ctx(self.key)
if self.need_reload then
- flush_watching_streams(self)
-
local res, err = readdir(self.etcd_cli, self.key)
if not res then
return false, err
@@ -916,7 +873,6 @@ local function _automatic_fetch(premature, self)
end
if not exiting() and self.running then
- flush_watching_streams(self)
ngx_timer_at(0, _automatic_fetch, self)
end
end
@@ -1118,10 +1074,6 @@ function _M.init()
return true
end
- if local_conf.etcd.use_grpc then
- return true
- end
-
-- don't go through proxy during start because the proxy is not available
local etcd_cli, prefix, err = etcd_apisix.new_without_proxy()
if not etcd_cli then
@@ -1147,21 +1099,6 @@ function _M.init_worker()
return true
end
- if not local_conf.etcd.use_grpc then
- return true
- end
-
- -- don't go through proxy during start because the proxy is not available
- local etcd_cli, prefix, err = etcd_apisix.new_without_proxy()
- if not etcd_cli then
- return nil, "failed to start a etcd instance: " .. err
- end
-
- local res, err = readdir(etcd_cli, prefix, create_formatter(prefix))
- if not res then
- return nil, err
- end
-
return true
end
diff --git a/apisix/core/config_util.lua b/apisix/core/config_util.lua
index 7e57ed402fd8..7313e0116ad2 100644
--- a/apisix/core/config_util.lua
+++ b/apisix/core/config_util.lua
@@ -114,7 +114,7 @@ function _M.fire_all_clean_handlers(item)
clean_handler.f(item)
end
- item.clean_handlers = nil
+ item.clean_handlers = {}
end
diff --git a/apisix/core/ctx.lua b/apisix/core/ctx.lua
index 5128061d58fe..6d77b43811ca 100644
--- a/apisix/core/ctx.lua
+++ b/apisix/core/ctx.lua
@@ -260,7 +260,9 @@ do
elseif core_str.has_prefix(key, "post_arg_") then
-- only match default post form
- if request.header(nil, "Content-Type") == "application/x-www-form-urlencoded" then
+ local content_type = request.header(nil, "Content-Type")
+ if content_type ~= nil and core_str.has_prefix(content_type,
+ "application/x-www-form-urlencoded") then
local arg_key = sub_str(key, 10)
local args = request.get_post_args()[arg_key]
if args then
diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua
index b52517cd40b5..5cd1038581dd 100644
--- a/apisix/core/etcd.lua
+++ b/apisix/core/etcd.lua
@@ -28,25 +28,15 @@ local clone_tab = require("table.clone")
local health_check = require("resty.etcd.health_check")
local pl_path = require("pl.path")
local ipairs = ipairs
-local pcall = pcall
local setmetatable = setmetatable
local string = string
local tonumber = tonumber
-local ngx_config_prefix = ngx.config.prefix()
-local ngx_socket_tcp = ngx.socket.tcp
local ngx_get_phase = ngx.get_phase
-local is_http = ngx.config.subsystem == "http"
local _M = {}
-local function has_mtls_support()
- local s = ngx_socket_tcp()
- return s.tlshandshake ~= nil
-end
-
-
local function _new(etcd_conf)
local prefix = etcd_conf.prefix
etcd_conf.http_host = etcd_conf.host
@@ -72,17 +62,6 @@ local function _new(etcd_conf)
end
end
- if etcd_conf.use_grpc then
- if ngx_get_phase() == "init" then
- etcd_conf.use_grpc = false
- else
- local ok = pcall(require, "resty.grpc")
- if not ok then
- etcd_conf.use_grpc = false
- end
- end
- end
-
local etcd_cli, err = etcd.new(etcd_conf)
if not etcd_cli then
return nil, nil, err
@@ -129,64 +108,7 @@ local function new()
etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate
end
- local proxy_by_conf_server = false
-
- if local_conf.deployment then
- if local_conf.deployment.role == "traditional"
- -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in
- -- daily development. However, a stream proxy can't be the CP.
- -- Hence, generate a HTTP conf server to proxy etcd requests in stream proxy is
- -- unnecessary and inefficient.
- and is_http
- then
- local sock_prefix = ngx_config_prefix
- etcd_conf.unix_socket_proxy =
- "unix:" .. sock_prefix .. "/conf/config_listen.sock"
- etcd_conf.host = {"http://127.0.0.1:2379"}
- proxy_by_conf_server = true
-
- elseif local_conf.deployment.role == "control_plane" then
- local addr = local_conf.deployment.role_control_plane.conf_server.listen
- etcd_conf.host = {"https://" .. addr}
- etcd_conf.tls = {
- verify = false,
- }
-
- if has_mtls_support() and local_conf.deployment.certs.cert then
- local cert = local_conf.deployment.certs.cert
- local cert_key = local_conf.deployment.certs.cert_key
- etcd_conf.tls.cert = cert
- etcd_conf.tls.key = cert_key
- end
-
- proxy_by_conf_server = true
-
- elseif local_conf.deployment.role == "data_plane" then
- if has_mtls_support() and local_conf.deployment.certs.cert then
- local cert = local_conf.deployment.certs.cert
- local cert_key = local_conf.deployment.certs.cert_key
-
- if not etcd_conf.tls then
- etcd_conf.tls = {}
- end
-
- etcd_conf.tls.cert = cert
- etcd_conf.tls.key = cert_key
- end
- end
-
- if local_conf.deployment.certs and local_conf.deployment.certs.trusted_ca_cert then
- etcd_conf.trusted_ca = local_conf.deployment.certs.trusted_ca_cert
- end
- end
-
- -- if an unhealthy etcd node is selected in a single admin read/write etcd operation,
- -- the retry mechanism for health check can select another healthy etcd node
- -- to complete the read/write etcd operation.
- if proxy_by_conf_server then
- -- health check is done in conf server
- health_check.disable()
- elseif not health_check.conf then
+ if not health_check.conf then
health_check.init({
max_fails = 1,
retry = true,
@@ -349,10 +271,6 @@ do
return nil, nil, err
end
- if tmp_etcd_cli.use_grpc then
- etcd_cli_init_phase = tmp_etcd_cli
- end
-
return tmp_etcd_cli, prefix
end
diff --git a/apisix/core/response.lua b/apisix/core/response.lua
index cfbac1467341..04430abd5266 100644
--- a/apisix/core/response.lua
+++ b/apisix/core/response.lua
@@ -70,7 +70,9 @@ function resp_exit(code, ...)
error("failed to encode data: " .. err, -2)
else
idx = idx + 1
- t[idx] = body .. "\n"
+ t[idx] = body
+ idx = idx + 1
+ t[idx] = "\n"
end
elseif v ~= nil then
@@ -80,7 +82,7 @@ function resp_exit(code, ...)
end
if idx > 0 then
- ngx_print(concat_tab(t, "", 1, idx))
+ ngx_print(t)
end
if code then
diff --git a/apisix/core/version.lua b/apisix/core/version.lua
index 7ba204811a82..ff16402d38b4 100644
--- a/apisix/core/version.lua
+++ b/apisix/core/version.lua
@@ -20,5 +20,5 @@
-- @module core.version
return {
- VERSION = "3.4.0"
+ VERSION = "3.6.0"
}
diff --git a/apisix/discovery/consul/init.lua b/apisix/discovery/consul/init.lua
index ae1e4c64cc9c..32e306709e95 100644
--- a/apisix/discovery/consul/init.lua
+++ b/apisix/discovery/consul/init.lua
@@ -32,6 +32,7 @@ local ngx_timer_every = ngx.timer.every
local log = core.log
local json_delay_encode = core.json.delay_encode
local ngx_worker_id = ngx.worker.id
+local exiting = ngx.worker.exiting
local thread_spawn = ngx.thread.spawn
local thread_wait = ngx.thread.wait
local thread_kill = ngx.thread.kill
@@ -197,21 +198,20 @@ local function get_opts(consul_server, is_catalog)
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
+ default_args = {
+ token = consul_server.token,
+ }
}
if not consul_server.keepalive then
return opts
end
+ opts.default_args.wait = consul_server.wait_timeout --blocked wait!=0; unblocked by wait=0
+
if is_catalog then
- opts.default_args = {
- wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0
- index = consul_server.catalog_index,
- }
+ opts.default_args.index = consul_server.catalog_index
else
- opts.default_args = {
- wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0
- index = consul_server.health_index,
- }
+ opts.default_args.index = consul_server.health_index
end
return opts
@@ -277,7 +277,7 @@ end
local function check_keepalive(consul_server, retry_delay)
- if consul_server.keepalive then
+ if consul_server.keepalive and not exiting() then
local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
if not ok then
log.error("create ngx_timer_at got error: ", err)
@@ -396,6 +396,9 @@ function _M.connect(premature, consul_server, retry_delay)
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
+ default_args = {
+ token = consul_server.token
+ }
})
local catalog_success, catalog_res, catalog_err = pcall(function()
return consul_client:get(consul_server.consul_watch_catalog_url)
@@ -545,6 +548,7 @@ local function format_consul_params(consul_conf)
core.table.insert(consul_server_list, {
host = host,
port = port,
+ token = consul_conf.token,
connect_timeout = consul_conf.timeout.connect,
read_timeout = consul_conf.timeout.read,
wait_timeout = consul_conf.timeout.wait,
diff --git a/apisix/discovery/consul/schema.lua b/apisix/discovery/consul/schema.lua
index 3e998b015ce1..d7cf2954abf3 100644
--- a/apisix/discovery/consul/schema.lua
+++ b/apisix/discovery/consul/schema.lua
@@ -24,6 +24,7 @@ return {
type = "string",
}
},
+ token = {type = "string", default = ""},
fetch_interval = {type = "integer", minimum = 1, default = 3},
keepalive = {
type = "boolean",
diff --git a/apisix/discovery/consul_kv/init.lua b/apisix/discovery/consul_kv/init.lua
index 2dad772ace75..6d616e059190 100644
--- a/apisix/discovery/consul_kv/init.lua
+++ b/apisix/discovery/consul_kv/init.lua
@@ -320,18 +320,14 @@ end
local function format_consul_params(consul_conf)
local consul_server_list = core.table.new(0, #consul_conf.servers)
- local args
+ local args = {
+ token = consul_conf.token,
+ recurse = true
+ }
- if consul_conf.keepalive == false then
- args = {
- recurse = true,
- }
- elseif consul_conf.keepalive then
- args = {
- recurse = true,
- wait = consul_conf.timeout.wait, --blocked wait!=0; unblocked by wait=0
- index = 0,
- }
+ if consul_conf.keepalive then
+ args.wait = consul_conf.timeout.wait --blocked wait!=0; unblocked by wait=0
+ args.index = 0
end
for _, v in pairs(consul_conf.servers) do
diff --git a/apisix/discovery/consul_kv/schema.lua b/apisix/discovery/consul_kv/schema.lua
index a2ebb5d07919..4c02b2c80dd0 100644
--- a/apisix/discovery/consul_kv/schema.lua
+++ b/apisix/discovery/consul_kv/schema.lua
@@ -24,6 +24,7 @@ return {
type = "string",
}
},
+ token = {type = "string", default = ""},
fetch_interval = {type = "integer", minimum = 1, default = 3},
keepalive = {
type = "boolean",
diff --git a/apisix/discovery/dns/init.lua b/apisix/discovery/dns/init.lua
index 609ad5ea163f..601de0ebc9ee 100644
--- a/apisix/discovery/dns/init.lua
+++ b/apisix/discovery/dns/init.lua
@@ -64,14 +64,14 @@ end
function _M.init_worker()
local local_conf = config_local.local_conf()
local servers = local_conf.discovery.dns.servers
-
+ local resolv_conf = local_conf.discovery.dns.resolv_conf
local default_order = {"last", "SRV", "A", "AAAA", "CNAME"}
local order = core.table.try_read_attr(local_conf, "discovery", "dns", "order")
order = order or default_order
local opts = {
hosts = {},
- resolvConf = {},
+ resolvConf = resolv_conf,
nameservers = servers,
order = order,
}
diff --git a/apisix/discovery/dns/schema.lua b/apisix/discovery/dns/schema.lua
index 989938ab1fa3..03c7934ae4cf 100644
--- a/apisix/discovery/dns/schema.lua
+++ b/apisix/discovery/dns/schema.lua
@@ -24,6 +24,9 @@ return {
type = "string",
},
},
+ resolv_conf = {
+ type = "string",
+ },
order = {
type = "array",
minItems = 1,
@@ -34,5 +37,12 @@ return {
},
},
},
- required = {"servers"}
+ oneOf = {
+ {
+ required = {"servers"},
+ },
+ {
+ required = {"resolv_conf"},
+ }
+ }
}
diff --git a/apisix/http/route.lua b/apisix/http/route.lua
index d475646b56c6..dbf11abf5e28 100644
--- a/apisix/http/route.lua
+++ b/apisix/http/route.lua
@@ -103,8 +103,8 @@ function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter)
end
-function _M.match_uri(uri_router, match_opts, api_ctx)
- core.table.clear(match_opts)
+function _M.match_uri(uri_router, api_ctx)
+ local match_opts = core.tablepool.fetch("route_match_opts", 0, 4)
match_opts.method = api_ctx.var.request_method
match_opts.host = api_ctx.var.host
match_opts.remote_addr = api_ctx.var.remote_addr
@@ -112,6 +112,7 @@ function _M.match_uri(uri_router, match_opts, api_ctx)
match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)
local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
+ core.tablepool.release("route_match_opts", match_opts)
return ok
end
diff --git a/apisix/http/router/radixtree_host_uri.lua b/apisix/http/router/radixtree_host_uri.lua
index 532576e53d4a..680a04fbe815 100644
--- a/apisix/http/router/radixtree_host_uri.lua
+++ b/apisix/http/router/radixtree_host_uri.lua
@@ -142,8 +142,6 @@ local function create_radixtree_router(routes)
return true
end
-
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -162,7 +160,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_host_uri")
- core.table.clear(match_opts)
+ local match_opts = core.tablepool.fetch("route_match_opts", 0, 16)
match_opts.method = api_ctx.var.request_method
match_opts.remote_addr = api_ctx.var.remote_addr
match_opts.vars = api_ctx.var
@@ -181,11 +179,13 @@ function _M.matching(api_ctx)
api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse()
api_ctx.real_curr_req_matched_host = nil
end
+ core.tablepool.release("route_match_opts", match_opts)
return true
end
end
local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
+ core.tablepool.release("route_match_opts", match_opts)
return ok
end
diff --git a/apisix/http/router/radixtree_uri.lua b/apisix/http/router/radixtree_uri.lua
index 6e546364ac14..7c1b5c0c147a 100644
--- a/apisix/http/router/radixtree_uri.lua
+++ b/apisix/http/router/radixtree_uri.lua
@@ -27,7 +27,6 @@ local _M = {version = 0.2}
local uri_routes = {}
local uri_router
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -51,8 +50,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri")
-
- return base_router.match_uri(uri_router, match_opts, api_ctx)
+ return base_router.match_uri(uri_router, api_ctx)
end
diff --git a/apisix/http/router/radixtree_uri_with_parameter.lua b/apisix/http/router/radixtree_uri_with_parameter.lua
index 4bf7f3ebee5f..3f10f4fcac49 100644
--- a/apisix/http/router/radixtree_uri_with_parameter.lua
+++ b/apisix/http/router/radixtree_uri_with_parameter.lua
@@ -27,7 +27,6 @@ local _M = {}
local uri_routes = {}
local uri_router
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -51,8 +50,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri_with_parameter")
-
- return base_router.match_uri(uri_router, match_opts, api_ctx)
+ return base_router.match_uri(uri_router, api_ctx)
end
diff --git a/apisix/http/service.lua b/apisix/http/service.lua
index 83bcb9b9d341..97b224d622c8 100644
--- a/apisix/http/service.lua
+++ b/apisix/http/service.lua
@@ -61,7 +61,7 @@ function _M.init_worker()
filter = filter,
})
if not services then
- error("failed to create etcd instance for fetching upstream: " .. err)
+ error("failed to create etcd instance for fetching /services: " .. err)
return
end
end
diff --git a/apisix/init.lua b/apisix/init.lua
index 86b68cf62208..4cfd179d25a6 100644
--- a/apisix/init.lua
+++ b/apisix/init.lua
@@ -170,7 +170,7 @@ end
function _M.http_exit_worker()
- -- TODO: we can support stream plugin later - currently there is not `destory` method
+ -- TODO: we can support stream plugin later - currently there is not `destroy` method
-- in stream plugins
plugin.exit_worker()
require("apisix.plugins.ext-plugin.init").exit_worker()
@@ -1021,6 +1021,7 @@ function _M.stream_init_worker()
plugin.init_worker()
xrpc.init_worker()
router.stream_init_worker()
+ require("apisix.http.service").init_worker()
apisix_upstream.init_worker()
local we = require("resty.worker.events")
@@ -1078,6 +1079,34 @@ function _M.stream_preread_phase()
api_ctx.matched_upstream = upstream
+ elseif matched_route.value.service_id then
+ local service = service_fetch(matched_route.value.service_id)
+ if not service then
+ core.log.error("failed to fetch service configuration by ",
+ "id: ", matched_route.value.service_id)
+ return core.response.exit(404)
+ end
+
+ matched_route = plugin.merge_service_stream_route(service, matched_route)
+ api_ctx.matched_route = matched_route
+ api_ctx.conf_type = "stream_route&service"
+ api_ctx.conf_version = matched_route.modifiedIndex .. "&" .. service.modifiedIndex
+ api_ctx.conf_id = matched_route.value.id .. "&" .. service.value.id
+ api_ctx.service_id = service.value.id
+ api_ctx.service_name = service.value.name
+ api_ctx.matched_upstream = matched_route.value.upstream
+ if matched_route.value.upstream_id and not matched_route.value.upstream then
+ local upstream = apisix_upstream.get_by_id(matched_route.value.upstream_id)
+ if not upstream then
+ if is_http then
+ return core.response.exit(502)
+ end
+
+ return ngx_exit(1)
+ end
+
+ api_ctx.matched_upstream = upstream
+ end
else
if matched_route.has_domain then
local err
diff --git a/apisix/inspect/dbg.lua b/apisix/inspect/dbg.lua
index 7f4e7b114424..2fd78782faac 100644
--- a/apisix/inspect/dbg.lua
+++ b/apisix/inspect/dbg.lua
@@ -98,6 +98,9 @@ local function hook(_, arg)
if #hooks == 0 then
core.log.warn("inspect: all hooks removed")
debug.sethook()
+ if jit then
+ jit.on()
+ end
end
end
end
diff --git a/apisix/plugin.lua b/apisix/plugin.lua
index bde2b89a5393..fa1d814b290a 100644
--- a/apisix/plugin.lua
+++ b/apisix/plugin.lua
@@ -43,6 +43,9 @@ local stream_local_plugins_hash = core.table.new(0, 32)
local merged_route = core.lrucache.new({
ttl = 300, count = 512
})
+local merged_stream_route = core.lrucache.new({
+ ttl = 300, count = 512
+})
local expr_lrucache = core.lrucache.new({
ttl = 300, count = 512
})
@@ -637,6 +640,49 @@ function _M.merge_service_route(service_conf, route_conf)
end
+local function merge_service_stream_route(service_conf, route_conf)
+ -- Because many fields in Service are not supported by stream route,
+ -- we copy the stream route as the base object.
+ local new_conf = core.table.deepcopy(route_conf)
+ if service_conf.value.plugins then
+ for name, conf in pairs(service_conf.value.plugins) do
+ if not new_conf.value.plugins then
+ new_conf.value.plugins = {}
+ end
+
+ if not new_conf.value.plugins[name] then
+ new_conf.value.plugins[name] = conf
+ end
+ end
+ end
+
+ new_conf.value.service_id = nil
+
+ if not new_conf.value.upstream and service_conf.value.upstream then
+ new_conf.value.upstream = service_conf.value.upstream
+ end
+
+ if not new_conf.value.upstream_id and service_conf.value.upstream_id then
+ new_conf.value.upstream_id = service_conf.value.upstream_id
+ end
+
+ return new_conf
+end
+
+
+function _M.merge_service_stream_route(service_conf, route_conf)
+ core.log.info("service conf: ", core.json.delay_encode(service_conf, true))
+ core.log.info(" stream route conf: ", core.json.delay_encode(route_conf, true))
+
+ local version = route_conf.modifiedIndex .. "#" .. service_conf.modifiedIndex
+ local route_service_key = route_conf.value.id .. "#"
+ .. version
+ return merged_stream_route(route_service_key, version,
+ merge_service_stream_route,
+ service_conf, route_conf)
+end
+
+
local function merge_consumer_route(route_conf, consumer_conf, consumer_group_conf)
if not consumer_conf.plugins or
core.table.nkeys(consumer_conf.plugins) == 0
diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua
index f2c02727c0ce..99fe96cb06e7 100644
--- a/apisix/plugins/authz-keycloak.lua
+++ b/apisix/plugins/authz-keycloak.lua
@@ -20,6 +20,7 @@ local sub_str = string.sub
local type = type
local ngx = ngx
local plugin_name = "authz-keycloak"
+local fetch_secrets = require("apisix.secret").fetch_secrets
local log = core.log
local pairs = pairs
@@ -757,6 +758,8 @@ local function generate_token_using_password_grant(conf,ctx)
end
function _M.access(conf, ctx)
+ -- resolve secrets
+ conf = fetch_secrets(conf)
local headers = core.request.headers(ctx)
local need_grant_token = conf.password_grant_token_generation_incoming_uri and
ctx.var.request_uri == conf.password_grant_token_generation_incoming_uri and
diff --git a/apisix/plugins/chaitin-waf.lua b/apisix/plugins/chaitin-waf.lua
index afb4c108b3fb..cc870a47f47e 100644
--- a/apisix/plugins/chaitin-waf.lua
+++ b/apisix/plugins/chaitin-waf.lua
@@ -95,7 +95,7 @@ local metadata_schema = {
properties = {
host = {
type = "string",
- pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:]+$"
+ pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:/]+$"
},
port = {
type = "integer",
diff --git a/apisix/plugins/cors.lua b/apisix/plugins/cors.lua
index 4f0bfa5d37aa..f4a59ce5e301 100644
--- a/apisix/plugins/cors.lua
+++ b/apisix/plugins/cors.lua
@@ -98,7 +98,7 @@ local schema = {
type = "array",
description =
"you can use regex to allow specific origins when no credentials," ..
- "for example use [.*\\.test.com] to allow a.test.com and b.test.com",
+ "for example use [.*\\.test.com$] to allow a.test.com and b.test.com",
items = {
type = "string",
minLength = 1,
diff --git a/apisix/plugins/ext-plugin/init.lua b/apisix/plugins/ext-plugin/init.lua
index b6fcf6fea82c..7d47bb96efbb 100644
--- a/apisix/plugins/ext-plugin/init.lua
+++ b/apisix/plugins/ext-plugin/init.lua
@@ -65,6 +65,7 @@ local ipairs = ipairs
local pairs = pairs
local tostring = tostring
local type = type
+local ngx = ngx
local events_list
@@ -655,6 +656,13 @@ local rpc_handlers = {
end
end
+ local body_len = rewrite:BodyLength()
+ if body_len > 0 then
+ local body = rewrite:BodyAsString()
+ ngx.req.read_body()
+ ngx.req.set_body_data(body)
+ end
+
local len = rewrite:RespHeadersLength()
if len > 0 then
local rewrite_resp_headers = {}
diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua
index 927e4ddbd8aa..0bd39f20d2cb 100644
--- a/apisix/plugins/openid-connect.lua
+++ b/apisix/plugins/openid-connect.lua
@@ -156,6 +156,10 @@ local schema = {
description = "Comma separated list of hosts that should not be proxied.",
}
},
+ },
+ authorization_params = {
+ description = "Extra authorization params to the authorize endpoint",
+ type = "object"
}
},
encrypt_fields = {"client_secret"},
diff --git a/apisix/plugins/opentelemetry.lua b/apisix/plugins/opentelemetry.lua
index f8013e6f7675..0c84fad49da2 100644
--- a/apisix/plugins/opentelemetry.lua
+++ b/apisix/plugins/opentelemetry.lua
@@ -47,6 +47,7 @@ local type = type
local pairs = pairs
local ipairs = ipairs
local unpack = unpack
+local string_format = string.format
local lrucache = core.lrucache.new({
type = 'plugin', count = 128, ttl = 24 * 60 * 60,
@@ -112,6 +113,11 @@ local attr_schema = {
},
default = {},
},
+ set_ngx_var = {
+ type = "boolean",
+ description = "set nginx variables",
+ default = false,
+ },
},
}
@@ -332,6 +338,17 @@ function _M.rewrite(conf, api_ctx)
kind = span_kind.server,
attributes = attributes,
})
+
+ if plugin_info.set_ngx_var then
+ local span_context = ctx:span():context()
+ ngx_var.opentelemetry_context_traceparent = string_format("00-%s-%s-%02x",
+ span_context.trace_id,
+ span_context.span_id,
+ span_context.trace_flags)
+ ngx_var.opentelemetry_trace_id = span_context.trace_id
+ ngx_var.opentelemetry_span_id = span_context.span_id
+ end
+
api_ctx.otel_context_token = ctx:attach()
-- inject trace context into the headers of upstream HTTP request
diff --git a/apisix/plugins/proxy-cache/init.lua b/apisix/plugins/proxy-cache/init.lua
index 333c20e20e1b..918f755994ea 100644
--- a/apisix/plugins/proxy-cache/init.lua
+++ b/apisix/plugins/proxy-cache/init.lua
@@ -25,6 +25,7 @@ local plugin_name = "proxy-cache"
local STRATEGY_DISK = "disk"
local STRATEGY_MEMORY = "memory"
+local DEFAULT_CACHE_ZONE = "disk_cache_one"
local schema = {
type = "object",
@@ -33,7 +34,7 @@ local schema = {
type = "string",
minLength = 1,
maxLength = 100,
- default = "disk_cache_one",
+ default = DEFAULT_CACHE_ZONE,
},
cache_strategy = {
type = "string",
@@ -129,14 +130,23 @@ function _M.check_schema(conf)
local found = false
local local_conf = core.config.local_conf()
if local_conf.apisix.proxy_cache then
+ local err = "cache_zone " .. conf.cache_zone .. " not found"
for _, cache in ipairs(local_conf.apisix.proxy_cache.zones) do
+ -- the cache_zone passed in the plugin config matches one of the proxy_cache zones
if cache.name == conf.cache_zone then
- found = true
+ -- check for the mismatch between cache_strategy and corresponding cache zone
+ if (conf.cache_strategy == STRATEGY_MEMORY and cache.disk_path) or
+ (conf.cache_strategy == STRATEGY_DISK and not cache.disk_path) then
+ err = "invalid or empty cache_zone for cache_strategy: "..conf.cache_strategy
+ else
+ found = true
+ end
+ break
end
end
if found == false then
- return false, "cache_zone " .. conf.cache_zone .. " not found"
+ return false, err
end
end
diff --git a/apisix/plugins/proxy-cache/memory.lua b/apisix/plugins/proxy-cache/memory.lua
index 0112db63b568..9d5c665a8d92 100644
--- a/apisix/plugins/proxy-cache/memory.lua
+++ b/apisix/plugins/proxy-cache/memory.lua
@@ -32,6 +32,10 @@ end
function _M:set(key, obj, ttl)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
+
local obj_json = core.json.encode(obj)
if not obj_json then
return nil, "could not encode object"
@@ -43,6 +47,10 @@ end
function _M:get(key)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
+
-- If the key does not exist or has expired, then res_json will be nil.
local res_json, err = self.dict:get(key)
if not res_json then
@@ -63,6 +71,9 @@ end
function _M:purge(key)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
self.dict:delete(key)
end
diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua
index 1d621426a137..f546225c8c95 100644
--- a/apisix/plugins/traffic-split.lua
+++ b/apisix/plugins/traffic-split.lua
@@ -173,6 +173,7 @@ local function set_upstream(upstream_info, ctx)
key = upstream_info.key,
nodes = new_nodes,
timeout = upstream_info.timeout,
+ scheme = upstream_info.scheme
}
local ok, err = upstream.check_schema(up_conf)
@@ -190,7 +191,9 @@ local function set_upstream(upstream_info, ctx)
end
core.log.info("upstream_key: ", upstream_key)
upstream.set(ctx, upstream_key, ctx.conf_version, up_conf)
-
+ if upstream_info.scheme == "https" then
+ upstream.set_scheme(ctx, up_conf)
+ end
return
end
diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua
index 0c0c4748daff..efebd5115035 100644
--- a/apisix/plugins/zipkin.lua
+++ b/apisix/plugins/zipkin.lua
@@ -20,13 +20,17 @@ local zipkin_codec = require("apisix.plugins.zipkin.codec")
local new_random_sampler = require("apisix.plugins.zipkin.random_sampler").new
local new_reporter = require("apisix.plugins.zipkin.reporter").new
local ngx = ngx
+local ngx_var = ngx.var
local ngx_re = require("ngx.re")
local pairs = pairs
local tonumber = tonumber
+local to_hex = require "resty.string".to_hex
local plugin_name = "zipkin"
local ZIPKIN_SPAN_VER_1 = 1
local ZIPKIN_SPAN_VER_2 = 2
+local plugin = require("apisix.plugin")
+local string_format = string.format
local lrucache = core.lrucache.new({
@@ -69,6 +73,8 @@ function _M.check_schema(conf)
return core.schema.check(schema, conf)
end
+local plugin_info = plugin.plugin_attr(plugin_name) or {}
+
local function create_tracer(conf,ctx)
conf.route_id = ctx.route_id
@@ -205,9 +211,23 @@ function _M.rewrite(plugin_conf, ctx)
ctx.opentracing_sample = tracer.sampler:sample(per_req_sample_ratio or conf.sample_ratio)
if not ctx.opentracing_sample then
request_span:set_baggage_item("x-b3-sampled","0")
+ else
+ request_span:set_baggage_item("x-b3-sampled","1")
+ end
+
+ if plugin_info.set_ngx_var then
+ local span_context = request_span:context()
+ ngx_var.zipkin_context_traceparent = string_format("00-%s-%s-%02x",
+ to_hex(span_context.trace_id),
+ to_hex(span_context.span_id),
+ span_context:get_baggage_item("x-b3-sampled"))
+ ngx_var.zipkin_trace_id = span_context.trace_id
+ ngx_var.zipkin_span_id = span_context.span_id
+ end
+
+ if not ctx.opentracing_sample then
return
end
- request_span:set_baggage_item("x-b3-sampled","1")
local request_span = ctx.opentracing.request_span
if conf.span_version == ZIPKIN_SPAN_VER_1 then
diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua
index 01e0649e0a91..e3e9a05aca26 100644
--- a/apisix/schema_def.lua
+++ b/apisix/schema_def.lua
@@ -283,6 +283,7 @@ local health_checker = {
{required = {"active"}},
{required = {"active", "passive"}},
},
+ additionalProperties = false,
}
@@ -401,16 +402,10 @@ local upstream_schema = {
},
},
dependencies = {
- client_cert = {
- required = {"client_key"},
- ["not"] = {required = {"client_cert_id"}}
- },
- client_key = {
- required = {"client_cert"},
- ["not"] = {required = {"client_cert_id"}}
- },
+ client_cert = {required = {"client_key"}},
+ client_key = {required = {"client_cert"}},
client_cert_id = {
- ["not"] = {required = {"client_client", "client_key"}}
+ ["not"] = {required = {"client_cert", "client_key"}}
}
}
},
@@ -501,7 +496,8 @@ local upstream_schema = {
oneOf = {
{required = {"nodes"}},
{required = {"service_name", "discovery_type"}},
- }
+ },
+ additionalProperties = false
}
-- TODO: add more nginx variable support
@@ -662,6 +658,7 @@ _M.route = {
{required = {"script", "plugin_config_id"}},
}
},
+ additionalProperties = false,
}
@@ -689,6 +686,7 @@ _M.service = {
uniqueItems = true,
},
},
+ additionalProperties = false,
}
@@ -707,6 +705,7 @@ _M.consumer = {
desc = desc_def,
},
required = {"username"},
+ additionalProperties = false,
}
@@ -779,10 +778,6 @@ _M.ssl = {
},
required = {"ca"},
},
- exptime = {
- type = "integer",
- minimum = 1588262400, -- 2020/5/1 0:0:0
- },
labels = labels_def,
status = {
description = "ssl status, 1 to enable, 0 to disable",
@@ -799,8 +794,6 @@ _M.ssl = {
enum = {"TLSv1.1", "TLSv1.2", "TLSv1.3"}
},
},
- validity_end = timestamp_def,
- validity_start = timestamp_def,
create_time = timestamp_def,
update_time = timestamp_def
},
@@ -817,7 +810,8 @@ _M.ssl = {
{required = {"snis", "key", "cert"}}
}
},
- ["else"] = {required = {"key", "cert"}}
+ ["else"] = {required = {"key", "cert"}},
+ additionalProperties = false,
}
@@ -834,6 +828,7 @@ _M.proto = {
}
},
required = {"content"},
+ additionalProperties = false,
}
@@ -846,6 +841,7 @@ _M.global_rule = {
update_time = timestamp_def
},
required = {"id", "plugins"},
+ additionalProperties = false,
}
@@ -879,6 +875,7 @@ local xrpc_protocol_schema = {
dependencies = {
name = {"conf"},
},
+ additionalProperties = false,
},
},
@@ -911,9 +908,11 @@ _M.stream_route = {
},
upstream = upstream_schema,
upstream_id = id_schema,
+ service_id = id_schema,
plugins = plugins_schema,
protocol = xrpc_protocol_schema,
- }
+ },
+ additionalProperties = false,
}
@@ -929,6 +928,7 @@ _M.plugins = {
stream = {
type = "boolean"
},
+ additionalProperties = false,
},
required = {"name"}
}
@@ -938,6 +938,9 @@ _M.plugins = {
_M.plugin_config = {
type = "object",
properties = {
+ name = {
+ type = "string",
+ },
id = id_schema,
desc = desc_def,
plugins = plugins_schema,
@@ -946,6 +949,7 @@ _M.plugin_config = {
update_time = timestamp_def
},
required = {"id", "plugins"},
+ additionalProperties = false,
}
@@ -960,6 +964,7 @@ _M.consumer_group = {
update_time = timestamp_def
},
required = {"id", "plugins"},
+ additionalProperties = false,
}
diff --git a/apisix/stream/router/ip_port.lua b/apisix/stream/router/ip_port.lua
index 977bcb2d3a4e..284cc456edbc 100644
--- a/apisix/stream/router/ip_port.lua
+++ b/apisix/stream/router/ip_port.lua
@@ -110,6 +110,8 @@ do
for _, route in ipairs(items) do
local hit = match_addrs(route, vars)
if hit then
+ route.value.remote_addr_matcher = nil
+ route.value.server_addr_matcher = nil
ctx.matched_route = route
return true
end
@@ -175,6 +177,8 @@ do
for _, route in ipairs(other_routes) do
local hit = match_addrs(route, api_ctx.var)
if hit then
+ route.value.remote_addr_matcher = nil
+ route.value.server_addr_matcher = nil
api_ctx.matched_route = route
return true
end
diff --git a/apisix/stream/xrpc/protocols/dubbo/init.lua b/apisix/stream/xrpc/protocols/dubbo/init.lua
new file mode 100644
index 000000000000..19160d6c544e
--- /dev/null
+++ b/apisix/stream/xrpc/protocols/dubbo/init.lua
@@ -0,0 +1,231 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local sdk = require("apisix.stream.xrpc.sdk")
+local xrpc_socket = require("resty.apisix.stream.xrpc.socket")
+local math_random = math.random
+local ngx = ngx
+local OK = ngx.OK
+local str_format = string.format
+local DECLINED = ngx.DECLINED
+local DONE = ngx.DONE
+local bit = require("bit")
+local ffi = require("ffi")
+local ffi_str = ffi.string
+
+
+-- dubbo protocol spec: https://cn.dubbo.apache.org/zh-cn/overview/reference/protocols/tcp/
+local header_len = 16
+local _M = {}
+
+
+function _M.init_downstream(session)
+ session.req_id_seq = 0
+ session.resp_id_seq = 0
+ session.cmd_labels = { session.route.id, "" }
+ return xrpc_socket.downstream.socket()
+end
+
+
+local function parse_dubbo_header(header)
+ for i = 1, header_len do
+ local currentByte = header:byte(i)
+ if not currentByte then
+ return nil
+ end
+ end
+
+ local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2))
+ local message_flag = header:byte(3)
+ local status = header:byte(4)
+ local request_id = 0
+ for i = 5, 12 do
+ request_id = request_id * 256 + header:byte(i)
+ end
+
+ local byte13Val = header:byte(13) * 256 * 256 * 256
+ local byte14Val = header:byte(14) * 256 * 256
+ local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16)
+
+ local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0
+ local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0
+ local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0
+
+ return {
+ magic_number = magic_number,
+ message_flag = message_flag,
+ is_request = is_request,
+ is_two_way = is_two_way,
+ is_event = is_event,
+ status = status,
+ request_id = request_id,
+ data_length = data_length
+ }
+end
+
+
+local function read_data(sk, is_req)
+ local header_data, err = sk:read(header_len)
+ if not header_data then
+ return nil, err, false
+ end
+
+ local header_str = ffi_str(header_data, header_len)
+ local header_info = parse_dubbo_header(header_str)
+ if not header_info then
+ return nil, "header insufficient", false
+ end
+
+ local is_valid_magic_number = header_info.magic_number == "dabb"
+ if not is_valid_magic_number then
+ return nil, str_format("unknown magic number: \"%s\"", header_info.magic_number), false
+ end
+
+ local body_data, err = sk:read(header_info.data_length)
+ if not body_data then
+ core.log.error("failed to read dubbo request body")
+ return nil, err, false
+ end
+
+ local ctx = ngx.ctx
+ ctx.dubbo_serialization_id = bit.band(header_info.message_flag, 0x1F)
+
+ if is_req then
+ ctx.dubbo_req_body_data = body_data
+ else
+ ctx.dubbo_rsp_body_data = body_data
+ end
+
+ return true, nil, false
+end
+
+
+local function read_req(sk)
+ return read_data(sk, true)
+end
+
+
+local function read_reply(sk)
+ return read_data(sk, false)
+end
+
+
+local function handle_reply(session, sk)
+ local ok, err = read_reply(sk)
+ if not ok then
+ return nil, err
+ end
+
+ local ctx = sdk.get_req_ctx(session, 10)
+
+ return ctx
+end
+
+
+function _M.from_downstream(session, downstream)
+ local read_pipeline = false
+ session.req_id_seq = session.req_id_seq + 1
+ local ctx = sdk.get_req_ctx(session, session.req_id_seq)
+ session._downstream_ctx = ctx
+ while true do
+ local ok, err, pipelined = read_req(downstream)
+ if not ok then
+ if err ~= "timeout" and err ~= "closed" then
+ core.log.error("failed to read request: ", err)
+ end
+
+ if read_pipeline and err == "timeout" then
+ break
+ end
+
+ return DECLINED
+ end
+
+ if not pipelined then
+ break
+ end
+
+ if not read_pipeline then
+ read_pipeline = true
+ -- set minimal read timeout to read pipelined data
+ downstream:settimeouts(0, 0, 1)
+ end
+ end
+
+ if read_pipeline then
+ -- set timeout back
+ downstream:settimeouts(0, 0, 0)
+ end
+
+ return OK, ctx
+end
+
+
+function _M.connect_upstream(session, ctx)
+ local conf = session.upstream_conf
+ local nodes = conf.nodes
+ if #nodes == 0 then
+ core.log.error("failed to connect: no nodes")
+ return DECLINED
+ end
+
+ local node = nodes[math_random(#nodes)]
+ local sk = sdk.connect_upstream(node, conf)
+ if not sk then
+ return DECLINED
+ end
+
+ core.log.debug("dubbo_connect_upstream end")
+
+ return OK, sk
+end
+
+function _M.disconnect_upstream(session, upstream)
+ sdk.disconnect_upstream(upstream, session.upstream_conf)
+end
+
+
+function _M.to_upstream(session, ctx, downstream, upstream)
+ local ok, _ = upstream:move(downstream)
+ if not ok then
+ return DECLINED
+ end
+
+ return OK
+end
+
+
+function _M.from_upstream(session, downstream, upstream)
+ local ctx,err = handle_reply(session, upstream)
+ if err then
+ return DECLINED
+ end
+
+ local ok, _ = downstream:move(upstream)
+ if not ok then
+ return DECLINED
+ end
+
+ return DONE, ctx
+end
+
+
+function _M.log(_, _)
+end
+
+
+return _M
diff --git a/apisix/stream/xrpc/protocols/dubbo/schema.lua b/apisix/stream/xrpc/protocols/dubbo/schema.lua
new file mode 100644
index 000000000000..3a9d73325498
--- /dev/null
+++ b/apisix/stream/xrpc/protocols/dubbo/schema.lua
@@ -0,0 +1,32 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+
+
+local schema = {
+ type = "object",
+}
+
+local _M = {}
+
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+
+return _M
diff --git a/apisix/upstream.lua b/apisix/upstream.lua
index 416bbea7cfa4..d8e3f3a98750 100644
--- a/apisix/upstream.lua
+++ b/apisix/upstream.lua
@@ -83,7 +83,7 @@ _M.set = set_directly
local function release_checker(healthcheck_parent)
local checker = healthcheck_parent.checker
core.log.info("try to release checker: ", tostring(checker))
- checker:clear()
+ checker:delayed_clear(3)
checker:stop()
end
@@ -175,7 +175,7 @@ local function set_upstream_scheme(ctx, upstream)
ctx.var["upstream_scheme"] = ctx.upstream_scheme
end
-
+_M.set_scheme = set_upstream_scheme
local scheme_to_port = {
http = 80,
diff --git a/apisix/wasm.lua b/apisix/wasm.lua
index a27641504a2b..c8b863aeac9d 100644
--- a/apisix/wasm.lua
+++ b/apisix/wasm.lua
@@ -15,6 +15,7 @@
-- limitations under the License.
--
local core = require("apisix.core")
+local type = type
local support_wasm, wasm = pcall(require, "resty.proxy-wasm")
local ngx_var = ngx.var
@@ -23,8 +24,10 @@ local schema = {
type = "object",
properties = {
conf = {
- type = "string",
- minLength = 1,
+ oneOf = {
+ { type = "object", minProperties = 1},
+ { type = "string", minLength = 1},
+ }
},
},
required = {"conf"}
@@ -51,7 +54,13 @@ local function fetch_plugin_ctx(conf, ctx, plugin)
local plugin_ctx = ctxs[key]
local err
if not plugin_ctx then
- plugin_ctx, err = wasm.on_configure(plugin, conf.conf)
+ if type(conf.conf) == "table" then
+ plugin_ctx, err = wasm.on_configure(plugin, core.json.encode(conf.conf))
+ elseif type(conf.conf) == "string" then
+ plugin_ctx, err = wasm.on_configure(plugin, conf.conf)
+ else
+ return nil, "invalid conf type"
+ end
if not plugin_ctx then
return nil, err
end
diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh
index 6b6483a4f065..cf506ef54e55 100755
--- a/ci/centos7-ci.sh
+++ b/ci/centos7-ci.sh
@@ -33,7 +33,7 @@ install_dependencies() {
# install openresty to make apisix's rpm test work
yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
- yum install -y openresty-1.21.4.1 openresty-debug-1.21.4.1 openresty-openssl111-debug-devel pcre pcre-devel
+ yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel
# install luarocks
./utils/linux-install-luarocks.sh
@@ -58,14 +58,9 @@ install_dependencies() {
cd t/grpc_server_example
CGO_ENABLED=0 go build
- ./grpc_server_example \
- -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \
- -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \
- > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)&
-
cd ../../
- # wait for grpc_server_example to fully start
- sleep 3
+
+ start_grpc_server_example
# installing grpcurl
install_grpcurl
@@ -73,9 +68,6 @@ install_dependencies() {
# install nodejs
install_nodejs
- # install rust
- install_rust
-
# grpc-web server && client
cd t/plugin/grpc-web
./setup.sh
diff --git a/ci/common.sh b/ci/common.sh
index 2840b7d8a711..0aa9f9e85bda 100644
--- a/ci/common.sh
+++ b/ci/common.sh
@@ -100,14 +100,6 @@ install_nodejs () {
npm config set registry https://registry.npmjs.org/
}
-install_rust () {
- curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y
- source "$HOME/.cargo/env"
- # 1.69.0 version required to compile lua-resty-ldap
- rustup install 1.69.0
- rustup default 1.69.0
-}
-
set_coredns() {
# test a domain name is configured as upstream
echo "127.0.0.1 test.com" | sudo tee -a /etc/hosts
@@ -148,6 +140,9 @@ set_coredns() {
pushd t/coredns || exit 1
../../build-cache/coredns -dns.port=1053 &
popd || exit 1
+
+ touch build-cache/test_resolve.conf
+ echo "nameserver 127.0.0.1:1053" > build-cache/test_resolve.conf
}
GRPC_SERVER_EXAMPLE_VER=20210819
@@ -156,3 +151,24 @@ linux_get_dependencies () {
apt update
apt install -y cpanminus build-essential libncurses5-dev libreadline-dev libssl-dev perl libpcre3 libpcre3-dev libldap2-dev
}
+
+function start_grpc_server_example() {
+ ./t/grpc_server_example/grpc_server_example \
+ -grpc-address :10051 -grpcs-address :10052 -grpcs-mtls-address :10053 -grpc-http-address :10054 \
+ -crt ./t/certs/apisix.crt -key ./t/certs/apisix.key -ca ./t/certs/mtls_ca.crt \
+ > grpc_server_example.log 2>&1 &
+
+ for (( i = 0; i <= 10; i++ )); do
+ sleep 0.5
+ GRPC_PROC=`ps -ef | grep grpc_server_example | grep -v grep || echo "none"`
+ if [[ $GRPC_PROC == "none" || "$i" -eq 10 ]]; then
+ echo "failed to start grpc_server_example"
+ ss -antp | grep 1005 || echo "no proc listen port 1005x"
+ cat grpc_server_example.log
+
+ exit 1
+ fi
+
+ ss -lntp | grep 10051 | grep grpc_server && break
+ done
+}
diff --git a/ci/linux_apisix_current_luarocks_runner.sh b/ci/linux_apisix_current_luarocks_runner.sh
index a8836f43b691..96aac508f762 100755
--- a/ci/linux_apisix_current_luarocks_runner.sh
+++ b/ci/linux_apisix_current_luarocks_runner.sh
@@ -34,9 +34,6 @@ script() {
sudo rm -rf /usr/local/share/lua/5.1/apisix
- # install rust
- install_rust
-
# install APISIX with local version
luarocks install rockspec/apisix-master-0.rockspec --only-deps > build.log 2>&1 || (cat build.log && exit 1)
luarocks make rockspec/apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1)
diff --git a/ci/linux_apisix_master_luarocks_runner.sh b/ci/linux_apisix_master_luarocks_runner.sh
index 3e99baf34116..afc487ddd160 100755
--- a/ci/linux_apisix_master_luarocks_runner.sh
+++ b/ci/linux_apisix_master_luarocks_runner.sh
@@ -38,9 +38,6 @@ script() {
mkdir tmp && cd tmp
cp -r ../utils ./
- # install rust
- install_rust
-
# install APISIX by luarocks
luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1)
cp ../bin/apisix /usr/local/bin/apisix
diff --git a/ci/linux_openresty_common_runner.sh b/ci/linux_openresty_common_runner.sh
index 743dfac7d980..466fe8b69651 100755
--- a/ci/linux_openresty_common_runner.sh
+++ b/ci/linux_openresty_common_runner.sh
@@ -33,9 +33,6 @@ do_install() {
./ci/linux-install-etcd-client.sh
- # install rust
- install_rust
-
create_lua_deps
# sudo apt-get install tree -y
@@ -75,20 +72,7 @@ script() {
set_coredns
- ./t/grpc_server_example/grpc_server_example \
- -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \
- -crt ./t/certs/apisix.crt -key ./t/certs/apisix.key -ca ./t/certs/mtls_ca.crt \
- &
-
- # ensure grpc server example is already started
- for (( i = 0; i <= 100; i++ )); do
- if [[ "$i" -eq 100 ]]; then
- echo "failed to start grpc_server_example in time"
- exit 1
- fi
- nc -zv 127.0.0.1 50051 && break
- sleep 1
- done
+ start_grpc_server_example
# APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t
FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r $TEST_FILE_SUB_DIR | tee /tmp/test.result
diff --git a/ci/linux_openresty_runner.sh b/ci/linux_openresty_runner.sh
index 877248913368..2e39224efc59 100755
--- a/ci/linux_openresty_runner.sh
+++ b/ci/linux_openresty_runner.sh
@@ -18,5 +18,4 @@
export OPENRESTY_VERSION=source
-#export TEST_CI_USE_GRPC=true
. ./ci/linux_openresty_common_runner.sh
diff --git a/ci/pod/docker-compose.first.yml b/ci/pod/docker-compose.first.yml
index 62ef7a328c16..aee79a8387c7 100644
--- a/ci/pod/docker-compose.first.yml
+++ b/ci/pod/docker-compose.first.yml
@@ -46,6 +46,15 @@ services:
networks:
consul_net:
+ consul_3:
+ image: hashicorp/consul:1.16.2
+ restart: unless-stopped
+ ports:
+ - "8502:8500"
+ command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-client", "0.0.0.0", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks", "-ui", "-hcl", "acl = {\nenabled = true\ndefault_policy = \"deny\"\nenable_token_persistence = true\ntokens = {\nagent = \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"\n}}" ]
+ networks:
+ consul_net:
+
## Nacos cluster
nacos_auth:
hostname: nacos1
diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml
index 4ea069b8c5f2..748b28b868f6 100644
--- a/ci/pod/docker-compose.plugin.yml
+++ b/ci/pod/docker-compose.plugin.yml
@@ -341,6 +341,13 @@ services:
- '8124:8123'
networks:
clickhouse_net:
+ otel-collector:
+ image: otel/opentelemetry-collector-contrib
+ volumes:
+ - ./ci/pod/otelcol-contrib:/etc/otelcol-contrib:rw
+ ports:
+ - '4318:4318'
+
networks:
apisix_net:
diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod
index b9e81701913d..3e2f6155748e 100644
--- a/ci/pod/openfunction/function-example/test-body/go.mod
+++ b/ci/pod/openfunction/function-example/test-body/go.mod
@@ -25,7 +25,7 @@ require (
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.0 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect
)
diff --git a/ci/pod/openfunction/function-example/test-body/go.sum b/ci/pod/openfunction/function-example/test-body/go.sum
index 1fb1db392365..35f77fd70a02 100644
--- a/ci/pod/openfunction/function-example/test-body/go.sum
+++ b/ci/pod/openfunction/function-example/test-body/go.sum
@@ -1695,8 +1695,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/ci/pod/otelcol-contrib/config.yaml b/ci/pod/otelcol-contrib/config.yaml
new file mode 100644
index 000000000000..438f04c8b9fe
--- /dev/null
+++ b/ci/pod/otelcol-contrib/config.yaml
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ http:
+exporters:
+ file:
+ path: /etc/otelcol-contrib/data-otlp.json
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ exporters: [file]
diff --git a/ci/redhat-ci.sh b/ci/redhat-ci.sh
index 4b307e64811a..ff867fb71622 100755
--- a/ci/redhat-ci.sh
+++ b/ci/redhat-ci.sh
@@ -32,7 +32,7 @@ install_dependencies() {
# install openresty to make apisix's rpm test work
yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
- yum install -y openresty-1.21.4.1 openresty-debug-1.21.4.1 openresty-openssl111-debug-devel pcre pcre-devel xz
+ yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel xz
# install luarocks
./utils/linux-install-luarocks.sh
@@ -58,14 +58,10 @@ install_dependencies() {
pushd t/grpc_server_example
CGO_ENABLED=0 go build
- ./grpc_server_example \
- -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \
- -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \
- > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)&
-
popd
- # wait for grpc_server_example to fully start
- sleep 3
+
+ yum install -y iproute procps
+ start_grpc_server_example
# installing grpcurl
install_grpcurl
@@ -73,9 +69,6 @@ install_dependencies() {
# install nodejs
install_nodejs
- # install rust
- install_rust
-
# grpc-web server && client
pushd t/plugin/grpc-web
./setup.sh
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 359e2c2d806e..38d67823e5a9 100755
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -76,7 +76,6 @@ apisix:
# http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream`
proxy_mode: http
# stream_proxy: # TCP/UDP L4 proxy
- # only: true # Enable L4 proxy only without L7 proxy.
# tcp:
# - addr: 9100 # Set the TCP proxy listening ports.
# tls: true
@@ -109,8 +108,9 @@ apisix:
# Disabled by default because it renders Perfect Forward Secrecy (FPS)
# useless. See https://github.com/mozilla/server-side-tls/issues/135.
- key_encrypt_salt: # Set the encryption key for AES-128-CBC. It should be a
- - edd1c9f0985e76a2 # hexadecimal string of length 16.
+ key_encrypt_salt: # This field is only used to encrypt the private key of SSL.
+ - edd1c9f0985e76a2 # Set the encryption key for AES-128-CBC. It should be a
+ # hexadecimal string of length 16.
# If not set, APISIX saves the original data into etcd.
# CAUTION: If you would like to update the key, add the new key as the
# first item in the array and keep the older keys below the newly added
@@ -212,6 +212,7 @@ nginx_config: # Config for render the template to generate n
http:
enable_access_log: true # Enable HTTP proxy access logging.
access_log: logs/access.log # Location of the access log.
+ access_log_buffer: 16384 # Buffer size of the access log.
access_log_format: "$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\""
# Customize log format: http://nginx.org/en/docs/varindex.html
access_log_format_escape: default # Escape default or json characters in variables.
@@ -278,6 +279,7 @@ nginx_config: # Config for render the template to generate n
# dns:
# servers:
# - "127.0.0.1:8600" # Replace with the address of your DNS server.
+# resolv_conf: /etc/resolv.conf # Replace with the path to the local DNS resolv config. Configure either "servers" or "resolv_conf".
# order: # Resolve DNS records this order.
# - last # Try the latest successful type for a hostname.
# - SRV
@@ -299,7 +301,9 @@ nginx_config: # Config for render the template to generate n
# - "http://${username}:${password}@${host1}:${port1}"
# prefix: "/nacos/v1/"
# fetch_interval: 30 # Default 30s
-# weight: 100 # Default 100
+# `weight` is the `default_weight` that will be attached to each discovered node that
+# doesn't have a weight explicitly provided in nacos results
+# weight: 100 # Default 100.
# timeout:
# connect: 2000 # Default 2000ms
# send: 2000 # Default 2000ms
@@ -566,6 +570,7 @@ plugin_attr: # Plugin attributes
inactive_timeout: 1 # Set the timeout for spans to wait in the export queue before being sent,
# if the queue is not full.
max_export_batch_size: 16 # Set the maximum number of spans to include in each batch sent to the
+ set_ngx_var: false # export opentelemetry variables to nginx variables
# OpenTelemetry collector.
prometheus: # Plugin: prometheus
export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint.
@@ -610,35 +615,19 @@ plugin_attr: # Plugin attributes
hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" # Set the path to the Lua file that defines
# hooks. Only administrators should have
# write access to this file for security.
+ zipkin: # Plugin: zipkin
+ set_ngx_var: false # export zipkin variables to nginx variables
deployment: # Deployment configurations
role: traditional # Set deployment mode: traditional, control_plane, or data_plane.
role_traditional:
config_provider: etcd # Set the configuration center.
- # role_data_plane: # Set data plane details if role is data_plane.
- # config_provider: control_plane # Set the configuration center: control_plane, or yaml.
- # control_plane: # Set control plane details if config_provider is control_plane.
- # host: # Set the address of control plane.
- # - https://${control_plane_IP}:9280
- # prefix: /apisix # Set etcd prefix.
- # timeout: 30 # Set timeout in seconds.
- # certs:
- # cert: /path/to/client.crt # Set path to the client certificate.
- # cert_key: /path/to/client.key # Set path to the client key.
- # trusted_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate.
-
- # role_control_plane: # Set control plane details if role is control_plane.
- # config_provider: etcd # Set the configuration center.
- # conf_server:
- # listen: 0.0.0.0:9280 # Set the address of the conf server.
- # cert: /path/to/server.crt # Set path to the server certificate.
- # cert_key: /path/to/server.key # Set path to the server key.
- # client_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate.
- # certs:
- # cert: /path/to/client.crt # Set path to the client certificate.
- # cert_key: /path/to/client.key # Set path to the client key.
- # trusted_ca_cert: /path/to/ca.crt # Set path to the trusted CA certificate.
+ #role_data_plane: # Set data plane details if role is data_plane.
+ # config_provider: etcd # Set the configuration center: etcd, xds, or yaml.
+
+ #role_control_plane: # Set control plane details if role is control_plane.
+ # config_provider: etcd # Set the configuration center.
admin: # Admin API
admin_key_required: true # Enable Admin API authentication by default for security.
@@ -673,9 +662,7 @@ deployment: # Deployment configurations
host: # Set etcd address(es) in the same etcd cluster.
- "http://127.0.0.1:2379" # If TLS is enabled for etcd, use https://127.0.0.1:2379.
prefix: /apisix # Set etcd prefix.
- use_grpc: false # Use gRPC (experimental) for etcd configuration sync.
timeout: 30 # Set timeout in seconds.
- # Set a higher timeout (e.g. an hour) if `use_grpc` is true.
# resync_delay: 5 # Set resync time in seconds after a sync failure.
# The actual resync time would be resync_delay plus 50% random jitter.
# health_check_timeout: 10 # Set timeout in seconds for etcd health check.
diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md
index b462e01582b8..79e3f3d48a51 100644
--- a/docs/en/latest/FAQ.md
+++ b/docs/en/latest/FAQ.md
@@ -105,7 +105,7 @@ Mainland China users can use `luarocks.cn` as the LuaRocks server. You can use t
make deps ENV_LUAROCKS_SERVER=https://luarocks.cn
```
-If this does not solve your problem, you can try getting a detailed log by using the `--verbose` flag to diagnose the problem.
+If this does not solve your problem, you can try getting a detailed log by using the `--verbose` or `-v` flag to diagnose the problem.
## How do I build the APISIX-Base environment?
diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md
index e34468eacc4b..77c2141336c8 100644
--- a/docs/en/latest/admin-api.md
+++ b/docs/en/latest/admin-api.md
@@ -325,8 +325,6 @@ ID's as a text string must be of a length between 1 and 64 characters and they s
| timeout | False | Auxiliary | Sets the timeout (in seconds) for connecting to, and sending and receiving messages between the Upstream and the Route. This will overwrite the `timeout` value configured in your [Upstream](#upstream). | {"connect": 3, "send": 3, "read": 3} |
| enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | |
| status | False | Auxiliary | Enables the current Route. Set to `1` (enabled) by default. | `1` to enable, `0` to disable |
-| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
Example configuration:
@@ -630,8 +628,6 @@ Service resource request address: /apisix/admin/services/{id}
| labels | False | Match Rules | Attributes of the Service specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
| enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | |
| hosts | False | Match Rules | Matches with any one of the multiple `host`s specified in the form of a non-empty list. | ["foo.com", "*.bar.com"] |
-| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
Example configuration:
@@ -815,8 +811,6 @@ Consumer resource request address: /apisix/admin/consumers/{username}
| plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | |
| desc | False | Auxiliary | Description of usage scenarios. | customer xxxx |
| labels | False | Match Rules | Attributes of the Consumer specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
-| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
Example Configuration:
@@ -893,32 +887,30 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax)
In addition to the equalization algorithm selections, Upstream also supports passive health check and retry for the upstream. See the table below for more details:
-| Name | Optional | Description | Example |
-| --------------------------- | ------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------------------------------------------------------------------------------------------------------------------------------------------ |
-| type | optional | Load balancing algorithm to be used, and the default value is `roundrobin`. | |
-| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` |
-| service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). | `a-bootiful-client` |
-| discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` |
-| hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | |
-| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. If the specified `hash_on` and `key` fail to fetch the values, it will default to `remote_addr`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` |
-| checks | optional | Configures the parameters for the [health check](./tutorials/health-check.md). | |
-| retries | optional | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | |
-| retry_timeout | optional | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | |
-| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` |
-| name | optional | Identifier for the Upstream. | |
-| desc | optional | Description of usage scenarios. | |
-| pass_host | optional | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | |
-| upstream_host | optional | Specifies the host of the Upstream request. This is only valid if the `pass_host` is set to `rewrite`. | |
-| scheme | optional | The scheme used when communicating with the Upstream. For an L7 proxy, this value can be one of `http`, `https`, `grpc`, `grpcs`. For an L4 proxy, this value could be one of `tcp`, `udp`, `tls`. Defaults to `http`. | |
-| labels | optional | Attributes of the Upstream specified as `key-value` pairs. | {"version":"v2","build":"16","env":"production"} |
-| create_time | optional | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | optional | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
-| tls.client_cert | optional, can't be used with `tls.client_cert_id` | Sets the client certificate while connecting to a TLS Upstream. | |
-| tls.client_key | optional, can't be used with `tls.client_cert_id` | Sets the client private key while connecting to a TLS Upstream. | |
-| tls.client_cert_id | optional, can't be used with `tls.client_cert` and `tls.client_key` | Set the referenced [SSL](#ssl) id. | |
-| keepalive_pool.size | optional | Sets `keepalive` directive dynamically. | |
-| keepalive_pool.idle_timeout | optional | Sets `keepalive_timeout` directive dynamically. | |
-| keepalive_pool.requests | optional | Sets `keepalive_requests` directive dynamically. | |
+| Parameter | Required | Type | Description | Example |
+|-----------------------------|------------------------------------------------------------------|-------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
+| type | False | Enumeration | Load balancing algorithm to be used, and the default value is `roundrobin`. | |
+| nodes | True, can't be used with `service_name` | Node | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` |
+| service_name | True, can't be used with `nodes` | String | Service name used for [service discovery](discovery.md). | `a-bootiful-client` |
+| discovery_type | True, if `service_name` is used | String | The type of service [discovery](discovery.md). | `eureka` |
+| hash_on | False | Auxiliary | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | |
+| key | False | Match Rules | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` |
+| checks | False | Health Checker | Configures the parameters for the [health check](./tutorials/health-check.md). | |
+| retries | False | Integer | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | |
+| retry_timeout | False | Integer | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | |
+| timeout | False | Timeout | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` |
+| name | False | Auxiliary | Identifier for the Upstream. | |
+| desc | False | Auxiliary | Description of usage scenarios. | |
+| pass_host | False | Enumeration | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | |
+| upstream_host | False | Auxiliary | Specifies the host of the Upstream request. This is only valid if the `pass_host` is set to `rewrite`. | |
+| scheme | False | Auxiliary | The scheme used when communicating with the Upstream. For an L7 proxy, this value can be one of `http`, `https`, `grpc`, `grpcs`. For an L4 proxy, this value could be one of `tcp`, `udp`, `tls`. Defaults to `http`. | |
+| labels | False | Match Rules | Attributes of the Upstream specified as `key-value` pairs. | {"version":"v2","build":"16","env":"production"} |
+| tls.client_cert | False, can't be used with `tls.client_cert_id` | HTTPS certificate | Sets the client certificate while connecting to a TLS Upstream. | |
+| tls.client_key | False, can't be used with `tls.client_cert_id` | HTTPS certificate private key | Sets the client private key while connecting to a TLS Upstream. | |
+| tls.client_cert_id | False, can't be used with `tls.client_cert` and `tls.client_key` | SSL | Set the referenced [SSL](#ssl) id. | |
+| keepalive_pool.size | False | Auxiliary | Sets `keepalive` directive dynamically. | |
+| keepalive_pool.idle_timeout | False | Auxiliary | Sets `keepalive_timeout` directive dynamically. | |
+| keepalive_pool.requests | False | Auxiliary | Sets `keepalive_requests` directive dynamically. | |
An Upstream can be one of the following `types`:
@@ -935,7 +927,6 @@ The following should be considered when setting the `hash_on` value:
- When set to `cookie`, a `key` is required. This key is equal to "cookie\_`key`". The cookie name is case-sensitive.
- When set to `consumer`, the `key` is optional and the key is set to the `consumer_name` captured from the authentication Plugin.
- When set to `vars_combinations`, the `key` is required. The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`.
-- When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`.
The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment):
@@ -1211,8 +1202,6 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax)
| client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match URI, if matched, this request bypasses the client certificate checking, i.e. skip the MTLS. | ["/hello[0-9]+", "/foobar"] |
| snis | True, only if `type` is `server` | Match Rules | A non-empty array of HTTPS SNI | |
| labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
-| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
| type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. |
| status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable |
| ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.2", "TLSv2.3"]` |
@@ -1254,8 +1243,6 @@ Global Rule resource request address: /apisix/admin/global_rules/{id}
| Parameter | Required | Description | Example |
| ----------- | -------- | ------------------------------------------------------------------------------------------------------------------ | ---------- |
| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | |
-| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
## Consumer group
@@ -1283,8 +1270,6 @@ Consumer group resource request address: /apisix/admin/consumer_groups/{id}
| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | |
| desc | False | Description of usage scenarios. | customer xxxx |
| labels | False | Attributes of the Consumer group specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
-| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
## Plugin config
@@ -1312,8 +1297,6 @@ Plugin Config resource request address: /apisix/admin/plugin_configs/{id}
| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | |
| desc | False | Description of usage scenarios. | customer xxxx |
| labels | False | Attributes of the Plugin config specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
-| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
## Plugin Metadata
@@ -1362,6 +1345,18 @@ Plugin resource request address: /apisix/admin/plugins/{plugin_name}
| ------ | ----------------------------------- | ------------ | ---------------------------------------------- |
| GET | /apisix/admin/plugins/list | NULL | Fetches a list of all Plugins. |
| GET | /apisix/admin/plugins/{plugin_name} | NULL | Fetches the specified Plugin by `plugin_name`. |
+| GET | /apisix/admin/plugins?all=true | NULL | Get all properties of all plugins. |
+| GET | /apisix/admin/plugins?all=true&subsystem=stream | NULL | Gets properties of all Stream plugins. |
+| GET | /apisix/admin/plugins?all=true&subsystem=http | NULL | Gets properties of all HTTP plugins. |
+| PUT | /apisix/admin/plugins/reload | NULL | Reloads the plugin according to the changes made in code. |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=stream | NULL | Gets properties of a specified plugin if it is supported in Stream/L4 subsystem. |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=http | NULL | Gets properties of a specified plugin if it is supported in HTTP/L7 subsystem. |
+
+:::caution
+
+The interface for getting properties of all plugins via `/apisix/admin/plugins?all=true` will be deprecated soon.
+
+:::
### Request Body Parameters
@@ -1424,6 +1419,7 @@ Stream Route resource request address: /apisix/admin/stream_routes/{id}
| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- |
| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | |
| upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | |
+| service_id | False | String | Id of the [Service](terminology/service.md) service. | |
| remote_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with client IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" |
| server_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" |
| server_port | False | Integer | Filters Upstream forwards by matching with APISIX Server port. | 9090 |
@@ -1517,8 +1513,55 @@ Proto resource request address: /apisix/admin/protos/{id}
### Request Body Parameters
-| Parameter | Required | Type | Description | Example |
-| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- |
-| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) |
-| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
-| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
+| Parameter | Required | Type | Description | Example |
+|-----------|----------|---------|--------------------------------------| ----------------------------- |
+| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) |
+
+## Schema validation
+
+Check the validity of a configuration against its entity schema. This allows you to test your input before submitting a request to the entity endpoints of the Admin API.
+
+Note that this only performs the schema validation checks, checking that the input configuration is well-formed. Requests to the entity endpoint using the given configuration may still fail due to other reasons, such as invalid foreign key relationships or uniqueness check failures against the contents of the data store.
+
+### Schema validation
+
+Schema validation request address: /apisix/admin/schema/validate/{resource}
+
+### Request Methods
+
+| Method | Request URI | Request Body | Description |
+| ------ | -------------------------------- | ------------ | ----------------------------------------------- |
+| POST | /apisix/admin/schema/validate/{resource} | {..resource conf..} | Validate the resource configuration against corresponding schema. |
+
+### Response Parameters
+
+* 200: validate ok.
+* 400: validate failed, with error as response body in JSON format.
+
+Example:
+
+```bash
+curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \
+ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X POST -i -d '{
+ "uri": 1980,
+ "upstream": {
+ "scheme": "https",
+ "type": "roundrobin",
+ "nodes": {
+ "nghttp2.org": 1
+ }
+ }
+}'
+HTTP/1.1 400 Bad Request
+Date: Mon, 21 Aug 2023 07:37:13 GMT
+Content-Type: application/json
+Transfer-Encoding: chunked
+Connection: keep-alive
+Server: APISIX/3.4.0
+Access-Control-Allow-Origin: *
+Access-Control-Allow-Credentials: true
+Access-Control-Expose-Headers: *
+Access-Control-Max-Age: 3600
+
+{"error_msg":"property \"uri\" validation failed: wrong type: expected string, got number"}
+```
diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md
index 01d4ac331240..e4804eac4ed4 100644
--- a/docs/en/latest/building-apisix.md
+++ b/docs/en/latest/building-apisix.md
@@ -37,7 +37,9 @@ If you are looking to quickly get started with APISIX, check out the other [inst
:::note
-If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools).
+To build an APISIX docker image from source code, see [build image from source code](https://apisix.apache.org/docs/docker/build/#build-an-image-from-customizedpatched-source-code).
+
+To build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools) instead.
:::
@@ -52,7 +54,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend
Save the APISIX version to an environment variable to be used next:
```shell
-APISIX_VERSION='3.4.0'
+APISIX_VERSION='3.6.0'
```
Clone the APISIX source code of this version into a new directory `apisix-APISIX_VERSION`:
@@ -63,7 +65,7 @@ git clone --depth 1 --branch ${APISIX_VERSION} https://github.com/apache/apisix.
Alternatively, you can also download the source package from the [Downloads](https://apisix.apache.org/downloads/) page. Note that source packages here are not distributed with test cases.
-Next, navigate to the directory, install dependencies, and build APISIX. You should have [Rust](https://www.rust-lang.org) installed in your environment first before running `make deps`:
+Next, navigate to the directory, install dependencies, and build APISIX.
```shell
cd apisix-${APISIX_VERSION}
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index ee172c33290c..e1c8391f275b 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -1,5 +1,5 @@
{
- "version": "3.4.0",
+ "version": "3.6.0",
"sidebar": [
{
"type": "category",
diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md
index 9f75a1d99c06..bc195121c946 100644
--- a/docs/en/latest/deployment-modes.md
+++ b/docs/en/latest/deployment-modes.md
@@ -39,10 +39,6 @@ Each of these deployment modes are explained in detail below.
In the traditional deployment mode, one instance of APISIX will be both the `data_plane` and the `control_plane`.
-![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png)
-
-There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. Both the data and the control planes connect to this conf server via HTTP.
-
An example configuration of the traditional deployment mode is shown below:
```yaml title="conf/config.yaml"
@@ -73,16 +69,9 @@ The instance of APISIX deployed as the traditional role will:
In the decoupled deployment mode the `data_plane` and `control_plane` instances of APISIX are deployed separately, i.e., one instance of APISIX is configured to be a *data plane* and the other to be a *control plane*.
-![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png)
-
The instance of APISIX deployed as the data plane will:
-1. Fetch the configuration from the *control plane*. The default port is `9280`.
-2. Performs a health check on all configured control plane addresses before starting the service.
- 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown.
- 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service.
- 3. If all control planes are normal, APISIX service is started normally.
-3. Once the service is started, it will handle the user requests.
+Once the service is started, it will handle the user requests.
The example below shows the configuration of an APISIX instance as *data plane* in the decoupled mode:
@@ -90,23 +79,13 @@ The example below shows the configuration of an APISIX instance as *data plane*
deployment:
role: data_plane
role_data_plane:
- config_provider: control_plane
- control_plane:
- host:
- - https://${Control_Plane_IP}:9280
- prefix: /apisix
- timeout: 30
- certs:
- cert: /path/to/client.crt
- cert_key: /path/to/client.key
- trusted_ca_cert: /path/to/ca.crt
+ config_provider: etcd
#END
```
The instance of APISIX deployed as the control plane will:
1. Listen on port `9180` and handle Admin API requests.
-2. Provide the conf server which will listen on port `9280`. Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS.
The example below shows the configuration of an APISIX instance as *control plane* in the decoupled mode:
@@ -115,48 +94,14 @@ deployment:
role: control_plane
role_control_plane:
config_provider: etcd
- conf_server:
- listen: 0.0.0.0:9280
- cert: /path/to/server.crt
- cert_key: /path/to/server.key
- client_ca_cert: /path/to/ca.crt
etcd:
host:
- https://${etcd_IP}:${etcd_Port}
prefix: /apisix
timeout: 30
- certs:
- cert: /path/to/client.crt
- cert_key: /path/to/client.key
- trusted_ca_cert: /path/to/ca.crt
#END
```
-:::tip
-
-As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below:
-
-```yaml title="conf/config.yaml"
-deployment:
- role: control_plane
- role_control_plane:
- config_provider: etcd
- conf_server:
- listen: 0.0.0.0:9280
- cert: /path/to/server.crt
- cert_key: /path/to/server.key
- etcd:
- host:
- - https://${etcd_IP}:${etcd_Port}
- prefix: /apisix
- timeout: 30
- certs:
- trusted_ca_cert: /path/to/ca.crt
-#END
-```
-
-:::
-
## Standalone
Turning on the APISIX node in Standalone mode will no longer use the default etcd as the configuration center.
diff --git a/docs/en/latest/discovery/consul.md b/docs/en/latest/discovery/consul.md
index b4eab61e982d..85e6b9ba2c8b 100644
--- a/docs/en/latest/discovery/consul.md
+++ b/docs/en/latest/discovery/consul.md
@@ -37,6 +37,7 @@ discovery:
servers: # make sure service name is unique in these consul servers
- "http://127.0.0.1:8500" # `http://127.0.0.1:8500` and `http://127.0.0.1:8600` are different clusters
- "http://127.0.0.1:8600" # `consul` service is default skip service
+ token: "..." # if your consul cluster has enabled acl access control, you need to specify the token
skip_services: # if you need to skip special services
- "service_a"
timeout:
diff --git a/docs/en/latest/discovery/consul_kv.md b/docs/en/latest/discovery/consul_kv.md
index bfb434417033..e0a2602c074b 100644
--- a/docs/en/latest/discovery/consul_kv.md
+++ b/docs/en/latest/discovery/consul_kv.md
@@ -40,6 +40,7 @@ discovery:
servers:
- "http://127.0.0.1:8500"
- "http://127.0.0.1:8600"
+ token: "..." # if your consul cluster has enabled acl access control, you need to specify the token
prefix: "upstreams"
skip_keys: # if you need to skip special keys
- "upstreams/unused_api/"
diff --git a/docs/en/latest/discovery/nacos.md b/docs/en/latest/discovery/nacos.md
index 35fee254b0dd..9a7084577d30 100644
--- a/docs/en/latest/discovery/nacos.md
+++ b/docs/en/latest/discovery/nacos.md
@@ -38,6 +38,8 @@ discovery:
- "http://${username}:${password}@${host1}:${port1}"
prefix: "/nacos/v1/"
fetch_interval: 30 # default 30 sec
+ # `weight` is the `default_weight` that will be attached to each discovered node that
+ # doesn't have a weight explicitly provided in nacos results
weight: 100 # default 100
timeout:
connect: 2000 # default 2000 ms
diff --git a/docs/en/latest/external-plugin.md b/docs/en/latest/external-plugin.md
index 8094f1062ba8..7c81e9d1e40f 100644
--- a/docs/en/latest/external-plugin.md
+++ b/docs/en/latest/external-plugin.md
@@ -23,9 +23,9 @@ title: External Plugin
## What are external plugin and plugin runner
-APISIX supports writing plugins in Lua. This type of plugins will be executed
-inside APISIX. Sometimes you want to develop plugin in other languages, so APISIX
-provides sidecars that loading your plugins and run them when the requests hit
+APISIX supports writing plugins in Lua. This type of plugin will be executed
+inside APISIX. Sometimes you want to develop plugins in other languages, so APISIX
+provides sidecars that load your plugins and run them when the requests hit
APISIX. These sidecars are called plugin runners and your plugins are called
external plugins.
@@ -49,7 +49,7 @@ plugins. Like other plugins, they can be enabled and reconfigured on the fly.
## How is it implemented
-If you are instested in the implementation of Plugin Runner, please refer to [The Implementation of Plugin Runner](./internal/plugin-runner.md).
+If you are interested in the implementation of Plugin Runner, please refer to [The Implementation of Plugin Runner](./internal/plugin-runner.md).
## Supported plugin runners
@@ -60,7 +60,7 @@ If you are instested in the implementation of Plugin Runner, please refer to [Th
## Configuration for plugin runner in APISIX
-To run plugin runner in the prod, add the section below to `config.yaml`:
+To run the plugin runner in the prod, add the section below to `config.yaml`:
```yaml
ext-plugin:
@@ -99,7 +99,7 @@ path will be generated dynamically.
### When managing by APISIX, the runner can't access my environment variable
-Since `v2.7`, APISIX can pass environment to the runner.
+Since `v2.7`, APISIX can pass environment variables to the runner.
However, Nginx will hide all environment variables by default. So you need to
declare your variable first in the `conf/config.yaml`:
@@ -115,7 +115,7 @@ nginx_config:
Since `v2.7`, APISIX will stop the runner with SIGTERM when it is running on
OpenResty 1.19+.
-However, APISIX needs to wait the runner to quit so that we can ensure the resource
+However, APISIX needs to wait for the runner to quit so that we can ensure the resource
for the process group is freed.
Therefore, we send SIGTERM first. And then after 1 second, if the runner is still
diff --git a/docs/en/latest/internal/testing-framework.md b/docs/en/latest/internal/testing-framework.md
index db84a23e0663..7fcdf01e4d37 100644
--- a/docs/en/latest/internal/testing-framework.md
+++ b/docs/en/latest/internal/testing-framework.md
@@ -285,7 +285,7 @@ hash_on: header
chash_key: "custom-one"
```
-The default log level is `info`, but you can get the debug level log with `-- log_level: debug`.
+The default log level is `info`, but you can get the debug level log with `--- log_level: debug`.
## Upstream
diff --git a/docs/en/latest/plugins/authz-keycloak.md b/docs/en/latest/plugins/authz-keycloak.md
index d656e7095ea3..21ac21b80edd 100644
--- a/docs/en/latest/plugins/authz-keycloak.md
+++ b/docs/en/latest/plugins/authz-keycloak.md
@@ -48,7 +48,7 @@ Refer to [Authorization Services Guide](https://www.keycloak.org/docs/latest/aut
| token_endpoint | string | False | | https://host.domain/auth/realms/foo/protocol/openid-connect/token | An OAuth2-compliant token endpoint that supports the `urn:ietf:params:oauth:grant-type:uma-ticket` grant type. If provided, overrides the value from discovery. |
| resource_registration_endpoint | string | False | | https://host.domain/auth/realms/foo/authz/protection/resource_set | A UMA-compliant resource registration endpoint. If provided, overrides the value from discovery. |
| client_id | string | True | | | The identifier of the resource server to which the client is seeking access. |
-| client_secret | string | False | | | The client secret, if required. |
+| client_secret | string | False | | | The client secret, if required. You can use APISIX secret to store and reference this value. APISIX currently supports storing secrets in two ways. [Environment Variables and HashiCorp Vault](../terminology/secret.md) |
| grant_type | string | False | "urn:ietf:params:oauth:grant-type:uma-ticket" | ["urn:ietf:params:oauth:grant-type:uma-ticket"] | |
| policy_enforcement_mode | string | False | "ENFORCING" | ["ENFORCING", "PERMISSIVE"] | |
| permissions | array[string] | False | | | An array of strings, each representing a set of one or more resources and scopes the client is seeking access. |
diff --git a/docs/en/latest/plugins/cors.md b/docs/en/latest/plugins/cors.md
index 7d46c7a5a675..dad8279656aa 100644
--- a/docs/en/latest/plugins/cors.md
+++ b/docs/en/latest/plugins/cors.md
@@ -40,7 +40,7 @@ The `cors` Plugins lets you enable [CORS](https://developer.mozilla.org/en-US/do
| expose_headers | string | False | "*" | Headers in the response allowed when accessing a cross-origin resource. Use `,` to add multiple headers. If `allow_credential` is set to `false`, you can enable CORS for all response headers by using `*`. If `allow_credential` is set to `true`, you can forcefully allow CORS on all response headers by using `**` but it will pose some security issues. |
| max_age | integer | False | 5 | Maximum time in seconds the result is cached. If the time is within this limit, the browser will check the cached result. Set to `-1` to disable caching. Note that the maximum value is browser dependent. See [Access-Control-Max-Age](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#Directives) for more details. |
| allow_credential | boolean | False | false | When set to `true`, allows requests to include credentials like cookies. According to CORS specification, if you set this to `true`, you cannot use '*' to allow all for the other attributes. |
-| allow_origins_by_regex | array | False | nil | Regex to match with origin for enabling CORS. For example, `[".*\.test.com"]` can match all subdomain of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `allow_origins` is. |
+| allow_origins_by_regex | array | False | nil | Regex to match origins that allow CORS. For example, `[".*\.test.com$"]` can match all subdomains of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `allow_origins` is. |
| allow_origins_by_metadata | array | False | nil | Origins to enable CORS referenced from `allow_origins` set in the Plugin metadata. For example, if `"allow_origins": {"EXAMPLE": "https://example.com"}` is set in the Plugin metadata, then `["EXAMPLE"]` can be used to allow CORS on the origin `https://example.com`. |
:::info IMPORTANT
diff --git a/docs/en/latest/plugins/degraphql.md b/docs/en/latest/plugins/degraphql.md
index b0eaaf83bf05..7407a435c531 100644
--- a/docs/en/latest/plugins/degraphql.md
+++ b/docs/en/latest/plugins/degraphql.md
@@ -97,7 +97,7 @@ Now we can use RESTful API to query the same data that is proxy by APISIX.
First, we need to create a route in APISIX, and enable the degreaph plugin on the route, we need to define the GraphQL query in the plugin's config.
```bash
-curl --location --request PUT 'http://localhost:9080/apisix/admin/routes/1' \
+curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \
--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
--header 'Content-Type: application/json' \
--data-raw '{
@@ -210,7 +210,7 @@ we can execute it on `http://localhost:8080/playground`, and get the data as bel
We convert the GraphQL query to JSON string like `"query($name: String!, $githubAccount: String!) {\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}"`, so we create a route like this:
```bash
-curl --location --request PUT 'http://localhost:9080/apisix/admin/routes/1' \
+curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \
--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
--header 'Content-Type: application/json' \
--data-raw '{
diff --git a/docs/en/latest/plugins/grpc-transcode.md b/docs/en/latest/plugins/grpc-transcode.md
index 56680946dff5..9d0fdb46f77c 100644
--- a/docs/en/latest/plugins/grpc-transcode.md
+++ b/docs/en/latest/plugins/grpc-transcode.md
@@ -238,7 +238,7 @@ If the gRPC service returns an error, there may be a `grpc-status-details-bin` f
Upload the proto file:
```shell
-curl http://127.0.0.1:9080/apisix/admin/protos/1 \
+curl http://127.0.0.1:9180/apisix/admin/protos/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"content" : "syntax = \"proto3\";
@@ -260,7 +260,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \
Enable the `grpc-transcode` plugin,and set the option `show_status_in_body` to `true`:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
@@ -308,7 +308,7 @@ Server: APISIX web server
Note that there is an undecoded field in the return body. If you need to decode the field, you need to add the `message type` of the field in the uploaded proto file.
```shell
-curl http://127.0.0.1:9080/apisix/admin/protos/1 \
+curl http://127.0.0.1:9180/apisix/admin/protos/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"content" : "syntax = \"proto3\";
@@ -335,7 +335,7 @@ curl http://127.0.0.1:9080/apisix/admin/protos/1 \
Also configure the option `status_detail_type` to `helloworld.ErrorDetail`.
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
diff --git a/docs/en/latest/plugins/kafka-logger.md b/docs/en/latest/plugins/kafka-logger.md
index 5d62fc758f8f..229256baeb6e 100644
--- a/docs/en/latest/plugins/kafka-logger.md
+++ b/docs/en/latest/plugins/kafka-logger.md
@@ -47,7 +47,7 @@ It might take some time to receive the log data. It will be automatically sent a
| brokers.sasl_config.password | string | True | | | The password of sasl_config. If sasl_config exists, it's required. |
| kafka_topic | string | True | | | Target topic to push the logs for organisation. |
| producer_type | string | False | async | ["async", "sync"] | Message sending mode of the producer. |
-| required_acks | integer | False | 1 | [0, 1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. |
+| required_acks | integer | False | 1 | [1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. `required_acks` cannot be 0. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. |
| key | string | False | | | Key used for allocating partitions for messages. |
| timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. |
| name | string | False | "kafka logger" | | Unique identifier for the batch processor. |
diff --git a/docs/en/latest/plugins/limit-conn.md b/docs/en/latest/plugins/limit-conn.md
index 0f463873cab7..af0e59bc0e9f 100644
--- a/docs/en/latest/plugins/limit-conn.md
+++ b/docs/en/latest/plugins/limit-conn.md
@@ -28,7 +28,7 @@ description: This document contains information about the Apache APISIX limit-co
## Description
-The `limit-con` Plugin limits the number of concurrent requests to your services.
+The `limit-conn` Plugin limits the number of concurrent requests to your services.
## Attributes
diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md
index 24164a756cd5..46a775a00226 100644
--- a/docs/en/latest/plugins/limit-count.md
+++ b/docs/en/latest/plugins/limit-count.md
@@ -43,7 +43,7 @@ The `limit-count` Plugin limits the number of requests to your service by a give
| policy | string | False | "local" | ["local", "redis", "redis-cluster"] | Rate-limiting policies to use for retrieving and increment the limit count. When set to `local` the counters will be locally stored in memory on the node. When set to `redis` counters are stored on a Redis server and will be shared across the nodes. It is done usually for global speed limiting, and setting to `redis-cluster` uses a Redis cluster instead of a single instance. |
| allow_degradation | boolean | False | false | | When set to `true` enables Plugin degradation when the Plugin is temporarily unavailable (for example, a Redis timeout) and allows requests to continue. |
| show_limit_quota_header | boolean | False | true | | When set to `true`, adds `X-RateLimit-Limit` (total number of requests) and `X-RateLimit-Remaining` (remaining number of requests) to the response header. |
-| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the counter. |
+| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the same counter. Do not reuse a value that was previously configured in this attribute, as the plugin will not allow it. |
| redis_host | string | required when `policy` is `redis` | | | Address of the Redis server. Used when the `policy` attribute is set to `redis`. |
| redis_port | integer | False | 6379 | [1,...] | Port of the Redis server. Used when the `policy` attribute is set to `redis`. |
| redis_username | string | False | | | Username for Redis authentication if Redis ACL is used (for Redis version >= 6.0). If you use the legacy authentication method `requirepass` to configure Redis password, configure only the `redis_password`. Used when the `policy` is set to `redis`. |
diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md
index 493370240362..0130d192113d 100644
--- a/docs/en/latest/plugins/openid-connect.md
+++ b/docs/en/latest/plugins/openid-connect.md
@@ -67,6 +67,7 @@ description: OpenID Connect allows the client to obtain user information from th
| proxy_opts.http_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `http_proxy`. |
| proxy_opts.https_proxy_authorization | string | False | | Basic [base64 username:password] | As `http_proxy_authorization` but for use with `https_proxy` (since with HTTPS the authorisation is done when connecting, this one cannot be overridden by passing the `Proxy-Authorization` request header). |
| proxy_opts.no_proxy | string | False | | | Comma separated list of hosts that should not be proxied. |
+| authorization_params | object | False | | | Additional parameters to send in the request to the authorization endpoint. |
NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
diff --git a/docs/en/latest/plugins/opentelemetry.md b/docs/en/latest/plugins/opentelemetry.md
index eca682a061aa..55171d539f74 100644
--- a/docs/en/latest/plugins/opentelemetry.md
+++ b/docs/en/latest/plugins/opentelemetry.md
@@ -89,6 +89,29 @@ plugin_attr:
max_export_batch_size: 2
```
+## Variables
+
+The following nginx variables are set by OpenTelemetry:
+
+- `opentelemetry_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01`
+- `opentelemetry_trace_id` - Trace Id of the current span
+- `opentelemetry_span_id` - Span Id of the current span
+
+To use these variables, add them to your configuration file (`conf/config.yaml`):
+
+```yaml title="./conf/config.yaml"
+http:
+ enable_access_log: true
+ access_log: "/dev/stdout"
+ access_log_format: '{"time": "$time_iso8601","opentelemetry_context_traceparent": "$opentelemetry_context_traceparent","opentelemetry_trace_id": "$opentelemetry_trace_id","opentelemetry_span_id": "$opentelemetry_span_id","remote_addr": "$remote_addr","uri": "$uri"}'
+ access_log_format_escape: json
+plugins:
+ - opentelemetry
+plugin_attr:
+ opentelemetry:
+ set_ngx_var: true
+```
+
## Enable Plugin
To enable the Plugin, you have to add it to your configuration file (`conf/config.yaml`):
diff --git a/docs/en/latest/plugins/proxy-cache.md b/docs/en/latest/plugins/proxy-cache.md
index 885fe2334300..8b31baa46ef7 100644
--- a/docs/en/latest/plugins/proxy-cache.md
+++ b/docs/en/latest/plugins/proxy-cache.md
@@ -62,7 +62,7 @@ You can add your cache configuration in you APISIX configuration file (`conf/con
```yaml title="conf/config.yaml"
apisix:
proxy_cache:
- cache_ttl: 10s # 如果上游未指定缓存时间,则为默认磁盘缓存时间
+ cache_ttl: 10s # default cache TTL for caching on disk
zones:
- name: disk_cache_one
memory_size: 50m
diff --git a/docs/en/latest/plugins/response-rewrite.md b/docs/en/latest/plugins/response-rewrite.md
index 392d367254f2..9f1312e0bed1 100644
--- a/docs/en/latest/plugins/response-rewrite.md
+++ b/docs/en/latest/plugins/response-rewrite.md
@@ -83,7 +83,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1
"set": {
"X-Server-id": 3,
"X-Server-status": "on",
- "X-Server-balancer_addr": "$balancer_ip:$balancer_port"
+ "X-Server-balancer-addr": "$balancer_ip:$balancer_port"
}
},
"vars":[
@@ -107,7 +107,7 @@ Besides `set` operation, you can also `add` or `remove` response header like:
```json
"headers": {
"add": [
- "X-Server-balancer_addr: $balancer_ip:$balancer_port"
+ "X-Server-balancer-addr: $balancer_ip:$balancer_port"
],
"remove": [
"X-TO-BE-REMOVED"
@@ -137,7 +137,7 @@ Transfer-Encoding: chunked
Connection: keep-alive
X-Server-id: 3
X-Server-status: on
-X-Server-balancer_addr: 127.0.0.1:80
+X-Server-balancer-addr: 127.0.0.1:80
{"code":"ok","message":"new json body"}
```
@@ -170,7 +170,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
"set": {
"X-Server-id":3,
"X-Server-status":"on",
- "X-Server-balancer_addr":"$balancer_ip:$balancer_port"
+ "X-Server-balancer-addr":"$balancer_ip:$balancer_port"
}
},
"filters":[
diff --git a/docs/en/latest/plugins/ua-restriction.md b/docs/en/latest/plugins/ua-restriction.md
index 070f08ba6a85..8438553dc962 100644
--- a/docs/en/latest/plugins/ua-restriction.md
+++ b/docs/en/latest/plugins/ua-restriction.md
@@ -30,7 +30,7 @@ description: This document contains information about the Apache APISIX ua-restr
The `ua-restriction` Plugin allows you to restrict access to a Route or Service based on the `User-Agent` header with an `allowlist` and a `denylist`.
-A common scenario is to set crawler rules. `User-Agent` is the identity of the client when sending requests to the server, and the user can whitelist or blacklist some crawler request headers in the `ua-restriction` Plugin.
+A common scenario is to set crawler rules. `User-Agent` is the identity of the client when sending requests to the server, and the user can allow or deny some crawler request headers in the `ua-restriction` Plugin.
## Attributes
diff --git a/docs/en/latest/plugins/zipkin.md b/docs/en/latest/plugins/zipkin.md
index 2a772e608f0d..16d89bec8e30 100644
--- a/docs/en/latest/plugins/zipkin.md
+++ b/docs/en/latest/plugins/zipkin.md
@@ -235,3 +235,32 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
}
}'
```
+
+## Variables
+
+The following nginx variables are set by zipkin:
+
+- `zipkin_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01`
+- `zipkin_trace_id` - Trace Id of the current span
+- `zipkin_span_id` - Span Id of the current span
+
+To use these variables, add them to your configuration file (`conf/config.yaml`):
+
+```yaml title="./conf/config.yaml"
+http:
+ enable_access_log: true
+ access_log: "/dev/stdout"
+ access_log_format: '{"time": "$time_iso8601","zipkin_context_traceparent": "$zipkin_context_traceparent","zipkin_trace_id": "$zipkin_trace_id","zipkin_span_id": "$zipkin_span_id","remote_addr": "$remote_addr","uri": "$uri"}'
+ access_log_format_escape: json
+plugins:
+ - zipkin
+plugin_attr:
+ zipkin:
+ set_ngx_var: true
+```
+
+You can also include a `trace_id` when printing logs:
+
+```lua
+log.error(ngx.ERR,ngx_var.zipkin_trace_id,"error message")
+```
diff --git a/docs/en/latest/profile.md b/docs/en/latest/profile.md
index af226c09a55c..5c4bca67928b 100644
--- a/docs/en/latest/profile.md
+++ b/docs/en/latest/profile.md
@@ -1,5 +1,11 @@
---
-title: Configuration file switching based on environment variables
+title: Configuration based on environments
+keywords:
+ - Apache APISIX
+ - API Gateway
+ - Configuration
+ - Environment
+description: This document describes how you can change APISIX configuration based on environments.
---
-The reason the configuration is extracted from the code is to better adapt to changes. Usually our applications have different
-operating environments such as development environment and production environment. Certain configurations of these applications
-will definitely be different, such as the address of the configuration center.
+Extracting configuration from the code makes APISIX adaptable to changes in the operating environments. For example, APISIX can be deployed in a development environment for testing and then moved to a production environment. The configuration for APISIX in these environments would be different.
-If the configuration of all environments is placed in the same file, it is very difficult to manage. After receiving new
-requirements, we need to change the parameters in the configuration file to the development environment when developing the
-development environment. You have to change it back. It's very easy to make mistakes.
+APISIX supports managing multiple configurations through environment variables in two different ways:
-The solution to the above problem is to distinguish the current running environment through environment variables, and switch
-between different configuration files through environment variables. The corresponding environment variable in APISIX is: `APISIX_PROFILE`
+1. Using environment variables in the configuration file
+2. Using an environment variable to switch between multiple configuration profiles
-When `APISIX_PROFILE` is not set, the following three configuration files are used by default:
+## Using environment variables in the configuration file
-* conf/config.yaml
-* conf/apisix.yaml
-* conf/debug.yaml
+This is useful when you want to change some configurations based on the environment.
+
+To use environment variables, you can use the syntax `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=}}`. You can also set a default value to fall back to if no environment variables are set by adding it to the configuration as `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=VALUE}}`. The example below shows how you can modify your configuration file to use environment variables to set the listening ports of APISIX:
+
+```yaml title="config.yaml"
+apisix:
+ node_listen:
+ - ${{APISIX_NODE_LISTEN:=}}
+deployment:
+ admin:
+ admin_listen:
+ port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=}}
+```
+
+When you run APISIX, you can set these environment variables dynamically:
+
+```shell
+export APISIX_NODE_LISTEN=8132
+export DEPLOYMENT_ADMIN_ADMIN_LISTEN=9232
+```
+
+Now when you start APISIX, it will listen on port `8132` and expose the Admin API on port `9232`.
+
+To use default values if no environment variables are set, you can add it to your configuration file as shown below:
+
+```yaml title="config.yaml"
+apisix:
+ node_listen:
+ - ${{APISIX_NODE_LISTEN:=9080}}
+deployment:
+ admin:
+ admin_listen:
+ port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=9180}}
+```
+
+Now if you don't specify these environment variables when running APISIX, it will fall back to the default values and expose the Admin API on port `9180` and listen on port `9080`.
+
+## Using the `APISIX_PROFILE` environment variable
-If the value of `APISIX_PROFILE` is set to `prod`, the following three configuration files are used:
+If you have multiple configuration changes for multiple environments, it might be better to have a different configuration file for each.
+
+Although this might increase the number of configuration files, you would be able to manage each independently and can even do version management.
+
+APISIX uses the `APISIX_PROFILE` environment variable to switch between environments, i.e. to switch between different sets of configuration files. If the value of `APISIX_PROFILE` is `env`, then APISIX will look for the configuration files `conf/config-env.yaml`, `conf/apisix-env.yaml`, and `conf/debug-env.yaml`.
+
+For example for the production environment, you can have:
* conf/config-prod.yaml
* conf/apisix-prod.yaml
* conf/debug-prod.yaml
-Although this way will increase the number of configuration files, it can be managed independently, and then version management
-tools such as git can be configured, and version management can be better achieved.
+And for the development environment:
+
+* conf/config-dev.yaml
+* conf/apisix-dev.yaml
+* conf/debug-dev.yaml
+
+And if no environment is specified, APISIX can use the default configuration files:
+
+* conf/config.yaml
+* conf/apisix.yaml
+* conf/debug.yaml
+
+To use a particular configuration, you can specify it in the environment variable:
+
+```shell
+export APISIX_PROFILE=prod
+```
+
+APISIX will now use the `-prod.yaml` configuration files.
diff --git a/docs/en/latest/terminology/consumer-group.md b/docs/en/latest/terminology/consumer-group.md
index 1cb06c3d769b..2f91657805ee 100644
--- a/docs/en/latest/terminology/consumer-group.md
+++ b/docs/en/latest/terminology/consumer-group.md
@@ -35,7 +35,9 @@ instead of managing each consumer individually.
## Example
-The example below illustrates how to create a Consumer Group and bind it to a Consumer:
+The example below illustrates how to create a Consumer Group and bind it to a Consumer.
+
+Create a Consumer Group which shares the same rate limiting quota:
```shell
curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \
@@ -46,12 +48,14 @@ curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \
"count": 200,
"time_window": 60,
"rejected_code": 503,
- "group": "$consumer_group_id"
+ "group": "grp_company_a"
}
}
}'
```
+Create a Consumer within the Consumer Group:
+
```shell
curl http://127.0.0.1:9180/apisix/admin/consumers \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
diff --git a/docs/en/latest/terminology/consumer.md b/docs/en/latest/terminology/consumer.md
index 0b2a1ad3e827..591396c4f336 100644
--- a/docs/en/latest/terminology/consumer.md
+++ b/docs/en/latest/terminology/consumer.md
@@ -60,6 +60,8 @@ The process of identifying a Consumer in APISIX is described below:
Consumers are useful when you have different consumers requesting the same API and you need to execute different Plugin and Upstream configurations based on the consumer. These need to be used in conjunction with the user authentication system.
+Authentication plugins that can be configured with a Consumer include `basic-auth`, `hmac-auth`, `jwt-auth`, `key-auth`, `ldap-auth`, and `wolf-rbac`.
+
Refer to the documentation for the [key-auth](../plugins/key-auth.md) authentication Plugin to further understand the concept of a Consumer.
:::note
diff --git a/docs/en/latest/terminology/plugin-config.md b/docs/en/latest/terminology/plugin-config.md
index 59b46b08933e..4ed78e6d5776 100644
--- a/docs/en/latest/terminology/plugin-config.md
+++ b/docs/en/latest/terminology/plugin-config.md
@@ -30,7 +30,7 @@ description: Plugin Config in Apache APISIX.
Plugin Configs are used to extract commonly used [Plugin](./plugin.md) configurations and can be bound directly to a [Route](./route.md).
-While configuring the same plugin, only one copy of the configuration is valid. The order of precedence is always `Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service`.
+While configuring the same plugin, only one copy of the configuration is valid. Please read the [plugin execution order](../terminology/plugin.md#plugins-execution-order) and [plugin merging order](../terminology/plugin.md#plugins-merging-precedence).
## Example
diff --git a/docs/en/latest/tutorials/add-multiple-api-versions.md b/docs/en/latest/tutorials/add-multiple-api-versions.md
index e48c0c581433..f125a542f09c 100644
--- a/docs/en/latest/tutorials/add-multiple-api-versions.md
+++ b/docs/en/latest/tutorials/add-multiple-api-versions.md
@@ -105,7 +105,7 @@ docker compose up -d
You first need to [Route](https://apisix.apache.org/docs/apisix/terminology/route/) your HTTP requests from the gateway to an [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) (your API). With APISIX, you can create a route by sending an HTTP request to the gateway.
```shell
-curl http://apisix:9080/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PUT -d '
+curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PUT -d '
{
"name": "Direct Route to Old API",
"methods": ["GET"],
@@ -142,7 +142,7 @@ In the previous step, we created a route that wrapped an upstream inside its con
Let's create the shared upstream by running below curl cmd:
```shell
-curl http://apisix:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: xyz' -X PUT -d '
+curl http://apisix:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: xyz' -X PUT -d '
{
"name": "Old API",
"type": "roundrobin",
@@ -161,7 +161,7 @@ In the scope of this tutorial, we will use _URI path-based versioning_ because i
Before introducing the new version, we also need to rewrite the query that comes to the API gateway before forwarding it to the upstream. Because both the old and new versions should point to the same upstream and the upstream exposes endpoint `/hello`, not `/v1/hello`. Let’s create a plugin configuration to rewrite the path:
```shell
-curl http://apisix:9080/apisix/admin/plugin_configs/1 -H 'X-API-KEY: xyz' -X PUT -d '
+curl http://apisix:9180/apisix/admin/plugin_configs/1 -H 'X-API-KEY: xyz' -X PUT -d '
{
"plugins": {
"proxy-rewrite": {
@@ -176,7 +176,7 @@ We can now create the second versioned route that references the existing upstr
> Note that we can create routes for different API versions.
```shell
-curl http://apisix:9080/apisix/admin/routes/2 -H 'X-API-KEY: xyz' -X PUT -d '
+curl http://apisix:9180/apisix/admin/routes/2 -H 'X-API-KEY: xyz' -X PUT -d '
{
"name": "Versioned Route to Old API",
"methods": ["GET"],
@@ -209,7 +209,7 @@ Hello world
We have versioned our API, but our API consumers probably still use the legacy non-versioned API. We want them to migrate, but we cannot just delete the legacy route as our users are unaware of it. Fortunately, the `301 HTTP` status code is our friend: we can let users know that the resource has moved from `http://apisix.org/hello` to `http://apisix.org/v1/hello`. It requires configuring the [redirect plugin](https://apisix.apache.org/docs/apisix/plugins/redirect/) on the initial route:
```shell
-curl http://apisix:9080/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PATCH -d '
+curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PATCH -d '
{
"plugins": {
"redirect": {
diff --git a/docs/en/latest/tutorials/protect-api.md b/docs/en/latest/tutorials/protect-api.md
index 22caa1b16080..38c0bd6240f6 100644
--- a/docs/en/latest/tutorials/protect-api.md
+++ b/docs/en/latest/tutorials/protect-api.md
@@ -37,7 +37,7 @@ This represents the configuration of the plugins that are executed during the HT
:::note
-If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or Consumer are all bound to the same for plugins, only one plugin configuration will take effect. The priority of plugin configurations is: Consumer > Route > Plugin Config > Service. At the same time, there are 6 stages involved in the plugin execution process, namely `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter` and `log`.
+If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or [Consumer](../terminology/consumer.md) are all bound to the same plugin, only one plugin configuration will take effect. The priority of plugin configurations is described in [plugin execution order](../terminology/plugin.md#plugins-execution-order). At the same time, there are various stages involved in the plugin execution process. See [plugin execution lifecycle](../terminology/plugin.md#plugins-execution-order).
:::
diff --git a/docs/zh/latest/CHANGELOG.md b/docs/zh/latest/CHANGELOG.md
index be16cb06949a..a3ce74cca888 100644
--- a/docs/zh/latest/CHANGELOG.md
+++ b/docs/zh/latest/CHANGELOG.md
@@ -23,6 +23,8 @@ title: CHANGELOG
## Table of Contents
+- [3.6.0](#360)
+- [3.5.0](#350)
- [3.4.0](#340)
- [3.3.0](#330)
- [3.2.1](#321)
@@ -71,6 +73,73 @@ title: CHANGELOG
- [0.7.0](#070)
- [0.6.0](#060)
+## 3.6.0
+
+### Change
+
+- :warning: 移除 `etcd.use_grpc`,不再支持使用 gRPC 协议与 etcd 进行通信:[#10015](https://github.com/apache/apisix/pull/10015)
+- :warning: 移除 conf server,数据平面不再支持与控制平面进行通信,需要从 `config_provider: control_plane` 调整为 `config_provider: etcd`:[#10012](https://github.com/apache/apisix/pull/10012)
+- :warning: 严格验证核心资源的输入:[#10233](https://github.com/apache/apisix/pull/10233)
+
+### Core
+
+- :sunrise: 支持配置访问日志的缓冲区大小:[#10225](https://github.com/apache/apisix/pull/10225)
+- :sunrise: 支持在 DNS 发现服务中允许配置 `resolv_conf` 来使用本地 DNS 解析器:[#9770](https://github.com/apache/apisix/pull/9770)
+- :sunrise: 安装不再依赖 Rust:[#10121](https://github.com/apache/apisix/pull/10121)
+- :sunrise: 在 xRPC 中添加 Dubbo 协议支持:[#9660](https://github.com/apache/apisix/pull/9660)
+
+### Plugins
+
+- :sunrise: 在 `traffic-split` 插件中支持 HTTPS:[#9115](https://github.com/apache/apisix/pull/9115)
+- :sunrise: 在 `ext-plugin` 插件中支持重写请求体:[#9990](https://github.com/apache/apisix/pull/9990)
+- :sunrise: 在 `opentelemetry` 插件中支持设置 NGINX 变量:[#8871](https://github.com/apache/apisix/pull/8871)
+- :sunrise: 在 `chaitin-waf` 插件中支持 UNIX sock 主机模式:[#10161](https://github.com/apache/apisix/pull/10161)
+
+### Bugfixes
+
+- 修复 GraphQL POST 请求路由匹配异常:[#10198](https://github.com/apache/apisix/pull/10198)
+- 修复 `apisix.yaml` 中多行字符串数组的错误:[#10193](https://github.com/apache/apisix/pull/10193)
+- 修复在 proxy-cache 插件中缺少 cache_zone 时提供错误而不是 nil panic:[#10138](https://github.com/apache/apisix/pull/10138)
+
+## 3.5.0
+
+### Change
+
+- :warning: request-id 插件移除雪花算法:[#9715](https://github.com/apache/apisix/pull/9715)
+- :warning: 不再兼容 OpenResty 1.19 版本,需要将其升级到 1.21+ 版本:[#9913](https://github.com/apache/apisix/pull/9913)
+- :warning: 删除配置项 `apisix.stream_proxy.only`,L4/L7 代理需要通过配置项 `apisix.proxy_mode` 来启用:[#9607](https://github.com/apache/apisix/pull/9607)
+- :warning: admin-api 的 `/apisix/admin/plugins?all=true` 接口标记为弃用:[#9580](https://github.com/apache/apisix/pull/9580)
+- :warning: ua-restriction 插件不允许同时启用黑名单和白名单:[#9841](https://github.com/apache/apisix/pull/9841)
+
+### Core
+
+- :sunrise: 支持根据 host 级别动态设置 TLS 协议版本:[#9903](https://github.com/apache/apisix/pull/9903)
+- :sunrise: 支持强制删除资源:[#9810](https://github.com/apache/apisix/pull/9810)
+- :sunrise: 支持从 yaml 中提取环境变量:[#9855](https://github.com/apache/apisix/pull/9855)
+- :sunrise: admin-api 新增 schema validate API 校验资源配置:[#10065](https://github.com/apache/apisix/pull/10065)
+
+### Plugins
+
+- :sunrise: 新增 chaitin-waf 插件:[#9838](https://github.com/apache/apisix/pull/9838)
+- :sunrise: file-logger 支持设置 var 变量:[#9712](https://github.com/apache/apisix/pull/9712)
+- :sunrise: mock 插件支持添加响应头:[#9720](https://github.com/apache/apisix/pull/9720)
+- :sunrise: proxy-rewrite 插件支持正则匹配 URL 编码:[#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: google-cloud-logging 插件支持 client_email 配置:[#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: opa 插件支持向上游发送 OPA server 返回的头:[#9710](https://github.com/apache/apisix/pull/9710)
+- :sunrise: openid-connect 插件支持配置代理服务器:[#9948](https://github.com/apache/apisix/pull/9948)
+
+### Bugfixes
+
+- 修复 log-rotate 插件使用自定义名称时,max_kept 配置不起作用:[#9749](https://github.com/apache/apisix/pull/9749)
+- 修复 limit_conn 在 stream 模式下非法使用 http 变量:[#9816](https://github.com/apache/apisix/pull/9816)
+- 修复 loki-logger 插件在获取 log_labels 时会索引空值:[#9850](https://github.com/apache/apisix/pull/9850)
+- 修复使用 limit-count 插件时,当请求被拒绝后,X-RateLimit-Reset 不应设置为 0:[#9978](https://github.com/apache/apisix/pull/9978)
+- 修复 nacos 插件在运行时索引一个空值:[#9960](https://github.com/apache/apisix/pull/9960)
+- 修复 etcd 在同步数据时,如果密钥有特殊字符,则同步异常:[#9967](https://github.com/apache/apisix/pull/9967)
+- 修复 tencent-cloud-cls 插件 DNS 解析失败:[#9843](https://github.com/apache/apisix/pull/9843)
+- 修复执行 reload 或 quit 命令时 worker 未退出:[#9909](https://github.com/apache/apisix/pull/9909)
+- 修复在 traffic-split 插件中 upstream_id 有效性验证:[#10008](https://github.com/apache/apisix/pull/10008)
+
## 3.4.0
### Core
diff --git a/docs/zh/latest/FAQ.md b/docs/zh/latest/FAQ.md
index 923cf22f5cf4..944f96195420 100644
--- a/docs/zh/latest/FAQ.md
+++ b/docs/zh/latest/FAQ.md
@@ -109,7 +109,7 @@ luarocks config rocks_servers
make deps ENV_LUAROCKS_SERVER=https://luarocks.cn
```
-如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 参数获取详细的日志来诊断问题。
+如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 或 `-v` 参数获取详细的日志来诊断问题。
## 如何构建 APISIX-Base 环境?
diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md
index e1fd063e8a61..899fb4c44deb 100644
--- a/docs/zh/latest/admin-api.md
+++ b/docs/zh/latest/admin-api.md
@@ -326,8 +326,6 @@ Route 也称之为路由,可以通过定义一些规则来匹配客户端的
| timeout | 否 | 辅助 | 为 Route 设置 Upstream 连接、发送消息和接收消息的超时时间(单位为秒)。该配置将会覆盖在 Upstream 中配置的 [timeout](#upstream) 选项。 | {"connect": 3, "send": 3, "read": 3} |
| enable_websocket | 否 | 辅助 | 当设置为 `true` 时,启用 `websocket`(boolean), 默认值为 `false`。 | |
| status | 否 | 辅助 | 当设置为 `1` 时,启用该路由,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用。 |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
:::note 注意
@@ -637,8 +635,6 @@ Service 是某类 API 的抽象(也可以理解为一组 Route 的抽象)。
| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} |
| enable_websocket | 否 | 辅助 | `websocket`(boolean) 配置,默认值为 `false`。 | |
| hosts | 否 | 匹配规则 | 非空列表形态的 `host`,表示允许有多个不同 `host`,匹配其中任意一个即可。| ["foo.com", "\*.bar.com"] |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
Service 对象 JSON 配置示例:
@@ -822,8 +818,6 @@ Consumer 资源请求地址:/apisix/admin/consumers/{username}
| plugins | 否 | Plugin | 该 Consumer 对应的插件配置,它的优先级是最高的:Consumer > Route > Plugin Config > Service。对于具体插件配置,请参考 [Plugins](#plugin)。 | |
| desc | 否 | 辅助 | consumer 描述。 | |
| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
Consumer 对象 JSON 配置示例:
@@ -919,8 +913,6 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上
| upstream_host | 否 | 辅助 | 指定上游请求的 host,只在 `pass_host` 配置为 `rewrite` 时有效。 | |
| scheme | 否 | 辅助 | 跟上游通信时使用的 scheme。对于 7 层代理,可选值为 [`http`, `https`, `grpc`, `grpcs`]。对于 4 层代理,可选值为 [`tcp`, `udp`, `tls`]。默认值为 `http`,详细信息请参考下文。 |
| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
| tls.client_cert | 否,不能和 `tls.client_cert_id` 一起使用 | https 证书 | 设置跟上游通信时的客户端证书,详细信息请参考下文。 | |
| tls.client_key | 否,不能和 `tls.client_cert_id` 一起使用 | https 证书私钥 | 设置跟上游通信时的客户端私钥,详细信息请参考下文。 | |
| tls.client_cert_id | 否,不能和 `tls.client_cert`、`tls.client_key` 一起使用 | SSL | 设置引用的 SSL id,详见 [SSL](#ssl)。 | |
@@ -942,7 +934,6 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上
- 设为 `header` 时,`key` 为必传参数,其值为自定义的 Header name,即 "http\_`key`"。
- 设为 `cookie` 时,`key` 为必传参数,其值为自定义的 cookie name,即 "cookie\_`key`"。请注意 cookie name 是**区分大小写字母**的。例如:`cookie_x_foo` 与 `cookie_X_Foo` 表示不同的 `cookie`。
- 设为 `consumer` 时,`key` 不需要设置。此时哈希算法采用的 `key` 为认证通过的 `consumer_name`。
-- 如果指定的 `hash_on` 和 `key` 获取不到值时,使用默认值:`remote_addr`。
以下特性需要 APISIX 运行于 [APISIX-Base](./FAQ.md#如何构建-APISIX-Base-环境?):
@@ -1211,8 +1202,6 @@ SSL 资源请求地址:/apisix/admin/ssls/{id}
| client.skip_mtls_uri_regex | 否 | PCRE 正则表达式数组 | 用来匹配请求的 URI,如果匹配,则该请求将绕过客户端证书的检查,也就是跳过 MTLS。 | ["/hello[0-9]+", "/foobar"] |
| snis | 是 | 匹配规则 | 非空数组形式,可以匹配多个 SNI。 | |
| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 |
| type | 否 | 辅助 | 标识证书的类型,默认值为 `server`。 | `client` 表示证书是客户端证书,APISIX 访问上游时使用;`server` 表示证书是服务端证书,APISIX 验证客户端请求时使用。 |
| status | 否 | 辅助 | 当设置为 `1` 时,启用此 SSL,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用 |
| ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | |
@@ -1254,8 +1243,6 @@ Global Rule 资源请求地址:/apisix/admin/global_rules/{id}
| 名称 | 必选项 | 类型 | 描述 | 示例值 |
| ----------- | ------ | ------ | ------------------------------------------------- | ---------- |
| plugins | 是 | Plugin | 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | |
-| create_time | 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 | 1602883670 |
-| update_time | 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 | 1602883670 |
## Consumer Group
@@ -1283,8 +1270,6 @@ Consumer Group 资源请求地址:/apisix/admin/consumer_groups/{id}
|plugins | 是 |Plugin| 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | |
|desc | 否 | 辅助 | 标识描述、使用场景等。 | Consumer 测试。|
|labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}|
-|create_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670|
-|update_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670|
## Plugin Config
@@ -1312,8 +1297,6 @@ Plugin Config 资源请求地址:/apisix/admin/plugin_configs/{id}
|plugins | 是 |Plugin| 更多信息请参考 [Plugin](terminology/plugin.md)。||
|desc | 否 | 辅助 | 标识描述、使用场景等。 |customer xxxx|
|labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}|
-|create_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670|
-|update_time| 否 | 辅助 | epoch 时间戳,单位为秒,如果不指定则自动创建。 |1602883670|
## Plugin Metadata
@@ -1376,6 +1359,16 @@ Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name}
| GET | /apisix/admin/plugins/{plugin_name} | 无 | 获取资源。 |
| GET | /apisix/admin/plugins?all=true | 无 | 获取所有插件的所有属性。 |
| GET | /apisix/admin/plugins?all=true&subsystem=stream| 无 | 获取所有 Stream 插件的属性。|
+| GET | /apisix/admin/plugins?all=true&subsystem=http| 无 | 获取所有 HTTP 插件的属性。|
+| PUT | /apisix/admin/plugins/reload | 无 | 根据代码中所做的更改重新加载插件。 |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=stream | 无 | 获取指定 Stream 插件的属性。 |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=http | 无 | 获取指定 HTTP 插件的属性。 |
+
+:::caution
+
+获取所有插件属性的接口 `/apisix/admin/plugins?all=true` 将很快被弃用。
+
+:::
### 使用示例 {#plugin-example}
@@ -1435,6 +1428,7 @@ Plugin 资源请求地址:/apisix/admin/stream_routes/{id}
| ---------------- | ------| -------- | ------------------------------------------------------------------------------| ------ |
| upstream | 否 | Upstream | Upstream 配置,详细信息请参考 [Upstream](terminology/upstream.md)。 | |
| upstream_id | 否 | Upstream | 需要使用的 Upstream id,详细信息请 [Upstream](terminology/upstream.md)。 | |
+| service_id | 否 | String | 需要使用的 [Service](terminology/service.md) id。 | |
| remote_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果客户端 IP 匹配,则转发到上游 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" |
| server_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果 APISIX 服务器的 IP 与 `server_addr` 匹配,则转发到上游。 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" |
| server_port | 否 | 整数 | 过滤选项:如果 APISIX 服务器的端口 与 `server_port` 匹配,则转发到上游。 | 9090 |
diff --git a/docs/zh/latest/building-apisix.md b/docs/zh/latest/building-apisix.md
index 95672c82b1c7..abeac2033c68 100644
--- a/docs/zh/latest/building-apisix.md
+++ b/docs/zh/latest/building-apisix.md
@@ -53,7 +53,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend
然后,创建一个目录并设置环境变量 `APISIX_VERSION`:
```shell
-APISIX_VERSION='3.4.0'
+APISIX_VERSION='3.6.0'
mkdir apisix-${APISIX_VERSION}
```
diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json
index 7ac2791cad6c..1ff81e6b64d1 100644
--- a/docs/zh/latest/config.json
+++ b/docs/zh/latest/config.json
@@ -1,9 +1,16 @@
{
- "version": "3.4.0",
+ "version": "3.6.0",
"sidebar": [
{
- "type": "doc",
- "id": "getting-started"
+ "type": "category",
+ "label": "Getting Started",
+ "items": [
+ "getting-started/README",
+ "getting-started/configure-routes",
+ "getting-started/load-balancing",
+ "getting-started/key-authentication",
+ "getting-started/rate-limiting"
+ ]
},
{
"type": "doc",
@@ -216,6 +223,10 @@
"type": "doc",
"id": "building-apisix"
},
+ {
+ "type": "doc",
+ "id": "support-fips-in-apisix"
+ },
{
"type": "doc",
"id": "external-plugin"
diff --git a/docs/zh/latest/getting-started.md b/docs/zh/latest/getting-started.md
deleted file mode 100644
index 969e30956fc9..000000000000
--- a/docs/zh/latest/getting-started.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-title: 快速入门指南
-keywords:
- - APISIX
- - APISIX 入门指南
- - APISIX docker 安装教程
-description: 本文档将引导你了解如何开始使用 Apache APISIX。
----
-
-
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-本文将为你介绍 Apache APISIX 的概念、功能以及如何使用 APISIX。
-
-通过本文你可以了解到以下内容:
-
-- Apache APISIX 是什么?
-- APISIX 的架构及主要概念。
-- 如何使用 Docker 安装并运行 APISIX。
-- 如何使用 Admin API 创建第一个路由并配置上游。
-- 如何使用 APISIX Dashboard。
-- 如何寻求帮助。
-
-## Apache APISIX 是什么?
-
-Apache APISIX 是由 API7.ai(支流科技)捐赠给 Apache 软件基金会的云原生 API 网关,它兼具动态、实时、高性能等特点,提供了负载均衡、动态上游、灰度发布(金丝雀发布)、服务熔断、身份认证、可观测性等丰富的流量管理功能。我们可以使用 Apache APISIX 来处理传统的南北向流量,也可以处理服务间的东西向流量。同时,它也支持作为 [K8s Ingress Controller](https://github.com/apache/apisix-ingress-controller) 来使用。
-
-### 主要特性
-
-- 多平台支持:APISIX 提供了多平台解决方案,它不但支持裸机运行,也支持在 Kubernetes 中使用,还支持与 AWS Lambda、Azure Function、Lua 函数和 Apache OpenWhisk 等云服务集成。
-- 全动态能力:APISIX 支持热加载,这意味着你不需要重启服务就可以更新 APISIX 的配置。请访问[为什么 Apache APISIX 选择 Nginx + Lua 这个技术栈?](https://apisix.apache.org/zh/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/)以了解实现原理。
-- 精细化路由:APISIX 支持使用 [NGINX 内置变量](https://nginx.org/en/docs/varindex.html)做为路由的匹配条件,你可以自定义匹配函数来过滤请求,匹配路由。
-- 运维友好:APISIX 支持与以下工具和平台集成:[HashiCorp Vault](./terminology/secret.md#使用-vault-管理密钥)、[Zipkin](./plugins/zipkin.md)、[Apache SkyWalking](./plugins/skywalking.md)、[Consul](../../en/latest/discovery/consul_kv.md)、[Nacos](./discovery/nacos.md)、[Eureka](./discovery.md)。通过 [APISIX Dashboard](/docs/dashboard/USER_GUIDE),运维人员可以通过友好且直观的 UI 配置 APISIX。
-- 多语言插件支持:APISIX 支持多种开发语言进行插件开发,开发人员可以选择擅长语言的 SDK 开发自定义插件。
-
-## 主要概念
-
-下图为 Apache APISIX 的架构:
-
-![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png)
-
-下表是本文涉及到的 APISIX 的主要概念和组件:
-
-| 概念/组件 | 描述 |
-|-------------|--------------------------------------------------------------------------------------------------|
-| Route | 通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的插件,最后把请求转发给到指定的上游应用。 |
-| Upstream | 上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。 |
-| Admin API | 用户可以通过 Admin API 控制 APISIX 实例。 |
-
-## 前提条件
-
-在开始使用 APISIX 之前,请确保你已经安装以下应用:
-
-- [Docker](https://www.docker.com/) 和 [Docker Compose](https://docs.docker.com/compose/)。
-- [curl](https://curl.se/docs/manpage.html) 用于测试 API。你也可以使用 [Hoppscotch](https://hoppscotch.io/) 之类的工具。
-- 本文使用的上游服务是 [httpbin.org](https://httpbin.org),你可以使用它进行测试。这是一个返回服务,它将返回我们在请求中传递的参数。
-
-**请求内容:**
-
-请求 URL 由以下参数构成:
-
-- Protocol:即网络传输协议,在示例中,我们使用的是 `HTTP` 协议。
-- Port:即端口,示例中使用的 `80` 端口。
-- Host:即主机地址,示例中使用的是 `httpbin.org`。
-- Path:即路径,示例中的路径是 `/get`。
-- Query Parameters:即查询字符串,这里有两个字符串,分别是 `foo1` 和 `foo2`。
-
-运行以下命令,发送请求:
-
-```bash
-curl --location --request GET "http://httpbin.org/get?foo1=bar1&foo2=bar2"
-```
-
-**响应内容:**
-
-```json
-{
- "args": {
- "foo1": "bar1",
- "foo2": "bar2"
- },
- "headers": {
- "Accept": "*/*",
- "Host": "httpbin.org",
- "User-Agent": "curl/7.29.0",
- "X-Amzn-Trace-Id": "Root=1-6088fe84-24f39487166cce1f0e41efc9"
- },
- "origin": "58.152.81.42",
- "url": "http://httpbin.org/get?foo1=bar1&foo2=bar2"
-}
-```
-
-## 安装 APISIX
-
-APISIX 可以借助 quickstart 脚本快速安装并启动。
-
-```sh
-curl -sL https://run.api7.ai/apisix/quickstart | sh
-```
-
-该命令在本地安装并运行了基于 Docker 的 APISIX 和 etcd 容器,其中 APISIX 采用 etcd 保存和同步配置信息。APISIX 和 etcd 容器使用 [**host**](https://docs.docker.com/network/host/) 的 Docker 网络模式,因此可以从本地直接访问。
-
-如果一切顺利,将输出如下信息。
-
-```text
-✔ APISIX is ready!
-```
-
-:::note
-
-你也可以参考 [APISIX 安装指南](./installation-guide.md)了解不同的安装方法。
-
-:::
-
-:::info IMPORTANT
-
-请确保其他系统进程没有占用 **9080、9180、9443 和 2379** 端口。
-
-:::
-
-你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常。
-
-```sh
-curl "http://127.0.0.1:9080" --head | grep Server
-```
-
-如果一切顺利,将输出如下信息。
-
-```text
-Server: APISIX/Version
-```
-
-`Version` 是指您已经安装的 APISIX 的版本。例如,`APISIX/3.3.0`。
-
-现在,你已经成功安装并运行了 APISIX!
-
-## 创建路由
-
-APISIX 提供了强大的 [Admin API](./admin-api.md) 和 [Dashboard](https://github.com/apache/apisix-dashboard) 供用户使用。在下述示例中,我们将使用 Admin API 创建一个 [Route](./terminology/route.md) 并与 [Upstream](./terminology/upstream.md) 绑定,当一个请求到达 APISIX 时,APISIX 会将请求转发到指定的上游服务中。
-
-以下示例代码中,我们将为路由配置匹配规则,以便 APISIX 可以将请求转发到对应的上游服务:
-
-```bash
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d '
-{
- "methods": ["GET"],
- "host": "example.com",
- "uri": "/anything/*",
- "upstream": {
- "type": "roundrobin",
- "nodes": {
- "httpbin.org:80": 1
- }
- }
-}'
-```
-
-该配置意味着,当请求满足下述的**所有**规则时,请求将被转发到上游服务(`httpbin.org:80`):
-
-- 请求的 HTTP 方法为 `GET`。
-- 请求头包含 `host` 字段,且它的值为 `example.com`。
-- 请求路径匹配 `/anything/*`,`*` 意味着任意的子路径,例如 `/anything/foo?arg=10`。
-
-当路由创建完成后,可以通过以下命令访问上游服务:
-
-```bash
-curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com"
-```
-
-该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。
-
-## 使用上游服务创建路由
-
-你可以通过以下命令创建一个上游,并在路由中使用它,而不是直接将其配置在路由中:
-
-```bash
-curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -X PUT -d '
-{
- "type": "roundrobin",
- "nodes": {
- "httpbin.org:80": 1
- }
-}'
-```
-
-该上游配置与上一节配置在路由中的上游相同。同样使用了 `roundrobin` 作为负载均衡机制,并设置了 `httpbin.org:80` 为上游服务。为了将该上游绑定到路由,此处需要把 `upstream_id` 设置为 `"1"`。更多字段信息,请参考 [Admin API](./admin-api.md)。
-
-上游服务创建完成后,可以通过以下命令绑定到指定路由:
-
-```bash
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d '
-{
- "methods": ["GET"],
- "host": "example.com",
- "uri": "/anything/*",
- "upstream_id": "1"
-}'
-```
-
-我们已经创建了路由与上游服务,现在可以通过以下命令访问上游服务:
-
-```bash
-curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com"
-```
-
-该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。
-
-## 使用 APISIX Dashboard
-
-你还可以使用 APISIX Dashboard 创建和配置类似于上述步骤中所创建的路由。
-
-如果你已经完成上述操作步骤,就可以通过 [`localhost:9000`](http://localhost:9000/) 访问 APISIX Dashboard。
-
-单击侧边栏中的 [`Route`](http://localhost:9000/routes/list),可以查看已经配置的路由列表。你也可以看到在上述步骤中使用 Admin API 创建的路由。
-
-你也可以通过单击 [`Create`](http://localhost:9000/routes/create) 按钮并按照提示创建新路由:
-
-![Creating a Route with APISIX Dashboard](../../assets/images/create-a-route.png)
-
-新创建的路由将被添加到路由列表中:
-
-![Creating a Route with APISIX Dashboard](../../assets/images/list-of-routes.png)
-
-想要了解更多关于 APISIX Dashboard 的信息,请参考 [APISIX Dashboard 文档](/docs/dashboard/USER_GUIDE)。
-
-## 总结
-
-完成上述步骤后,APISIX 就可以正常运行了。如果想利用 APISIX 实现身份验证、安全性、限流限速和可观测性等功能,可通过添加插件实现。各类插件的详细信息请参考[插件市场](/plugins)。
-
-如果你在使用当中遇到困难,可以通过 [APISIX 社区频道](/docs/general/join)或者在 GitHub 上[提交一个 issue](/docs/general/submit-issue) 寻求帮助。
diff --git a/docs/zh/latest/getting-started/README.md b/docs/zh/latest/getting-started/README.md
new file mode 100644
index 000000000000..7575132a3dcc
--- /dev/null
+++ b/docs/zh/latest/getting-started/README.md
@@ -0,0 +1,71 @@
+---
+title: 入门指南
+description: 本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。
+---
+
+