diff --git a/.asf.yaml b/.asf.yaml
new file mode 100644
index 000000000000..ad1e99e2a4d5
--- /dev/null
+++ b/.asf.yaml
@@ -0,0 +1,48 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+github:
+ description: The Cloud-Native API Gateway
+ homepage: https://apisix.apache.org/
+ labels:
+ - api-gateway
+ - cloud-native
+ - nginx
+ - lua
+ - luajit
+ - apigateway
+ - microservices
+ - api
+ - loadbalancing
+ - reverse-proxy
+ - api-management
+ - apisix
+ - serverless
+ - iot
+ - devops
+ - kubernetes
+ - docker
+
+ enabled_merge_buttons:
+ squash: true
+ merge: false
+ rebase: false
+
+ notifications:
+ commits: notifications@apisix.apache.org
+ issues: notifications@apisix.apache.org
+ pullrequests: notifications@apisix.apache.org
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index f126750da8a1..c5cd79d01373 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,17 +1,10 @@
-NOTE: Please read the Contributing.md guidelines before submitting your patch:
+### What this PR does / why we need it:
+
+
-https://github.com/apache/incubator-apisix/blob/master/Contributing.md#how-to-add-a-new-feature-or-change-an-existing-one
+### Pre-submission checklist:
-### Summary
-
-SUMMARY_HERE
-
-### Full changelog
-
-* [Implement ...]
-* [Add related tests]
-* ...
-
-### Issues resolved
-
-Fix #XXX
+* [ ] Did you explain what problem this PR solves, or what new features it adds?
+* [ ] Have you added corresponding test cases?
+* [ ] Have you updated the corresponding documentation?
+* [ ] Is this PR backward compatible?
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 000000000000..c46c97f95aa0
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,64 @@
+name: CI
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ build:
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: [ubuntu-18.04]
+ os_name: [linux_openresty, linux_tengine, linux_apisix_master_luarocks, linux_apisix_current_luarocks, linux_openresty_mtls]
+ include:
+ - platform: macos-latest
+ os_name: osx_openresty
+
+ runs-on: ${{ matrix.platform }}
+ env:
+ SERVER_NAME: ${{ matrix.os_name }}
+
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+#----------------------------------------------------------------------------
+ - name: Linux Get dependencies
+ if: matrix.platform == 'ubuntu-18.04'
+ run: sudo apt install -y cpanminus build-essential libncurses5-dev libreadline-dev libssl-dev perl etcd
+
+ - name: Linux Before install
+ if: matrix.platform == 'ubuntu-18.04'
+ run: sudo ./.travis/${{ matrix.os_name }}_runner.sh before_install
+
+ - name: Linux Install
+ if: matrix.platform == 'ubuntu-18.04'
+ run: sudo ./.travis/${{ matrix.os_name }}_runner.sh do_install
+
+ - name: Linux Script
+ if: matrix.platform == 'ubuntu-18.04'
+ run: sudo ./.travis/${{ matrix.os_name }}_runner.sh script
+
+# - name: Linux After success
+# if: matrix.platform == 'ubuntu-18.04'
+# run: sudo ./.travis/${{ matrix.os_name }}_runner.sh after_success
+#----------------------------------------------------------------------------
+ - name: MacOS Before install
+ if: matrix.platform == 'macos-latest'
+ run: ./.travis/${{ matrix.os_name }}_runner.sh before_install
+
+ - name: MacOS Install
+ if: matrix.platform == 'macos-latest'
+ run: ./.travis/${{ matrix.os_name }}_runner.sh do_install
+
+ - name: MacOS Script
+ if: matrix.platform == 'macos-latest'
+ run: ./.travis/${{ matrix.os_name }}_runner.sh script
+
+# - name: MacOS After success
+# if: matrix.platform == 'macos-latest'
+# run: ./.travis/${{ matrix.os_name }}_runner.sh after_success
diff --git a/.gitignore b/.gitignore
index 8327a3f240e9..75c9907ca5c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,6 +53,7 @@ proxy_temp
fastcgi_temp
client_body_temp
utils/lj-releng
+default.etcd/
.idea/
*.iml
\.*
diff --git a/.luacheckrc b/.luacheckrc
index 0d51c0005cc0..6b3ce0494567 100644
--- a/.luacheckrc
+++ b/.luacheckrc
@@ -1,3 +1,4 @@
std = "ngx_lua"
unused_args = false
redefined = false
+max_line_length = 100
diff --git a/.travis.yml b/.travis.yml
index d33d27cc2bce..ebb3e6929969 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,30 +1,16 @@
-dist: xenial
+dist: bionic
sudo: required
matrix:
+ allow_failures:
+ - os: osx
include:
- - os: linux
- services:
- - docker
- env: OSNAME=linux_openresty
- os: osx
env: OSNAME=osx_openresty
cache:
directories:
- $HOME/Library/Caches/Homebrew
- /usr/local/Homebrew
- - os: linux
- services:
- - docker
- env: OSNAME=linux_tengine
- - os: linux
- services:
- - docker
- env: OSNAME=linux_apisix_master_luarocks
- - os: linux
- services:
- - docker
- env: OSNAME=linux_apisix_current_luarocks
language: c
@@ -41,6 +27,7 @@ addons:
homebrew:
update: true
+
cache:
directories:
- build-cache
diff --git a/.travis/apisix_cli_test.sh b/.travis/apisix_cli_test.sh
index d67c7f837b7d..e30cd5b97c24 100755
--- a/.travis/apisix_cli_test.sh
+++ b/.travis/apisix_cli_test.sh
@@ -23,6 +23,8 @@
set -ex
+git checkout conf/config.yaml
+
# check whether the 'reuseport' is in nginx.conf .
make init
@@ -72,3 +74,90 @@ done
sed -i '/dns_resolver:/,+4s/^#//' conf/config.yaml
echo "passed: system nameserver imported"
+
+# enable the enable_dev_mode option
+sed -i 's/enable_dev_mode: false/enable_dev_mode: true/g' conf/config.yaml
+
+make init
+
+count=`grep -c "worker_processes 1;" conf/nginx.conf`
+if [ $count -ne 1 ]; then
+ echo "failed: worker_processes is not 1 when enable enable_dev_mode"
+ exit 1
+fi
+
+count=`grep -c "listen 9080.*reuseport" conf/nginx.conf || true`
+if [ $count -ne 0 ]; then
+ echo "failed: reuseport should be disabled when enable enable_dev_mode"
+ exit 1
+fi
+
+git checkout conf/config.yaml
+
+# check whether the 'worker_cpu_affinity' is in nginx.conf .
+
+make init
+
+grep -E "worker_cpu_affinity" conf/nginx.conf > /dev/null
+if [ ! $? -eq 0 ]; then
+ echo "failed: nginx.conf file is missing worker_cpu_affinity configuration"
+ exit 1
+fi
+
+echo "passed: nginx.conf file contains worker_cpu_affinity configuration"
+
+# check admin https enabled
+
+sed -i 's/\# port_admin: 9180/port_admin: 9180/' conf/config.yaml
+sed -i 's/\# https_admin: true/https_admin: true/' conf/config.yaml
+
+make init
+
+grep "listen 9180 ssl" conf/nginx.conf > /dev/null
+if [ ! $? -eq 0 ]; then
+ echo "failed: failed to enabled https for admin"
+ exit 1
+fi
+
+make run
+
+code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1')
+if [ ! $code -eq 200 ]; then
+ echo "failed: failed to enabled https for admin"
+ exit 1
+fi
+
+echo "passed: admin https enabled"
+
+# rollback to the default
+
+make stop
+
+sed -i 's/port_admin: 9180/\# port_admin: 9180/' conf/config.yaml
+sed -i 's/https_admin: true/\# https_admin: true/' conf/config.yaml
+
+make init
+
+set +ex
+
+grep "listen 9180 ssl" conf/nginx.conf > /dev/null
+if [ ! $? -eq 1 ]; then
+ echo "failed: failed to rollback to the default admin config"
+ exit 1
+fi
+
+set -ex
+
+echo "passed: rollback to the default admin config"
+
+# check the 'worker_shutdown_timeout' in 'nginx.conf' .
+
+make init
+
+grep -E "worker_shutdown_timeout 240s" conf/nginx.conf > /dev/null
+if [ ! $? -eq 0 ]; then
+ echo "failed: worker_shutdown_timeout in nginx.conf is required 240s"
+ exit 1
+fi
+
+echo "passed: worker_shutdown_timeout in nginx.conf is ok"
diff --git a/.travis/linux_apisix_current_luarocks_runner.sh b/.travis/linux_apisix_current_luarocks_runner.sh
index b67e115fa7f5..0264fc5ba826 100755
--- a/.travis/linux_apisix_current_luarocks_runner.sh
+++ b/.travis/linux_apisix_current_luarocks_runner.sh
@@ -47,6 +47,11 @@ script() {
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
openresty -V
sudo service etcd start
+ sudo service etcd stop
+ mkdir -p ~/etcd-data
+ /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 &
+ etcd --version
+ sleep 5
sudo rm -rf /usr/local/apisix
diff --git a/.travis/linux_apisix_master_luarocks_runner.sh b/.travis/linux_apisix_master_luarocks_runner.sh
index 2c76087fa20b..7705c97559ea 100755
--- a/.travis/linux_apisix_master_luarocks_runner.sh
+++ b/.travis/linux_apisix_master_luarocks_runner.sh
@@ -20,6 +20,7 @@ set -ex
export_or_prefix() {
export OPENRESTY_PREFIX="/usr/local/openresty-debug"
+ export APISIX_MAIN="https://raw.githubusercontent.com/apache/incubator-apisix/master/rockspec/apisix-master-0.rockspec"
}
do_install() {
@@ -46,7 +47,11 @@ script() {
export_or_prefix
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
openresty -V
- sudo service etcd start
+ sudo service etcd stop
+ mkdir -p ~/etcd-data
+ /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 &
+ etcd --version
+ sleep 5
sudo rm -rf /usr/local/apisix
@@ -62,7 +67,7 @@ script() {
sudo PATH=$PATH ./utils/install-apisix.sh remove > build.log 2>&1 || (cat build.log && exit 1)
# install APISIX by luarocks
- sudo luarocks install rockspec/apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1)
+ sudo luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1)
# show install files
luarocks show apisix
diff --git a/.travis/linux_openresty_mtls_runner.sh b/.travis/linux_openresty_mtls_runner.sh
new file mode 100755
index 000000000000..8b7e035e73b2
--- /dev/null
+++ b/.travis/linux_openresty_mtls_runner.sh
@@ -0,0 +1,182 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+export_or_prefix() {
+ export OPENRESTY_PREFIX="/usr/local/openresty-debug"
+}
+
+create_lua_deps() {
+ echo "Create lua deps cache"
+
+ make deps
+ luarocks install luacov-coveralls --tree=deps --local > build.log 2>&1 || (cat build.log && exit 1)
+
+ sudo rm -rf build-cache/deps
+ sudo cp -r deps build-cache/
+ sudo cp rockspec/apisix-master-0.rockspec build-cache/
+}
+
+before_install() {
+ sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1)
+}
+
+do_install() {
+ export_or_prefix
+
+ wget -qO - https://openresty.org/package/pubkey.gpg | sudo apt-key add -
+ sudo apt-get -y update --fix-missing
+ sudo apt-get -y install software-properties-common
+ sudo add-apt-repository -y "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main"
+ sudo add-apt-repository -y ppa:longsleep/golang-backports
+
+ sudo apt-get update
+ sudo apt-get install openresty-debug lua5.1 liblua5.1-0-dev
+
+ wget https://github.com/luarocks/luarocks/archive/v2.4.4.tar.gz
+ tar -xf v2.4.4.tar.gz
+ cd luarocks-2.4.4
+ ./configure --prefix=/usr > build.log 2>&1 || (cat build.log && exit 1)
+ make build > build.log 2>&1 || (cat build.log && exit 1)
+ sudo make install > build.log 2>&1 || (cat build.log && exit 1)
+ cd ..
+ rm -rf luarocks-2.4.4
+
+ sudo luarocks install luacheck > build.log 2>&1 || (cat build.log && exit 1)
+
+
+ if [ ! -f "build-cache/apisix-master-0.rockspec" ]; then
+ create_lua_deps
+
+ else
+ src=`md5sum rockspec/apisix-master-0.rockspec | awk '{print $1}'`
+ src_cp=`md5sum build-cache/apisix-master-0.rockspec | awk '{print $1}'`
+ if [ "$src" = "$src_cp" ]; then
+ echo "Use lua deps cache"
+ sudo cp -r build-cache/deps ./
+ else
+ create_lua_deps
+ fi
+ fi
+
+ # sudo apt-get install tree -y
+ # tree deps
+
+ git clone https://github.com/iresty/test-nginx.git test-nginx
+ make utils
+
+ git clone https://github.com/apache/openwhisk-utilities.git .travis/openwhisk-utilities
+ cp .travis/ASF* .travis/openwhisk-utilities/scancode/
+
+ ls -l ./
+}
+
+script() {
+ export_or_prefix
+ export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
+ openresty -V
+ sudo service etcd stop
+ mkdir -p ~/etcd-data
+ /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 &
+ etcd --version
+ sleep 5
+
+
+ # enable mTLS
+ sed -i 's/\# port_admin: 9180/port_admin: 9180/' conf/config.yaml
+ sed -i 's/\# https_admin: true/https_admin: true/' conf/config.yaml
+ sed -i 's/mtls_enable: false/mtls_enable: true/' conf/config.yaml
+ sed -i 's#admin_ssl_ca_cert: ""#admin_ssl_ca_cert: "../t/certs/mtls_ca.crt"#' conf/config.yaml
+ sed -i 's#admin_ssl_cert_key: ""#admin_ssl_cert_key: "../t/certs/mtls_server.key"#' conf/config.yaml
+ sed -i 's#admin_ssl_cert: ""#admin_ssl_cert: "../t/certs/mtls_server.crt"#' conf/config.yaml
+
+ ./bin/apisix help
+ ./bin/apisix init
+ ./bin/apisix init_etcd
+ ./bin/apisix start
+
+ sleep 1
+ cat logs/error.log
+
+
+ echo "127.0.0.1 admin.apisix.dev" | sudo tee -a /etc/hosts
+
+ # correct certs
+ code=$(curl -i -o /dev/null -s -w %{http_code} --cacert ./t/certs/mtls_ca.crt --key ./t/certs/mtls_client.key --cert ./t/certs/mtls_client.crt -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' https://admin.apisix.dev:9180/apisix/admin/routes)
+ if [ ! $code -eq 200 ]; then
+ echo "failed: failed to enabled mTLS for admin"
+ exit 1
+ fi
+
+ # # no certs
+ # code=$(curl -i -o /dev/null -s -w %{http_code} -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' https://admin.apisix.dev:9180/apisix/admin/routes)
+ # if [ ! $code -eq 000 ]; then
+ # echo "failed: failed to enabled mTLS for admin"
+ # exit 1
+ # fi
+
+ # # no ca cert
+ # code=$(curl -i -o /dev/null -s -w %{http_code} --key ./t/certs/mtls_client.key --cert ./t/certs/mtls_client.crt -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' https://admin.apisix.dev:9180/apisix/admin/routes)
+ # if [ ! $code -eq 000 ]; then
+ # echo "failed: failed to enabled mTLS for admin"
+ # exit 1
+ # fi
+
+ # # error key
+ # code=$(curl -i -o /dev/null -s -w %{http_code} --cacert ./t/certs/mtls_ca.crt --key ./t/certs/mtls_server.key --cert ./t/certs/mtls_client.crt -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' https://admin.apisix.dev:9180/apisix/admin/routes)
+ # if [ ! $code -eq 000 ]; then
+ # echo "failed: failed to enabled mTLS for admin"
+ # exit 1
+ # fi
+
+ # skip
+ code=$(curl -i -o /dev/null -s -w %{http_code} -k -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' https://admin.apisix.dev:9180/apisix/admin/routes)
+ if [ ! $code -eq 400 ]; then
+ echo "failed: failed to enabled mTLS for admin"
+ exit 1
+ fi
+
+ ./bin/apisix stop
+ sleep 1
+
+ make lint && make license-check || exit 1
+}
+
+after_success() {
+ cat luacov.stats.out
+ luacov-coveralls
+}
+
+case_opt=$1
+shift
+
+case ${case_opt} in
+before_install)
+ before_install "$@"
+ ;;
+do_install)
+ do_install "$@"
+ ;;
+script)
+ script "$@"
+ ;;
+after_success)
+ after_success "$@"
+ ;;
+esac
diff --git a/.travis/linux_openresty_runner.sh b/.travis/linux_openresty_runner.sh
index 384d10ec4a82..d5922c6fcb3b 100755
--- a/.travis/linux_openresty_runner.sh
+++ b/.travis/linux_openresty_runner.sh
@@ -37,14 +37,19 @@ before_install() {
sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1)
docker pull redis:3.0-alpine
docker run --rm -itd -p 6379:6379 --name apisix_redis redis:3.0-alpine
+ docker run --rm -itd -e HTTP_PORT=8888 -e HTTPS_PORT=9999 -p 8888:8888 -p 9999:9999 mendhak/http-https-echo
+ # Runs Keycloak version 10.0.2 with inbuilt policies for unit tests
+ docker run --rm -itd -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix
# spin up kafka cluster for tests (1 zookeeper and 1 kafka instance)
docker pull bitnami/zookeeper:3.6.0
docker pull bitnami/kafka:latest
docker network create kafka-net --driver bridge
docker run --name zookeeper-server -d -p 2181:2181 --network kafka-net -e ALLOW_ANONYMOUS_LOGIN=yes bitnami/zookeeper:3.6.0
docker run --name kafka-server1 -d --network kafka-net -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -p 9092:9092 -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest
+ docker pull bitinit/eureka
+ docker run --name eureka -d -p 8761:8761 --env ENVIRONMENT=apisix --env spring.application.name=apisix-eureka --env server.port=8761 --env eureka.instance.ip-address=127.0.0.1 --env eureka.client.registerWithEureka=true --env eureka.client.fetchRegistry=false --env eureka.client.serviceUrl.defaultZone=http://127.0.0.1:8761/eureka/ bitinit/eureka
sleep 5
- docker exec -it kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic test2
+ docker exec -i kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic test2
}
do_install() {
@@ -123,7 +128,11 @@ script() {
export_or_prefix
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
openresty -V
- sudo service etcd start
+ sudo service etcd stop
+ mkdir -p ~/etcd-data
+ /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 &
+ etcd --version
+ sleep 5
./build-cache/grpc_server_example &
@@ -132,6 +141,23 @@ script() {
./bin/apisix init_etcd
./bin/apisix start
+ #start again --> fail
+ res=`./bin/apisix start`
+ if [ "$res" != "APISIX is running..." ]; then
+ echo "failed: APISIX runs repeatedly"
+ exit 1
+ fi
+
+ #kill apisix
+ sudo kill -9 `ps aux | grep apisix | grep nginx | awk '{print $2}'`
+
+ #start -> ok
+ res=`./bin/apisix start`
+ if [ "$res" == "APISIX is running..." ]; then
+ echo "failed: shouldn't stop APISIX running after kill the old process."
+ exit 1
+ fi
+
sleep 1
cat logs/error.log
@@ -142,7 +168,7 @@ script() {
sleep 1
make lint && make license-check || exit 1
- APISIX_ENABLE_LUACOV=1 prove -Itest-nginx/lib -r t
+ APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t
}
after_success() {
diff --git a/.travis/linux_tengine_runner.sh b/.travis/linux_tengine_runner.sh
index 45a9ec448e29..e5e81e986fb4 100755
--- a/.travis/linux_tengine_runner.sh
+++ b/.travis/linux_tengine_runner.sh
@@ -38,14 +38,19 @@ before_install() {
sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1)
docker pull redis:3.0-alpine
docker run --rm -itd -p 6379:6379 --name apisix_redis redis:3.0-alpine
+ docker run --rm -itd -e HTTP_PORT=8888 -e HTTPS_PORT=9999 -p 8888:8888 -p 9999:9999 mendhak/http-https-echo
+ # Runs Keycloak version 10.0.2 with inbuilt policies for unit tests
+ docker run --rm -itd -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix
# spin up kafka cluster for tests (1 zookeeper and 1 kafka instance)
docker pull bitnami/zookeeper:3.6.0
docker pull bitnami/kafka:latest
docker network create kafka-net --driver bridge
docker run --name zookeeper-server -d -p 2181:2181 --network kafka-net -e ALLOW_ANONYMOUS_LOGIN=yes bitnami/zookeeper:3.6.0
docker run --name kafka-server1 -d --network kafka-net -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -p 9092:9092 -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest
+ docker pull bitinit/eureka
+ docker run --name eureka -d -p 8761:8761 --env ENVIRONMENT=apisix --env spring.application.name=apisix-eureka --env server.port=8761 --env eureka.instance.ip-address=127.0.0.1 --env eureka.client.registerWithEureka=true --env eureka.client.fetchRegistry=false --env eureka.client.serviceUrl.defaultZone=http://127.0.0.1:8761/eureka/ bitinit/eureka
sleep 5
- docker exec -it kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic test2
+ docker exec -i kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic test2
}
tengine_install() {
@@ -113,6 +118,11 @@ tengine_install() {
wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-delete_unused_variable.patch
wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-keepalive_post_request_status.patch
wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-tolerate_backslash_zero_in_uri.patch
+ wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-avoid-limit_req_zone-directive-in-multiple-variables.patch
+ wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-segmentation-fault-in-master-process.patch
+ wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-support-dtls-offload.patch
+ wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-support-prometheus-to-upstream_check_module.patch
+ wget -P patches https://raw.githubusercontent.com/totemofwolf/tengine/feature/patches/tengine-2.3.2-vnswrr-adaptated-to-dynamic_resolve.patch
cd bundle/tengine-2.3.2
patch -p1 < ../../patches/nginx-1.17.4-always_enable_cc_feature_tests.patch
@@ -143,6 +153,11 @@ tengine_install() {
patch -p1 < ../../patches/tengine-2.3.2-delete_unused_variable.patch
patch -p1 < ../../patches/tengine-2.3.2-keepalive_post_request_status.patch
patch -p1 < ../../patches/tengine-2.3.2-tolerate_backslash_zero_in_uri.patch
+ patch -p1 < ../../patches/tengine-2.3.2-avoid-limit_req_zone-directive-in-multiple-variables.patch
+ patch -p1 < ../../patches/tengine-2.3.2-segmentation-fault-in-master-process.patch
+ patch -p1 < ../../patches/tengine-2.3.2-support-dtls-offload.patch
+ patch -p1 < ../../patches/tengine-2.3.2-support-prometheus-to-upstream_check_module.patch
+ patch -p1 < ../../patches/tengine-2.3.2-vnswrr-adaptated-to-dynamic_resolve.patch
cd -
# patching end
@@ -266,7 +281,11 @@ script() {
export_or_prefix
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
openresty -V
- sudo service etcd start
+ sudo service etcd stop
+ mkdir -p ~/etcd-data
+ /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 &
+ etcd --version
+ sleep 5
./build-cache/grpc_server_example &
@@ -279,7 +298,7 @@ script() {
./bin/apisix stop
sleep 1
make lint && make license-check || exit 1
- APISIX_ENABLE_LUACOV=1 prove -Itest-nginx/lib -r t
+ APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t
}
after_success() {
diff --git a/.travis/osx_openresty_runner.sh b/.travis/osx_openresty_runner.sh
index 1cfce2728585..b31e9dedd010 100755
--- a/.travis/osx_openresty_runner.sh
+++ b/.travis/osx_openresty_runner.sh
@@ -23,6 +23,10 @@ export_or_prefix() {
}
before_install() {
+ if [ "$TRAVIS_OS_NAME" == "" ]; then
+ exit 0
+ fi
+
HOMEBREW_NO_AUTO_UPDATE=1 brew install perl cpanminus etcd luarocks openresty/brew/openresty-debug redis@3.2
brew upgrade go
@@ -36,6 +40,10 @@ before_install() {
}
do_install() {
+ if [ "$TRAVIS_OS_NAME" == "" ]; then
+ exit 0
+ fi
+
export_or_prefix
make deps
@@ -43,7 +51,7 @@ do_install() {
git clone https://github.com/iresty/test-nginx.git test-nginx
wget -P utils https://raw.githubusercontent.com/openresty/openresty-devel-utils/master/lj-releng
- chmod a+x utils/lj-releng
+ chmod a+x utils/lj-releng
wget https://github.com/iresty/grpc_server_example/releases/download/20200314/grpc_server_example-darwin-amd64.tar.gz
tar -xvf grpc_server_example-darwin-amd64.tar.gz
@@ -52,6 +60,10 @@ do_install() {
}
script() {
+ if [ "$TRAVIS_OS_NAME" == "" ]; then
+ exit 0
+ fi
+
export_or_prefix
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
@@ -78,6 +90,10 @@ script() {
}
after_success() {
+ if [ "$TRAVIS_OS_NAME" == "" ]; then
+ exit 0
+ fi
+
$PWD/deps/bin/luacov-coveralls
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e5494df6f29..1ef75bac13c0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,10 @@
# Table of Contents
+
+- [1.4.1](#141)
+- [1.4.0](#140)
+- [1.3.0](#130)
- [1.2.0](#120)
- [1.1.0](#110)
- [1.0.0](#100)
@@ -27,6 +31,44 @@
- [0.7.0](#070)
- [0.6.0](#060)
+
+## 1.4.1
+
+### Bugfix
+- Fix: when multiple SSL certificates are configured, only one certificate takes effect. [1818](https://github.com/apache/incubator-apisix/pull/1818)
+
+
+## 1.4.0
+
+### Core
+- Admin API: Support unique names for routes [1655](https://github.com/apache/incubator-apisix/pull/1655)
+- Optimization of log buffer size and flush time [1570](https://github.com/apache/incubator-apisix/pull/1570)
+
+### New plugins
+- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241)
+- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701)
+- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632)
+- :sunrise: **Consumer Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437)
+
+### Improvements
+- Batch Request: copy all headers to every request [1697](https://github.com/apache/incubator-apisix/pull/1697)
+- SSL private key encryption [1678](https://github.com/apache/incubator-apisix/pull/1678)
+- Improvement of docs for multiple plugins
+
+
+## 1.3.0
+
+Version 1.3 is mainly a security update.
+
+### Security
+- Reject invalid headers [#1462](https://github.com/apache/incubator-apisix/pull/1462) and safely encode URIs [#1461](https://github.com/apache/incubator-apisix/pull/1461)
+- Only allow 127.0.0.1 to access the Admin API and dashboard by default. [#1458](https://github.com/apache/incubator-apisix/pull/1458)
+
+### Plugin
+- :sunrise: **Add batch request plugin**. [#1388](https://github.com/apache/incubator-apisix/pull/1388)
+- Implement the `sys logger` plugin. [#1414](https://github.com/apache/incubator-apisix/pull/1414)
+
+
## 1.2.0
The 1.2 version brings many new features, including core and plugins.
diff --git a/CHANGELOG_CN.md b/CHANGELOG_CN.md
index 8e19e84721ea..3a9e844667cd 100644
--- a/CHANGELOG_CN.md
+++ b/CHANGELOG_CN.md
@@ -19,6 +19,9 @@
# Table of Contents
+- [1.4.1](#141)
+- [1.4.0](#140)
+- [1.3.0](#130)
- [1.2.0](#120)
- [1.1.0](#110)
- [1.0.0](#100)
@@ -27,6 +30,41 @@
- [0.7.0](#070)
- [0.6.0](#060)
+## 1.4.1
+
+### Bugfix
+- 修复在配置了多个 SSL 证书的情况下,只有一个证书生效的问题。 [1818](https://github.com/apache/incubator-apisix/pull/1818)
+
+## 1.4.0
+
+### Core
+- Admin API: 路由支持唯一 name 字段 [1655](https://github.com/apache/incubator-apisix/pull/1655)
+- 优化 log 缓冲区大小和刷新时间 [1570](https://github.com/apache/incubator-apisix/pull/1570)
+
+### New plugins
+- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241)
+- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701)
+- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632)
+- :sunrise: **Consumer Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437)
+
+### Improvements
+- Batch Request : 对每个请求拷贝头 [1697](https://github.com/apache/incubator-apisix/pull/1697)
+- SSL 私钥加密 [1678](https://github.com/apache/incubator-apisix/pull/1678)
+- 众多插件文档改善
+
+## 1.3.0
+
+1.3 版本主要带来安全更新。
+
+### Security
+- 拒绝无效的 header [#1462](https://github.com/apache/incubator-apisix/pull/1462) 并对 uri 进行安全编码 [#1461](https://github.com/apache/incubator-apisix/pull/1461)
+- 默认只允许本地环回地址 127.0.0.1 访问 admin API 和 dashboard. [#1458](https://github.com/apache/incubator-apisix/pull/1458)
+
+### Plugin
+- :sunrise: **新增 batch request 插件**. [#1388](https://github.com/apache/incubator-apisix/pull/1388)
+- 实现完成 `sys logger` 插件. [#1414](https://github.com/apache/incubator-apisix/pull/1414)
+
+
## 1.2.0
1.2 版本在内核以及插件上带来了非常多的更新。
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..732f5ae2eb46
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,127 @@
+
+
+*The following is copied for your convenience from
diff --git a/apisix/admin/global_rules.lua b/apisix/admin/global_rules.lua
index c74d7739d2cf..e9bb0057de0e 100644
--- a/apisix/admin/global_rules.lua
+++ b/apisix/admin/global_rules.lua
@@ -43,6 +43,8 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "wrong route id"}
end
+ conf.id = id
+
core.log.info("schema: ", core.json.delay_encode(core.schema.global_rule))
core.log.info("conf : ", core.json.delay_encode(conf))
local ok, err = core.schema.check(core.schema.global_rule, conf)
@@ -109,14 +111,16 @@ function _M.patch(id, conf, sub_path)
return 400, {error_msg = "missing global rule id"}
end
- if not sub_path then
- return 400, {error_msg = "missing sub-path"}
- end
-
if not conf then
return 400, {error_msg = "missing new configuration"}
end
+ if not sub_path or sub_path == "" then
+ if type(conf) ~= "table" then
+ return 400, {error_msg = "invalid configuration"}
+ end
+ end
+
local key = "/global_rules/" .. id
local res_old, err = core.etcd.get(key)
if not res_old then
@@ -131,32 +135,17 @@ function _M.patch(id, conf, sub_path)
core.json.delay_encode(res_old, true))
local node_value = res_old.body.node.value
- local sub_value = node_value
- local sub_paths = core.utils.split_uri(sub_path)
- for i = 1, #sub_paths - 1 do
- local sub_name = sub_paths[i]
- if sub_value[sub_name] == nil then
- sub_value[sub_name] = {}
- end
- sub_value = sub_value[sub_name]
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /"
- .. core.table.concat(sub_paths, 1, i)
+ if sub_path and sub_path ~= "" then
+ local code, err, node_val = core.table.patch(node_value, sub_path, conf)
+ node_value = node_val
+ if code then
+ return code, err
end
- end
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /" .. sub_path
- end
-
- local sub_name = sub_paths[#sub_paths]
- if sub_name and sub_name ~= "" then
- sub_value[sub_name] = conf
else
- node_value = conf
+ node_value = core.table.merge(node_value, conf)
end
+
core.log.info("new conf: ", core.json.delay_encode(node_value, true))
local ok, err = check_conf(id, node_value, true)
diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua
index 158ba926fb3a..71cb25e56286 100644
--- a/apisix/admin/init.lua
+++ b/apisix/admin/init.lua
@@ -26,6 +26,7 @@ local require = require
local reload_event = "/apisix/admin/plugins/reload"
local ipairs = ipairs
local events
+local MAX_REQ_BODY = 1024 * 1024 * 1.5 -- 1.5 MiB
local viewer_methods = {
@@ -117,8 +118,11 @@ local function run()
core.response.exit(404)
end
- ngx.req.read_body()
- local req_body = ngx.req.get_body_data()
+ local req_body, err = core.request.get_body(MAX_REQ_BODY)
+ if err then
+ core.log.error("failed to read request body: ", err)
+ core.response.exit(400, {error_msg = "invalid request body: " .. err})
+ end
if req_body then
local data, err = core.json.decode(req_body)
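
Note: `core.request.get_body(MAX_REQ_BODY)` caps how much of the request body the Admin API will read before JSON decoding. The helper below is a minimal standalone sketch of such a capped read built on the stock `ngx.req` API; it illustrates the behavior this hunk assumes and is not the actual `core.request` implementation.

```lua
-- Illustrative sketch only: a size-capped request body reader,
-- approximating what core.request.get_body(MAX_REQ_BODY) is assumed to do.
local function get_body_with_limit(max_size)
    ngx.req.read_body()

    local body = ngx.req.get_body_data()
    if not body then
        -- large bodies may be buffered to a temp file instead of memory
        local file_name = ngx.req.get_body_file()
        if file_name then
            return nil, "request body was buffered to disk, too large to read"
        end
        return nil  -- no body at all
    end

    if max_size and #body > max_size then
        return nil, "request body is too big"
    end

    return body
end
```

With a cap like the 1.5 MiB `MAX_REQ_BODY` above, an oversized payload becomes a 400 response instead of being decoded.
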
diff --git a/apisix/admin/plugins.lua b/apisix/admin/plugins.lua
index 7d6262c59c10..7b835e10ca43 100644
--- a/apisix/admin/plugins.lua
+++ b/apisix/admin/plugins.lua
@@ -18,9 +18,13 @@ local core = require("apisix.core")
local local_plugins = require("apisix.plugin").plugins_hash
local stream_local_plugins = require("apisix.plugin").stream_plugins_hash
local pairs = pairs
+local ipairs = ipairs
local pcall = pcall
local require = require
local table_remove = table.remove
+local table_sort = table.sort
+local table_insert = table.insert
+
local _M = {
version = 0.1,
@@ -114,7 +118,23 @@ function _M.get_plugins_list()
table_remove(plugins, 1)
end
- return plugins
+ local priorities = {}
+ local success = {}
+ for _, name in ipairs(plugins) do
+ local plugin_name = "apisix.plugins." .. name
+ local ok, plugin = pcall(require, plugin_name)
+ if ok and plugin.priority then
+ priorities[name] = plugin.priority
+ table_insert(success, name)
+ end
+ end
+
+ local function cmp(x, y)
+ return priorities[x] > priorities[y]
+ end
+
+ table_sort(success, cmp)
+ return success
end
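
The rewritten `get_plugins_list` returns plugin names ordered by priority rather than alphabetically. A self-contained sketch of the same sort follows; the plugin names and priority values are made up for illustration, standing in for what the real code collects via `pcall(require, ...)`.

```lua
local table_sort = table.sort
local table_insert = table.insert

-- hypothetical priorities, as if read from each plugin module
local priorities = {
    ["limit-req"]  = 1001,
    ["prometheus"] = 500,
    ["echo"]       = 412,
}

local success = {}
for name in pairs(priorities) do
    table_insert(success, name)
end

-- higher priority first, matching the cmp closure in this hunk
table_sort(success, function(x, y)
    return priorities[x] > priorities[y]
end)

for _, name in ipairs(success) do
    print(name)  -- limit-req, prometheus, echo
end
```
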
diff --git a/apisix/admin/routes.lua b/apisix/admin/routes.lua
index 3303e8dc0d0c..eac54c465d4a 100644
--- a/apisix/admin/routes.lua
+++ b/apisix/admin/routes.lua
@@ -45,6 +45,8 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "wrong route id"}
end
+ conf.id = id
+
core.log.info("schema: ", core.json.delay_encode(core.schema.route))
core.log.info("conf : ", core.json.delay_encode(conf))
local ok, err = core.schema.check(core.schema.route, conf)
@@ -122,6 +124,18 @@ local function check_conf(id, conf, need_id)
end
end
+ if conf.script then
+ local obj, err = loadstring(conf.script)
+ if not obj then
+ return nil, {error_msg = "failed to load 'script' string: "
+ .. err}
+ end
+
+ if type(obj()) ~= "table" then
+ return nil, {error_msg = "'script' should be a Lua object"}
+ end
+ end
+
return need_id and id or true
end
@@ -135,7 +149,7 @@ function _M.put(id, conf, sub_path, args)
local key = "/routes/" .. id
local res, err = core.etcd.set(key, conf, args.ttl)
if not res then
- core.log.error("failed to put route[", key, "]: ", err)
+ core.log.error("failed to put route[", key, "] to etcd: ", err)
return 500, {error_msg = err}
end
@@ -151,7 +165,7 @@ function _M.get(id)
local res, err = core.etcd.get(key)
if not res then
- core.log.error("failed to get route[", key, "]: ", err)
+ core.log.error("failed to get route[", key, "] from etcd: ", err)
return 500, {error_msg = err}
end
@@ -169,7 +183,7 @@ function _M.post(id, conf, sub_path, args)
-- core.log.info("key: ", key)
local res, err = core.etcd.push("/routes", conf, args.ttl)
if not res then
- core.log.error("failed to post route[", key, "]: ", err)
+ core.log.error("failed to post route[", key, "] to etcd: ", err)
return 500, {error_msg = err}
end
@@ -186,7 +200,7 @@ function _M.delete(id)
-- core.log.info("key: ", key)
local res, err = core.etcd.delete(key)
if not res then
- core.log.error("failed to delete route[", key, "]: ", err)
+ core.log.error("failed to delete route[", key, "] in etcd: ", err)
return 500, {error_msg = err}
end
@@ -199,14 +213,16 @@ function _M.patch(id, conf, sub_path, args)
return 400, {error_msg = "missing route id"}
end
- if not sub_path then
- return 400, {error_msg = "missing sub-path"}
- end
-
if not conf then
return 400, {error_msg = "missing new configuration"}
end
+ if not sub_path or sub_path == "" then
+ if type(conf) ~= "table" then
+ return 400, {error_msg = "invalid configuration"}
+ end
+ end
+
local key = "/routes"
if id then
key = key .. "/" .. id
@@ -214,7 +230,7 @@ function _M.patch(id, conf, sub_path, args)
local res_old, err = core.etcd.get(key)
if not res_old then
- core.log.error("failed to get route [", key, "]: ", err)
+ core.log.error("failed to get route [", key, "] in etcd: ", err)
return 500, {error_msg = err}
end
@@ -225,32 +241,17 @@ function _M.patch(id, conf, sub_path, args)
core.json.delay_encode(res_old, true))
local node_value = res_old.body.node.value
- local sub_value = node_value
- local sub_paths = core.utils.split_uri(sub_path)
- for i = 1, #sub_paths - 1 do
- local sub_name = sub_paths[i]
- if sub_value[sub_name] == nil then
- sub_value[sub_name] = {}
- end
- sub_value = sub_value[sub_name]
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /"
- .. core.table.concat(sub_paths, 1, i)
+ if sub_path and sub_path ~= "" then
+ local code, err, node_val = core.table.patch(node_value, sub_path, conf)
+ node_value = node_val
+ if code then
+ return code, err
end
- end
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /" .. sub_path
- end
-
- local sub_name = sub_paths[#sub_paths]
- if sub_name and sub_name ~= "" then
- sub_value[sub_name] = conf
else
- node_value = conf
+ node_value = core.table.merge(node_value, conf)
end
+
core.log.info("new conf: ", core.json.delay_encode(node_value, true))
local id, err = check_conf(id, node_value, true)
@@ -261,7 +262,7 @@ function _M.patch(id, conf, sub_path, args)
-- TODO: this is not safe, we need to use compare-set
local res, err = core.etcd.set(key, node_value, args.ttl)
if not res then
- core.log.error("failed to set new route[", key, "]: ", err)
+ core.log.error("failed to set new route[", key, "] to etcd: ", err)
return 500, {error_msg = err}
end
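
Both routes and services now validate the `script` field by compiling the string and checking that executing it yields a table. A minimal standalone version of that check (plain Lua 5.1/LuaJIT, not the APISIX module itself) looks like this:

```lua
local loadstring = loadstring  -- Lua 5.1 / LuaJIT only

local function check_script(script)
    -- compile the string into a chunk without running it
    local obj, err = loadstring(script)
    if not obj then
        return nil, "failed to load 'script' string: " .. err
    end

    -- run the chunk; a valid script must return a table
    if type(obj()) ~= "table" then
        return nil, "'script' should be a Lua object"
    end

    return true
end

-- a chunk that returns a table passes ...
print(check_script("return {access = function() end}"))  -- true
-- ... while one that returns a number is rejected
print(check_script("return 42"))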
diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua
index e26ea41e6336..2200333e92a8 100644
--- a/apisix/admin/services.lua
+++ b/apisix/admin/services.lua
@@ -20,8 +20,8 @@ local schema_plugin = require("apisix.admin.plugins").check_schema
local upstreams = require("apisix.admin.upstreams")
local tostring = tostring
local ipairs = ipairs
-local tonumber = tonumber
local type = type
+local loadstring = loadstring
local _M = {
@@ -47,6 +47,7 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "wrong service id"}
end
+ conf.id = id
core.log.info("schema: ", core.json.delay_encode(core.schema.service))
core.log.info("conf : ", core.json.delay_encode(conf))
@@ -55,7 +56,7 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "invalid configuration: " .. err}
end
- if need_id and not tonumber(id) then
+ if need_id and not id then
return nil, {error_msg = "wrong type of service id"}
end
@@ -91,6 +92,18 @@ local function check_conf(id, conf, need_id)
end
end
+ if conf.script then
+ local obj, err = loadstring(conf.script)
+ if not obj then
+ return nil, {error_msg = "failed to load 'script' string: "
+ .. err}
+ end
+
+ if type(obj()) ~= "table" then
+ return nil, {error_msg = "'script' should be a Lua object"}
+ end
+ end
+
return need_id and id or true
end
@@ -182,14 +195,16 @@ function _M.patch(id, conf, sub_path)
return 400, {error_msg = "missing service id"}
end
- if not sub_path then
- return 400, {error_msg = "missing sub-path"}
- end
-
if not conf then
return 400, {error_msg = "missing new configuration"}
end
+ if not sub_path or sub_path == "" then
+ if type(conf) ~= "table" then
+ return 400, {error_msg = "invalid configuration"}
+ end
+ end
+
local key = "/services" .. "/" .. id
local res_old, err = core.etcd.get(key)
if not res_old then
@@ -203,42 +218,27 @@ function _M.patch(id, conf, sub_path)
core.log.info("key: ", key, " old value: ",
core.json.delay_encode(res_old, true))
- local new_value = res_old.body.node.value
- local sub_value = new_value
- local sub_paths = core.utils.split_uri(sub_path)
- for i = 1, #sub_paths - 1 do
- local sub_name = sub_paths[i]
- if sub_value[sub_name] == nil then
- sub_value[sub_name] = {}
- end
+ local node_value = res_old.body.node.value
- sub_value = sub_value[sub_name]
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /"
- .. core.table.concat(sub_paths, 1, i)
+ if sub_path and sub_path ~= "" then
+ local code, err, node_val = core.table.patch(node_value, sub_path, conf)
+ node_value = node_val
+ if code then
+ return code, err
end
- end
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /" .. sub_path
- end
-
- local sub_name = sub_paths[#sub_paths]
- if sub_name and sub_name ~= "" then
- sub_value[sub_name] = conf
else
- new_value = conf
+ node_value = core.table.merge(node_value, conf)
end
- core.log.info("new value ", core.json.delay_encode(new_value, true))
- local id, err = check_conf(id, new_value, true)
+ core.log.info("new value ", core.json.delay_encode(node_value, true))
+
+ local id, err = check_conf(id, node_value, true)
if not id then
return 400, err
end
-- TODO: this is not safe, we need to use compare-set
- local res, err = core.etcd.set(key, new_value)
+ local res, err = core.etcd.set(key, node_value)
if not res then
core.log.error("failed to set new service[", key, "]: ", err)
return 500, {error_msg = err}
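
The PATCH handlers now delegate to `core.table.patch` (with a sub-path) or `core.table.merge` (without one) instead of walking the sub-path by hand. A hedged sketch of the merge half, assuming `core.table.merge` recursively overlays the new configuration onto the stored value; this helper is written from that assumption, not copied from APISIX:

```lua
-- assumed semantics: keys in `new` overwrite keys in `old`,
-- recursing when both sides hold tables
local function merge(old, new)
    for key, val in pairs(new) do
        if type(val) == "table" and type(old[key]) == "table" then
            merge(old[key], val)
        else
            old[key] = val
        end
    end
    return old
end

local stored = {uri = "/index.html", upstream = {type = "roundrobin", timeout = 6}}
local patch  = {upstream = {timeout = 15}}

merge(stored, patch)
print(stored.upstream.type, stored.upstream.timeout)  -- roundrobin  15
```

So a body-only PATCH updates just the fields it names, and the merged result is then re-validated by `check_conf` before being written back to etcd.
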
diff --git a/apisix/admin/ssl.lua b/apisix/admin/ssl.lua
index 898d9c1a988f..6d9307d95d1d 100644
--- a/apisix/admin/ssl.lua
+++ b/apisix/admin/ssl.lua
@@ -14,10 +14,13 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local core = require("apisix.core")
-local schema_plugin = require("apisix.admin.plugins").check_schema
-local tostring = tostring
-
+local core = require("apisix.core")
+local tostring = tostring
+local aes = require "resty.aes"
+local ngx_encode_base64 = ngx.encode_base64
+local str_find = string.find
+local type = type
+local assert = assert
local _M = {
version = 0.1,
@@ -42,6 +45,8 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "wrong ssl id"}
end
+ conf.id = id
+
core.log.info("schema: ", core.json.delay_encode(core.schema.ssl))
core.log.info("conf : ", core.json.delay_encode(conf))
local ok, err = core.schema.check(core.schema.ssl, conf)
@@ -49,48 +54,31 @@ local function check_conf(id, conf, need_id)
return nil, {error_msg = "invalid configuration: " .. err}
end
- local upstream_id = conf.upstream_id
- if upstream_id then
- local key = "/upstreams/" .. upstream_id
- local res, err = core.etcd.get(key)
- if not res then
- return nil, {error_msg = "failed to fetch upstream info by "
- .. "upstream id [" .. upstream_id .. "]: "
- .. err}
- end
-
- if res.status ~= 200 then
- return nil, {error_msg = "failed to fetch upstream info by "
- .. "upstream id [" .. upstream_id .. "], "
- .. "response code: " .. res.status}
- end
- end
+ return need_id and id or true
+end
- local service_id = conf.service_id
- if service_id then
- local key = "/services/" .. service_id
- local res, err = core.etcd.get(key)
- if not res then
- return nil, {error_msg = "failed to fetch service info by "
- .. "service id [" .. service_id .. "]: "
- .. err}
- end
- if res.status ~= 200 then
- return nil, {error_msg = "failed to fetch service info by "
- .. "service id [" .. service_id .. "], "
- .. "response code: " .. res.status}
- end
+local function aes_encrypt(origin)
+ local local_conf = core.config.local_conf()
+ local iv
+ if local_conf and local_conf.apisix and local_conf.apisix.ssl
+ and local_conf.apisix.ssl.key_encrypt_salt then
+ iv = local_conf.apisix.ssl.key_encrypt_salt
end
+ local aes_128_cbc_with_iv = (type(iv)=="string" and #iv == 16) and
+ assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) or nil
- if conf.plugins then
- local ok, err = schema_plugin(conf.plugins)
- if not ok then
- return nil, {error_msg = err}
+ if aes_128_cbc_with_iv ~= nil and str_find(origin, "---") then
+ local encrypted = aes_128_cbc_with_iv:encrypt(origin)
+ if encrypted == nil then
+ core.log.error("failed to encrypt key[", origin, "] ")
+ return origin
end
+
+ return ngx_encode_base64(encrypted)
end
- return need_id and id or true
+ return origin
end
@@ -100,6 +88,9 @@ function _M.put(id, conf)
return 400, err
end
+ -- encrypt private key
+ conf.key = aes_encrypt(conf.key)
+
local key = "/ssl/" .. id
local res, err = core.etcd.set(key, conf)
if not res then
@@ -138,6 +129,9 @@ function _M.post(id, conf)
return 400, err
end
+ -- encrypt private key
+ conf.key = aes_encrypt(conf.key)
+
local key = "/ssl"
-- core.log.info("key: ", key)
local res, err = core.etcd.push("/ssl", conf)
@@ -167,4 +161,57 @@ function _M.delete(id)
end
+function _M.patch(id, conf)
+ if not id then
+ return 400, {error_msg = "missing route id"}
+ end
+
+ if not conf then
+ return 400, {error_msg = "missing new configuration"}
+ end
+
+ if type(conf) ~= "table" then
+ return 400, {error_msg = "invalid configuration"}
+ end
+
+ local key = "/ssl"
+ if id then
+ key = key .. "/" .. id
+ end
+
+ local res_old, err = core.etcd.get(key)
+ if not res_old then
+ core.log.error("failed to get ssl [", key, "] in etcd: ", err)
+ return 500, {error_msg = err}
+ end
+
+ if res_old.status ~= 200 then
+ return res_old.status, res_old.body
+ end
+ core.log.info("key: ", key, " old value: ",
+ core.json.delay_encode(res_old, true))
+
+
+ local node_value = res_old.body.node.value
+
+ node_value = core.table.merge(node_value, conf)
+
+ core.log.info("new ssl conf: ", core.json.delay_encode(node_value, true))
+
+ local id, err = check_conf(id, node_value, true)
+ if not id then
+ return 400, err
+ end
+
+ -- TODO: this is not safe, we need to use compare-set
+ local res, err = core.etcd.set(key, node_value)
+ if not res then
+ core.log.error("failed to set new ssl[", key, "] to etcd: ", err)
+ return 500, {error_msg = err}
+ end
+
+ return res.status, res.body
+end
+
+
return _M
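
The `aes_encrypt` helper above stores the SSL private key AES-128-CBC encrypted and base64 encoded whenever a 16-byte `key_encrypt_salt` is configured. A usage sketch of the same `lua-resty-string` primitives under OpenResty follows; the salt value is hypothetical, and the decrypt step is added to show how the read side is assumed to recover the PEM:

```lua
local aes = require("resty.aes")
local ngx_encode_base64 = ngx.encode_base64
local ngx_decode_base64 = ngx.decode_base64

local salt = "edd1c9f0985e76a2"  -- hypothetical 16-byte key_encrypt_salt
-- same constructor call as aes_encrypt: the salt serves as both key and IV
local cipher = assert(aes:new(salt, nil, aes.cipher(128, "cbc"), {iv = salt}))

local origin = "-----BEGIN PRIVATE KEY----- ..."
local stored = ngx_encode_base64(cipher:encrypt(origin))

-- decrypting with the same salt recovers the original PEM
local recovered = cipher:decrypt(ngx_decode_base64(stored))
assert(recovered == origin)
```

Note the `str_find(origin, "---")` guard in the hunk: only PEM-looking plaintext is encrypted, so already-encrypted (base64) values pass through unchanged on re-submission.
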
diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua
index e806da5e01d6..969f775164e6 100644
--- a/apisix/admin/stream_routes.lua
+++ b/apisix/admin/stream_routes.lua
@@ -31,17 +31,19 @@ local function check_conf(id, conf, need_id)
id = id or conf.id
if need_id and not id then
- return nil, {error_msg = "missing stream stream route id"}
+ return nil, {error_msg = "missing stream route id"}
end
if not need_id and id then
- return nil, {error_msg = "wrong stream stream route id, do not need it"}
+ return nil, {error_msg = "wrong stream route id, do not need it"}
end
if need_id and conf.id and tostring(conf.id) ~= tostring(id) then
- return nil, {error_msg = "wrong stream stream route id"}
+ return nil, {error_msg = "wrong stream route id"}
end
+ conf.id = id
+
core.log.info("schema: ", core.json.delay_encode(core.schema.stream_route))
core.log.info("conf : ", core.json.delay_encode(conf))
local ok, err = core.schema.check(core.schema.stream_route, conf)
@@ -129,7 +131,7 @@ end
function _M.delete(id)
if not id then
- return 400, {error_msg = "missing stream stream route id"}
+ return 400, {error_msg = "missing stream route id"}
end
local key = "/stream_routes/" .. id
diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua
index b49c33a78ec5..b74f46e2e1ba 100644
--- a/apisix/admin/upstreams.lua
+++ b/apisix/admin/upstreams.lua
@@ -19,7 +19,6 @@ local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local tostring = tostring
local ipairs = ipairs
-local tonumber = tonumber
local type = type
@@ -99,17 +98,18 @@ local function check_conf(id, conf, need_id)
if need_id and conf.id and tostring(conf.id) ~= tostring(id) then
return nil, {error_msg = "wrong upstream id"}
end
+
+ -- let schema check id
+ conf.id = id
+
core.log.info("schema: ", core.json.delay_encode(core.schema.upstream))
core.log.info("conf : ", core.json.delay_encode(conf))
+
local ok, err = check_upstream_conf(conf)
if not ok then
return nil, {error_msg = err}
end
- if need_id and not tonumber(id) then
- return nil, {error_msg = "wrong type of service id"}
- end
-
return need_id and id or true
end
@@ -216,14 +216,16 @@ function _M.patch(id, conf, sub_path)
return 400, {error_msg = "missing upstream id"}
end
- if not sub_path then
- return 400, {error_msg = "missing sub-path"}
- end
-
if not conf then
return 400, {error_msg = "missing new configuration"}
end
+ if not sub_path or sub_path == "" then
+ if type(conf) ~= "table" then
+ return 400, {error_msg = "invalid configuration"}
+ end
+ end
+
local key = "/upstreams" .. "/" .. id
local res_old, err = core.etcd.get(key)
if not res_old then
@@ -238,32 +240,17 @@ function _M.patch(id, conf, sub_path)
core.json.delay_encode(res_old, true))
local new_value = res_old.body.node.value
- local sub_value = new_value
- local sub_paths = core.utils.split_uri(sub_path)
- for i = 1, #sub_paths - 1 do
- local sub_name = sub_paths[i]
- if sub_value[sub_name] == nil then
- sub_value[sub_name] = {}
- end
- sub_value = sub_value[sub_name]
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /"
- .. core.table.concat(sub_paths, 1, i)
+ if sub_path and sub_path ~= "" then
+ local code, err, node_val = core.table.patch(new_value, sub_path, conf)
+ new_value = node_val
+ if code then
+ return code, err
end
- end
-
- if type(sub_value) ~= "table" then
- return 400, "invalid sub-path: /" .. sub_path
- end
-
- local sub_name = sub_paths[#sub_paths]
- if sub_name and sub_name ~= "" then
- sub_value[sub_name] = conf
else
- new_value = conf
+ new_value = core.table.merge(new_value, conf)
end
+
core.log.info("new value ", core.json.delay_encode(new_value, true))
local id, err = check_conf(id, new_value, true)
diff --git a/apisix/balancer.lua b/apisix/balancer.lua
index a5134bcbd928..36f4f32d4b28 100644
--- a/apisix/balancer.lua
+++ b/apisix/balancer.lua
@@ -14,23 +14,23 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local healthcheck = require("resty.healthcheck")
-local roundrobin = require("resty.roundrobin")
-local resty_chash = require("resty.chash")
+local healthcheck
+local require = require
+local discovery = require("apisix.discovery.init").discovery
local balancer = require("ngx.balancer")
local core = require("apisix.core")
-local error = error
-local str_char = string.char
-local str_gsub = string.gsub
-local pairs = pairs
+local ipairs = ipairs
local tostring = tostring
local set_more_tries = balancer.set_more_tries
local get_last_failure = balancer.get_last_failure
local set_timeouts = balancer.set_timeouts
-local upstreams_etcd
local module_name = "balancer"
+local pickers = {
+ roundrobin = require("apisix.balancer.roundrobin"),
+ chash = require("apisix.balancer.chash"),
+}
local lrucache_server_picker = core.lrucache.new({
@@ -39,33 +39,44 @@ local lrucache_server_picker = core.lrucache.new({
local lrucache_checker = core.lrucache.new({
ttl = 300, count = 256
})
+local lrucache_addr = core.lrucache.new({
+ ttl = 300, count = 1024 * 4
+})
local _M = {
- version = 0.1,
+ version = 0.2,
name = module_name,
}
local function fetch_health_nodes(upstream, checker)
+ local nodes = upstream.nodes
if not checker then
- return upstream.nodes
+ local new_nodes = core.table.new(0, #nodes)
+ for _, node in ipairs(nodes) do
+ -- TODO filter with metadata
+ new_nodes[node.host .. ":" .. node.port] = node.weight
+ end
+ return new_nodes
end
- local host = upstream.checks and upstream.checks.host
- local up_nodes = core.table.new(0, core.table.nkeys(upstream.nodes))
-
- for addr, weight in pairs(upstream.nodes) do
- local ip, port = core.utils.parse_addr(addr)
- local ok = checker:get_target_status(ip, port, host)
+ local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
+ local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
+ local up_nodes = core.table.new(0, #nodes)
+ for _, node in ipairs(nodes) do
+ local ok = checker:get_target_status(node.host, port or node.port, host)
if ok then
- up_nodes[addr] = weight
+ -- TODO filter with metadata
+ up_nodes[node.host .. ":" .. node.port] = node.weight
end
end
if core.table.nkeys(up_nodes) == 0 then
core.log.warn("all upstream nodes is unhealth, use default")
- up_nodes = upstream.nodes
+ for _, node in ipairs(nodes) do
+ up_nodes[node.host .. ":" .. node.port] = node.weight
+ end
end
return up_nodes
@@ -73,18 +84,22 @@ end
local function create_checker(upstream, healthcheck_parent)
+ if healthcheck == nil then
+ healthcheck = require("resty.healthcheck")
+ end
local checker = healthcheck.new({
name = "upstream#" .. healthcheck_parent.key,
shm_name = "upstream-healthcheck",
checks = upstream.checks,
})
- for addr, weight in pairs(upstream.nodes) do
- local ip, port = core.utils.parse_addr(addr)
- local ok, err = checker:add_target(ip, port, upstream.checks.host)
+ local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
+ local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
+ for _, node in ipairs(upstream.nodes) do
+ local ok, err = checker:add_target(node.host, port or node.port, host)
if not ok then
- core.log.error("failed to add new health check target: ", addr,
- " err: ", err)
+ core.log.error("failed to add new health check target: ", node.host, ":",
+ port or node.port, " err: ", err)
end
end
@@ -122,144 +137,87 @@ local function fetch_healthchecker(upstream, healthcheck_parent, version)
end
-local function fetch_chash_hash_key(ctx, upstream)
- local key = upstream.key
- local hash_on = upstream.hash_on or "vars"
- local chash_key
-
- if hash_on == "consumer" then
- chash_key = ctx.consumer_id
- elseif hash_on == "vars" then
- chash_key = ctx.var[key]
- elseif hash_on == "header" then
- chash_key = ctx.var["http_" .. key]
- elseif hash_on == "cookie" then
- chash_key = ctx.var["cookie_" .. key]
- end
-
- if not chash_key then
- chash_key = ctx.var["remote_addr"]
- core.log.warn("chash_key fetch is nil, use default chash_key ",
- "remote_addr: ", chash_key)
- end
- core.log.info("upstream key: ", key)
- core.log.info("hash_on: ", hash_on)
- core.log.info("chash_key: ", core.json.delay_encode(chash_key))
-
- return chash_key
-end
-
-
local function create_server_picker(upstream, checker)
- if upstream.type == "roundrobin" then
+ local picker = pickers[upstream.type]
+ if picker then
local up_nodes = fetch_health_nodes(upstream, checker)
core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes))
- local picker = roundrobin:new(up_nodes)
- return {
- upstream = upstream,
- get = function ()
- return picker:find()
- end
- }
+ return picker.new(up_nodes, upstream)
end
- if upstream.type == "chash" then
- local up_nodes = fetch_health_nodes(upstream, checker)
- core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes))
-
- local str_null = str_char(0)
-
- local servers, nodes = {}, {}
- for serv, weight in pairs(up_nodes) do
- local id = str_gsub(serv, ":", str_null)
+ return nil, "invalid balancer type: " .. upstream.type, 0
+end
- servers[id] = serv
- nodes[id] = weight
- end
- local picker = resty_chash:new(nodes)
- return {
- upstream = upstream,
- get = function (ctx)
- local chash_key = fetch_chash_hash_key(ctx, upstream)
- local id = picker:find(chash_key)
- -- core.log.warn("chash id: ", id, " val: ", servers[id])
- return servers[id]
- end
- }
- end
-
- return nil, "invalid balancer type: " .. upstream.type, 0
+local function parse_addr(addr)
+ local host, port, err = core.utils.parse_addr(addr)
+ return {host = host, port = port}, err
end
local function pick_server(route, ctx)
core.log.info("route: ", core.json.delay_encode(route, true))
core.log.info("ctx: ", core.json.delay_encode(ctx, true))
- local healthcheck_parent = route
- local up_id = route.value.upstream_id
- local up_conf = (route.dns_value and route.dns_value.upstream)
- or route.value.upstream
- if not up_id and not up_conf then
- return nil, nil, "missing upstream configuration"
+ local up_conf = ctx.upstream_conf
+ if up_conf.service_name then
+ if not discovery then
+ return nil, "discovery is uninitialized"
+ end
+ up_conf.nodes = discovery.nodes(up_conf.service_name)
end
- local version
- local key
-
- if up_id then
- if not upstreams_etcd then
- return nil, nil, "need to create a etcd instance for fetching "
- .. "upstream information"
- end
+ local nodes_count = up_conf.nodes and #up_conf.nodes or 0
+ if nodes_count == 0 then
+ return nil, "no valid upstream node"
+ end
- local up_obj = upstreams_etcd:get(tostring(up_id))
- if not up_obj then
- return nil, nil, "failed to find upstream by id: " .. up_id
+ if up_conf.timeout then
+ local timeout = up_conf.timeout
+ local ok, err = set_timeouts(timeout.connect, timeout.send,
+ timeout.read)
+ if not ok then
+ core.log.error("could not set upstream timeouts: ", err)
end
- core.log.info("upstream: ", core.json.delay_encode(up_obj))
-
- healthcheck_parent = up_obj
- up_conf = up_obj.dns_value or up_obj.value
- version = up_obj.modifiedIndex
- key = up_conf.type .. "#upstream_" .. up_id
-
- else
- version = ctx.conf_version
- key = up_conf.type .. "#route_" .. route.value.id
end
- if core.table.nkeys(up_conf.nodes) == 0 then
- return nil, nil, "no valid upstream node"
+ if nodes_count == 1 then
+ local node = up_conf.nodes[1]
+ ctx.balancer_ip = node.host
+ ctx.balancer_port = node.port
+ return node
end
+ local healthcheck_parent = ctx.upstream_healthcheck_parent
+ local version = ctx.upstream_version
+ local key = ctx.upstream_key
local checker = fetch_healthchecker(up_conf, healthcheck_parent, version)
+ ctx.up_checker = checker
ctx.balancer_try_count = (ctx.balancer_try_count or 0) + 1
if checker and ctx.balancer_try_count > 1 then
local state, code = get_last_failure()
+ local host = up_conf.checks and up_conf.checks.active and up_conf.checks.active.host
+ local port = up_conf.checks and up_conf.checks.active and up_conf.checks.active.port
if state == "failed" then
if code == 504 then
- checker:report_timeout(ctx.balancer_ip, ctx.balancer_port,
- up_conf.checks.host)
+ checker:report_timeout(ctx.balancer_ip, port or ctx.balancer_port, host)
else
- checker:report_tcp_failure(ctx.balancer_ip,
- ctx.balancer_port, up_conf.checks.host)
+ checker:report_tcp_failure(ctx.balancer_ip, port or ctx.balancer_port, host)
end
-
else
- checker:report_http_status(ctx.balancer_ip, ctx.balancer_port,
- up_conf.checks.host, code)
+ checker:report_http_status(ctx.balancer_ip, port or ctx.balancer_port, host, code)
end
end
if ctx.balancer_try_count == 1 then
local retries = up_conf.retries
- if retries and retries > 0 then
+ if not retries or retries < 0 then
+ retries = #up_conf.nodes - 1
+ end
+
+ if retries > 0 then
set_more_tries(retries)
- else
- set_more_tries(core.table.nkeys(up_conf.nodes))
end
end
@@ -270,45 +228,44 @@ local function pick_server(route, ctx)
local server_picker = lrucache_server_picker(key, version,
create_server_picker, up_conf, checker)
if not server_picker then
- return nil, nil, "failed to fetch server picker"
+ return nil, "failed to fetch server picker"
end
local server, err = server_picker.get(ctx)
if not server then
err = err or "no valid upstream node"
- return nil, nil, "failed to find valid upstream server, " .. err
+ return nil, "failed to find valid upstream server, " .. err
end
- if up_conf.timeout then
- local timeout = up_conf.timeout
- local ok, err = set_timeouts(timeout.connect, timeout.send,
- timeout.read)
- if not ok then
- core.log.error("could not set upstream timeouts: ", err)
- end
+ local res, err = lrucache_addr(server, nil, parse_addr, server)
+ ctx.balancer_ip = res.host
+ ctx.balancer_port = res.port
+ -- core.log.info("cached balancer peer host: ", res.host, ":", res.port)
+ if err then
+ core.log.error("failed to parse server addr: ", server, " err: ", err)
+ return core.response.exit(502)
end
- local ip, port, err = core.utils.parse_addr(server)
- ctx.balancer_ip = ip
- ctx.balancer_port = port
-
- return ip, port, err
+ return res
end
+
+
-- for test
_M.pick_server = pick_server
function _M.run(route, ctx)
- local ip, port, err = pick_server(route, ctx)
- if err then
+ local server, err = pick_server(route, ctx)
+ if not server then
core.log.error("failed to pick server: ", err)
return core.response.exit(502)
end
- local ok, err = balancer.set_current_peer(ip, port)
+ core.log.info("proxy request to ", server.host, ":", server.port)
+ local ok, err = balancer.set_current_peer(server.host, server.port)
if not ok then
- core.log.error("failed to set server peer [", ip, ":", port,
- "] err: ", err)
+ core.log.error("failed to set server peer [", server.host, ":",
+ server.port, "] err: ", err)
return core.response.exit(502)
end
@@ -317,34 +274,6 @@ end
function _M.init_worker()
- local err
- upstreams_etcd, err = core.config.new("/upstreams", {
- automatic = true,
- item_schema = core.schema.upstream,
- filter = function(upstream)
- upstream.has_domain = false
- if not upstream.value then
- return
- end
-
- for addr, _ in pairs(upstream.value.nodes or {}) do
- local host = core.utils.parse_addr(addr)
- if not core.utils.parse_ipv4(host) and
- not core.utils.parse_ipv6(host) then
- upstream.has_domain = true
- break
- end
- end
-
- core.log.info("filter upstream: ",
- core.json.delay_encode(upstream))
- end,
- })
- if not upstreams_etcd then
- error("failed to create etcd instance for fetching upstream: " .. err)
- return
- end
end
-
return _M
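
The load-balancer logic is now split into per-type picker modules (added below). A minimal sketch of the registry pattern that `create_server_picker` dispatches through, assuming `pickers` is filled by requiring `apisix.balancer.<type>` modules (the exact wiring lives elsewhere in balancer.lua):

    local pickers = {}
    for _, typ in ipairs({"roundrobin", "chash"}) do
        pickers[typ] = require("apisix.balancer." .. typ)
    end

    -- create_server_picker() then resolves pickers[upstream.type] and calls
    -- picker.new(up_nodes, upstream), which returns {upstream = ..., get = fn}
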
diff --git a/apisix/balancer/chash.lua b/apisix/balancer/chash.lua
new file mode 100644
index 000000000000..38831cdb4e48
--- /dev/null
+++ b/apisix/balancer/chash.lua
@@ -0,0 +1,80 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core = require("apisix.core")
+local resty_chash = require("resty.chash")
+local str_char = string.char
+local str_gsub = string.gsub
+local pairs = pairs
+
+
+local _M = {}
+
+
+local function fetch_chash_hash_key(ctx, upstream)
+ local key = upstream.key
+ local hash_on = upstream.hash_on or "vars"
+ local chash_key
+
+ if hash_on == "consumer" then
+ chash_key = ctx.consumer_id
+ elseif hash_on == "vars" then
+ chash_key = ctx.var[key]
+ elseif hash_on == "header" then
+ chash_key = ctx.var["http_" .. key]
+ elseif hash_on == "cookie" then
+ chash_key = ctx.var["cookie_" .. key]
+ end
+
+ if not chash_key then
+ chash_key = ctx.var["remote_addr"]
+ core.log.warn("chash_key fetch is nil, use default chash_key ",
+ "remote_addr: ", chash_key)
+ end
+ core.log.info("upstream key: ", key)
+ core.log.info("hash_on: ", hash_on)
+ core.log.info("chash_key: ", core.json.delay_encode(chash_key))
+
+ return chash_key
+end
+
+
+function _M.new(up_nodes, upstream)
+ local str_null = str_char(0)
+
+ local servers, nodes = {}, {}
+ for serv, weight in pairs(up_nodes) do
+ local id = str_gsub(serv, ":", str_null)
+
+ servers[id] = serv
+ nodes[id] = weight
+ end
+
+ local picker = resty_chash:new(nodes)
+ return {
+ upstream = upstream,
+ get = function (ctx)
+ local chash_key = fetch_chash_hash_key(ctx, upstream)
+ local id = picker:find(chash_key)
+ -- core.log.warn("chash id: ", id, " val: ", servers[id])
+ return servers[id]
+ end
+ }
+end
+
+
+return _M
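
A usage sketch for the extracted chash picker; the node map and upstream fields are illustrative:

    local chash = require("apisix.balancer.chash")
    local up_nodes = {["127.0.0.1:1980"] = 1, ["127.0.0.1:1981"] = 1}
    local picker = chash.new(up_nodes, {key = "remote_addr", hash_on = "vars"})
    -- in the balancer phase, picker.get(api_ctx) hashes the configured key
    -- and consistently maps the same key to the same "host:port"
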
diff --git a/apisix/balancer/roundrobin.lua b/apisix/balancer/roundrobin.lua
new file mode 100644
index 000000000000..dac4f03ea10d
--- /dev/null
+++ b/apisix/balancer/roundrobin.lua
@@ -0,0 +1,34 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local roundrobin = require("resty.roundrobin")
+
+local _M = {}
+
+
+function _M.new(up_nodes, upstream)
+ local picker = roundrobin:new(up_nodes)
+ return {
+ upstream = upstream,
+ get = function ()
+ return picker:find()
+ end
+ }
+end
+
+
+return _M
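
Likewise for the roundrobin picker, where `resty.roundrobin` honors per-node weights; the values below are illustrative:

    local rr = require("apisix.balancer.roundrobin")
    local picker = rr.new({["127.0.0.1:1980"] = 2, ["127.0.0.1:1981"] = 1}, upstream)
    -- picker.get() returns "127.0.0.1:1980" about twice as often as ":1981"
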
diff --git a/apisix/consumer.lua b/apisix/consumer.lua
index 31dfa05c80f3..35f01329d82b 100644
--- a/apisix/consumer.lua
+++ b/apisix/consumer.lua
@@ -74,6 +74,15 @@ function _M.plugin(plugin_name)
end
+function _M.consumers()
+ if not consumers then
+ return nil, nil
+ end
+
+ return consumers.values, consumers.conf_version
+end
+
+
function _M.init_worker()
local err
consumers, err = core.config.new("/consumers", {
diff --git a/apisix/core.lua b/apisix/core.lua
index 051dae36af75..1b4ebf42f832 100644
--- a/apisix/core.lua
+++ b/apisix/core.lua
@@ -38,4 +38,5 @@ return {
etcd = require("apisix.core.etcd"),
http = require("apisix.core.http"),
tablepool= require("tablepool"),
+ empty_tab= {},
}
diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua
index 484e6ef9fc7e..6e616b08f906 100644
--- a/apisix/core/config_etcd.lua
+++ b/apisix/core/config_etcd.lua
@@ -49,6 +49,26 @@ local mt = {
end
}
+
+local function getkey(etcd_cli, key)
+ if not etcd_cli then
+ return nil, "not inited"
+ end
+
+ local res, err = etcd_cli:get(key)
+ if not res then
+ -- log.error("failed to get key from etcd: ", err)
+ return nil, err
+ end
+
+ if type(res.body) ~= "table" then
+ return nil, "failed to get key from etcd"
+ end
+
+ return res
+end
+
+
local function readdir(etcd_cli, key)
if not etcd_cli then
return nil, nil, "not inited"
@@ -67,12 +87,12 @@ local function readdir(etcd_cli, key)
return res
end
-local function waitdir(etcd_cli, key, modified_index)
+local function waitdir(etcd_cli, key, modified_index, timeout)
if not etcd_cli then
return nil, nil, "not inited"
end
- local res, err = etcd_cli:waitdir(key, modified_index)
+ local res, err = etcd_cli:waitdir(key, modified_index, timeout)
if not res then
-- log.error("failed to get key from etcd: ", err)
return nil, err
@@ -201,9 +221,25 @@ local function sync_data(self)
return true
end
- local dir_res, err = waitdir(self.etcd_cli, self.key, self.prev_index + 1)
+ -- fetch the current etcd index
+ local key_res, _ = getkey(self.etcd_cli, self.key)
+
+ local dir_res, err = waitdir(self.etcd_cli, self.key, self.prev_index + 1, self.timeout)
+
log.info("waitdir key: ", self.key, " prev_index: ", self.prev_index + 1)
log.info("res: ", json.delay_encode(dir_res, true))
+ if err == "timeout" then
+ if key_res and key_res.headers then
+ local key_index = key_res.headers["X-Etcd-Index"]
+ local key_idx = key_index and tonumber(key_index) or 0
+ if key_idx and key_idx > self.prev_index then
+ -- avoid the watch index falling more than 1000 events behind
+ -- (due to updates on other keys), which would force a full reload
+ self:upgrade_version(key_index)
+ end
+ end
+ end
+
if not dir_res then
return false, err
end
@@ -285,6 +321,7 @@ local function sync_data(self)
end
elseif res.value then
+ res.clean_handlers = {}
insert_tab(self.values, res)
self.values_hash[key] = #self.values
res.value.id = key
@@ -307,6 +344,7 @@ local function sync_data(self)
key = short_key(self, self.values[i].key)
self.values_hash[key] = i
end
+ self.sync_times = 0
end
self.conf_version = self.conf_version + 1
@@ -328,6 +366,15 @@ function _M.get(self, key)
end
+function _M.getkey(self, key)
+ if not self.running then
+ return nil, "stoped"
+ end
+
+ return getkey(self.etcd_cli, key)
+end
+
+
local function _automatic_fetch(premature, self)
if premature then
return
@@ -393,6 +440,7 @@ function _M.new(key, opts)
local automatic = opts and opts.automatic
local item_schema = opts and opts.item_schema
local filter_fun = opts and opts.filter
+ local timeout = opts and opts.timeout
local obj = setmetatable({
etcd_cli = etcd_cli,
@@ -408,6 +456,7 @@ function _M.new(key, opts)
prev_index = nil,
last_err = nil,
last_err_time = nil,
+ timeout = timeout,
filter = filter_fun,
}, mt)
diff --git a/apisix/core/config_util.lua b/apisix/core/config_util.lua
new file mode 100644
index 000000000000..1e399217b6da
--- /dev/null
+++ b/apisix/core/config_util.lua
@@ -0,0 +1,45 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local setmetatable = setmetatable
+local type = type
+
+
+local _M = {}
+
+
+local function _iterate_values(self, tab)
+ while true do
+ self.idx = self.idx + 1
+ local v = tab[self.idx]
+ if type(v) == "table" then
+ return self.idx, v
+ end
+ if v == nil then
+ return nil, nil
+ end
+ -- skip the tombstone
+ end
+end
+
+
+function _M.iterate_values(tab)
+ local iter = setmetatable({idx = 0}, {__call = _iterate_values})
+ return iter, tab, 0
+end
+
+
+return _M
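
A small sketch of `iterate_values`, assuming deleted items are stored as `false` tombstones (as config_etcd does):

    local config_util = require("apisix.core.config_util")
    local values = {{id = "1"}, false, {id = "3"}}
    for i, v in config_util.iterate_values(values) do
        print(i, v.id)   -- visits entries 1 and 3; the tombstone is skipped
    end
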
diff --git a/apisix/core/config_yaml.lua b/apisix/core/config_yaml.lua
index 7803deccf4e1..bb1cd250af2a 100644
--- a/apisix/core/config_yaml.lua
+++ b/apisix/core/config_yaml.lua
@@ -58,7 +58,10 @@ local mt = {
local apisix_yaml
local apisix_yaml_ctime
-local function read_apisix_yaml(pre_mtime)
+local function read_apisix_yaml(premature, pre_mtime)
+ if premature then
+ return
+ end
local attributes, err = lfs.attributes(apisix_yaml_path)
if not attributes then
log.error("failed to fetch ", apisix_yaml_path, " attributes: ", err)
@@ -308,7 +311,7 @@ end
function _M.fetch_created_obj(key)
- return created_obj[key]
+ return created_obj[sub_str(key, 2)]
end
diff --git a/apisix/core/request.lua b/apisix/core/request.lua
index 1148234738c2..fcf6e0711a7b 100644
--- a/apisix/core/request.lua
+++ b/apisix/core/request.lua
@@ -14,14 +14,21 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
+
+local lfs = require("lfs")
local ngx = ngx
local get_headers = ngx.req.get_headers
local tonumber = tonumber
local error = error
local type = type
local str_fmt = string.format
+local io_open = io.open
+local req_read_body = ngx.req.read_body
+local req_get_body_data = ngx.req.get_body_data
+local req_get_body_file = ngx.req.get_body_file
+
-local _M = {version = 0.1}
+local _M = {}
local function _headers(ctx)
@@ -94,7 +101,77 @@ function _M.get_remote_client_port(ctx)
ctx = ngx.ctx.api_ctx
end
return tonumber(ctx.var.remote_port)
- end
+end
+
+
+local function get_file(file_name)
+ local f, err = io_open(file_name, 'r')
+ if not f then
+ return nil, err
+ end
+
+ local req_body = f:read("*all")
+ f:close()
+ return req_body
+end
+
+
+function _M.get_body(max_size)
+ req_read_body()
+
+ local req_body = req_get_body_data()
+ if req_body then
+ return req_body
+ end
+
+ local file_name = req_get_body_file()
+ if not file_name then
+ return nil
+ end
+
+ if max_size then
+ local size, err = lfs.attributes(file_name, "size")
+ if not size then
+ return nil, err
+ end
+
+ if size > max_size then
+ return nil, "request size " .. size .. " is greater than the "
+ .. "maximum size " .. max_size .. " allowed"
+ end
+ end
+
+ local req_body, err = get_file(file_name)
+ return req_body, err
+end
+
+function _M.get_scheme(ctx)
+ if not ctx then
+ ctx = ngx.ctx.api_ctx
+ end
+ return ctx.var.scheme or ''
+end
+
+
+function _M.get_host(ctx)
+ if not ctx then
+ ctx = ngx.ctx.api_ctx
+ end
+ return ctx.var.host or ''
+end
+
+
+function _M.get_port(ctx)
+ if not ctx then
+ ctx = ngx.ctx.api_ctx
+ end
+ return tonumber(ctx.var.server_port)
+end
+
+
+function _M.get_http_version()
+ return ngx.req.http_version()
+end
return _M
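
A sketch of how a plugin might call the new `get_body` helper; the size cap is illustrative:

    -- e.g. inside a plugin's rewrite phase
    local core = require("apisix.core")
    local body, err = core.request.get_body(1024 * 1024)   -- cap at 1 MiB
    if err then
        core.log.error("failed to read request body: ", err)
        return 413
    end
    -- body is nil (without err) when the request carries no body
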
diff --git a/apisix/core/schema.lua b/apisix/core/schema.lua
index 4f72a5b3133c..a85d09d57b3a 100644
--- a/apisix/core/schema.lua
+++ b/apisix/core/schema.lua
@@ -36,10 +36,20 @@ local function create_validator(schema)
return nil, res -- error message
end
-
-function _M.check(schema, json)
+local function get_validator(schema)
local validator, err = cached_validator(schema, nil,
create_validator, schema)
+
+ if not validator then
+ return nil, err
+ end
+
+ return validator, nil
+end
+
+function _M.check(schema, json)
+ local validator, err = get_validator(schema)
+
if not validator then
return false, err
end
@@ -47,5 +57,6 @@ function _M.check(schema, json)
return validator(json)
end
+_M.valid = get_validator
return _M
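
`core.schema.valid` exposes the cached validator directly, which helps when the same schema is checked many times. A sketch:

    local validator, err = core.schema.valid({type = "object", required = {"host"}})
    if validator then
        local ok, err = validator({host = "127.0.0.1"})   -- true
    end
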
diff --git a/apisix/core/table.lua b/apisix/core/table.lua
index 0fc64acc3444..5c84164e38d3 100644
--- a/apisix/core/table.lua
+++ b/apisix/core/table.lua
@@ -22,16 +22,18 @@ local new_tab = require("table.new")
local nkeys = require("table.nkeys")
local pairs = pairs
local type = type
+local ngx_re = require("ngx.re")
local _M = {
- version = 0.1,
+ version = 0.2,
new = new_tab,
clear = require("table.clear"),
nkeys = nkeys,
insert = table.insert,
concat = table.concat,
clone = require("table.clone"),
+ isarray = require("table.isarray"),
}
@@ -84,5 +86,62 @@ local function deepcopy(orig)
end
_M.deepcopy = deepcopy
+local ngx_null = ngx.null
+local function merge(origin, extend)
+ for k,v in pairs(extend) do
+ if type(v) == "table" then
+ if type(origin[k] or false) == "table" then
+ if _M.nkeys(origin[k]) ~= #origin[k] then
+ merge(origin[k] or {}, extend[k] or {})
+ else
+ origin[k] = v
+ end
+ else
+ origin[k] = v
+ end
+ elseif v == ngx_null then
+ origin[k] = nil
+ else
+ origin[k] = v
+ end
+ end
+
+ return origin
+end
+_M.merge = merge
+
+
+local function patch(node_value, sub_path, conf)
+ local sub_value = node_value
+ local sub_paths = ngx_re.split(sub_path, "/")
+ for i = 1, #sub_paths - 1 do
+ local sub_name = sub_paths[i]
+ if sub_value[sub_name] == nil then
+ sub_value[sub_name] = {}
+ end
+
+ sub_value = sub_value[sub_name]
+
+ if type(sub_value) ~= "table" then
+ return 400, "invalid sub-path: /"
+ .. _M.concat(sub_paths, "/", 1, i)
+ end
+ end
+
+ if type(sub_value) ~= "table" then
+ return 400, "invalid sub-path: /" .. sub_path
+ end
+
+ local sub_name = sub_paths[#sub_paths]
+ if sub_name and sub_name ~= "" then
+ sub_value[sub_name] = conf
+ else
+ node_value = conf
+ end
+
+ return nil, nil, node_value
+end
+_M.patch = patch
+
return _M
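
Two sketches of the new helpers; `node_value` and `conf` are placeholders. `merge` deep-merges hash-like tables and treats `ngx.null` as a delete marker, while `patch` writes a value at a sub-path:

    local origin = {plugins = {["limit-count"] = {count = 2, time_window = 60}}}
    local extend = {plugins = {["limit-count"] = {count = 10, time_window = ngx.null}}}
    core.table.merge(origin, extend)
    -- origin.plugins["limit-count"] is now {count = 10}

    local code, err, new_value = core.table.patch(node_value, "upstream/nodes", conf)
    -- on failure, code/err are set (e.g. 400, "invalid sub-path: ...");
    -- on success, new_value carries the patched table
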
diff --git a/apisix/core/version.lua b/apisix/core/version.lua
index dfd10502979b..5c2ffdcc3ceb 100644
--- a/apisix/core/version.lua
+++ b/apisix/core/version.lua
@@ -15,5 +15,6 @@
-- limitations under the License.
--
return {
- VERSION = "1.2"
+
+ VERSION = "1.4.1"
}
diff --git a/apisix/discovery/eureka.lua b/apisix/discovery/eureka.lua
new file mode 100644
index 000000000000..d4b436853617
--- /dev/null
+++ b/apisix/discovery/eureka.lua
@@ -0,0 +1,253 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local local_conf = require("apisix.core.config_local").local_conf()
+local http = require("resty.http")
+local core = require("apisix.core")
+local ipmatcher = require("resty.ipmatcher")
+local ipairs = ipairs
+local tostring = tostring
+local type = type
+local math_random = math.random
+local error = error
+local ngx = ngx
+local ngx_timer_at = ngx.timer.at
+local ngx_timer_every = ngx.timer.every
+local string_sub = string.sub
+local string_find = string.find
+local log = core.log
+
+local default_weight
+local applications
+
+local schema = {
+ type = "object",
+ properties = {
+ host = {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "string",
+ },
+ },
+ fetch_interval = {type = "integer", minimum = 1, default = 30},
+ prefix = {type = "string"},
+ weight = {type = "integer", minimum = 0},
+ timeout = {
+ type = "object",
+ properties = {
+ connect = {type = "integer", minimum = 1, default = 2000},
+ send = {type = "integer", minimum = 1, default = 2000},
+ read = {type = "integer", minimum = 1, default = 5000},
+ }
+ },
+ },
+ required = {"host"}
+}
+
+
+local _M = {
+ version = 0.1,
+}
+
+
+local function service_info()
+ local host = local_conf.eureka and local_conf.eureka.host
+ if not host then
+ log.error("do not set eureka.host")
+ return
+ end
+
+ local basic_auth
+ -- TODO Add health check to get healthy nodes.
+ local url = host[math_random(#host)]
+ local auth_idx = string_find(url, "@", 1, true)
+ if auth_idx then
+ local protocol_idx = string_find(url, "://", 1, true)
+ local protocol = string_sub(url, 1, protocol_idx + 2)
+ local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1)
+ local other = string_sub(url, auth_idx + 1)
+ url = protocol .. other
+ basic_auth = "Basic " .. ngx.encode_base64(user_and_password)
+ end
+ if local_conf.eureka.prefix then
+ url = url .. local_conf.eureka.prefix
+ end
+ if string_sub(url, #url) ~= "/" then
+ url = url .. "/"
+ end
+
+ return url, basic_auth
+end
+
+
+local function request(request_uri, basic_auth, method, path, query, body)
+ log.info("eureka uri:", request_uri, ".")
+ local url = request_uri .. path
+ local headers = core.table.new(0, 5)
+ headers['Connection'] = 'Keep-Alive'
+ headers['Accept'] = 'application/json'
+
+ if basic_auth then
+ headers['Authorization'] = basic_auth
+ end
+
+ if body and 'table' == type(body) then
+ local err
+ body, err = core.json.encode(body)
+ if not body then
+ return nil, 'invalid body : ' .. err
+ end
+ -- log.warn(method, url, body)
+ headers['Content-Type'] = 'application/json'
+ end
+
+ local httpc = http.new()
+ local timeout = local_conf.eureka.timeout
+ local connect_timeout = timeout and timeout.connect or 2000
+ local send_timeout = timeout and timeout.send or 2000
+ local read_timeout = timeout and timeout.read or 5000
+ log.info("connect_timeout:", connect_timeout, ", send_timeout:", send_timeout,
+ ", read_timeout:", read_timeout, ".")
+ httpc:set_timeouts(connect_timeout, send_timeout, read_timeout)
+ return httpc:request_uri(url, {
+ version = 1.1,
+ method = method,
+ headers = headers,
+ query = query,
+ body = body,
+ ssl_verify = false,
+ })
+end
+
+
+local function parse_instance(instance)
+ local status = instance.status
+ local overridden_status = instance.overriddenstatus or instance.overriddenStatus
+ if overridden_status and overridden_status ~= "UNKNOWN" then
+ status = overridden_status
+ end
+
+ if status ~= "UP" then
+ return
+ end
+ local port
+ if tostring(instance.port["@enabled"]) == "true" and instance.port["$"] then
+ port = instance.port["$"]
+ -- secure = false
+ end
+ if tostring(instance.securePort["@enabled"]) == "true" and instance.securePort["$"] then
+ port = instance.securePort["$"]
+ -- secure = true
+ end
+ local ip = instance.ipAddr
+ if not ipmatcher.parse_ipv4(ip) and
+ not ipmatcher.parse_ipv6(ip) then
+ log.error(instance.app, " service ", instance.hostName, " node IP ", ip,
+ " is invalid(must be IPv4 or IPv6).")
+ return
+ end
+ return ip, port, instance.metadata
+end
+
+
+local function fetch_full_registry(premature)
+ if premature then
+ return
+ end
+
+ local request_uri, basic_auth = service_info()
+ if not request_uri then
+ return
+ end
+
+ local res, err = request(request_uri, basic_auth, "GET", "apps")
+ if not res then
+ log.error("failed to fetch registry", err)
+ return
+ end
+
+ if not res.body or res.status ~= 200 then
+ log.error("failed to fetch registry, status = ", res.status)
+ return
+ end
+
+ local json_str = res.body
+ local data, err = core.json.decode(json_str)
+ if not data then
+ log.error("invalid response body: ", json_str, " err: ", err)
+ return
+ end
+ local apps = data.applications.application
+ local up_apps = core.table.new(0, #apps)
+ for _, app in ipairs(apps) do
+ for _, instance in ipairs(app.instance) do
+ local ip, port, metadata = parse_instance(instance)
+ if ip and port then
+ local nodes = up_apps[app.name]
+ if not nodes then
+ nodes = core.table.new(#app.instance, 0)
+ up_apps[app.name] = nodes
+ end
+ core.table.insert(nodes, {
+ host = ip,
+ port = port,
+ weight = metadata and metadata.weight or default_weight,
+ metadata = metadata,
+ })
+ if metadata then
+ -- remove useless data
+ metadata.weight = nil
+ end
+ end
+ end
+ end
+ applications = up_apps
+end
+
+
+function _M.nodes(service_name)
+ if not applications then
+ log.error("failed to fetch nodes for : ", service_name)
+ return
+ end
+
+ return applications[service_name]
+end
+
+
+function _M.init_worker()
+ if not local_conf.eureka or not local_conf.eureka.host or #local_conf.eureka.host == 0 then
+ error("do not set eureka.host")
+ return
+ end
+
+ local ok, err = core.schema.check(schema, local_conf.eureka)
+ if not ok then
+ error("invalid eureka configuration: " .. err)
+ return
+ end
+ default_weight = local_conf.eureka.weight or 100
+ log.info("default_weight:", default_weight, ".")
+ local fetch_interval = local_conf.eureka.fetch_interval or 30
+ log.info("fetch_interval:", fetch_interval, ".")
+ ngx_timer_at(0, fetch_full_registry)
+ ngx_timer_every(fetch_interval, fetch_full_registry)
+end
+
+
+return _M
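
For reference, the shape of a Eureka instance that `parse_instance` consumes (a sketch; values are illustrative):

    local instance = {
        app = "USER-SERVICE",
        hostName = "user-1",
        ipAddr = "127.0.0.1",
        status = "UP",
        port = {["$"] = 8761, ["@enabled"] = "true"},
        securePort = {["$"] = 8443, ["@enabled"] = "false"},
        metadata = {weight = 100},
    }
    -- parse_instance(instance) --> "127.0.0.1", 8761, {weight = 100}
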
diff --git a/apisix/discovery/init.lua b/apisix/discovery/init.lua
new file mode 100644
index 000000000000..16aafe62c50d
--- /dev/null
+++ b/apisix/discovery/init.lua
@@ -0,0 +1,33 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local log = require("apisix.core.log")
+local local_conf = require("apisix.core.config_local").local_conf()
+
+local discovery_type = local_conf.apisix and local_conf.apisix.discovery
+local discovery
+
+if discovery_type then
+ log.info("use discovery: ", discovery_type)
+ discovery = require("apisix.discovery." .. discovery_type)
+end
+
+
+return {
+ version = 0.1,
+ discovery = discovery
+}
diff --git a/apisix/http/router/radixtree_sni.lua b/apisix/http/router/radixtree_sni.lua
index 0ecc3bf3cbb2..4c7843b3bbbe 100644
--- a/apisix/http/router/radixtree_sni.lua
+++ b/apisix/http/router/radixtree_sni.lua
@@ -18,9 +18,15 @@ local get_request = require("resty.core.base").get_request
local radixtree_new = require("resty.radixtree").new
local core = require("apisix.core")
local ngx_ssl = require("ngx.ssl")
-local ipairs = ipairs
+local config_util = require("apisix.core.config_util")
+local ipairs = ipairs
local type = type
local error = error
+local str_find = string.find
+local aes = require "resty.aes"
+local assert = assert
+local str_gsub = string.gsub
+local ngx_decode_base64 = ngx.decode_base64
local ssl_certificates
local radixtree_router
local radixtree_router_ver
@@ -38,9 +44,43 @@ local function create_router(ssl_items)
local route_items = core.table.new(#ssl_items, 0)
local idx = 0
- for _, ssl in ipairs(ssl_items) do
- if type(ssl) == "table" then
- local sni = ssl.value.sni:reverse()
+ local local_conf = core.config.local_conf()
+ local iv
+ if local_conf and local_conf.apisix
+ and local_conf.apisix.ssl
+ and local_conf.apisix.ssl.key_encrypt_salt then
+ iv = local_conf.apisix.ssl.key_encrypt_salt
+ end
+ local aes_128_cbc_with_iv = (type(iv) == "string" and #iv == 16) and
+ assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) or nil
+
+ for _, ssl in config_util.iterate_values(ssl_items) do
+ if ssl.value ~= nil and
+ (ssl.value.status == nil or ssl.value.status == 1) then -- compatible with old version
+
+ local j = 0
+ local sni
+ if type(ssl.value.snis) == "table" and #ssl.value.snis > 0 then
+ sni = core.table.new(0, #ssl.value.snis)
+ for _, s in ipairs(ssl.value.snis) do
+ j = j + 1
+ sni[j] = s:reverse()
+ end
+ else
+ sni = ssl.value.sni:reverse()
+ end
+
+ -- decrypt private key
+ if aes_128_cbc_with_iv ~= nil and
+ not str_find(ssl.value.key, "---") then
+ local decrypted = aes_128_cbc_with_iv:decrypt(ngx_decode_base64(ssl.value.key))
+ if decrypted == nil then
+ core.log.error("decrypt ssl key failed. key[", ssl.value.key, "] ")
+ else
+ ssl.value.key = decrypted
+ end
+ end
+
idx = idx + 1
route_items[idx] = {
paths = sni,
@@ -49,12 +89,17 @@ local function create_router(ssl_items)
return
end
api_ctx.matched_ssl = ssl
+ api_ctx.matched_sni = sni
end
}
end
end
core.log.info("route items: ", core.json.delay_encode(route_items, true))
+ -- for testing
+ if #route_items > 1 then
+ core.log.info("we have more than 1 ssl certs now")
+ end
local router, err = radixtree_new(route_items)
if not router then
return nil, err
@@ -110,18 +155,46 @@ function _M.match_and_set(api_ctx)
local sni
sni, err = ngx_ssl.server_name()
if type(sni) ~= "string" then
- return false, "failed to fetch SNI: " .. (err or "not found")
+ return false, "failed to fetch SSL certificate: " .. (err or "not found")
end
core.log.debug("sni: ", sni)
- local ok = radixtree_router:dispatch(sni:reverse(), nil, api_ctx)
+
+ local sni_rev = sni:reverse()
+ local ok = radixtree_router:dispatch(sni_rev, nil, api_ctx)
if not ok then
- core.log.warn("not found any valid sni configuration")
+ core.log.error("failed to find any SSL certificate by SNI: ", sni)
return false
end
+
+ if type(api_ctx.matched_sni) == "table" then
+ local matched = false
+ for _, msni in ipairs(api_ctx.matched_sni) do
+ if sni_rev == msni or not str_find(sni_rev, ".", #msni, true) then
+ matched = true
+ end
+ end
+ if not matched then
+ local log_snis = core.json.encode(api_ctx.matched_sni, true)
+ if log_snis ~= nil then
+ log_snis = str_gsub(log_snis:reverse(), "%[", "%]")
+ log_snis = str_gsub(log_snis, "%]", "%[", 1)
+ end
+ core.log.warn("failed to find any SSL certificate by SNI: ",
+ sni, " matched SNIs: ", log_snis)
+ return false
+ end
+ else
+ if str_find(sni_rev, ".", #api_ctx.matched_sni, true) then
+ core.log.warn("failed to find any SSL certificate by SNI: ",
+ sni, " matched SNI: ", api_ctx.matched_sni:reverse())
+ return false
+ end
+ end
+
local matched_ssl = api_ctx.matched_ssl
- core.log.info("debug: ", core.json.delay_encode(matched_ssl, true))
+ core.log.info("debug - matched: ", core.json.delay_encode(matched_ssl, true))
ok, err = set_pem_ssl_key(matched_ssl.value.cert, matched_ssl.value.key)
if not ok then
return false, err
@@ -131,11 +204,20 @@ function _M.match_and_set(api_ctx)
end
+function _M.ssls()
+ if not ssl_certificates then
+ return nil, nil
+ end
+
+ return ssl_certificates.values, ssl_certificates.conf_version
+end
+
+
function _M.init_worker()
local err
ssl_certificates, err = core.config.new("/ssl", {
automatic = true,
- item_schema = core.schema.ssl
+ item_schema = core.schema.ssl,
})
if not ssl_certificates then
error("failed to create etcd instance for fetching ssl certificates: "
diff --git a/apisix/http/service.lua b/apisix/http/service.lua
index 42d31dd58b3c..161d82fe2358 100644
--- a/apisix/http/service.lua
+++ b/apisix/http/service.lua
@@ -14,7 +14,8 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local core = require("apisix.core")
+local core = require("apisix.core")
+local ipairs = ipairs
local services
local error = error
local pairs = pairs
@@ -45,17 +46,36 @@ local function filter(service)
return
end
- if not service.value.upstream then
+ if not service.value.upstream or not service.value.upstream.nodes then
return
end
- for addr, _ in pairs(service.value.upstream.nodes or {}) do
- local host = core.utils.parse_addr(addr)
- if not core.utils.parse_ipv4(host) and
- not core.utils.parse_ipv6(host) then
- service.has_domain = true
- break
+ local nodes = service.value.upstream.nodes
+ if core.table.isarray(nodes) then
+ for _, node in ipairs(nodes) do
+ local host = node.host
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ service.has_domain = true
+ break
+ end
end
+ else
+ local new_nodes = core.table.new(core.table.nkeys(nodes), 0)
+ for addr, weight in pairs(nodes) do
+ local host, port = core.utils.parse_addr(addr)
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ service.has_domain = true
+ end
+ local node = {
+ host = host,
+ port = port,
+ weight = weight,
+ }
+ core.table.insert(new_nodes, node)
+ end
+ service.value.upstream.nodes = new_nodes
end
core.log.info("filter service: ", core.json.delay_encode(service))
diff --git a/apisix/init.lua b/apisix/init.lua
index aa8598e87775..bbb22a709dd5 100644
--- a/apisix/init.lua
+++ b/apisix/init.lua
@@ -16,11 +16,14 @@
--
local require = require
local core = require("apisix.core")
+local config_util = require("apisix.core.config_util")
local plugin = require("apisix.plugin")
+local script = require("apisix.script")
local service_fetch = require("apisix.http.service").get
local admin_init = require("apisix.admin.init")
local get_var = require("resty.ngxvar").fetch
local router = require("apisix.router")
+local set_upstream = require("apisix.upstream").set_by_route
local ipmatcher = require("resty.ipmatcher")
local ngx = ngx
local get_method = ngx.req.get_method
@@ -28,12 +31,15 @@ local ngx_exit = ngx.exit
local math = math
local error = error
local ipairs = ipairs
-local pairs = pairs
local tostring = tostring
+local type = type
+local ngx_now = ngx.now
+local str_byte = string.byte
+local str_sub = string.sub
local load_balancer
-
+local local_conf
local dns_resolver
-local parsed_domain
+local lru_resolved_domain
local function parse_args(args)
@@ -42,7 +48,7 @@ local function parse_args(args)
end
-local _M = {version = 0.3}
+local _M = {version = 0.4}
function _M.http_init(args)
@@ -60,7 +66,7 @@ function _M.http_init(args)
local seed, err = core.utils.get_seed_from_urandom()
if not seed then
core.log.warn('failed to get seed from urandom: ', err)
- seed = ngx.now() * 1000 + ngx.worker.pid()
+ seed = ngx_now() * 1000 + ngx.worker.pid()
end
math.randomseed(seed)
parse_args(args)
@@ -74,7 +80,10 @@ function _M.http_init_worker()
if not ok then
error("failed to init worker event: " .. err)
end
-
+ local discovery = require("apisix.discovery.init").discovery
+ if discovery and discovery.init_worker then
+ discovery.init_worker()
+ end
require("apisix.balancer").init_worker()
load_balancer = require("apisix.balancer").run
require("apisix.admin.init").init_worker()
@@ -89,12 +98,13 @@ function _M.http_init_worker()
end
require("apisix.debug").init_worker()
+ require("apisix.upstream").init_worker()
- local local_conf = core.config.local_conf()
+ local_conf = core.config.local_conf()
local dns_resolver_valid = local_conf and local_conf.apisix and
local_conf.apisix.dns_resolver_valid
- parsed_domain = core.lrucache.new({
+ lru_resolved_domain = core.lrucache.new({
ttl = dns_resolver_valid, count = 512, invalid_stale = true,
})
end
@@ -107,30 +117,7 @@ local function run_plugin(phase, plugins, api_ctx)
end
plugins = plugins or api_ctx.plugins
- if not plugins then
- return api_ctx
- end
-
- if phase == "balancer" then
- local balancer_name = api_ctx.balancer_name
- local balancer_plugin = api_ctx.balancer_plugin
- if balancer_name and balancer_plugin then
- local phase_fun = balancer_plugin[phase]
- phase_fun(balancer_plugin, api_ctx)
- return api_ctx
- end
-
- for i = 1, #plugins, 2 do
- local phase_fun = plugins[i][phase]
- if phase_fun and
- (not balancer_name or balancer_name == plugins[i].name) then
- phase_fun(plugins[i + 1], api_ctx)
- if api_ctx.balancer_name == plugins[i].name then
- api_ctx.balancer_plugin = plugins[i]
- return api_ctx
- end
- end
- end
+ if not plugins or #plugins == 0 then
return api_ctx
end
@@ -173,72 +160,122 @@ function _M.http_ssl_phase()
local ok, err = router.router_ssl.match_and_set(api_ctx)
if not ok then
if err then
- core.log.warn("failed to fetch ssl config: ", err)
+ core.log.error("failed to fetch ssl config: ", err)
end
+ ngx_exit(-1)
end
end
-local function parse_domain_in_up(up, ver)
- local new_nodes = core.table.new(0, 8)
- for addr, weight in pairs(up.value.nodes) do
- local host, port = core.utils.parse_addr(addr)
+local function parse_domain(host)
+ local ip_info, err = core.utils.dns_parse(dns_resolver, host)
+ if not ip_info then
+ core.log.error("failed to parse domain: ", host, ", error: ",err)
+ return nil, err
+ end
+
+ core.log.info("parse addr: ", core.json.delay_encode(ip_info))
+ core.log.info("resolver: ", core.json.delay_encode(dns_resolver))
+ core.log.info("host: ", host)
+ if ip_info.address then
+ core.log.info("dns resolver domain: ", host, " to ", ip_info.address)
+ return ip_info.address
+ else
+ return nil, "failed to parse domain"
+ end
+end
+
+
+local function parse_domain_for_nodes(nodes)
+ local new_nodes = core.table.new(#nodes, 0)
+ for _, node in ipairs(nodes) do
+ local host = node.host
if not ipmatcher.parse_ipv4(host) and
- not ipmatcher.parse_ipv6(host) then
- local ip_info, err = core.utils.dns_parse(dns_resolver, host)
- if not ip_info then
- return nil, err
+ not ipmatcher.parse_ipv6(host) then
+ local ip, err = parse_domain(host)
+ if ip then
+ local new_node = core.table.clone(node)
+ new_node.host = ip
+ core.table.insert(new_nodes, new_node)
end
- core.log.info("parse addr: ", core.json.delay_encode(ip_info))
- core.log.info("resolver: ", core.json.delay_encode(dns_resolver))
- core.log.info("host: ", host)
- if ip_info.address then
- new_nodes[ip_info.address .. ":" .. port] = weight
- core.log.info("dns resolver domain: ", host, " to ",
- ip_info.address)
- else
- return nil, "failed to parse domain in route"
+ if err then
+ return nil, err
end
else
- new_nodes[addr] = weight
+ core.table.insert(new_nodes, node)
+ end
+ end
+ return new_nodes
+end
+
+local function compare_upstream_node(old_t, new_t)
+ if type(old_t) ~= "table" then
+ return false
+ end
+
+ if #new_t ~= #old_t then
+ return false
+ end
+
+ for i = 1, #new_t do
+ local new_node = new_t[i]
+ local old_node = old_t[i]
+ for _, name in ipairs({"host", "port", "weight"}) do
+ if new_node[name] ~= old_node[name] then
+ return false
+ end
end
end
+ return true
+end
+
+
+local function parse_domain_in_up(up, api_ctx)
+ local nodes = up.value.nodes
+ local new_nodes, err = parse_domain_for_nodes(nodes)
+ if not new_nodes then
+ return nil, err
+ end
+
+ local old_dns_value = up.dns_value and up.dns_value.nodes
+ local ok = compare_upstream_node(old_dns_value, new_nodes)
+ if ok then
+ return up
+ end
+
+ if not up.modifiedIndex_org then
+ up.modifiedIndex_org = up.modifiedIndex
+ end
+ up.modifiedIndex = up.modifiedIndex_org .. "#" .. ngx_now()
+
up.dns_value = core.table.clone(up.value)
up.dns_value.nodes = new_nodes
- core.log.info("parse upstream which contain domain: ",
+ core.log.info("resolve upstream which contain domain: ",
core.json.delay_encode(up))
return up
end
-local function parse_domain_in_route(route, ver)
- local new_nodes = core.table.new(0, 8)
- for addr, weight in pairs(route.value.upstream.nodes) do
- local host, port = core.utils.parse_addr(addr)
- if not ipmatcher.parse_ipv4(host) and
- not ipmatcher.parse_ipv6(host) then
- local ip_info, err = core.utils.dns_parse(dns_resolver, host)
- if not ip_info then
- return nil, err
- end
+local function parse_domain_in_route(route, api_ctx)
+ local nodes = route.value.upstream.nodes
+ local new_nodes, err = parse_domain_for_nodes(nodes)
+ if not new_nodes then
+ return nil, err
+ end
- core.log.info("parse addr: ", core.json.delay_encode(ip_info))
- core.log.info("resolver: ", core.json.delay_encode(dns_resolver))
- core.log.info("host: ", host)
- if ip_info and ip_info.address then
- new_nodes[ip_info.address .. ":" .. port] = weight
- core.log.info("dns resolver domain: ", host, " to ",
- ip_info.address)
- else
- return nil, "failed to parse domain in route"
- end
+ local old_dns_value = route.dns_value and route.dns_value.upstream.nodes
+ local ok = compare_upstream_node(old_dns_value, new_nodes)
+ if ok then
+ return route
+ end
- else
- new_nodes[addr] = weight
- end
+ if not route.modifiedIndex_org then
+ route.modifiedIndex_org = route.modifiedIndex
end
+ route.modifiedIndex = route.modifiedIndex_org .. "#" .. ngx_now()
+ api_ctx.conf_version = route.modifiedIndex
route.dns_value = core.table.deepcopy(route.value)
route.dns_value.upstream.nodes = new_nodes
@@ -248,6 +285,11 @@ local function parse_domain_in_route(route, ver)
end
+local function return_direct(...)
+ return ...
+end
+
+
function _M.http_access_phase()
local ngx_ctx = ngx.ctx
local api_ctx = ngx_ctx.api_ctx
@@ -263,7 +305,8 @@ function _M.http_access_phase()
if router.global_rules and router.global_rules.values
and #router.global_rules.values > 0 then
local plugins = core.tablepool.fetch("plugins", 32, 0)
- for _, global_rule in ipairs(router.global_rules.values) do
+ local values = router.global_rules.values
+ for _, global_rule in config_util.iterate_values(values) do
api_ctx.conf_type = "global_rule"
api_ctx.conf_version = global_rule.modifiedIndex
api_ctx.conf_id = global_rule.value.id
@@ -279,6 +322,17 @@ function _M.http_access_phase()
api_ctx.conf_type = nil
api_ctx.conf_version = nil
api_ctx.conf_id = nil
+
+ api_ctx.global_rules = router.global_rules
+ end
+
+ if local_conf.apisix and local_conf.apisix.delete_uri_tail_slash then
+ local uri = api_ctx.var.uri
+ if str_byte(uri, #uri) == str_byte("/") then
+ api_ctx.var.uri = str_sub(api_ctx.var.uri, 1, #uri - 1)
+ core.log.info("remove the end of uri '/', current uri: ",
+ api_ctx.var.uri)
+ end
end
router.router_http.match(api_ctx)
@@ -328,16 +382,39 @@ function _M.http_access_phase()
local enable_websocket
local up_id = route.value.upstream_id
if up_id then
- local upstreams_etcd = core.config.fetch_created_obj("/upstreams")
- if upstreams_etcd then
- local upstream = upstreams_etcd:get(tostring(up_id))
+ local upstreams = core.config.fetch_created_obj("/upstreams")
+ if upstreams then
+ local upstream = upstreams:get(tostring(up_id))
+ if not upstream then
+ core.log.error("failed to find upstream by id: " .. up_id)
+ return core.response.exit(500)
+ end
+
if upstream.has_domain then
- local _, err = parsed_domain(upstream, api_ctx.conf_version,
- parse_domain_in_up, upstream)
+ -- try to fetch the resolved domain; if we get `nil`,
+ -- we need to create the cache entry ourselves.
+ -- `upstream.modifiedIndex` changes after we call
+ -- `parse_domain_in_up`, so the cache is keyed by the
+ -- new index
+ local parsed_upstream, err = lru_resolved_domain(upstream,
+ upstream.modifiedIndex, return_direct, nil)
if err then
- core.log.error("failed to parse domain in upstream: ", err)
+ core.log.error("failed to get resolved upstream: ", err)
return core.response.exit(500)
end
+
+ if not parsed_upstream then
+ parsed_upstream, err = parse_domain_in_up(upstream)
+ if err then
+ core.log.error("failed to reolve domain in upstream: ",
+ err)
+ return core.response.exit(500)
+ end
+
+ lru_resolved_domain(upstream, upstream.modifiedIndex,
+ return_direct, parsed_upstream)
+ end
+
end
if upstream.value.enable_websocket then
@@ -347,13 +424,23 @@ function _M.http_access_phase()
else
if route.has_domain then
- local err
- route, err = parsed_domain(route, api_ctx.conf_version,
- parse_domain_in_route, route)
+ local parsed_route, err = lru_resolved_domain(route, api_ctx.conf_version,
+ return_direct, nil)
if err then
- core.log.error("failed to parse domain in route: ", err)
+ core.log.error("failed to get resolved route: ", err)
return core.response.exit(500)
end
+
+ if not parsed_route then
+ route, err = parse_domain_in_route(route, api_ctx)
+ if err then
+ core.log.error("failed to reolve domain in route: ", err)
+ return core.response.exit(500)
+ end
+
+ lru_resolved_domain(route, api_ctx.conf_version,
+ return_direct, route)
+ end
end
if route.value.upstream and route.value.upstream.enable_websocket then
@@ -366,19 +453,30 @@ function _M.http_access_phase()
api_ctx.var.upstream_connection = api_ctx.var.http_connection
end
- local plugins = core.tablepool.fetch("plugins", 32, 0)
- api_ctx.plugins = plugin.filter(route, plugins)
-
- run_plugin("rewrite", plugins, api_ctx)
- if api_ctx.consumer then
- local changed
- route, changed = plugin.merge_consumer_route(route, api_ctx.consumer)
- if changed then
- core.table.clear(api_ctx.plugins)
- api_ctx.plugins = plugin.filter(route, api_ctx.plugins)
+ if route.value.script then
+ script.load(route, api_ctx)
+ script.run("access", api_ctx)
+ else
+ local plugins = plugin.filter(route)
+ api_ctx.plugins = plugins
+
+ run_plugin("rewrite", plugins, api_ctx)
+ if api_ctx.consumer then
+ local changed
+ route, changed = plugin.merge_consumer_route(route, api_ctx.consumer)
+ if changed then
+ core.table.clear(api_ctx.plugins)
+ api_ctx.plugins = plugin.filter(route, api_ctx.plugins)
+ end
end
+ run_plugin("access", plugins, api_ctx)
+ end
+
+ local ok, err = set_upstream(route, api_ctx)
+ if not ok then
+ core.log.error("failed to parse upstream: ", err)
+ core.response.exit(500)
end
- run_plugin("access", plugins, api_ctx)
end
@@ -439,53 +537,120 @@ function _M.grpc_access_phase()
run_plugin("rewrite", plugins, api_ctx)
run_plugin("access", plugins, api_ctx)
+
+ set_upstream(route, api_ctx)
end
-local function common_phase(plugin_name)
+
+local function common_phase(phase_name)
local api_ctx = ngx.ctx.api_ctx
if not api_ctx then
return
end
- if router.global_rules and router.global_rules.values
- and #router.global_rules.values > 0
- then
+ if api_ctx.global_rules then
local plugins = core.tablepool.fetch("plugins", 32, 0)
- for _, global_rule in ipairs(router.global_rules.values) do
+ local values = api_ctx.global_rules.values
+ for _, global_rule in config_util.iterate_values(values) do
core.table.clear(plugins)
plugins = plugin.filter(global_rule, plugins)
- run_plugin(plugin_name, plugins, api_ctx)
+ run_plugin(phase_name, plugins, api_ctx)
end
core.tablepool.release("plugins", plugins)
end
- run_plugin(plugin_name, nil, api_ctx)
+
+ if api_ctx.script_obj then
+ script.run(phase_name, api_ctx)
+ else
+ run_plugin(phase_name, nil, api_ctx)
+ end
+
return api_ctx
end
+
function _M.http_header_filter_phase()
common_phase("header_filter")
end
+
function _M.http_body_filter_phase()
common_phase("body_filter")
end
-function _M.http_log_phase()
+local function healthcheck_passive(api_ctx)
+ local checker = api_ctx.up_checker
+ if not checker then
+ return
+ end
+
+ local up_conf = api_ctx.upstream_conf
+ local passive = up_conf.checks.passive
+ if not passive then
+ return
+ end
+
+ core.log.info("enabled healthcheck passive")
+ local host = up_conf.checks and up_conf.checks.active
+ and up_conf.checks.active.host
+ local port = up_conf.checks and up_conf.checks.active
+ and up_conf.checks.active.port
+
+ local resp_status = ngx.status
+ local http_statuses = passive and passive.healthy and
+ passive.healthy.http_statuses
+ core.log.info("passive.healthy.http_statuses: ",
+ core.json.delay_encode(http_statuses))
+ if http_statuses then
+ for _, status in ipairs(http_statuses) do
+ if resp_status == status then
+ checker:report_http_status(api_ctx.balancer_ip,
+ port or api_ctx.balancer_port,
+ host,
+ resp_status)
+ end
+ end
+ end
+
+ local http_statuses = passive and passive.unhealthy and
+ passive.unhealthy.http_statuses
+ core.log.info("passive.unhealthy.http_statuses: ",
+ core.json.delay_encode(http_statuses))
+ if not http_statuses then
+ return
+ end
+
+ for _, status in ipairs(http_statuses) do
+ if resp_status == status then
+ checker:report_http_status(api_ctx.balancer_ip,
+ port or api_ctx.balancer_port,
+ host,
+ resp_status)
+ end
+ end
+end
+
+
+function _M.http_log_phase()
local api_ctx = common_phase("log")
+ healthcheck_passive(api_ctx)
if api_ctx.uri_parse_param then
core.tablepool.release("uri_parse_param", api_ctx.uri_parse_param)
end
core.ctx.release_vars(api_ctx)
- if api_ctx.plugins then
+ if api_ctx.plugins and api_ctx.plugins ~= core.empty_tab then
core.tablepool.release("plugins", api_ctx.plugins)
end
core.tablepool.release("api_ctx", api_ctx)
end
+
function _M.http_balancer_phase()
local api_ctx = ngx.ctx.api_ctx
if not api_ctx then
@@ -493,24 +658,12 @@ function _M.http_balancer_phase()
return core.response.exit(500)
end
- -- first time
- if not api_ctx.balancer_name then
- run_plugin("balancer", nil, api_ctx)
- if api_ctx.balancer_name then
- return
- end
- end
-
- if api_ctx.balancer_name and api_ctx.balancer_name ~= "default" then
- return run_plugin("balancer", nil, api_ctx)
- end
-
- api_ctx.balancer_name = "default"
load_balancer(api_ctx.matched_route, api_ctx)
end
+
local function cors_admin()
- local local_conf = core.config.local_conf()
+ local_conf = core.config.local_conf()
if local_conf.apisix and not local_conf.apisix.enable_admin_cors then
return
end
@@ -534,6 +687,10 @@ local function cors_admin()
"Access-Control-Max-Age", "3600")
end
+local function add_content_type()
+ core.response.set_header("Content-Type", "application/json")
+end
+
do
local router
@@ -545,6 +702,9 @@ function _M.http_admin()
-- add cors rsp header
cors_admin()
+ -- add content type to rsp header
+ add_content_type()
+
-- core.log.info("uri: ", get_var("uri"), " method: ", get_method())
local ok = router:dispatch(get_var("uri"), {method = get_method()})
if not ok then
@@ -567,11 +727,11 @@ function _M.stream_init_worker()
load_balancer = require("apisix.balancer").run
- local local_conf = core.config.local_conf()
+ local_conf = core.config.local_conf()
local dns_resolver_valid = local_conf and local_conf.apisix and
local_conf.apisix.dns_resolver_valid
- parsed_domain = core.lrucache.new({
+ lru_resolved_domain = core.lrucache.new({
ttl = dns_resolver_valid, count = 512, invalid_stale = true,
})
end
@@ -604,7 +764,13 @@ function _M.stream_preread_phase()
api_ctx.plugins = plugin.stream_filter(matched_route, plugins)
-- core.log.info("valid plugins: ", core.json.delay_encode(plugins, true))
+ api_ctx.conf_type = "stream/route"
+ api_ctx.conf_version = matched_route.modifiedIndex
+ api_ctx.conf_id = matched_route.value.id
+
run_plugin("preread", plugins, api_ctx)
+
+ set_upstream(matched_route, api_ctx)
end
@@ -616,19 +782,6 @@ function _M.stream_balancer_phase()
return ngx_exit(1)
end
- -- first time
- if not api_ctx.balancer_name then
- run_plugin("balancer", nil, api_ctx)
- if api_ctx.balancer_name then
- return
- end
- end
-
- if api_ctx.balancer_name and api_ctx.balancer_name ~= "default" then
- return run_plugin("balancer", nil, api_ctx)
- end
-
- api_ctx.balancer_name = "default"
load_balancer(api_ctx.matched_route, api_ctx)
end
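
`compare_upstream_node` only compares the fields that affect proxying, so a DNS re-resolve yielding identical records does not bump the cached version. A sketch:

    local old = {{host = "10.0.0.1", port = 80, weight = 1}}
    local new = {{host = "10.0.0.1", port = 80, weight = 1}}
    -- compare_upstream_node(old, new) --> true
    -- changing any host/port/weight, or the node count --> false
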
diff --git a/apisix/plugin.lua b/apisix/plugin.lua
index 8186d155af61..691860d14a6c 100644
--- a/apisix/plugin.lua
+++ b/apisix/plugin.lua
@@ -99,10 +99,6 @@ local function load()
return nil, "failed to read plugin list from local file"
end
- if local_conf.apisix and local_conf.apisix.enable_heartbeat then
- core.table.insert(plugin_names, "heartbeat")
- end
-
local processed = {}
for _, name in ipairs(plugin_names) do
if processed[name] == nil then
@@ -233,15 +229,16 @@ end
function _M.filter(user_route, plugins)
- plugins = plugins or core.table.new(#local_plugins * 2, 0)
local user_plugin_conf = user_route.value.plugins
- if user_plugin_conf == nil then
+ if user_plugin_conf == nil or
+ core.table.nkeys(user_plugin_conf) == 0 then
if local_conf and local_conf.apisix.enable_debug then
core.response.set_header("Apisix-Plugins", "no plugin")
end
- return plugins
+ return core.empty_tab
end
+ plugins = plugins or core.tablepool.fetch("plugins", 32, 0)
for _, plugin_obj in ipairs(local_plugins) do
local name = plugin_obj.name
local plugin_conf = user_plugin_conf[name]
diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua
new file mode 100644
index 000000000000..2704f4ef0356
--- /dev/null
+++ b/apisix/plugins/authz-keycloak.lua
@@ -0,0 +1,165 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local http = require "resty.http"
+local sub_str = string.sub
+local url = require "net.url"
+local tostring = tostring
+local ngx = ngx
+local plugin_name = "authz-keycloak"
+
+
+local schema = {
+ type = "object",
+ properties = {
+ token_endpoint = {type = "string", minLength = 1, maxLength = 4096},
+ permissions = {
+ type = "array",
+ items = {
+ type = "string",
+ minLength = 1, maxLength = 100
+ },
+ uniqueItems = true
+ },
+ grant_type = {
+ type = "string",
+ default="urn:ietf:params:oauth:grant-type:uma-ticket",
+ enum = {"urn:ietf:params:oauth:grant-type:uma-ticket"},
+ minLength = 1, maxLength = 100
+ },
+ audience = {type = "string", minLength = 1, maxLength = 100},
+ timeout = {type = "integer", minimum = 1000, default = 3000},
+ policy_enforcement_mode = {
+ type = "string",
+ enum = {"ENFORCING", "PERMISSIVE"},
+ default = "ENFORCING"
+ },
+ keepalive = {type = "boolean", default = true},
+ keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
+ keepalive_pool = {type = "integer", minimum = 1, default = 5},
+
+ },
+ required = {"token_endpoint"}
+}
+
+
+local _M = {
+ version = 0.1,
+ priority = 2000,
+ type = 'auth',
+ name = plugin_name,
+ schema = schema,
+}
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+local function is_path_protected(conf)
+ -- TODO if permissions are empty lazy load paths from Keycloak
+ if conf.permissions == nil then
+ return false
+ end
+ return true
+end
+
+
+local function evaluate_permissions(conf, token)
+ local url_decoded = url.parse(conf.token_endpoint)
+ local host = url_decoded.host
+ local port = url_decoded.port
+
+ if not port then
+ if url_decoded.scheme == "https" then
+ port = 443
+ else
+ port = 80
+ end
+ end
+
+ if not is_path_protected(conf) and conf.policy_enforcement_mode == "ENFORCING" then
+ core.response.exit(403)
+ return
+ end
+
+ local httpc = http.new()
+ httpc:set_timeout(conf.timeout)
+
+ local params = {
+ method = "POST",
+ body = ngx.encode_args({
+ grant_type = conf.grant_type,
+ audience = conf.audience,
+ response_mode = "decision",
+ permission = conf.permissions
+ }),
+ headers = {
+ ["Content-Type"] = "application/x-www-form-urlencoded",
+ ["Authorization"] = token
+ }
+ }
+
+ if conf.keepalive then
+ params.keepalive_timeout = conf.keepalive_timeout
+ params.keepalive_pool = conf.keepalive_pool
+ else
+ params.keepalive = conf.keepalive
+ end
+
+ local httpc_res, httpc_err = httpc:request_uri(conf.token_endpoint, params)
+
+ if not httpc_res then
+ core.log.error("error while sending authz request to [", host ,"] port[",
+ tostring(port), "] ", httpc_err)
+ core.response.exit(500, httpc_err)
+ return
+ end
+
+ if httpc_res.status >= 400 then
+ core.log.error("status code: ", httpc_res.status, " msg: ", httpc_res.body)
+ core.response.exit(httpc_res.status, httpc_res.body)
+ end
+end
+
+
+local function fetch_jwt_token(ctx)
+ local token = core.request.header(ctx, "authorization")
+ if not token then
+ return nil, "authorization header not available"
+ end
+
+ local prefix = sub_str(token, 1, 7)
+ if prefix ~= 'Bearer ' and prefix ~= 'bearer ' then
+ return "Bearer " .. token
+ end
+ return token
+end
+
+
+function _M.rewrite(conf, ctx)
+ core.log.debug("hit keycloak-auth rewrite")
+ local jwt_token, err = fetch_jwt_token(ctx)
+ if not jwt_token then
+ core.log.error("failed to fetch JWT token: ", err)
+ return 401, {message = "Missing JWT token in request"}
+ end
+
+ evaluate_permissions(conf, jwt_token)
+end
+
+
+return _M
diff --git a/apisix/plugins/batch-requests.lua b/apisix/plugins/batch-requests.lua
new file mode 100644
index 000000000000..71878218d8d7
--- /dev/null
+++ b/apisix/plugins/batch-requests.lua
@@ -0,0 +1,270 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local http = require("resty.http")
+local ngx = ngx
+local io_open = io.open
+local ipairs = ipairs
+local pairs = pairs
+local str_find = string.find
+local str_lower = string.lower
+
+
+local plugin_name = "batch-requests"
+
+local schema = {
+ type = "object",
+ additionalProperties = false,
+}
+
+local req_schema = {
+ type = "object",
+ properties = {
+ query = {
+ description = "pipeline query string",
+ type = "object"
+ },
+ headers = {
+ description = "pipeline header",
+ type = "object"
+ },
+ timeout = {
+ description = "pipeline timeout(ms)",
+ type = "integer",
+ default = 30000,
+ },
+ pipeline = {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "object",
+ properties = {
+ version = {
+ description = "HTTP version",
+ type = "number",
+ enum = {1.0, 1.1},
+ default = 1.1,
+ },
+ method = {
+ description = "HTTP method",
+ type = "string",
+ enum = {"GET", "POST", "PUT", "DELETE", "PATCH", "HEAD",
+ "OPTIONS", "CONNECT", "TRACE"},
+ default = "GET"
+ },
+ path = {
+ type = "string",
+ minLength = 1,
+ },
+ query = {
+ description = "request header",
+ type = "object",
+ },
+ headers = {
+ description = "request query string",
+ type = "object",
+ },
+ ssl_verify = {
+ type = "boolean",
+ default = false
+ },
+ }
+ }
+ }
+ },
+ anyOf = {
+ {required = {"pipeline"}},
+ },
+}
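+
+-- Illustrative request body accepted by this plugin (paths and query
+-- values are hypothetical):
+--   {
+--       "timeout": 2000,
+--       "pipeline": [
+--           {"path": "/hello", "method": "GET"},
+--           {"path": "/status", "method": "GET", "query": {"verbose": "1"}}
+--       ]
+--   }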
+
+local _M = {
+ version = 0.1,
+ priority = 4010,
+ name = plugin_name,
+ schema = schema
+}
+
+
+function _M.check_schema(conf)
+ local ok, err = core.schema.check(schema, conf)
+ if not ok then
+ return false, err
+ end
+ return true
+end
+
+
+local function check_input(data)
+ local ok, err = core.schema.check(req_schema, data)
+ if not ok then
+ return 400, {error_msg = "bad request body: " .. err}
+ end
+end
+
+local function lowercase_key_or_init(obj)
+ if not obj then
+ return {}
+ end
+
+ local lowercase_key_obj = {}
+ for k, v in pairs(obj) do
+ lowercase_key_obj[str_lower(k)] = v
+ end
+
+ return lowercase_key_obj
+end
+
+local function ensure_header_lowercase(data)
+ data.headers = lowercase_key_or_init(data.headers)
+
+    for _, req in ipairs(data.pipeline) do
+ req.headers = lowercase_key_or_init(req.headers)
+ end
+end
+
+
+local function set_common_header(data)
+ local outer_headers = core.request.headers(nil)
+    for _, req in ipairs(data.pipeline) do
+ for k, v in pairs(data.headers) do
+ if not req.headers[k] then
+ req.headers[k] = v
+ end
+ end
+
+ if outer_headers then
+ for k, v in pairs(outer_headers) do
+ local is_content_header = str_find(k, "content-", 1, true) == 1
+ -- skip header start with "content-"
+ if not req.headers[k] and not is_content_header then
+ req.headers[k] = v
+ end
+ end
+ end
+ end
+end
+
+
+local function set_common_query(data)
+ if not data.query then
+ return
+ end
+
+    for _, req in ipairs(data.pipeline) do
+ if not req.query then
+ req.query = data.query
+ else
+ for k, v in pairs(data.query) do
+ if not req.query[k] then
+ req.query[k] = v
+ end
+ end
+ end
+ end
+end
+
+
+local function get_file(file_name)
+ local f = io_open(file_name, 'r')
+ if f then
+ local req_body = f:read("*all")
+ f:close()
+ return req_body
+ end
+
+ return
+end
+
+
+local function batch_requests()
+ ngx.req.read_body()
+ local req_body = ngx.req.get_body_data()
+ if not req_body then
+ local file_name = ngx.req.get_body_file()
+ if file_name then
+ req_body = get_file(file_name)
+ end
+
+ if not req_body then
+ core.response.exit(400, {
+ error_msg = "no request body, you should give at least one pipeline setting"
+ })
+ end
+ end
+
+ local data, err = core.json.decode(req_body)
+ if not data then
+ core.response.exit(400, {
+ error_msg = "invalid request body: " .. req_body .. ", err: " .. err
+ })
+ end
+
+ local code, body = check_input(data)
+ if code then
+ core.response.exit(code, body)
+ end
+
+ local httpc = http.new()
+ httpc:set_timeout(data.timeout)
+ local ok, err = httpc:connect("127.0.0.1", ngx.var.server_port)
+ if not ok then
+ core.response.exit(500, {error_msg = "connect to apisix failed: " .. err})
+ end
+
+ ensure_header_lowercase(data)
+ set_common_header(data)
+ set_common_query(data)
+
+ local responses, err = httpc:request_pipeline(data.pipeline)
+ if not responses then
+ core.response.exit(400, {error_msg = "request failed: " .. err})
+ end
+
+ local aggregated_resp = {}
+    for _, resp in ipairs(responses) do
+        if not resp.status then
+            -- no status means this pipelined request never completed
+            core.table.insert(aggregated_resp, {
+                status = 504,
+                reason = "upstream timeout"
+            })
+        else
+            local sub_resp = {
+                status = resp.status,
+                reason = resp.reason,
+                headers = resp.headers,
+            }
+            if resp.has_body then
+                sub_resp.body = resp:read_body()
+            end
+            core.table.insert(aggregated_resp, sub_resp)
+        end
+    end
+ core.response.exit(200, aggregated_resp)
+end
+
+
+function _M.api()
+ return {
+ {
+ methods = {"POST"},
+ uri = "/apisix/batch-requests",
+ handler = batch_requests,
+ }
+ }
+end
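+
+-- Usage sketch (host, port and file name are hypothetical): POST a
+-- pipeline body like the example above to the endpoint registered here:
+--   curl http://127.0.0.1:9080/apisix/batch-requests -X POST -d @pipeline.json
+-- The plugin answers with one {status, reason, headers, body} entry per
+-- pipelined request.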
+
+
+return _M
diff --git a/apisix/plugins/consumer-restriction.lua b/apisix/plugins/consumer-restriction.lua
new file mode 100644
index 000000000000..912e2129a8cc
--- /dev/null
+++ b/apisix/plugins/consumer-restriction.lua
@@ -0,0 +1,94 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local ipairs = ipairs
+local core = require("apisix.core")
+
+local schema = {
+ type = "object",
+ properties = {
+ whitelist = {
+ type = "array",
+ items = {type = "string"},
+ minItems = 1
+ },
+ blacklist = {
+ type = "array",
+ items = {type = "string"},
+ minItems = 1
+ }
+ },
+ oneOf = {
+ {required = {"whitelist"}},
+ {required = {"blacklist"}}
+ }
+}
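+
+-- Illustrative configs (usernames are hypothetical):
+--   {"whitelist": ["consumer1"]}  -- only listed consumers may pass
+--   {"blacklist": ["consumer2"]}  -- listed consumers are rejected
+-- The oneOf above makes the two options mutually exclusive.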
+
+
+local plugin_name = "consumer-restriction"
+
+
+local _M = {
+ version = 0.1,
+ priority = 2400,
+ name = plugin_name,
+ schema = schema,
+}
+
+local function is_include(value, tab)
+    for _, v in ipairs(tab) do
+ if v == value then
+ return true
+ end
+ end
+ return false
+end
+
+function _M.check_schema(conf)
+ local ok, err = core.schema.check(schema, conf)
+
+ if not ok then
+ return false, err
+ end
+
+ return true
+end
+
+function _M.access(conf, ctx)
+ if not ctx.consumer then
+ return 401, { message = "Missing authentication or identity verification." }
+ end
+
+ local block = false
+ if conf.blacklist and #conf.blacklist > 0 then
+ if is_include(ctx.consumer.username, conf.blacklist) then
+ block = true
+ end
+ end
+
+ if conf.whitelist and #conf.whitelist > 0 then
+ if not is_include(ctx.consumer.username, conf.whitelist) then
+ block = true
+ end
+ end
+
+ if block then
+ return 403, { message = "The consumer is not allowed" }
+ end
+end
+
+
+return _M
diff --git a/apisix/plugins/cors.lua b/apisix/plugins/cors.lua
index b64010e29977..5d827d213663 100644
--- a/apisix/plugins/cors.lua
+++ b/apisix/plugins/cors.lua
@@ -64,18 +64,43 @@ local schema = {
allow_credential = {
type = "boolean",
default = false
- },
+ }
}
}
local _M = {
version = 0.1,
priority = 4000,
- type = 'auth',
name = plugin_name,
schema = schema,
}
+
+local function create_multiple_origin_cache(conf)
+ if not str_find(conf.allow_origins, ",", 1, true) then
+ return nil
+ end
+ local origin_cache = {}
+ local iterator, err = re_gmatch(conf.allow_origins, "([^,]+)", "jiox")
+ if not iterator then
+ core.log.error("match origins failed: ", err)
+ return nil
+ end
+ while true do
+ local origin, err = iterator()
+ if err then
+ core.log.error("iterate origins failed: ", err)
+ return nil
+ end
+ if not origin then
+ break
+ end
+ origin_cache[origin[0]] = true
+ end
+ return origin_cache
+end
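+
+-- The parsed origin set built above is cached with
+-- core.lrucache.plugin_ctx in header_filter below, so the regex split
+-- is not recomputed on every response.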
+
+
function _M.check_schema(conf)
local ok, err = core.schema.check(schema, conf)
if not ok then
@@ -85,63 +110,53 @@ function _M.check_schema(conf)
return true
end
-function _M.access(conf, ctx)
- local allow_origins = conf.allow_origins
- if allow_origins == "**" then
- allow_origins = ngx.var.http_origin or '*'
+
+local function set_cors_headers(conf, ctx)
+ local allow_methods = conf.allow_methods
+ if allow_methods == "**" then
+ allow_methods = "GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE"
end
- if str_find(allow_origins, ",", 1, true) then
- local finded = false
- local iterator, err = re_gmatch(allow_origins, "([^,]+)", "jiox")
- if not iterator then
- return 500, {message = "match origins failed", error = err}
- end
- while true do
- local origin, err = iterator()
- if err then
- return 500, {message = "iterate origins failed", error = err}
- end
- if not origin then
- break
- end
-
- if origin[0] == ngx.var.http_origin then
- allow_origins = origin[0]
- finded = true
- break
- end
- end
- if not finded then
- return
- end
+
+ core.response.set_header("Access-Control-Allow-Origin", ctx.cors_allow_origins)
+ core.response.set_header("Access-Control-Allow-Methods", allow_methods)
+ core.response.set_header("Access-Control-Allow-Headers", conf.allow_headers)
+ core.response.set_header("Access-Control-Max-Age", conf.max_age)
+ core.response.set_header("Access-Control-Expose-Headers", conf.expose_headers)
+ if conf.allow_credential then
+ core.response.set_header("Access-Control-Allow-Credentials", true)
end
+end
- ctx.cors_allow_origins = allow_origins
+function _M.rewrite(conf, ctx)
if ctx.var.request_method == "OPTIONS" then
return 200
end
end
+
function _M.header_filter(conf, ctx)
- if not ctx.cors_allow_origins then
- -- no origin matched, don't add headers
- return
+ local allow_origins = conf.allow_origins
+ local req_origin = core.request.header(ctx, "Origin")
+ if allow_origins == "**" then
+ allow_origins = req_origin or '*'
end
-
- local allow_methods = conf.allow_methods
- if allow_methods == "**" then
- allow_methods = "GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE"
+ local multiple_origin, err = core.lrucache.plugin_ctx(plugin_name, ctx,
+                                        create_multiple_origin_cache, conf)
+ if err then
+ return 500, {message = "get mutiple origin cache failed: " .. err}
end
- ngx.header["Access-Control-Allow-Origin"] = ctx.cors_allow_origins
- ngx.header["Access-Control-Allow-Methods"] = allow_methods
- ngx.header["Access-Control-Allow-Headers"] = conf.allow_headers
- ngx.header["Access-Control-Expose-Headers"] = conf.expose_headers
- ngx.header["Access-Control-Max-Age"] = conf.max_age
- if conf.allow_credential then
- ngx.header["Access-Control-Allow-Credentials"] = true
+ if multiple_origin then
+ if multiple_origin[req_origin] then
+ allow_origins = req_origin
+ else
+ return
+ end
end
+
+ ctx.cors_allow_origins = allow_origins
+ set_cors_headers(conf, ctx)
end
return _M
diff --git a/apisix/plugins/echo.lua b/apisix/plugins/echo.lua
new file mode 100644
index 000000000000..d112574ef1e5
--- /dev/null
+++ b/apisix/plugins/echo.lua
@@ -0,0 +1,131 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local pairs = pairs
+local type = type
+local ngx = ngx
+
+
+local schema = {
+ type = "object",
+ properties = {
+ before_body = {
+ description = "body before the filter phase.",
+ type = "string"
+ },
+ body = {
+ description = "body to replace upstream response.",
+ type = "string"
+ },
+ after_body = {
+ description = "body after the modification of filter phase.",
+ type = "string"
+ },
+ headers = {
+ description = "new headers for repsonse",
+ type = "object",
+ minProperties = 1,
+ },
+ auth_value = {
+ description = "auth value",
+ type = "string"
+ },
+ },
+ anyOf = {
+ {required = {"before_body"}},
+ {required = {"body"}},
+ {required = {"after_body"}}
+ },
+ minProperties = 1,
+ additionalProperties = false,
+}
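+
+-- Illustrative config (all strings are hypothetical):
+--   {"before_body": "prefix ", "body": "replaced body", "after_body": " suffix"}
+-- At least one of the three body fields is required by the anyOf above.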
+
+local plugin_name = "echo"
+
+local _M = {
+ version = 0.1,
+ priority = 412,
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf)
+ local ok, err = core.schema.check(schema, conf)
+ if not ok then
+ return false, err
+ end
+
+ return true
+end
+
+
+function _M.body_filter(conf, ctx)
+ if conf.body then
+ ngx.arg[1] = conf.body
+ end
+
+ if conf.before_body then
+ ngx.arg[1] = conf.before_body .. ngx.arg[1]
+ end
+
+ if conf.after_body then
+ ngx.arg[1] = ngx.arg[1] .. conf.after_body
+ end
+ ngx.arg[2] = true
+end
+
+
+function _M.access(conf, ctx)
+ local value = core.request.header(ctx, "Authorization")
+
+    -- skip the check when no auth_value is configured
+    if conf.auth_value and value ~= conf.auth_value then
+        return 401, "unauthorized body"
+    end
+end
+
+
+function _M.header_filter(conf, ctx)
+ if not conf.headers then
+ return
+ end
+
+ if not conf.headers_arr then
+ conf.headers_arr = {}
+
+ for field, value in pairs(conf.headers) do
+ if type(field) == 'string'
+ and (type(value) == 'string' or type(value) == 'number') then
+ if #field == 0 then
+ return false, 'invalid field length in header'
+ end
+ core.table.insert(conf.headers_arr, field)
+ core.table.insert(conf.headers_arr, value)
+ else
+ return false, 'invalid type as header value'
+ end
+ end
+ end
+
+ local field_cnt = #conf.headers_arr
+ for i = 1, field_cnt, 2 do
+ ngx.header[conf.headers_arr[i]] = conf.headers_arr[i+1]
+ end
+end
+
+return _M
diff --git a/apisix/plugins/example-plugin.lua b/apisix/plugins/example-plugin.lua
index 025ade4fd8e0..bf3683798511 100644
--- a/apisix/plugins/example-plugin.lua
+++ b/apisix/plugins/example-plugin.lua
@@ -15,7 +15,7 @@
-- limitations under the License.
--
local core = require("apisix.core")
-local balancer = require("ngx.balancer")
+local upstream = require("apisix.upstream")
local schema = {
type = "object",
@@ -60,25 +60,27 @@ end
function _M.access(conf, ctx)
core.log.warn("plugin access phase, conf: ", core.json.encode(conf))
-- return 200, {message = "hit example plugin"}
-end
-
-
-function _M.balancer(conf, ctx)
- core.log.warn("plugin balancer phase, conf: ", core.json.encode(conf))
if not conf.ip then
return
end
- -- NOTE: update `ctx.balancer_name` is important, APISIX will skip other
- -- balancer handler.
- ctx.balancer_name = plugin_name
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = conf.ip, port = conf.port, weight = 1}
+ }
+ }
- local ok, err = balancer.set_current_peer(conf.ip, conf.port)
+ local ok, err = upstream.check_schema(up_conf)
if not ok then
- core.log.error("failed to set server peer: ", err)
- return core.response.exit(502)
+ return 500, err
end
+
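+    -- instead of picking a peer in the balancer phase, build a one-node
+    -- upstream here and hand it to APISIX's upstream module below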
+ local matched_route = ctx.matched_route
+ upstream.set(ctx, up_conf.type .. "#route_" .. matched_route.value.id,
+ ctx.conf_version, up_conf, matched_route)
+ return
end
diff --git a/apisix/plugins/grpc-transcode/proto.lua b/apisix/plugins/grpc-transcode/proto.lua
index 13f3060b1cd8..09240fa5a5b3 100644
--- a/apisix/plugins/grpc-transcode/proto.lua
+++ b/apisix/plugins/grpc-transcode/proto.lua
@@ -63,6 +63,15 @@ function _M.fetch(proto_id)
end
+function _M.protos()
+ if not protos then
+ return nil, nil
+ end
+
+ return protos.values, protos.conf_version
+end
+
+
function _M.init()
local err
protos, err = core.config.new("/proto", {
diff --git a/apisix/plugins/grpc-transcode/util.lua b/apisix/plugins/grpc-transcode/util.lua
index 83d89abaf2a0..d705a1ed7126 100644
--- a/apisix/plugins/grpc-transcode/util.lua
+++ b/apisix/plugins/grpc-transcode/util.lua
@@ -51,7 +51,7 @@ local function get_from_request(name, kind)
local request_table
if ngx.req.get_method() == "POST" then
if string.find(ngx.req.get_headers()["Content-Type"] or "",
- "application/json", true) then
+ "application/json", 1, true) then
request_table = json.decode(ngx.req.get_body_data())
else
request_table = ngx.req.get_post_args()
diff --git a/apisix/plugins/heartbeat.lua b/apisix/plugins/heartbeat.lua
deleted file mode 100644
index 0a6cf76cbdc5..000000000000
--- a/apisix/plugins/heartbeat.lua
+++ /dev/null
@@ -1,145 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-local core = require("apisix.core")
-local http = require("resty.http")
-local encode_args = ngx.encode_args
-local plugin_name = "heartbeat"
-local ngx = ngx
-
-
-local apisix_heartbeat_addr = "https://www.iresty.com/apisix/heartbeat?"
-
-
-local schema = {
- type = "object",
- additionalProperties = false,
-}
-
-
-local _M = {
- version = 0.1,
- priority = 100,
- name = plugin_name,
- schema = schema,
-}
-
-
-function _M.check_schema(conf)
- local ok, err = core.schema.check(schema, conf)
- if not ok then
- return false, err
- end
-
- return true
-end
-
-
-local function request_apisix_svr(args)
- local http_cli, err = http.new()
- if err then
- return nil, err
- end
-
- http_cli:set_timeout(5 * 1000)
-
- local res
- res, err = http_cli:request_uri(apisix_heartbeat_addr .. args, {
- method = "GET",
- ssl_verify = false,
- keepalive = false,
- headers = {
- ["User-Agent"] = "curl/7.54.0",
- }
- })
-
- if err then
- return nil, err
- end
-
- if res.status ~= 200 then
- return nil, "invalid response code: " .. res.status
- end
-
- return res
-end
-
-
-local function report()
- -- ngx.sleep(3)
- local etcd_version, etcd_version_err
- local local_conf = core.config.local_conf()
-
- if local_conf.apisix.config_center == "etcd" then
- etcd_version, etcd_version_err = core.etcd.server_version()
- if not etcd_version then
- core.log.error("failed to fetch etcd version: ", etcd_version_err)
- else
- etcd_version = etcd_version.body and etcd_version.body.etcdserver
- end
- end
-
- core.log.info(core.json.encode(etcd_version))
-
- local info = {
- version = core.version,
- plugins = local_conf.plugins,
- config_center = local_conf.apisix.config_center,
- etcd_version = etcd_version,
- etcd_version_err = etcd_version_err,
- uuid = core.id.get(),
- }
-
- -- core.log.info(core.json.delay_encode(info, true))
- local args, err = encode_args(info)
- if not args then
- core.log.error("failed to encode hearbeat information: ", err)
- return
- end
- core.log.info("heartbeat body: ", args)
-
- local res
- res, err = request_apisix_svr(args)
- if not res then
- core.log.error("failed to report heartbeat information: ", err)
- return
- end
-
- core.log.info("succeed to report body: ",
- core.json.delay_encode(res, true))
-end
-
-do
- local timer
-
-function _M.init()
- if timer or ngx.worker.id() ~= 0 then
- return
- end
-
- local err
- timer, err = core.timer.new("heartbeat", report, {check_interval = 60 * 60})
- if not timer then
- core.log.error("failed to create timer: ", err)
- else
- core.log.info("succeed to create timer: heartbeat")
- end
-end
-
-end -- do
-
-
-return _M
diff --git a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua
new file mode 100644
index 000000000000..44df6aeff99c
--- /dev/null
+++ b/apisix/plugins/http-logger.lua
@@ -0,0 +1,176 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local log_util = require("apisix.utils.log-util")
+local batch_processor = require("apisix.utils.batch-processor")
+local plugin_name = "http-logger"
+local ngx = ngx
+local tostring = tostring
+local http = require("resty.http")
+local url = require("net.url")
+local buffers = {}
+
+local schema = {
+ type = "object",
+ properties = {
+ uri = {type = "string"},
+ auth_header = {type = "string", default = ""},
+ timeout = {type = "integer", minimum = 1, default = 3},
+ name = {type = "string", default = "http logger"},
+ max_retry_count = {type = "integer", minimum = 0, default = 0},
+ retry_delay = {type = "integer", minimum = 0, default = 1},
+ buffer_duration = {type = "integer", minimum = 1, default = 60},
+ inactive_timeout = {type = "integer", minimum = 1, default = 5},
+ batch_max_size = {type = "integer", minimum = 1, default = 1000},
+ include_req_body = {type = "boolean", default = false}
+ },
+ required = {"uri"}
+}
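+
+-- Illustrative config (the collector URI is hypothetical):
+--   {"uri": "http://127.0.0.1:1980/log", "batch_max_size": 10}
+-- Entries are buffered per route by the batch processor below and sent
+-- as a single JSON object when batch_max_size is 1, otherwise as a
+-- JSON array.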
+
+
+local _M = {
+ version = 0.1,
+ priority = 410,
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+
+local function send_http_data(conf, log_message)
+ local err_msg
+ local res = true
+ local url_decoded = url.parse(conf.uri)
+ local host = url_decoded.host
+ local port = url_decoded.port
+
+ if ((not port) and url_decoded.scheme == "https") then
+ port = 443
+ elseif not port then
+ port = 80
+ end
+
+ local httpc = http.new()
+ httpc:set_timeout(conf.timeout * 1000)
+ local ok, err = httpc:connect(host, port)
+
+ if not ok then
+ return false, "failed to connect to host[" .. host .. "] port["
+ .. tostring(port) .. "] " .. err
+ end
+
+ if url_decoded.scheme == "https" then
+ ok, err = httpc:ssl_handshake(true, host, false)
+ if not ok then
+ return nil, "failed to perform SSL with host[" .. host .. "] "
+ .. "port[" .. tostring(port) .. "] " .. err
+ end
+ end
+
+ local httpc_res, httpc_err = httpc:request({
+ method = "POST",
+ path = url_decoded.path,
+ query = url_decoded.query,
+ body = log_message,
+ headers = {
+ ["Host"] = url_decoded.host,
+ ["Content-Type"] = "application/json",
+ ["Authorization"] = conf.auth_header
+ }
+ })
+
+ if not httpc_res then
+ return false, "error while sending data to [" .. host .. "] port["
+ .. tostring(port) .. "] " .. httpc_err
+ end
+
+ -- some error occurred in the server
+ if httpc_res.status >= 400 then
+ res = false
+ err_msg = "server returned status code[" .. httpc_res.status .. "] host["
+ .. host .. "] port[" .. tostring(port) .. "] "
+ .. "body[" .. httpc_res:read_body() .. "]"
+ end
+
+ -- keep the connection alive
+ ok, err = httpc:set_keepalive(conf.keepalive)
+
+ if not ok then
+ core.log.debug("failed to keep the connection alive", err)
+ end
+
+ return res, err_msg
+end
+
+
+function _M.log(conf)
+ local entry = log_util.get_full_log(ngx, conf)
+
+ if not entry.route_id then
+ core.log.error("failed to obtain the route id for http logger")
+ return
+ end
+
+ local log_buffer = buffers[entry.route_id]
+
+ if log_buffer then
+ log_buffer:push(entry)
+ return
+ end
+
+ -- Generate a function to be executed by the batch processor
+ local func = function(entries, batch_max_size)
+ local data, err
+ if batch_max_size == 1 then
+ data, err = core.json.encode(entries[1]) -- encode as single {}
+ else
+ data, err = core.json.encode(entries) -- encode as array [{}]
+ end
+
+ if not data then
+ return false, 'error occurred while encoding the data: ' .. err
+ end
+
+ return send_http_data(conf, data)
+ end
+
+ local config = {
+ name = conf.name,
+ retry_delay = conf.retry_delay,
+ batch_max_size = conf.batch_max_size,
+ max_retry_count = conf.max_retry_count,
+ buffer_duration = conf.buffer_duration,
+ inactive_timeout = conf.inactive_timeout,
+ }
+
+ local err
+ log_buffer, err = batch_processor:new(func, config)
+
+ if not log_buffer then
+ core.log.error("error when creating the batch processor: ", err)
+ return
+ end
+
+ buffers[entry.route_id] = log_buffer
+ log_buffer:push(entry)
+end
+
+return _M
diff --git a/apisix/plugins/ip-restriction.lua b/apisix/plugins/ip-restriction.lua
index ab4deed3a0d8..f08c9c7ccbd0 100644
--- a/apisix/plugins/ip-restriction.lua
+++ b/apisix/plugins/ip-restriction.lua
@@ -110,7 +110,7 @@ function _M.check_schema(conf)
end
-local function create_ip_mather(ip_list)
+local function create_ip_matcher(ip_list)
local ip, err = ipmatcher.new(ip_list)
if not ip then
core.log.error("failed to create ip matcher: ", err,
@@ -128,7 +128,7 @@ function _M.access(conf, ctx)
if conf.blacklist and #conf.blacklist > 0 then
local matcher = lrucache(conf.blacklist, nil,
- create_ip_mather, conf.blacklist)
+ create_ip_matcher, conf.blacklist)
if matcher then
block = matcher:match(remote_addr)
end
@@ -136,7 +136,7 @@ function _M.access(conf, ctx)
if conf.whitelist and #conf.whitelist > 0 then
local matcher = lrucache(conf.whitelist, nil,
- create_ip_mather, conf.whitelist)
+ create_ip_matcher, conf.whitelist)
if matcher then
block = not matcher:match(remote_addr)
end
diff --git a/apisix/plugins/kafka-logger.lua b/apisix/plugins/kafka-logger.lua
index 8f89af263592..fc7d90cde719 100644
--- a/apisix/plugins/kafka-logger.lua
+++ b/apisix/plugins/kafka-logger.lua
@@ -17,14 +17,17 @@
local core = require("apisix.core")
local log_util = require("apisix.utils.log-util")
local producer = require ("resty.kafka.producer")
+local batch_processor = require("apisix.utils.batch-processor")
local pairs = pairs
local type = type
local table = table
-
+local ipairs = ipairs
local plugin_name = "kafka-logger"
-local ngx = ngx
-
+local stale_timer_running = false
local timer_at = ngx.timer.at
+local tostring = tostring
+local ngx = ngx
+local buffers = {}
local schema = {
type = "object",
@@ -32,13 +35,16 @@ local schema = {
broker_list = {
type = "object"
},
- timeout = { -- timeout in milliseconds
- type = "integer", minimum = 1, default= 2000
- },
kafka_topic = {type = "string"},
- async = {type = "boolean", default = false},
key = {type = "string"},
- max_retry = {type = "integer", minimum = 0 , default = 3},
+ timeout = {type = "integer", minimum = 1, default = 3},
+ name = {type = "string", default = "kafka logger"},
+ max_retry_count = {type = "integer", minimum = 0, default = 0},
+ retry_delay = {type = "integer", minimum = 0, default = 1},
+ buffer_duration = {type = "integer", minimum = 1, default = 60},
+ inactive_timeout = {type = "integer", minimum = 1, default = 5},
+ batch_max_size = {type = "integer", minimum = 1, default = 1000},
+ include_req_body = {type = "boolean", default = false}
},
required = {"broker_list", "kafka_topic", "key"}
}
@@ -50,15 +56,13 @@ local _M = {
schema = schema,
}
+
function _M.check_schema(conf)
return core.schema.check(schema, conf)
end
-local function log(premature, conf, log_message)
- if premature then
- return
- end
+local function send_kafka_data(conf, log_message)
if core.table.nkeys(conf.broker_list) == 0 then
core.log.error("failed to identify the broker specified")
end
@@ -68,7 +72,7 @@ local function log(premature, conf, log_message)
for host, port in pairs(conf.broker_list) do
if type(host) == 'string'
- and type(port) == 'number' then
+ and type(port) == 'number' then
local broker = {
host = host, port = port
@@ -77,28 +81,92 @@ local function log(premature, conf, log_message)
end
end
- broker_config["request_timeout"] = conf.timeout
- broker_config["max_retry"] = conf.max_retry
-
- --Async producers will queue logs and push them when the buffer exceeds.
- if conf.async then
- broker_config["producer_type"] = "async"
- end
+ broker_config["request_timeout"] = conf.timeout * 1000
local prod, err = producer:new(broker_list,broker_config)
if err then
- core.log.error("failed to identify the broker specified", err)
- return
+ return nil, "failed to identify the broker specified: " .. err
end
local ok, err = prod:send(conf.kafka_topic, conf.key, log_message)
if not ok then
- core.log.error("failed to send data to Kafka topic", err)
+ return nil, "failed to send data to Kafka topic" .. err
end
end
+-- remove stale objects from the memory after timer expires
+local function remove_stale_objects(premature)
+ if premature then
+ return
+ end
+
+    for key, batch in pairs(buffers) do -- route ids are hash keys, so pairs not ipairs
+ if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
+ core.log.debug("removing batch processor stale object, route id:", tostring(key))
+ buffers[key] = nil
+ end
+ end
+
+ stale_timer_running = false
+end
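+
+-- Each route gets its own batch processor in `buffers`; the 30-minute
+-- timer above drops processors whose buffers have gone empty so idle
+-- routes do not pin memory.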
+
+
function _M.log(conf)
- return timer_at(0, log, conf, core.json.encode(log_util.get_full_log(ngx)))
+ local entry = log_util.get_full_log(ngx, conf)
+
+ if not entry.route_id then
+ core.log.error("failed to obtain the route id for kafka logger")
+ return
+ end
+
+ local log_buffer = buffers[entry.route_id]
+
+ if not stale_timer_running then
+ -- run the timer every 30 mins if any log is present
+ timer_at(1800, remove_stale_objects)
+ stale_timer_running = true
+ end
+
+ if log_buffer then
+ log_buffer:push(entry)
+ return
+ end
+
+ -- Generate a function to be executed by the batch processor
+ local func = function(entries, batch_max_size)
+ local data, err
+ if batch_max_size == 1 then
+ data, err = core.json.encode(entries[1]) -- encode as single {}
+ else
+ data, err = core.json.encode(entries) -- encode as array [{}]
+ end
+
+ if not data then
+ return false, 'error occurred while encoding the data: ' .. err
+ end
+
+ return send_kafka_data(conf, data)
+ end
+
+ local config = {
+ name = conf.name,
+ retry_delay = conf.retry_delay,
+ batch_max_size = conf.batch_max_size,
+ max_retry_count = conf.max_retry_count,
+ buffer_duration = conf.buffer_duration,
+ inactive_timeout = conf.inactive_timeout,
+ }
+
+ local err
+ log_buffer, err = batch_processor:new(func, config)
+
+ if not log_buffer then
+ core.log.error("error when creating the batch processor: ", err)
+ return
+ end
+
+ buffers[entry.route_id] = log_buffer
+ log_buffer:push(entry)
end
return _M
diff --git a/apisix/plugins/limit-conn.lua b/apisix/plugins/limit-conn.lua
index dbffbabb8277..6ca46d5d1df7 100644
--- a/apisix/plugins/limit-conn.lua
+++ b/apisix/plugins/limit-conn.lua
@@ -30,9 +30,9 @@ local schema = {
enum = {"remote_addr", "server_addr", "http_x_real_ip",
"http_x_forwarded_for"},
},
- rejected_code = {type = "integer", minimum = 200},
+ rejected_code = {type = "integer", minimum = 200, default = 503},
},
- required = {"conn", "burst", "default_conn_delay", "key", "rejected_code"}
+ required = {"conn", "burst", "default_conn_delay", "key"}
}
diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua
index 42db2d54784b..3e9d4af28ade 100644
--- a/apisix/plugins/limit-count.lua
+++ b/apisix/plugins/limit-count.lua
@@ -34,7 +34,8 @@ local schema = {
enum = {"remote_addr", "server_addr", "http_x_real_ip",
"http_x_forwarded_for"},
},
- rejected_code = {type = "integer", minimum = 200, maximum = 600},
+ rejected_code = {type = "integer", minimum = 200, maximum = 600,
+ default = 503},
policy = {
type = "string",
enum = {"local", "redis"},
@@ -53,7 +54,7 @@ local schema = {
},
},
additionalProperties = false,
- required = {"count", "time_window", "key", "rejected_code"},
+ required = {"count", "time_window", "key"},
}
diff --git a/apisix/plugins/limit-count/limit-count-redis.lua b/apisix/plugins/limit-count/limit-count-redis.lua
index b71cd852b0a0..a4dfba51b36e 100644
--- a/apisix/plugins/limit-count/limit-count-redis.lua
+++ b/apisix/plugins/limit-count/limit-count-redis.lua
@@ -71,7 +71,12 @@ function _M.incoming(self, key)
local remaining
key = self.plugin_name .. tostring(key)
- local ret = red:ttl(key)
+ -- todo: test case
+ local ret, err = red:ttl(key)
+ if not ret then
+ return false, "failed to get redis `" .. key .."` ttl: " .. err
+ end
+
core.log.info("ttl key: ", key, " ret: ", ret, " err: ", err)
if ret < 0 then
-- todo: test case
diff --git a/apisix/plugins/limit-req.lua b/apisix/plugins/limit-req.lua
index e35c4b328e51..1caadce8b2f1 100644
--- a/apisix/plugins/limit-req.lua
+++ b/apisix/plugins/limit-req.lua
@@ -29,9 +29,9 @@ local schema = {
enum = {"remote_addr", "server_addr", "http_x_real_ip",
"http_x_forwarded_for"},
},
- rejected_code = {type = "integer", minimum = 200},
+ rejected_code = {type = "integer", minimum = 200, default = 503},
},
- required = {"rate", "burst", "key", "rejected_code"}
+ required = {"rate", "burst", "key"}
}
diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua
index 6a93226f9baa..29aca4c85ebd 100644
--- a/apisix/plugins/openid-connect.lua
+++ b/apisix/plugins/openid-connect.lua
@@ -116,11 +116,12 @@ local function introspect(ctx, conf)
end
else
res, err = openidc.introspect(conf)
- if res then
+ if err then
+ return ngx.HTTP_UNAUTHORIZED, err
+ else
return res
end
end
-
if conf.bearer_only then
ngx.header["WWW-Authenticate"] = 'Bearer realm="' .. conf.realm
.. '",error="' .. err .. '"'
@@ -138,7 +139,8 @@ local function add_user_header(user)
end
-function _M.access(conf, ctx)
+function _M.access(plugin_conf, ctx)
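+    -- work on a per-request copy so mutations below (e.g. redirect_uri)
+    -- don't leak into the shared plugin configuration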
+ local conf = core.table.clone(plugin_conf)
if not conf.redirect_uri then
conf.redirect_uri = ctx.var.request_uri
end
diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index 06243f8ecca1..7e3d63eae7da 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -14,26 +14,60 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local base_prometheus = require("resty.prometheus")
+local base_prometheus = require("prometheus")
local core = require("apisix.core")
local ipairs = ipairs
+local ngx = ngx
local ngx_capture = ngx.location.capture
local re_gmatch = ngx.re.gmatch
+local tonumber = tonumber
+local select = select
+local type = type
local prometheus
+local router = require("apisix.router")
+local get_routes = router.http_routes
+local get_ssls = router.ssls
+local get_services = require("apisix.http.service").services
+local get_consumers = require("apisix.consumer").consumers
+local get_upstreams = require("apisix.upstream").upstreams
+local clear_tab = core.table.clear
+local get_stream_routes = router.stream_routes
+local get_protos = require("apisix.plugins.grpc-transcode.proto").protos
+
+
-- Default set of latency buckets, 1ms to 60s:
local DEFAULT_BUCKETS = { 1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70,
80, 90, 100, 200, 300, 400, 500, 1000,
- 2000, 5000, 10000, 30000, 60000 }
+ 2000, 5000, 10000, 30000, 60000
+}
local metrics = {}
-local _M = {version = 0.3}
+local inner_tab_arr = {}
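+-- gen_arr reuses a single table for metric label values, avoiding one
+-- table allocation per request; the result is overwritten on the next
+-- call, so callers must consume it immediately.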
+local function gen_arr(...)
+ clear_tab(inner_tab_arr)
+
+ for i = 1, select('#', ...) do
+ inner_tab_arr[i] = select(i, ...)
+ end
+
+ return inner_tab_arr
+end
+
+
+local _M = {}
function _M.init()
- core.table.clear(metrics)
+ -- todo: support hot reload, we may need to update the lua-prometheus
+ -- library
+ if ngx.get_phase() ~= "init" and ngx.get_phase() ~= "init_worker" then
+ return
+ end
+
+ clear_tab(metrics)
-- across all services
prometheus = base_prometheus.init("prometheus-metrics", "apisix_")
@@ -44,6 +78,10 @@ function _M.init()
metrics.etcd_reachable = prometheus:gauge("etcd_reachable",
"Config server etcd reachable from APISIX, 0 is unreachable")
+ metrics.etcd_modify_indexes = prometheus:gauge("etcd_modify_indexes",
+ "Etcd modify index for APISIX keys",
+ {"key"})
+
-- per service
metrics.status = prometheus:counter("http_status",
"HTTP status codes per service in APISIX",
@@ -53,6 +91,10 @@ function _M.init()
"HTTP request latency per service in APISIX",
{"type", "service", "node"}, DEFAULT_BUCKETS)
+ metrics.overhead = prometheus:histogram("http_overhead",
+ "HTTP request overhead per service in APISIX",
+ {"type", "service", "node"}, DEFAULT_BUCKETS)
+
metrics.bandwidth = prometheus:counter("bandwidth",
"Total bandwidth in bytes consumed per service in APISIX",
{"type", "route", "service", "node"})
@@ -74,21 +116,31 @@ function _M.log(conf, ctx)
service_id = vars.host
end
- metrics.status:inc(1, vars.status, route_id, service_id, balancer_ip)
+ metrics.status:inc(1,
+ gen_arr(vars.status, route_id, service_id, balancer_ip))
local latency = (ngx.now() - ngx.req.start_time()) * 1000
- metrics.latency:observe(latency, "request", service_id, balancer_ip)
+ metrics.latency:observe(latency,
+ gen_arr("request", service_id, balancer_ip))
+
+ local overhead = latency
+ if ctx.var.upstream_response_time then
+ overhead = overhead - tonumber(ctx.var.upstream_response_time) * 1000
+ end
+ metrics.overhead:observe(overhead,
+ gen_arr("request", service_id, balancer_ip))
- metrics.bandwidth:inc(vars.request_length, "ingress", route_id, service_id,
- balancer_ip)
+ metrics.bandwidth:inc(vars.request_length,
+ gen_arr("ingress", route_id, service_id, balancer_ip))
- metrics.bandwidth:inc(vars.bytes_sent, "egress", route_id, service_id,
- balancer_ip)
+ metrics.bandwidth:inc(vars.bytes_sent,
+ gen_arr("egress", route_id, service_id, balancer_ip))
end
local ngx_statu_items = {"active", "accepted", "handled", "total",
"reading", "writing", "waiting"}
+ local label_values = {}
local function nginx_status()
local res = ngx_capture("/apisix/nginx_status")
if not res or res.status ~= 200 then
@@ -113,14 +165,92 @@ local function nginx_status()
break
end
- metrics.connections:set(val[0], name)
+ label_values[1] = name
+ metrics.connections:set(val[0], label_values)
+
+ end
+end
+
+
+local key_values = {}
+local function set_modify_index(key, items, items_ver, global_max_index)
+ clear_tab(key_values)
+ local max_idx = 0
+ if items_ver and items then
+ for _, item in ipairs(items) do
+ if type(item) == "table" and item.modifiedIndex > max_idx then
+ max_idx = item.modifiedIndex
+ end
+ end
+ end
+
+ key_values[1] = key
+ metrics.etcd_modify_indexes:set(max_idx, key_values)
+
+ global_max_index = max_idx > global_max_index and max_idx or global_max_index
+
+ return global_max_index
+end
+
+
+local function etcd_modify_index()
+ clear_tab(key_values)
+ local global_max_idx = 0
+
+ -- routes
+ local routes, routes_ver = get_routes()
+ global_max_idx = set_modify_index("routes", routes, routes_ver, global_max_idx)
+
+ -- services
+ local services, services_ver = get_services()
+ global_max_idx = set_modify_index("services", services, services_ver, global_max_idx)
+
+ -- ssls
+ local ssls, ssls_ver = get_ssls()
+ global_max_idx = set_modify_index("ssls", ssls, ssls_ver, global_max_idx)
+
+ -- consumers
+ local consumers, consumers_ver = get_consumers()
+ global_max_idx = set_modify_index("consumers", consumers, consumers_ver, global_max_idx)
+
+ -- global_rules
+ local global_rules = router.global_rules
+ if global_rules then
+ global_max_idx = set_modify_index("global_rules", global_rules.values,
+ global_rules.conf_version, global_max_idx)
+
+ -- prev_index
+ key_values[1] = "prev_index"
+ metrics.etcd_modify_indexes:set(global_rules.prev_index, key_values)
+
+ else
+ global_max_idx = set_modify_index("global_rules", nil, nil, global_max_idx)
end
+
+ -- upstreams
+ local upstreams, upstreams_ver = get_upstreams()
+ global_max_idx = set_modify_index("upstreams", upstreams, upstreams_ver, global_max_idx)
+
+ -- stream_routes
+ local stream_routes, stream_routes_ver = get_stream_routes()
+ global_max_idx = set_modify_index("stream_routes", stream_routes,
+ stream_routes_ver, global_max_idx)
+
+ -- proto
+ local protos, protos_ver = get_protos()
+ global_max_idx = set_modify_index("protos", protos, protos_ver, global_max_idx)
+
+ -- global max
+ key_values[1] = "max_modify_index"
+ metrics.etcd_modify_indexes:set(global_max_idx, key_values)
+
end
function _M.collect()
if not prometheus or not metrics then
- core.log.err("prometheus: plugin is not initialized, please make sure ",
+ core.log.error("prometheus: plugin is not initialized, please make sure ",
" 'prometheus_metrics' shared dict is present in nginx template")
return 500, {message = "An unexpected error occurred"}
end
@@ -128,6 +258,9 @@ function _M.collect()
-- across all services
nginx_status()
+ -- etcd modify index
+ etcd_modify_index()
+
-- config server status
local config = core.config.new()
local version, err = config:server_version()
@@ -140,6 +273,14 @@ function _M.collect()
"processing metrics endpoint: ", err)
end
+ local res, _ = config:getkey("/routes")
+ if res and res.headers then
+ clear_tab(key_values)
+ -- global max
+ key_values[1] = "x_etcd_index"
+ metrics.etcd_modify_indexes:set(res.headers["X-Etcd-Index"], key_values)
+ end
+
core.response.set_header("content_type", "text/plain")
return 200, core.table.concat(prometheus:metric_data())
end
diff --git a/apisix/plugins/proxy-rewrite.lua b/apisix/plugins/proxy-rewrite.lua
index e1ee90336786..d24d0b42ec08 100644
--- a/apisix/plugins/proxy-rewrite.lua
+++ b/apisix/plugins/proxy-rewrite.lua
@@ -21,7 +21,8 @@ local ipairs = ipairs
local ngx = ngx
local type = type
local re_sub = ngx.re.sub
-
+local sub_str = string.sub
+local find_str = string.find
local schema = {
type = "object",
@@ -61,6 +62,7 @@ local schema = {
},
},
minProperties = 1,
+ additionalProperties = false,
}
@@ -87,32 +89,35 @@ function _M.check_schema(conf)
end
end
- --reform header from object into array, so can avoid use pairs, which is NYI
- if conf.headers then
- conf.headers_arr = {}
+ -- check headers
+ if not conf.headers then
+ return true
+ end
- for field, value in pairs(conf.headers) do
- if type(field) == 'string'
- and (type(value) == 'string' or type(value) == 'number') then
- if #field == 0 then
- return false, 'invalid field length in header'
- end
-
- core.log.info("header field: ", field)
-
- if not core.utils.validate_header_field(field) then
- return false, 'invalid field character in header'
- end
- if not core.utils.validate_header_value(value) then
- return false, 'invalid value character in header'
- end
- core.table.insert(conf.headers_arr, field)
- core.table.insert(conf.headers_arr, value)
- else
- return false, 'invalid type as header value'
- end
+ for field, value in pairs(conf.headers) do
+ if type(field) ~= 'string' then
+ return false, 'invalid type as header field'
+ end
+
+ if type(value) ~= 'string' and type(value) ~= 'number' then
+ return false, 'invalid type as header value'
+ end
+
+ if #field == 0 then
+ return false, 'invalid field length in header'
+ end
+
+ core.log.info("header field: ", field)
+
+ if not core.utils.validate_header_field(field) then
+ return false, 'invalid field character in header'
+ end
+
+ if not core.utils.validate_header_value(value) then
+ return false, 'invalid value character in header'
end
end
+
return true
end
@@ -153,20 +158,42 @@ function _M.rewrite(conf, ctx)
end
end
- upstream_uri = core.utils.uri_safe_encode(upstream_uri)
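+    -- encode only the path part: a literal "?" in the configured uri
+    -- marks the start of a query string, which must stay unencoded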
+ local index = find_str(upstream_uri, "?", 1, true)
+ if index then
+ upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index-1)) ..
+ sub_str(upstream_uri, index)
+ else
+ upstream_uri = core.utils.uri_safe_encode(upstream_uri)
+ end
if ctx.var.is_args == "?" then
- ctx.var.upstream_uri = upstream_uri .. "?" .. (ctx.var.args or "")
+ if index then
+ ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "")
+ else
+ ctx.var.upstream_uri = upstream_uri .. "?" .. (ctx.var.args or "")
+ end
else
ctx.var.upstream_uri = upstream_uri
end
- if conf.headers_arr then
- local field_cnt = #conf.headers_arr
- for i = 1, field_cnt, 2 do
- ngx.req.set_header(conf.headers_arr[i], conf.headers_arr[i+1])
+ if not conf.headers then
+ return
+ end
+
+ -- reform header from object into array, so can avoid use pairs,
+ -- which is NYI
+ if not conf.headers_arr then
+ conf.headers_arr = {}
+
+ for field, value in pairs(conf.headers) do
+ core.table.insert_tail(conf.headers_arr, field, value)
end
end
+
+ local field_cnt = #conf.headers_arr
+ for i = 1, field_cnt, 2 do
+ ngx.req.set_header(conf.headers_arr[i], conf.headers_arr[i+1])
+ end
end
end -- do
diff --git a/apisix/plugins/redirect.lua b/apisix/plugins/redirect.lua
index 6cc28ac31307..de9c7691ba03 100644
--- a/apisix/plugins/redirect.lua
+++ b/apisix/plugins/redirect.lua
@@ -30,8 +30,12 @@ local schema = {
properties = {
ret_code = {type = "integer", minimum = 200, default = 302},
uri = {type = "string", minLength = 2},
+ http_to_https = {type = "boolean"}, -- default is false
},
- required = {"uri"},
+ oneOf = {
+ {required = {"uri"}},
+ {required = {"http_to_https"}}
+ }
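+    -- Illustrative: {"http_to_https": true} alone (without "uri") sends
+    -- a 301 to https://$host$request_uri, handled in the rewrite phase below.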
}
@@ -80,11 +84,13 @@ function _M.check_schema(conf)
return false, err
end
- local uri_segs, err = parse_uri(conf.uri)
- if not uri_segs then
- return false, err
+ if conf.uri then
+ local uri_segs, err = parse_uri(conf.uri)
+ if not uri_segs then
+ return false, err
+ end
+ core.log.info(core.json.delay_encode(uri_segs))
end
- core.log.info(core.json.delay_encode(uri_segs))
return true
end
@@ -120,15 +126,27 @@ end
function _M.rewrite(conf, ctx)
core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf))
- local new_uri, err = concat_new_uri(conf.uri, ctx)
- if not new_uri then
- core.log.error("failed to generate new uri by: ", conf.uri, " error: ",
- err)
- core.response.exit(500)
+ local ret_code = conf.ret_code
+ local uri = conf.uri
+
+ if conf.http_to_https and ctx.var.scheme == "http" then
+ -- TODO: add test case
+ -- PR: https://github.com/apache/incubator-apisix/pull/1958
+ uri = "https://$host$request_uri"
+ ret_code = 301
end
- core.response.set_header("Location", new_uri)
- core.response.exit(conf.ret_code)
+ if uri and ret_code then
+ local new_uri, err = concat_new_uri(uri, ctx)
+ if not new_uri then
+ core.log.error("failed to generate new uri by: ", uri, " error: ",
+ err)
+ core.response.exit(500)
+ end
+
+ core.response.set_header("Location", new_uri)
+ core.response.exit(ret_code)
+ end
end
diff --git a/apisix/plugins/request-validation.lua b/apisix/plugins/request-validation.lua
new file mode 100644
index 000000000000..438dbb2e0c2b
--- /dev/null
+++ b/apisix/plugins/request-validation.lua
@@ -0,0 +1,115 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local plugin_name = "request-validation"
+local ngx = ngx
+local io = io
+
+local schema = {
+ type = "object",
+ properties = {
+ body_schema = {type = "object"},
+ header_schema = {type = "object"}
+ },
+ anyOf = {
+ {required = {"body_schema"}},
+ {required = {"header_schema"}}
+ }
+}
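+
+-- Illustrative config (the embedded schema is hypothetical):
+--   {"body_schema": {"type": "object", "required": ["name"],
+--                    "properties": {"name": {"type": "string"}}}}
+-- A request whose JSON or urlencoded body fails the schema gets a 400.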
+
+
+local _M = {
+ version = 0.1,
+ priority = 2800,
+ type = 'validation',
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf)
+ local ok, err = core.schema.check(schema, conf)
+ if not ok then
+ return false, err
+ end
+
+ if conf.body_schema then
+ ok, err = core.schema.valid(conf.body_schema)
+ if not ok then
+ return false, err
+ end
+ end
+
+ if conf.header_schema then
+ ok, err = core.schema.valid(conf.header_schema)
+ if not ok then
+ return false, err
+ end
+ end
+
+ return true, nil
+end
+
+
+function _M.rewrite(conf)
+ local headers = ngx.req.get_headers()
+
+ if conf.header_schema then
+ local ok, err = core.schema.check(conf.header_schema, headers)
+ if not ok then
+ core.log.error("req schema validation failed", err)
+ core.response.exit(400, err)
+ end
+ end
+
+ if conf.body_schema then
+ ngx.req.read_body()
+        local req_body, decode_err
+ local body = ngx.req.get_body_data()
+
+ if not body then
+ local filename = ngx.req.get_body_file()
+ if not filename then
+ return core.response.exit(500)
+ end
+ local fd = io.open(filename, 'rb')
+ if not fd then
+ return core.response.exit(500)
+ end
+ body = fd:read('*a')
+ end
+
+ if headers["content-type"] == "application/x-www-form-urlencoded" then
+ req_body, error = ngx.decode_args(body)
+ else -- JSON as default
+ req_body, error = core.json.decode(body)
+ end
+
+ if not req_body then
+ core.log.error('failed to decode the req body', error)
+ return core.response.exit(400, error)
+ end
+
+ local ok, err = core.schema.check(conf.body_schema, req_body)
+ if not ok then
+ core.log.error("req schema validation failed", err)
+ return core.response.exit(400, err)
+ end
+ end
+end
+
+return _M
diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua
index bba2aa622c8c..08f905b6e274 100644
--- a/apisix/plugins/response-rewrite.lua
+++ b/apisix/plugins/response-rewrite.lua
@@ -46,6 +46,7 @@ local schema = {
}
},
minProperties = 1,
+ additionalProperties = false,
}
@@ -63,22 +64,19 @@ function _M.check_schema(conf)
return false, err
end
- --reform header from object into array, so can avoid use pairs, which is NYI
if conf.headers then
- conf.headers_arr = {}
-
for field, value in pairs(conf.headers) do
- if type(field) == 'string'
- and (type(value) == 'string' or type(value) == 'number') then
- if #field == 0 then
- return false, 'invalid field length in header'
- end
- core.table.insert(conf.headers_arr, field)
- core.table.insert(conf.headers_arr, value)
- else
+ if type(field) ~= 'string' then
+ return false, 'invalid type as header field'
+ end
+
+ if type(value) ~= 'string' and type(value) ~= 'number' then
return false, 'invalid type as header value'
end
+ if #field == 0 then
+ return false, 'invalid field length in header'
+ end
end
end
@@ -119,12 +117,23 @@ function _M.header_filter(conf, ctx)
ngx.header.content_encoding = nil
end
- if conf.headers_arr then
- local field_cnt = #conf.headers_arr
- for i = 1, field_cnt, 2 do
- ngx.header[conf.headers_arr[i]] = conf.headers_arr[i+1]
+ if not conf.headers then
+ return
+ end
+
+ --reform header from object into array, so can avoid use pairs, which is NYI
+ if not conf.headers_arr then
+ conf.headers_arr = {}
+
+ for field, value in pairs(conf.headers) do
+ core.table.insert_tail(conf.headers_arr, field, value)
end
end
+
+ local field_cnt = #conf.headers_arr
+ for i = 1, field_cnt, 2 do
+ ngx.header[conf.headers_arr[i]] = conf.headers_arr[i+1]
+ end
end
end -- do
diff --git a/apisix/plugins/skywalking.lua b/apisix/plugins/skywalking.lua
new file mode 100644
index 000000000000..f95286bd8d14
--- /dev/null
+++ b/apisix/plugins/skywalking.lua
@@ -0,0 +1,80 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local ngx = ngx
+local math = math
+
+local sw_client = require("apisix.plugins.skywalking.client")
+local sw_tracer = require("apisix.plugins.skywalking.tracer")
+
+local plugin_name = "skywalking"
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {type = "string"},
+        sample_ratio = {type = "number", minimum = 0.00001, maximum = 1, default = 1},
+        service_name = {
+            type = "string",
+            description = "service name for skywalking",
+            default = "APISIX",
+        },
+    },
+    required = {"endpoint"}
+}
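+
+-- Illustrative config (the collector endpoint is hypothetical):
+--   {"endpoint": "http://127.0.0.1:12800", "sample_ratio": 0.5}
+-- sample_ratio controls the fraction of requests traced in rewrite().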
+
+
+local _M = {
+ version = 0.1,
+ priority = -1100, -- last running plugin, but before serverless post func
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+
+function _M.rewrite(conf, ctx)
+ core.log.debug("rewrite phase of skywalking plugin")
+ ctx.skywalking_sample = false
+ if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then
+ ctx.skywalking_sample = true
+ sw_client.heartbeat(conf)
+ -- Currently, we can not have the upstream real network address
+ sw_tracer.start(ctx, conf.endpoint, "upstream service")
+ end
+end
+
+
+function _M.body_filter(conf, ctx)
+ if ctx.skywalking_sample and ngx.arg[2] then
+ sw_tracer.finish(ctx)
+ end
+end
+
+
+function _M.log(conf, ctx)
+ if ctx.skywalking_sample then
+ sw_tracer.prepareForReport(ctx, conf.endpoint)
+ end
+end
+
+return _M
diff --git a/apisix/plugins/skywalking/client.lua b/apisix/plugins/skywalking/client.lua
new file mode 100644
index 000000000000..f83a6e35bf80
--- /dev/null
+++ b/apisix/plugins/skywalking/client.lua
@@ -0,0 +1,232 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local http = require("resty.http")
+local cjson = require('cjson')
+local ngx = ngx
+local ipairs = ipairs
+
+local register = require("skywalking.register")
+
+local _M = {}
+
+local function register_service(conf)
+ local endpoint = conf.endpoint
+
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local service_id = tracing_buffer:get(endpoint .. '_service_id')
+ if service_id then
+ return service_id
+ end
+
+ local service_name = conf.service_name
+ local service = register.newServiceRegister(service_name)
+
+ local httpc = http.new()
+ local res, err = httpc:request_uri(endpoint .. '/v2/service/register',
+ {
+ method = "POST",
+ body = core.json.encode(service),
+ headers = {
+ ["Content-Type"] = "application/json",
+ },
+ })
+ if not res then
+ core.log.error("skywalking service register failed, request uri: ",
+ endpoint .. '/v2/service/register', ", err: ", err)
+
+ elseif res.status == 200 then
+ core.log.debug("skywalking service register response: ", res.body)
+ local register_results = cjson.decode(res.body)
+
+ for _, result in ipairs(register_results) do
+ if result.key == service_name then
+ service_id = result.value
+ core.log.debug("skywalking service registered, service id:"
+ .. service_id)
+ end
+ end
+
+ else
+ core.log.error("skywalking service register failed, request uri:",
+ endpoint .. "/v2/service/register",
+ ", response code:", res.status)
+ end
+
+ if service_id then
+ tracing_buffer:set(endpoint .. '_service_id', service_id)
+ end
+
+ return service_id
+end
+
+local function register_service_instance(conf, service_id)
+ local endpoint = conf.endpoint
+
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local instance_id = tracing_buffer:get(endpoint .. '_instance_id')
+ if instance_id then
+ return instance_id
+ end
+
+ local service_instance_name = core.id.get()
+ local service_instance = register.newServiceInstanceRegister(
+ service_id,
+ service_instance_name,
+ ngx.now() * 1000)
+
+ local httpc = http.new()
+ local res, err = httpc:request_uri(endpoint .. '/v2/instance/register',
+ {
+ method = "POST",
+ body = core.json.encode(service_instance),
+ headers = {
+ ["Content-Type"] = "application/json",
+ },
+ })
+
+ if not res then
+ core.log.error("skywalking service Instance register failed",
+ ", request uri: ", conf.endpoint .. '/v2/instance/register',
+ ", err: ", err)
+
+ elseif res.status == 200 then
+ core.log.debug("skywalking service instance register response: ", res.body)
+ local register_results = cjson.decode(res.body)
+
+ for _, result in ipairs(register_results) do
+ if result.key == service_instance_name then
+ instance_id = result.value
+ end
+ end
+
+ else
+ core.log.error("skywalking service instance register failed, ",
+ "response code:", res.status)
+ end
+
+ if instance_id then
+ tracing_buffer:set(endpoint .. '_instance_id', instance_id)
+ end
+
+ return instance_id
+end
+
+local function ping(endpoint)
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local ping_pkg = register.newServiceInstancePingPkg(
+ tracing_buffer:get(endpoint .. '_instance_id'),
+ core.id.get(),
+ ngx.now() * 1000)
+
+ local httpc = http.new()
+ local res, err = httpc:request_uri(endpoint .. '/v2/instance/heartbeat', {
+ method = "POST",
+ body = core.json.encode(ping_pkg),
+ headers = {
+ ["Content-Type"] = "application/json",
+ },
+ })
+
+ if err then
+ core.log.error("skywalking agent ping failed, err: ", err)
+ else
+ core.log.debug(res.body)
+ end
+end
+
+-- report trace segments to the backend
+local function report_traces(endpoint)
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local segment = tracing_buffer:rpop(endpoint .. '_segment')
+
+ local count = 0
+
+ local httpc = http.new()
+
+ while segment ~= nil do
+ local res, err = httpc:request_uri(endpoint .. '/v2/segments', {
+ method = "POST",
+ body = segment,
+ headers = {
+ ["Content-Type"] = "application/json",
+ },
+ })
+
+ if err == nil then
+ if res.status ~= 200 then
+ core.log.error("skywalking segment report failed, response code ", res.status)
+ break
+ else
+ count = count + 1
+ core.log.debug(res.body)
+ end
+ else
+ core.log.error("skywalking segment report failed, err: ", err)
+ break
+ end
+
+        segment = tracing_buffer:rpop(endpoint .. '_segment')
+ end
+
+ if count > 0 then
+ core.log.debug(count, " skywalking segments reported")
+ end
+end
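+-- The shared dict acts as a FIFO queue: the tracer lpush()es encoded segments
+-- onto `<endpoint>_segment` and this function rpop()s them until the queue is
+-- drained or a request to the collector fails.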
+
+do
+ local heartbeat_timer
+
+function _M.heartbeat(conf)
+ local sw_heartbeat = function()
+ local service_id = register_service(conf)
+ if not service_id then
+ return
+ end
+
+ core.log.debug("skywalking service registered, ",
+ "service id: ", service_id)
+
+ local service_instance_id = register_service_instance(conf, service_id)
+ if not service_instance_id then
+ return
+ end
+
+ core.log.debug("skywalking service Instance registered, ",
+ "service instance id: ", service_instance_id)
+ report_traces(conf.endpoint)
+ ping(conf.endpoint)
+ end
+
+ local err
+ if ngx.worker.id() == 0 and not heartbeat_timer then
+ heartbeat_timer, err = core.timer.new("skywalking_heartbeat",
+ sw_heartbeat,
+ {check_interval = 3}
+ )
+ if not heartbeat_timer then
+ core.log.error("failed to create skywalking_heartbeat timer: ", err)
+ else
+ core.log.info("succeed to create timer: skywalking heartbeat")
+ end
+ end
+end
+
+end -- do
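+-- Only worker 0 creates the heartbeat timer, so service registration, trace
+-- reporting and pings run once per APISIX node rather than once per nginx
+-- worker.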
+
+
+return _M
diff --git a/apisix/plugins/skywalking/tracer.lua b/apisix/plugins/skywalking/tracer.lua
new file mode 100644
index 000000000000..187b941edf46
--- /dev/null
+++ b/apisix/plugins/skywalking/tracer.lua
@@ -0,0 +1,101 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local span = require("skywalking.span")
+local tracing_context = require("skywalking.tracing_context")
+local span_layer = require("skywalking.span_layer")
+local sw_segment = require('skywalking.segment')
+
+local pairs = pairs
+local ngx = ngx
+
+-- Component ID pre-defined in the SkyWalking main repo;
+-- 6000 represents Nginx
+local NGINX_COMPONENT_ID = 6000
+
+local _M = {}
+
+function _M.start(ctx, endpoint, upstream_name)
+ local context
+ -- TODO: use lrucache for better performance
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local instance_id = tracing_buffer:get(endpoint .. '_instance_id')
+ local service_id = tracing_buffer:get(endpoint .. '_service_id')
+
+ if service_id and instance_id then
+ context = tracing_context.new(service_id, instance_id)
+ else
+ context = tracing_context.newNoOP()
+ end
+
+ local context_carrier = {}
+ context_carrier["sw6"] = ngx.req.get_headers()["sw6"]
+ local entry_span = tracing_context.createEntrySpan(context, ctx.var.uri, nil, context_carrier)
+ span.start(entry_span, ngx.now() * 1000)
+ span.setComponentId(entry_span, NGINX_COMPONENT_ID)
+ span.setLayer(entry_span, span_layer.HTTP)
+
+ span.tag(entry_span, 'http.method', ngx.req.get_method())
+ span.tag(entry_span, 'http.params', ctx.var.scheme .. '://'
+ .. ctx.var.host .. ctx.var.request_uri)
+
+ context_carrier = {}
+ local exit_span = tracing_context.createExitSpan(context,
+ ctx.var.upstream_uri,
+ entry_span,
+ upstream_name,
+ context_carrier)
+ span.start(exit_span, ngx.now() * 1000)
+ span.setComponentId(exit_span, NGINX_COMPONENT_ID)
+ span.setLayer(exit_span, span_layer.HTTP)
+
+ for name, value in pairs(context_carrier) do
+ ngx.req.set_header(name, value)
+ end
+
+ -- Push the data in the context
+ ctx.sw_tracing_context = context
+ ctx.sw_entry_span = entry_span
+ ctx.sw_exit_span = exit_span
+
+ core.log.debug("push data into skywalking context")
+end
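+-- The incoming `sw6` header (if present) links this segment to the caller's
+-- trace, and the generated context_carrier is written back into the request
+-- headers so the upstream can continue the same trace.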
+
+function _M.finish(ctx)
+    -- Finish the exit span once the first response packet is received from the upstream
+ if ctx.sw_exit_span then
+ span.finish(ctx.sw_exit_span, ngx.now() * 1000)
+ ctx.sw_exit_span = nil
+ end
+end
+
+function _M.prepareForReport(ctx, endpoint)
+ if ctx.sw_entry_span then
+ span.finish(ctx.sw_entry_span, ngx.now() * 1000)
+ local status, segment = tracing_context.drainAfterFinished(ctx.sw_tracing_context)
+ if status then
+ local segment_json = core.json.encode(sw_segment.transform(segment))
+ core.log.debug('segment = ', segment_json)
+
+ local tracing_buffer = ngx.shared['skywalking-tracing-buffer']
+ local length = tracing_buffer:lpush(endpoint .. '_segment', segment_json)
+ core.log.debug('segment buffer size = ', length)
+ end
+ end
+end
+
+return _M
diff --git a/apisix/plugins/syslog.lua b/apisix/plugins/syslog.lua
new file mode 100644
index 000000000000..7b96a2e010b6
--- /dev/null
+++ b/apisix/plugins/syslog.lua
@@ -0,0 +1,189 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local log_util = require("apisix.utils.log-util")
+local batch_processor = require("apisix.utils.batch-processor")
+local logger_socket = require("resty.logger.socket")
+local plugin_name = "syslog"
+local ngx = ngx
+local buffers = {}
+local pairs = pairs
+local stale_timer_running = false
+local timer_at = ngx.timer.at
+local tostring = tostring
+
+local schema = {
+ type = "object",
+ properties = {
+ host = {type = "string"},
+ port = {type = "integer"},
+ name = {type = "string", default = "sys logger"},
+ flush_limit = {type = "integer", minimum = 1, default = 4096},
+ drop_limit = {type = "integer", default = 1048576},
+ timeout = {type = "integer", minimum = 1, default = 3},
+ sock_type = {type = "string", default = "tcp"},
+ max_retry_times = {type = "integer", minimum = 1, default = 1},
+ retry_interval = {type = "integer", minimum = 0, default = 1},
+ pool_size = {type = "integer", minimum = 5, default = 5},
+ tls = {type = "boolean", default = false},
+ batch_max_size = {type = "integer", minimum = 1, default = 1000},
+ buffer_duration = {type = "integer", minimum = 1, default = 60},
+ include_req_body = {type = "boolean", default = false}
+ },
+ required = {"host", "port"}
+}
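+-- Example config accepted by this schema (illustrative values):
+--   {"host": "127.0.0.1", "port": 5044, "flush_limit": 1, "batch_max_size": 1}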
+
+local lrucache = core.lrucache.new({
+ ttl = 300, count = 512
+})
+
+local _M = {
+ version = 0.1,
+ priority = 401,
+ name = plugin_name,
+ schema = schema,
+}
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+function _M.flush_syslog(logger)
+ local ok, err = logger:flush(logger)
+ if not ok then
+ core.log.error("failed to flush message:", err)
+ end
+end
+
+local function send_syslog_data(conf, log_message)
+ local err_msg
+ local res = true
+
+ -- fetch api_ctx
+ local api_ctx = ngx.ctx.api_ctx
+ if not api_ctx then
+ core.log.error("invalid api_ctx cannot proceed with sys logger plugin")
+ return core.response.exit(500)
+ end
+
+ -- fetch it from lrucache
+ local logger, err = lrucache(api_ctx.conf_type .. "#" .. api_ctx.conf_id, api_ctx.conf_version,
+ logger_socket.new, logger_socket, {
+ host = conf.host,
+ port = conf.port,
+ flush_limit = conf.flush_limit,
+ drop_limit = conf.drop_limit,
+ timeout = conf.timeout,
+ sock_type = conf.sock_type,
+ max_retry_times = conf.max_retry_times,
+ retry_interval = conf.retry_interval,
+ pool_size = conf.pool_size,
+ tls = conf.tls,
+ })
+
+    if not logger then
+        res = false
+        err_msg = "failed when initiating the sys logger processor: " .. err
+        return res, err_msg
+    end
+
+    -- reuse the logger object
+    local ok, err = logger:log(core.json.encode(log_message))
+    if not ok then
+        res = false
+        err_msg = "failed to log message: " .. err
+    end
+
+ return res, err_msg
+end
+
+-- remove stale objects from the memory after timer expires
+local function remove_stale_objects(premature)
+ if premature then
+ return
+ end
+
+    for key, batch in pairs(buffers) do
+ if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
+ core.log.debug("removing batch processor stale object, route id:", tostring(key))
+ buffers[key] = nil
+ end
+ end
+
+ stale_timer_running = false
+end
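+-- buffers is keyed by route id; a route that stops receiving traffic would
+-- otherwise keep its batch processor alive forever, so idle processors (empty
+-- entry buffer and nothing in flight) are dropped on each sweep.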
+
+-- log phase in APISIX
+function _M.log(conf)
+ local entry = log_util.get_full_log(ngx, conf)
+
+ if not entry.route_id then
+ core.log.error("failed to obtain the route id for sys logger")
+ return
+ end
+
+ local log_buffer = buffers[entry.route_id]
+
+ if not stale_timer_running then
+        -- arm a 30-min timer to sweep stale batch processors;
+        -- it is re-armed by the next log call after it fires
+ timer_at(1800, remove_stale_objects)
+ stale_timer_running = true
+ end
+
+ if log_buffer then
+ log_buffer:push(entry)
+ return
+ end
+
+ -- Generate a function to be executed by the batch processor
+ local func = function(entries, batch_max_size)
+ local data, err
+ if batch_max_size == 1 then
+ data, err = core.json.encode(entries[1]) -- encode as single {}
+ else
+ data, err = core.json.encode(entries) -- encode as array [{}]
+ end
+
+ if not data then
+ return false, 'error occurred while encoding the data: ' .. err
+ end
+
+ return send_syslog_data(conf, data)
+ end
+
+ local config = {
+ name = conf.name,
+ retry_delay = conf.retry_interval,
+ batch_max_size = conf.batch_max_size,
+ max_retry_count = conf.max_retry_times,
+ buffer_duration = conf.buffer_duration,
+ inactive_timeout = conf.timeout,
+ }
+
+ local err
+ log_buffer, err = batch_processor:new(func, config)
+
+ if not log_buffer then
+ core.log.error("error when creating the batch processor: ", err)
+ return
+ end
+
+ buffers[entry.route_id] = log_buffer
+ log_buffer:push(entry)
+
+end
+
+return _M
diff --git a/apisix/plugins/tcp-logger.lua b/apisix/plugins/tcp-logger.lua
index 9eeef3320b77..ced5f8f23dad 100644
--- a/apisix/plugins/tcp-logger.lua
+++ b/apisix/plugins/tcp-logger.lua
@@ -22,6 +22,9 @@ local tostring = tostring
local buffers = {}
local ngx = ngx
local tcp = ngx.socket.tcp
+local pairs = pairs
+local stale_timer_running = false
+local timer_at = ngx.timer.at
local schema = {
type = "object",
@@ -37,6 +40,7 @@ local schema = {
buffer_duration = {type = "integer", minimum = 1, default = 60},
inactive_timeout = {type = "integer", minimum = 1, default = 5},
batch_max_size = {type = "integer", minimum = 1, default = 1000},
+ include_req_body = {type = "boolean", default = false}
},
required = {"host", "port"}
}
@@ -94,9 +98,25 @@ local function send_tcp_data(conf, log_message)
return res, err_msg
end
+-- remove stale objects from the memory after timer expires
+local function remove_stale_objects(premature)
+ if premature then
+ return
+ end
+
+    for key, batch in pairs(buffers) do
+ if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
+ core.log.debug("removing batch processor stale object, route id:", tostring(key))
+ buffers[key] = nil
+ end
+ end
+
+ stale_timer_running = false
+end
+
function _M.log(conf)
- local entry = log_util.get_full_log(ngx)
+ local entry = log_util.get_full_log(ngx, conf)
if not entry.route_id then
core.log.error("failed to obtain the route id for tcp logger")
@@ -105,6 +125,12 @@ function _M.log(conf)
local log_buffer = buffers[entry.route_id]
+ if not stale_timer_running then
+        -- arm a 30-min timer to sweep stale batch processors;
+        -- it is re-armed by the next log call after it fires
+ timer_at(1800, remove_stale_objects)
+ stale_timer_running = true
+ end
+
if log_buffer then
log_buffer:push(entry)
return
diff --git a/apisix/plugins/udp-logger.lua b/apisix/plugins/udp-logger.lua
index b1b565fb1b2d..cec782a34762 100644
--- a/apisix/plugins/udp-logger.lua
+++ b/apisix/plugins/udp-logger.lua
@@ -22,6 +22,9 @@ local tostring = tostring
local buffers = {}
local ngx = ngx
local udp = ngx.socket.udp
+local pairs = pairs
+local stale_timer_running = false
+local timer_at = ngx.timer.at
local schema = {
type = "object",
@@ -33,6 +36,7 @@ local schema = {
buffer_duration = {type = "integer", minimum = 1, default = 60},
inactive_timeout = {type = "integer", minimum = 1, default = 5},
batch_max_size = {type = "integer", minimum = 1, default = 1000},
+ include_req_body = {type = "boolean", default = false}
},
required = {"host", "port"}
}
@@ -77,9 +81,25 @@ local function send_udp_data(conf, log_message)
return res, err_msg
end
+-- remove stale objects from the memory after timer expires
+local function remove_stale_objects(premature)
+ if premature then
+ return
+ end
+
+    for key, batch in pairs(buffers) do
+ if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
+ core.log.debug("removing batch processor stale object, route id:", tostring(key))
+ buffers[key] = nil
+ end
+ end
+
+ stale_timer_running = false
+end
+
function _M.log(conf)
- local entry = log_util.get_full_log(ngx)
+ local entry = log_util.get_full_log(ngx, conf)
if not entry.route_id then
core.log.error("failed to obtain the route id for udp logger")
@@ -88,6 +108,12 @@ function _M.log(conf)
local log_buffer = buffers[entry.route_id]
+ if not stale_timer_running then
+        -- arm a 30-min timer to sweep stale batch processors;
+        -- it is re-armed by the next log call after it fires
+ timer_at(1800, remove_stale_objects)
+ stale_timer_running = true
+ end
+
if log_buffer then
log_buffer:push(entry)
return
diff --git a/apisix/plugins/uri-blocker.lua b/apisix/plugins/uri-blocker.lua
new file mode 100644
index 000000000000..b3dba18de20d
--- /dev/null
+++ b/apisix/plugins/uri-blocker.lua
@@ -0,0 +1,94 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local re_compile = require("resty.core.regex").re_match_compile
+local re_find = ngx.re.find
+local ipairs = ipairs
+
+local schema = {
+ type = "object",
+ properties = {
+ block_rules = {
+ type = "array",
+ items = {
+ type = "string",
+ minLength = 1,
+ maxLength = 4096,
+ },
+ uniqueItems = true
+ },
+ rejected_code = {
+ type = "integer",
+ minimum = 200,
+ default = 403
+ },
+ },
+ required = {"block_rules"},
+}
+
+
+local plugin_name = "uri-blocker"
+
+local _M = {
+ version = 0.1,
+ priority = 2900,
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf)
+ local ok, err = core.schema.check(schema, conf)
+ if not ok then
+ return false, err
+ end
+
+ for i, re_rule in ipairs(conf.block_rules) do
+ local ok, err = re_compile(re_rule, "j")
+ -- core.log.warn("ok: ", tostring(ok), " err: ", tostring(err),
+ -- " re_rule: ", re_rule)
+ if not ok then
+ return false, err
+ end
+ end
+
+ return true
+end
+
+
+function _M.rewrite(conf, ctx)
+ core.log.info("uri: ", ctx.var.request_uri)
+ core.log.info("block uri rules: ", conf.block_rules_concat)
+
+ if not conf.block_rules_concat then
+ local block_rules = {}
+ for i, re_rule in ipairs(conf.block_rules) do
+ block_rules[i] = re_rule
+ end
+
+ conf.block_rules_concat = core.table.concat(block_rules, "|")
+ core.log.info("concat block_rules: ", conf.block_rules_concat)
+ end
+
+ local from = re_find(ctx.var.request_uri, conf.block_rules_concat, "jo")
+ if from then
+ core.response.exit(conf.rejected_code)
+ end
+end
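+-- All rules are joined into one alternation so request_uri is matched with a
+-- single regex; the "jo" options enable PCRE JIT and the compiled-regex cache.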
+
+
+return _M
diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua
index 56412390e379..eebf07e07329 100644
--- a/apisix/plugins/zipkin.lua
+++ b/apisix/plugins/zipkin.lua
@@ -48,7 +48,7 @@ local schema = {
local _M = {
version = 0.1,
- priority = -1000, -- last running plugin, but before serverless post func
+ priority = -1000,
name = plugin_name,
schema = schema,
}
@@ -81,8 +81,8 @@ local function report2endpoint(premature, reporter)
end
-function _M.rewrite(conf, ctx)
-
+function _M.rewrite(plugin_conf, ctx)
+ local conf = core.table.clone(plugin_conf)
-- once the server started, server_addr and server_port won't change, so we can cache it.
conf.server_port = tonumber(ctx.var['server_port'])
diff --git a/apisix/plugins/zipkin/codec.lua b/apisix/plugins/zipkin/codec.lua
index 1d888fbb9bc3..a6a273979aca 100644
--- a/apisix/plugins/zipkin/codec.lua
+++ b/apisix/plugins/zipkin/codec.lua
@@ -17,6 +17,10 @@
local core = require("apisix.core")
local to_hex = require "resty.string".to_hex
local new_span_context = require("opentracing.span_context").new
+local ngx = ngx
+local string = string
+local pairs = pairs
+local tonumber = tonumber
local function hex_to_char(c)
return string.char(tonumber(c, 16))
diff --git a/apisix/plugins/zipkin/random_sampler.lua b/apisix/plugins/zipkin/random_sampler.lua
index 0a9290449f37..f2ee5b49a550 100644
--- a/apisix/plugins/zipkin/random_sampler.lua
+++ b/apisix/plugins/zipkin/random_sampler.lua
@@ -14,6 +14,12 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
+local assert = assert
+local type = type
+local setmetatable = setmetatable
+local math = math
+
+
local _M = {}
local mt = { __index = _M }
diff --git a/apisix/plugins/zipkin/reporter.lua b/apisix/plugins/zipkin/reporter.lua
index 8d6b4c06b8af..cc71fc5c8cca 100644
--- a/apisix/plugins/zipkin/reporter.lua
+++ b/apisix/plugins/zipkin/reporter.lua
@@ -18,6 +18,11 @@ local resty_http = require "resty.http"
local to_hex = require "resty.string".to_hex
local cjson = require "cjson".new()
cjson.encode_number_precision(16)
+local assert = assert
+local type = type
+local setmetatable = setmetatable
+local math = math
+local tostring = tostring
local _M = {}
diff --git a/apisix/router.lua b/apisix/router.lua
index d3b45941d995..b94be0c42f42 100644
--- a/apisix/router.lua
+++ b/apisix/router.lua
@@ -15,12 +15,13 @@
-- limitations under the License.
--
local require = require
-local core = require("apisix.core")
-local error = error
-local pairs = pairs
+local core = require("apisix.core")
+local error = error
+local pairs = pairs
+local ipairs = ipairs
-local _M = {version = 0.2}
+local _M = {version = 0.3}
local function filter(route)
@@ -29,17 +30,36 @@ local function filter(route)
return
end
- if not route.value.upstream then
+ if not route.value.upstream or not route.value.upstream.nodes then
return
end
- for addr, _ in pairs(route.value.upstream.nodes or {}) do
- local host = core.utils.parse_addr(addr)
- if not core.utils.parse_ipv4(host) and
- not core.utils.parse_ipv6(host) then
- route.has_domain = true
- break
+ local nodes = route.value.upstream.nodes
+ if core.table.isarray(nodes) then
+ for _, node in ipairs(nodes) do
+ local host = node.host
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ route.has_domain = true
+ break
+ end
end
+ else
+ local new_nodes = core.table.new(core.table.nkeys(nodes), 0)
+ for addr, weight in pairs(nodes) do
+ local host, port = core.utils.parse_addr(addr)
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ route.has_domain = true
+ end
+ local node = {
+ host = host,
+ port = port,
+ weight = weight,
+ }
+ core.table.insert(new_nodes, node)
+ end
+ route.value.upstream.nodes = new_nodes
end
core.log.info("filter route: ", core.json.delay_encode(route))
@@ -78,14 +98,30 @@ end
function _M.stream_init_worker()
local router_stream = require("apisix.stream.router.ip_port")
- router_stream.stream_init_worker()
+ router_stream.stream_init_worker(filter)
_M.router_stream = router_stream
end
+function _M.ssls()
+ return _M.router_ssl.ssls()
+end
+
function _M.http_routes()
return _M.router_http.routes()
end
+function _M.stream_routes()
+    -- the stream router may not be initialized yet.
+ if not _M.router_stream then
+ return nil, nil
+ end
+ return _M.router_stream.routes()
+end
+
+
+-- for test
+_M.filter_test = filter
+
return _M
diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua
index 50a783acd599..9e4604f29152 100644
--- a/apisix/schema_def.lua
+++ b/apisix/schema_def.lua
@@ -18,7 +18,7 @@ local schema = require('apisix.core.schema')
local setmetatable = setmetatable
local error = error
-local _M = {version = 0.4}
+local _M = {version = 0.5}
local plugins_schema = {
@@ -29,8 +29,8 @@ local plugins_schema = {
local id_schema = {
anyOf = {
{
- type = "string", minLength = 1, maxLength = 32,
- pattern = [[^[0-9]+$]]
+ type = "string", minLength = 1, maxLength = 64,
+ pattern = [[^[a-zA-Z0-9-_]+$]]
},
{type = "integer", minimum = 1}
}
@@ -75,9 +75,14 @@ local health_checker = {
enum = {"http", "https", "tcp"},
default = "http"
},
- timeout = {type = "integer", default = 1},
+ timeout = {type = "number", default = 1},
concurrency = {type = "integer", default = 10},
host = host_def,
+ port = {
+ type = "integer",
+ minimum = 1,
+ maximum = 65535
+ },
http_path = {type = "string", default = "/"},
https_verify_certificate = {type = "boolean", default = true},
healthy = {
@@ -225,11 +230,9 @@ local health_checker = {
}
-local upstream_schema = {
- type = "object",
- properties = {
- nodes = {
- description = "nodes of upstream",
+local nodes_schema = {
+ anyOf = {
+ {
type = "object",
patternProperties = {
[".*"] = {
@@ -240,9 +243,42 @@ local upstream_schema = {
},
minProperties = 1,
},
+ {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "object",
+ properties = {
+ host = host_def,
+ port = {
+ description = "port of node",
+ type = "integer",
+ minimum = 1,
+ },
+ weight = {
+ description = "weight of node",
+ type = "integer",
+ minimum = 0,
+ },
+ metadata = {
+ description = "metadata of node",
+ type = "object",
+ }
+ },
+ required = {"host", "port", "weight"},
+ },
+ }
+ }
+}
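+-- Both node forms describe the same data: the object form maps "host:port"
+-- to weight, e.g. {["127.0.0.1:1980"] = 1}, while the array form spells out
+-- host/port/weight per node and allows extra metadata.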
+
+
+local upstream_schema = {
+ type = "object",
+ properties = {
+ nodes = nodes_schema,
retries = {
type = "integer",
- minimum = 1,
+ minimum = 0,
},
timeout = {
type = "object",
@@ -253,6 +289,25 @@ local upstream_schema = {
},
required = {"connect", "send", "read"},
},
+ k8s_deployment_info = {
+ type = "object",
+ properties = {
+ namespace = {type = "string", description = "k8s namespace"},
+ deploy_name = {type = "string", description = "k8s deployment name"},
+ service_name = {type = "string", description = "k8s service name"},
+ port = {type = "number", minimum = 0},
+ backend_type = {
+ type = "string",
+ default = "pod",
+ description = "k8s service name",
+ enum = {"svc", "pod"}
+ },
+ },
+ anyOf = {
+ {required = {"namespace", "deploy_name", "port"}},
+ {required = {"namespace", "service_name", "port"}},
+ },
+ },
type = {
description = "algorithms of load balancing",
type = "string",
@@ -277,10 +332,16 @@ local upstream_schema = {
description = "enable websocket for request",
type = "boolean"
},
+ name = {type = "string", maxLength = 50},
desc = {type = "string", maxLength = 256},
+ service_name = {type = "string", maxLength = 50},
id = id_schema
},
- required = {"nodes", "type"},
+ anyOf = {
+ {required = {"type", "nodes"}},
+ {required = {"type", "k8s_deployment_info"}},
+ {required = {"type", "service_name"}},
+ },
additionalProperties = false,
}
@@ -314,6 +375,7 @@ _M.route = {
},
uniqueItems = true,
},
+ name = {type = "string", maxLength = 50},
desc = {type = "string", maxLength = 256},
priority = {type = "integer", default = 0},
@@ -360,6 +422,8 @@ _M.route = {
pattern = [[^function]],
},
+ script = {type = "string", minLength = 10, maxLength = 102400},
+
plugins = plugins_schema,
upstream = upstream_schema,
@@ -379,6 +443,13 @@ _M.route = {
{required = {"upstream", "uris"}},
{required = {"upstream_id", "uris"}},
{required = {"service_id", "uris"}},
+ {required = {"script", "uri"}},
+ {required = {"script", "uris"}},
+ },
+ ["not"] = {
+ anyOf = {
+ {required = {"script", "plugins"}}
+ }
},
additionalProperties = false,
}
@@ -391,12 +462,15 @@ _M.service = {
plugins = plugins_schema,
upstream = upstream_schema,
upstream_id = id_schema,
+ name = {type = "string", maxLength = 50},
desc = {type = "string", maxLength = 256},
+ script = {type = "string", minLength = 10, maxLength = 102400},
},
anyOf = {
{required = {"upstream"}},
{required = {"upstream_id"}},
{required = {"plugins"}},
+ {required = {"script"}},
},
additionalProperties = false,
}
@@ -405,6 +479,7 @@ _M.service = {
_M.consumer = {
type = "object",
properties = {
+ id = id_schema,
username = {
type = "string", minLength = 1, maxLength = 32,
pattern = [[^[a-zA-Z0-9_]+$]]
@@ -423,6 +498,7 @@ _M.upstream = upstream_schema
_M.ssl = {
type = "object",
properties = {
+ id = id_schema,
cert = {
type = "string", minLength = 128, maxLength = 64*1024
},
@@ -432,13 +508,34 @@ _M.ssl = {
sni = {
type = "string",
pattern = [[^\*?[0-9a-zA-Z-.]+$]],
+ },
+ snis = {
+ type = "array",
+ items = {
+ type = "string",
+ pattern = [[^\*?[0-9a-zA-Z-.]+$]],
+ }
+ },
+ exptime = {
+ type = "integer",
+ minimum = 1588262400, -- 2020/5/1 0:0:0
+ },
+ status = {
+ description = "ssl status, 1 to enable, 0 to disable",
+ type = "integer",
+ enum = {1, 0},
+ default = 1
}
},
- required = {"sni", "key", "cert"},
+ oneOf = {
+ {required = {"sni", "key", "cert"}},
+ {required = {"snis", "key", "cert"}}
+ },
additionalProperties = false,
}
+
_M.proto = {
type = "object",
properties = {
@@ -454,6 +551,7 @@ _M.proto = {
_M.global_rule = {
type = "object",
properties = {
+ id = id_schema,
plugins = plugins_schema
},
required = {"plugins"},
@@ -464,6 +562,7 @@ _M.global_rule = {
_M.stream_route = {
type = "object",
properties = {
+ id = id_schema,
remote_addr = remote_addr_def,
server_addr = {
description = "server IP",
diff --git a/apisix/script.lua b/apisix/script.lua
new file mode 100644
index 000000000000..a1e0c02d904f
--- /dev/null
+++ b/apisix/script.lua
@@ -0,0 +1,59 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local require = require
+local core = require("apisix.core")
+local loadstring = loadstring
+local error = error
+
+
+local _M = {}
+
+
+function _M.load(route, api_ctx)
+ local script = route.value.script
+ if script == nil or script == "" then
+ error("missing valid script")
+ end
+
+ local loadfun, err = loadstring(script, "route#" .. route.value.id)
+ if not loadfun then
+ error("failed to load script: " .. err .. " script: " .. script)
+ return nil
+ end
+ api_ctx.script_obj = loadfun()
+end
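+-- The stored script is expected to be a chunk that returns a table of phase
+-- handlers, e.g. (illustrative):
+--   return {access = function(ctx) ... end, log = function(ctx) ... end}
+-- so _M.run() below can index it by phase name.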
+
+
+function _M.run(phase, api_ctx)
+ local obj = api_ctx and api_ctx.script_obj
+ if not obj then
+ core.log.error("missing loaded script object")
+ return api_ctx
+ end
+
+ core.log.info("loaded script_obj: ", core.json.delay_encode(obj, true))
+
+ local phase_fun = obj[phase]
+ if phase_fun then
+ phase_fun(api_ctx)
+ end
+
+ return api_ctx
+end
+
+
+return _M
diff --git a/apisix/stream/plugins/mqtt-proxy.lua b/apisix/stream/plugins/mqtt-proxy.lua
index 439eee285820..b5334306b169 100644
--- a/apisix/stream/plugins/mqtt-proxy.lua
+++ b/apisix/stream/plugins/mqtt-proxy.lua
@@ -15,7 +15,9 @@
-- limitations under the License.
--
local core = require("apisix.core")
-local balancer = require("ngx.balancer")
+local upstream = require("apisix.upstream")
+local bit = require("bit")
+local ngx = ngx
local ngx_exit = ngx.exit
local str_byte = string.byte
local str_sub = string.sub
@@ -156,25 +158,28 @@ function _M.preread(conf, ctx)
end
core.log.info("mqtt client id: ", res.client_id)
-end
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = conf.upstream.ip, port = conf.upstream.port, weight = 1},
+ }
+ }
-function _M.log(conf, ctx)
- core.log.info("plugin log phase, conf: ", core.json.encode(conf))
-end
+ local ok, err = upstream.check_schema(up_conf)
+ if not ok then
+ return 500, err
+ end
+ local matched_route = ctx.matched_route
+ upstream.set(ctx, up_conf.type .. "#route_" .. matched_route.value.id,
+ ctx.conf_version, up_conf, matched_route)
+ return
+end
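+-- Building a one-node roundrobin upstream and handing it to upstream.set()
+-- replaces the old custom balancer phase: the generic balancer now picks the
+-- peer instead of a direct balancer.set_current_peer() call.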
-function _M.balancer(conf, ctx)
- core.log.info("plugin balancer phase, conf: ", core.json.encode(conf))
- -- ctx.balancer_name = plugin_name
- local up = conf.upstream
- ctx.balancer_name = plugin_name
- local ok, err = balancer.set_current_peer(up.ip, up.port)
- if not ok then
- core.log.error("failed to set server peer: ", err)
- return ngx_exit(1)
- end
+function _M.log(conf, ctx)
+ core.log.info("plugin log phase, conf: ", core.json.encode(conf))
end
diff --git a/apisix/upstream.lua b/apisix/upstream.lua
new file mode 100644
index 000000000000..dabb303e23d3
--- /dev/null
+++ b/apisix/upstream.lua
@@ -0,0 +1,156 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core = require("apisix.core")
+local error = error
+local tostring = tostring
+local ipairs = ipairs
+local pairs = pairs
+local upstreams
+
+
+local _M = {}
+
+
+local function set_directly(ctx, key, ver, conf, parent)
+ if not ctx then
+ error("missing argument ctx", 2)
+ end
+ if not key then
+ error("missing argument key", 2)
+ end
+ if not ver then
+ error("missing argument ver", 2)
+ end
+ if not conf then
+ error("missing argument conf", 2)
+ end
+ if not parent then
+ error("missing argument parent", 2)
+ end
+
+ ctx.upstream_conf = conf
+ ctx.upstream_version = ver
+ ctx.upstream_key = key
+ ctx.upstream_healthcheck_parent = parent
+ return
+end
+_M.set = set_directly
+
+
+function _M.set_by_route(route, api_ctx)
+ if api_ctx.upstream_conf then
+ core.log.warn("upstream node has been specified, ",
+ "cannot be set repeatedly")
+ return true
+ end
+
+ local up_id = route.value.upstream_id
+ if up_id then
+ if not upstreams then
+ return false, "need to create a etcd instance for fetching "
+ .. "upstream information"
+ end
+
+ local up_obj = upstreams:get(tostring(up_id))
+ if not up_obj then
+ return false, "failed to find upstream by id: " .. up_id
+ end
+ core.log.info("upstream: ", core.json.delay_encode(up_obj))
+
+ local up_conf = up_obj.dns_value or up_obj.value
+ set_directly(api_ctx, up_conf.type .. "#upstream_" .. up_id,
+ up_obj.modifiedIndex, up_conf, up_obj)
+ return true
+ end
+
+ local up_conf = (route.dns_value and route.dns_value.upstream)
+ or route.value.upstream
+ if not up_conf then
+ return false, "missing upstream configuration in Route or Service"
+ end
+
+ set_directly(api_ctx, up_conf.type .. "#route_" .. route.value.id,
+ api_ctx.conf_version, up_conf, route)
+ return true
+end
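+-- Resolution order: an explicit upstream_id wins and is fetched from the
+-- /upstreams etcd store; otherwise the upstream embedded in the route (or its
+-- DNS-resolved copy) is used.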
+
+
+function _M.upstreams()
+ if not upstreams then
+ return nil, nil
+ end
+
+ return upstreams.values, upstreams.conf_version
+end
+
+
+function _M.check_schema(conf)
+ return core.schema.check(core.schema.upstream, conf)
+end
+
+
+function _M.init_worker()
+ local err
+ upstreams, err = core.config.new("/upstreams", {
+ automatic = true,
+ item_schema = core.schema.upstream,
+ filter = function(upstream)
+ upstream.has_domain = false
+ if not upstream.value or not upstream.value.nodes then
+ return
+ end
+
+ local nodes = upstream.value.nodes
+ if core.table.isarray(nodes) then
+ for _, node in ipairs(nodes) do
+ local host = node.host
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ upstream.has_domain = true
+ break
+ end
+ end
+ else
+ local new_nodes = core.table.new(core.table.nkeys(nodes), 0)
+ for addr, weight in pairs(nodes) do
+ local host, port = core.utils.parse_addr(addr)
+ if not core.utils.parse_ipv4(host) and
+ not core.utils.parse_ipv6(host) then
+ upstream.has_domain = true
+ end
+ local node = {
+ host = host,
+ port = port,
+ weight = weight,
+ }
+ core.table.insert(new_nodes, node)
+ end
+ upstream.value.nodes = new_nodes
+ end
+
+ core.log.info("filter upstream: ", core.json.delay_encode(upstream))
+ end,
+ })
+ if not upstreams then
+ error("failed to create etcd instance for fetching upstream: " .. err)
+ end
+end
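+-- The filter above normalizes the legacy map form of nodes, e.g.
+-- {["127.0.0.1:1980"] = 1}, into the array form
+-- {{host = "127.0.0.1", port = 1980, weight = 1}} and flags upstreams whose
+-- hosts are domain names so they can be resolved later.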
+
+
+return _M
diff --git a/apisix/utils/batch-processor.lua b/apisix/utils/batch-processor.lua
index 41960951b2bc..18919c369566 100644
--- a/apisix/utils/batch-processor.lua
+++ b/apisix/utils/batch-processor.lua
@@ -21,9 +21,9 @@ local ipairs = ipairs
local table = table
local now = ngx.now
local type = type
-local Batch_Processor = {}
-local Batch_Processor_mt = {
- __index = Batch_Processor
+local batch_processor = {}
+local batch_processor_mt = {
+ __index = batch_processor
}
local execute_func
local create_buffer_timer
@@ -42,8 +42,8 @@ local schema = {
}
-local function schedule_func_exec(batch_processor, delay, batch)
- local hdl, err = timer_at(delay, execute_func, batch_processor, batch)
+local function schedule_func_exec(self, delay, batch)
+ local hdl, err = timer_at(delay, execute_func, self, batch)
if not hdl then
core.log.error("failed to create process timer: ", err)
return
@@ -51,69 +51,75 @@ local function schedule_func_exec(batch_processor, delay, batch)
end
-function execute_func(premature, batch_processor, batch)
+function execute_func(premature, self, batch)
if premature then
return
end
- local ok, err = batch_processor.func(batch.entries, batch_processor.batch_max_size)
+ local ok, err = self.func(batch.entries, self.batch_max_size)
if not ok then
- core.log.error("Batch Processor[", batch_processor.name, "] failed to process entries: ", err)
+ core.log.error("Batch Processor[", self.name,
+ "] failed to process entries: ", err)
batch.retry_count = batch.retry_count + 1
- if batch.retry_count <= batch_processor.max_retry_count then
- schedule_func_exec(batch_processor, batch_processor.retry_delay, batch)
+ if batch.retry_count <= self.max_retry_count then
+ schedule_func_exec(self, self.retry_delay,
+ batch)
else
- core.log.error("Batch Processor[", batch_processor.name,"] exceeded ",
- "the max_retry_count[", batch.retry_count,"] dropping the entries")
+ core.log.error("Batch Processor[", self.name,"] exceeded ",
+ "the max_retry_count[", batch.retry_count,
+ "] dropping the entries")
end
return
end
- core.log.debug("Batch Processor[", batch_processor.name ,"] successfully processed the entries")
+ core.log.debug("Batch Processor[", self.name,
+ "] successfully processed the entries")
end
-local function flush_buffer(premature, batch_processor)
+local function flush_buffer(premature, self)
if premature then
return
end
- if now() - batch_processor.last_entry_t >= batch_processor.inactive_timeout or
- now() - batch_processor.first_entry_t >= batch_processor.buffer_duration then
- core.log.debug("Batch Processor[", batch_processor.name ,"] buffer ",
+ if now() - self.last_entry_t >= self.inactive_timeout or
+ now() - self.first_entry_t >= self.buffer_duration
+ then
+ core.log.debug("Batch Processor[", self.name ,"] buffer ",
"duration exceeded, activating buffer flush")
- batch_processor:process_buffer()
- batch_processor.is_timer_running = false
+ self:process_buffer()
+ self.is_timer_running = false
return
end
- -- buffer duration did not exceed or the buffer is active, extending the timer
- core.log.debug("Batch Processor[", batch_processor.name ,"] extending buffer timer")
- create_buffer_timer(batch_processor)
+ -- buffer duration did not exceed or the buffer is active,
+ -- extending the timer
+ core.log.debug("Batch Processor[", self.name ,"] extending buffer timer")
+ create_buffer_timer(self)
end
-function create_buffer_timer(batch_processor)
- local hdl, err = timer_at(batch_processor.inactive_timeout, flush_buffer, batch_processor)
+function create_buffer_timer(self)
+ local hdl, err = timer_at(self.inactive_timeout, flush_buffer, self)
if not hdl then
core.log.error("failed to create buffer timer: ", err)
return
end
- batch_processor.is_timer_running = true
+ self.is_timer_running = true
end
-function Batch_Processor:new(func, config)
+function batch_processor:new(func, config)
local ok, err = core.schema.check(schema, config)
if not ok then
- return err
+ return nil, err
end
if not(type(func) == "function") then
return nil, "Invalid argument, arg #1 must be a function"
end
- local batch_processor = {
+ local processor = {
func = func,
buffer_duration = config.buffer_duration,
inactive_timeout = config.inactive_timeout,
@@ -128,11 +134,11 @@ function Batch_Processor:new(func, config)
last_entry_t = 0
}
- return setmetatable(batch_processor, Batch_Processor_mt)
+ return setmetatable(processor, batch_processor_mt)
end
-function Batch_Processor:push(entry)
+function batch_processor:push(entry)
-- if the batch size is one then immediately send for processing
if self.batch_max_size == 1 then
local batch = { entries = { entry }, retry_count = 0 }
@@ -149,7 +155,8 @@ function Batch_Processor:push(entry)
self.last_entry_t = now()
if self.batch_max_size <= #entries then
- core.log.debug("Batch Processor[", self.name ,"] batch max size has exceeded")
+ core.log.debug("Batch Processor[", self.name ,
+ "] batch max size has exceeded")
self:process_buffer()
end
@@ -159,7 +166,7 @@ function Batch_Processor:push(entry)
end
-function Batch_Processor:process_buffer()
+function batch_processor:process_buffer()
-- If entries are present in the buffer move the entries to processing
if #self.entry_buffer.entries > 0 then
core.log.debug("tranferring buffer entries to processing pipe line, ",
@@ -175,4 +182,4 @@ function Batch_Processor:process_buffer()
end
-return Batch_Processor
+return batch_processor
diff --git a/apisix/utils/log-util.lua b/apisix/utils/log-util.lua
index 6ee03b288db6..b11a435808f8 100644
--- a/apisix/utils/log-util.lua
+++ b/apisix/utils/log-util.lua
@@ -18,7 +18,7 @@ local core = require("apisix.core")
local _M = {}
-local function get_full_log(ngx)
+local function get_full_log(ngx, conf)
local ctx = ngx.ctx.api_ctx
local var = ctx.var
local service_id
@@ -34,7 +34,7 @@ local function get_full_log(ngx)
service_id = var.host
end
- return {
+ local log = {
request = {
url = url,
uri = var.request_uri,
@@ -56,6 +56,20 @@ local function get_full_log(ngx)
start_time = ngx.req.start_time() * 1000,
latency = (ngx.now() - ngx.req.start_time()) * 1000
}
+
+ if conf.include_req_body then
+ local body = ngx.req.get_body_data()
+ if body then
+ log.request.body = body
+ else
+ local body_file = ngx.req.get_body_file()
+ if body_file then
+ log.request.body_file = body_file
+ end
+ end
+ end
+
+ return log
end
_M.get_full_log = get_full_log
diff --git a/benchmark/fake-apisix/conf/nginx.conf b/benchmark/fake-apisix/conf/nginx.conf
index 327169adf189..124008dc4516 100644
--- a/benchmark/fake-apisix/conf/nginx.conf
+++ b/benchmark/fake-apisix/conf/nginx.conf
@@ -24,7 +24,6 @@ pid logs/nginx.pid;
worker_rlimit_nofile 20480;
events {
- accept_mutex off;
worker_connections 10620;
}
@@ -33,6 +32,9 @@ worker_shutdown_timeout 3;
http {
lua_package_path "$prefix/lua/?.lua;;";
+ log_format main '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time';
+ access_log logs/access.log main buffer=16384 flush=5;
+
init_by_lua_block {
require "resty.core"
apisix = require("apisix")
@@ -49,7 +51,7 @@ http {
apisix.http_balancer_phase()
}
- keepalive 32;
+ keepalive 320;
}
server {
@@ -60,8 +62,6 @@ http {
listen 9080;
- access_log off;
-
server_tokens off;
more_set_headers 'Server: APISIX web server';
@@ -100,12 +100,43 @@ http {
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_header Server;
proxy_pass_header Date;
+
+            ### the following x-forwarded-* headers are sent to the upstream server
+
+ set $var_x_forwarded_for $remote_addr;
+ set $var_x_forwarded_proto $scheme;
+ set $var_x_forwarded_host $host;
+ set $var_x_forwarded_port $server_port;
+
+ if ($http_x_forwarded_for != "") {
+ set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}";
+ }
+ if ($http_x_forwarded_proto != "") {
+ set $var_x_forwarded_proto $http_x_forwarded_proto;
+ }
+ if ($http_x_forwarded_host != "") {
+ set $var_x_forwarded_host $http_x_forwarded_host;
+ }
+ if ($http_x_forwarded_port != "") {
+ set $var_x_forwarded_port $http_x_forwarded_port;
+ }
+
+ proxy_set_header X-Forwarded-For $var_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
+ proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
+ proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
+
+ # proxy pass
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
+ body_filter_by_lua_block {
+ apisix.http_body_filter_phase()
+ }
+
log_by_lua_block {
apisix.http_log_phase()
}
diff --git a/benchmark/fake-apisix/lua/apisix.lua b/benchmark/fake-apisix/lua/apisix.lua
index 30671f7fa41b..ea5bf15bf111 100644
--- a/benchmark/fake-apisix/lua/apisix.lua
+++ b/benchmark/fake-apisix/lua/apisix.lua
@@ -25,7 +25,7 @@ end
local function fake_fetch()
ngx.ctx.ip = "127.0.0.1"
- ngx.ctx.port = 80
+ ngx.ctx.port = 1980
end
function _M.http_access_phase()
@@ -42,6 +42,12 @@ function _M.http_header_filter_phase()
end
end
+function _M.http_body_filter_phase()
+ if ngx.ctx then
+ -- do something
+ end
+end
+
function _M.http_log_phase()
if ngx.ctx then
-- do something
diff --git a/benchmark/run.sh b/benchmark/run.sh
index ff068d64f57b..fa3fc3cce600 100755
--- a/benchmark/run.sh
+++ b/benchmark/run.sh
@@ -28,6 +28,8 @@ mkdir -p benchmark/fake-apisix/logs
sudo openresty -p $PWD/benchmark/server || exit 1
+make init
+
trap 'onCtrlC' INT
function onCtrlC () {
sudo killall wrk
@@ -36,7 +38,12 @@ function onCtrlC () {
sudo openresty -p $PWD/benchmark/server -s stop || exit 1
}
-sed -i "s/worker_processes [0-9]*/worker_processes $worker_cnt/g" conf/nginx.conf
+if [[ "$(uname)" == "Darwin" ]]; then
+ sed -i "" "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf
+else
+ sed -i "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf
+fi
+
make run
sleep 3
@@ -52,7 +59,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
"upstream": {
"type": "roundrobin",
"nodes": {
- "127.0.0.1:80": 1
+ "127.0.0.1:1980": 1
}
}
}'
@@ -85,7 +92,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
"upstream": {
"type": "roundrobin",
"nodes": {
- "127.0.0.1:80": 1
+ "127.0.0.1:1980": 1
}
}
}'
diff --git a/benchmark/server/conf/nginx.conf b/benchmark/server/conf/nginx.conf
index f943cbf620d9..c3accd6561fe 100644
--- a/benchmark/server/conf/nginx.conf
+++ b/benchmark/server/conf/nginx.conf
@@ -32,7 +32,7 @@ worker_shutdown_timeout 3;
http {
server {
- listen 80;
+ listen 1980;
access_log off;
location = /hello {
diff --git a/bin/apisix b/bin/apisix
index 1659de218ddc..609d47047276 100755
--- a/bin/apisix
+++ b/bin/apisix
@@ -21,6 +21,8 @@ local function trim(s)
return (s:gsub("^%s*(.-)%s*$", "%1"))
end
+-- Note: the return value of `excute_cmd` ends with a trailing line break;
+-- use the `trim` function above to strip it when needed.
local function excute_cmd(cmd)
local t, err = io.popen(cmd)
if not t then
@@ -50,7 +52,7 @@ if script_path:sub(1, 2) == './' then
error("failed to fetch current path")
end
- if string.match(apisix_home, '^/[root][^/]+') then
+ if string.match(apisix_home .. "/", '^/root/') then
is_root_path = true
end
@@ -103,9 +105,8 @@ events {
}
worker_rlimit_core {* worker_rlimit_core *};
-working_directory /tmp/apisix_cores/;
-worker_shutdown_timeout 3;
+worker_shutdown_timeout {* worker_shutdown_timeout *};
env APISIX_PROFILE;
@@ -179,6 +180,7 @@ http {
lua_shared_dict upstream-healthcheck 10m;
lua_shared_dict worker-events 10m;
lua_shared_dict lrucache-lock 10m;
+ lua_shared_dict skywalking-tracing-buffer 100m;
# for openid-connect plugin
lua_shared_dict discovery 1m; # cache for discovery metadata documents
@@ -192,14 +194,14 @@ http {
{% end %}
{% end %}
- {% if proxy_cache then %}
+ {% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
{% for _, cache in ipairs(proxy_cache.zones) do %}
proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *};
{% end %}
{% end %}
- {% if proxy_cache then %}
+ {% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
map $upstream_cache_zone $upstream_cache_zone_info {
{% for _, cache in ipairs(proxy_cache.zones) do %}
@@ -227,7 +229,7 @@ http {
log_format main '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time';
- access_log {* http.access_log *} main buffer=32768 flush=3;
+ access_log {* http.access_log *} main buffer=16384 flush=3;
open_file_cache max=1000 inactive=60;
client_max_body_size 0;
keepalive_timeout {* http.keepalive_timeout *};
@@ -239,6 +241,7 @@ http {
more_set_headers 'Server: APISIX web server';
include mime.types;
+ charset utf-8;
{% if real_ip_header then %}
real_ip_header {* real_ip_header *};
@@ -284,7 +287,30 @@ http {
{% if enable_admin and port_admin then %}
server {
+ {%if https_admin then%}
+ listen {* port_admin *} ssl;
+
+ {%if admin_api_mtls and admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert ~= "" and
+ admin_api_mtls.admin_ssl_cert_key and admin_api_mtls.admin_ssl_cert_key ~= "" and
+ admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= ""
+ then%}
+ ssl_verify_client on;
+ ssl_certificate {* admin_api_mtls.admin_ssl_cert *};
+ ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *};
+ ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *};
+ {% else %}
+ ssl_certificate cert/apisix_admin_ssl.crt;
+ ssl_certificate_key cert/apisix_admin_ssl.key;
+ {%end%}
+
+ ssl_session_cache shared:SSL:20m;
+ ssl_protocols {* ssl.ssl_protocols *};
+ ssl_ciphers {* ssl.ssl_ciphers *};
+ ssl_prefer_server_ciphers on;
+
+ {% else %}
listen {* port_admin *};
+ {%end%}
log_not_found off;
location /apisix/admin {
{%if allow_admin then%}
@@ -309,10 +335,10 @@ http {
alias dashboard/;
- try_files $uri $uri/index.html /index.html;
+ try_files $uri $uri/index.html /index.html =404;
}
- location /robots.txt {
+ location =/robots.txt {
return 200 'User-agent: *\nDisallow: /';
}
}
@@ -340,7 +366,8 @@ http {
ssl_certificate cert/apisix.crt;
ssl_certificate_key cert/apisix.key;
- ssl_session_cache shared:SSL:1m;
+ ssl_session_cache shared:SSL:20m;
+ ssl_session_timeout 10m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
@@ -379,7 +406,7 @@ http {
alias dashboard/;
- try_files $uri $uri/index.html /index.html;
+ try_files $uri $uri/index.html /index.html =404;
}
{% end %}
@@ -432,7 +459,7 @@ http {
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
- {% if proxy_cache then %}
+ {% if enabled_plugins["proxy-cache"] then %}
### the following configuration is to cache response content from upstream server
set $upstream_cache_zone off;
@@ -460,7 +487,10 @@ http {
{% end %}
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
+
+ {% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror;
+ {% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
@@ -498,6 +528,7 @@ http {
}
}
+ {% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror {
internal;
@@ -507,6 +538,7 @@ http {
proxy_pass $upstream_mirror_host$request_uri;
}
+ {% end %}
}
}
]=]
@@ -659,14 +691,24 @@ local function init()
with_module_status = false
end
+ local enabled_plugins = {}
+ for i, name in ipairs(yaml_conf.plugins) do
+ enabled_plugins[name] = true
+ end
+
+ if enabled_plugins["proxy-cache"] and not yaml_conf.apisix.proxy_cache then
+ error("missing apisix.proxy_cache for plugin proxy-cache")
+ end
+
-- Using template.render
local sys_conf = {
lua_path = pkg_path_org,
lua_cpath = pkg_cpath_org,
- os_name = excute_cmd("uname"),
+ os_name = trim(excute_cmd("uname")),
apisix_lua_home = apisix_home,
with_module_status = with_module_status,
error_log = {level = "warn"},
+ enabled_plugins = enabled_plugins,
}
if not yaml_conf.apisix then
@@ -699,6 +741,7 @@ local function init()
if(sys_conf["enable_dev_mode"] == true) then
sys_conf["worker_processes"] = 1
+ sys_conf["enable_reuseport"] = false
else
sys_conf["worker_processes"] = "auto"
end
@@ -768,6 +811,18 @@ local function init_etcd(show_output)
local host_count = #(yaml_conf.etcd.host)
+ -- check whether the user has enabled etcd v2 protocol
+ for index, host in ipairs(yaml_conf.etcd.host) do
+ uri = host .. "/v2/keys"
+ local cmd = "curl -i -m ".. timeout * 2 .. " -o /dev/null -s -w %{http_code} " .. uri
+ local res = excute_cmd(cmd)
+ if res == "404" then
+ io.stderr:write(string.format("failed: please make sure that you have enabled the v2 protocol of etcd on %s.\n", host))
+ return
+ end
+ end
+
+ local etcd_ok = false
for index, host in ipairs(yaml_conf.etcd.host) do
local is_success = true
@@ -786,7 +841,7 @@ local function init_etcd(show_output)
if not res:find("index", 1, true)
and not res:find("createdIndex", 1, true) then
is_success = false
- if (index == hostCount) then
+ if (index == host_count) then
error(cmd .. "\n" .. res)
end
break
@@ -799,9 +854,14 @@ local function init_etcd(show_output)
end
if is_success then
+ etcd_ok = true
break
end
end
+
+ if not etcd_ok then
+ error("none of the configured etcd works well")
+ end
end
_M.init_etcd = init_etcd
@@ -809,6 +869,18 @@ local openresty_args = [[openresty -p ]] .. apisix_home .. [[ -c ]]
.. apisix_home .. [[/conf/nginx.conf]]
function _M.start(...)
+ -- check running
+ local pid_path = apisix_home .. "/logs/nginx.pid"
+ local pid, err = read_file(pid_path)
+ if pid then
+ local hd = io.popen("lsof -p " .. pid)
+ local res = hd:read("*a")
+ if res and res ~= "" then
+ print("APISIX is running...")
+ return nil
+ end
+ end
+
init(...)
init_etcd(...)
@@ -829,14 +901,21 @@ function _M.restart()
end
function _M.reload()
+ -- reinit nginx.conf
+ init()
+
local test_cmd = openresty_args .. [[ -t -q ]]
- if os.execute((test_cmd)) ~= 0 then
+ -- On success:
+ -- on Linux, os.execute returns 0;
+ -- on macOS, os.execute returns three values (true, "exit", 0), and we only need the first.
+ local test_ret = os.execute((test_cmd))
+ if (test_ret == 0 or test_ret == true) then
+ local cmd = openresty_args .. [[ -s reload]]
+ -- print(cmd)
+ os.execute(cmd)
return
end
-
- local cmd = openresty_args .. [[ -s reload]]
- -- print(cmd)
- os.execute(cmd)
+ print("test openresty failed")
end
function _M.version()
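
The three-line comment in `reload()` above describes a real Lua version difference in `os.execute`. A small helper along these lines (a sketch, not part of the patch) would keep such checks portable:

```lua
-- A portability helper for the version-dependent os.execute return value:
-- Lua 5.1-style builds return the numeric exit status (0 on success), while
-- Lua 5.2+-style builds return a boolean (plus "exit"/"signal" and the code).
local function execute_ok(cmd)
    local ret = os.execute(cmd)
    return ret == 0 or ret == true
end

-- Usage mirroring the reload logic above:
-- if execute_ok(openresty_args .. [[ -t -q ]]) then ... end
```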
diff --git a/conf/cert/apisix_admin_ssl.crt b/conf/cert/apisix_admin_ssl.crt
new file mode 100644
index 000000000000..82d7fc3aa31a
--- /dev/null
+++ b/conf/cert/apisix_admin_ssl.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFsTCCA5mgAwIBAgIUODyT8W4gAxf8uwMNmtj5M1ANoUwwDQYJKoZIhvcNAQEL
+BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG
+Wmh1SGFpMQ0wCwYDVQQKDARhcGk3MRMwEQYDVQQDDAphcGlzaXguZGV2MCAXDTIw
+MDYwNDAzMzc1MFoYDzIxMjAwNTExMDMzNzUwWjBWMQswCQYDVQQGEwJDTjESMBAG
+A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoMBGFwaTcx
+EzARBgNVBAMMCmFwaXNpeC5kZXYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQDQveSdplH49Lr+LsLWpGJbNRhf2En0V4SuFKpzGFP7mXaI7rMnpdH3BUVY
+S3juMgPOdNh6ho4BeSbGZGfU3lG1NwIOXiPNA1mrTWGNGV97crJDVZeWTuDpqNHJ
+4ATrnF6RnRbg0en8rjVtce6LBMrDJVyGbi9VAqBUPrCmzT/l0V1jPL6KNSN8mQog
+ladrJuzUanfhWM9K9xyM+/SUt1MNUYFLNsVHasPzsi5/YDRBiwuzTtiT56O6yge2
+lvrdPFvULrCxlGteyvhtrFJwqjN//YtnQFooNR0CXBfXs0a7WGgMjawupuP1JKiY
+t9KEcGHWGZDeLfsGGKgQ9G+PaP4y+gHjLr5xQvwt68otpoafGy+BpOoHZZFoLBpx
+TtJKA3qnwyZg9zr7lrtqr8CISO/SEyh6xkAOUzb7yc2nHu9UpruzVIR7xI7pjc7f
+2T6WyCVy6gFYQwzFLwkN/3O+ZJkioxXsnwaYWDj61k3d9ozVDkVkTuxmNJjXV8Ta
+htGRAHo0/uHmpFTcaQfDf5o+iWi4z9B5kgfA/A1XWFQlCH1kl3mHKg7JNCN9qGF8
+rG+YzdiLQfo5OqJSvzGHRXbdGI2JQe/zyJHsMO7d0AhwXuPOWGTTAODOPlaBCxNB
+AgjuUgt+3saqCrK4eaOo8sPt055AYJhZlaTH4EeD4sv7rJGm7wIDAQABo3UwczAd
+BgNVHQ4EFgQUPS1LXZMqgQvH/zQHHzgTzrd7PIIwHwYDVR0jBBgwFoAUPS1LXZMq
+gQvH/zQHHzgTzrd7PIIwDAYDVR0TBAUwAwEB/zAjBgNVHREEHDAaggphcGlzaXgu
+ZGV2ggwqLmFwaXNpeC5kZXYwDQYJKoZIhvcNAQELBQADggIBAMlwNS8uo3JkkshI
+rpYobdjCZfr74PBl+LhoihvzHs25/in3+CxETRA8cYo5pRotqdA63po3wiCCPs6a
+mZiELQxyGHhFcqoYxnoURR4nyogRZLA6jjLGkbG4H+CA4ApmZmvGnP3X5uQW4v5q
+IdqIXL3BvoUBln8GMEC7Rz5SGUjWG03JPkl6MdeziFyHkwdBCOrtK5m7icRncvq+
+iL8CMUx024LLI6A5hTBPwfVfgbWJTSv7tEu85q54ZZoYQhiD8dde4D7g5/noPvXM
+ZyA9C3Sl981+pUhhazad9j9k8DCcqf9e8yH9lPY26tjiEcShv4YnwbErWzJU1F9s
+ZI5Z6nj5PU66upnBWAWV7fWCOrlouB4GjNaznSNrmpn4Bb2+FinDK3t4AfWDPS5s
+ljQBGQNXOd30DC7BdNAF5dQAUhVfz1EgQGqYa+frMQLiv8rNMs7h6gKQEqU+jC/1
+jbGe4/iwc0UeTtSgTPHMofqjqc99/R/ZqtJ3qFPJmoWpyu0NlNINw2KWRQaMoGLo
+WgDCS0YA5/hNXVFcWnZ73jY62yrVSoj+sFbkUpGWhEFnO+uSmBv8uwY3UeCOQDih
+X7Yazs3TZRqEPU+25QATf0kbxyzlWbGkwvyRD8x+n3ZHs5Ilhrc6jWHqM/S3ir7i
+m9GcWiwg++EbusQsqs3w3uKAHAdT
+-----END CERTIFICATE-----
diff --git a/conf/cert/apisix_admin_ssl.key b/conf/cert/apisix_admin_ssl.key
new file mode 100644
index 000000000000..ec889056ffb6
--- /dev/null
+++ b/conf/cert/apisix_admin_ssl.key
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEA0L3knaZR+PS6/i7C1qRiWzUYX9hJ9FeErhSqcxhT+5l2iO6z
+J6XR9wVFWEt47jIDznTYeoaOAXkmxmRn1N5RtTcCDl4jzQNZq01hjRlfe3KyQ1WX
+lk7g6ajRyeAE65xekZ0W4NHp/K41bXHuiwTKwyVchm4vVQKgVD6wps0/5dFdYzy+
+ijUjfJkKIJWnaybs1Gp34VjPSvccjPv0lLdTDVGBSzbFR2rD87Iuf2A0QYsLs07Y
+k+ejusoHtpb63Txb1C6wsZRrXsr4baxScKozf/2LZ0BaKDUdAlwX17NGu1hoDI2s
+Lqbj9SSomLfShHBh1hmQ3i37BhioEPRvj2j+MvoB4y6+cUL8LevKLaaGnxsvgaTq
+B2WRaCwacU7SSgN6p8MmYPc6+5a7aq/AiEjv0hMoesZADlM2+8nNpx7vVKa7s1SE
+e8SO6Y3O39k+lsglcuoBWEMMxS8JDf9zvmSZIqMV7J8GmFg4+tZN3faM1Q5FZE7s
+ZjSY11fE2obRkQB6NP7h5qRU3GkHw3+aPolouM/QeZIHwPwNV1hUJQh9ZJd5hyoO
+yTQjfahhfKxvmM3Yi0H6OTqiUr8xh0V23RiNiUHv88iR7DDu3dAIcF7jzlhk0wDg
+zj5WgQsTQQII7lILft7GqgqyuHmjqPLD7dOeQGCYWZWkx+BHg+LL+6yRpu8CAwEA
+AQKCAgBNsbBLAWHXYPfMrgj1LUAypIOLAQ0dtgl7ZdO/fRmdNxSIiRgDtNN+tuaF
+o6nCNrl1+cWtbTGj2L0W8L442/rbkTrhsCZxI0MX4HhjtUL1xs4VA+GlH3zVW3Gi
+SxBpxczpM+gVC+ykkQ7vyo04DzONCPX0T0Ssxop4cND9dL3Iw3GYAz8EYBzyPmAn
+mqwy1M0nju1J4e1eALYOv6TcSZPPDDwsi5lIKLQAm5x06pDoqGFVfw5blsc5OgM+
+8dkzyUiApFQ99Hk2UiO/ZnlU1/TNOcjOSISGHKbMfwycy2yTRKeNrJmez51fXCKo
+nRrtEotHzkI+gCzDqx+7F9ACN9kM4f4JO5ca0/My6tCY+mH8TA/nVzMnUpL7329w
+NobuNTpyA6x5nmB3QqElrzQCRtTj7Nw5ytMdRbByJhXww9C5tajUysdq8oGoZdz5
+94kXr6qCC5Qm3CkgyF2RjqZyg9tHUEEdaFKouHgziiqG9P2Nk1SHk7Jd7bF4rleI
+i93u/f0fdVK7aMksofgUbOmfhnS+o1NxerVcbdX+E/iv6yfkrYDb46y3//4dcpwk
+TeUEMCjc7ShwvYPq350q3jmzgwxeTK8ZdXwJymdJ7MaGcnMXPqd9A43evYM6nG6f
+i3l2tYhH4cp6misGChnGORR68qsRkY8ssvSFNFzjcFHhnPyoCQKCAQEA8isIC1IJ
+Iq9kB4mDVh0QdiuoBneNOEHy/8fASeZsqedu0OZPyoXU96iOhXuqf8sQ33ydvPef
+iRwasLLkgw8sDeWILUjS36ZzwGP2QNxWfrapCFS8VfKl7hTPMVp0Wzxh8qqpGLSh
+O0W7EEAJCgzzULagfupaO0Chmb3LZqXRp8m5oubnmE+9z0b5GrCIT1S8Yay2mEw9
+jxqZJGBhV7QnupyC2DIxLXlGmQk7Qs1+1mCCFwyfugHXclWYa+fet/79SkkADK0/
+ysxfy+FdZgGT/Ba5odsEpt1zH+tw4WXioJsX9mU3zAHbpPqtcfuVU+2xyKfQYrRG
+NSm9MMNmart0wwKCAQEA3Koaj/0gNxLLslLIES50KmmagzU8CkEmCa/WLoVy02xr
+qp42hvj+PzBTf3rIno3KEpRhMmnAtswozbV3P4l/VSZdfY+pwWsx7/5+Cf1R9nAP
+vp6YCjGcLcbASazYNOWf0FRInt3pxdgT9DWjJDi99FGKA+UbI2yxHwzE+cE8r9Od
+Iy42uhzCjJBqdg+an+q63k6yrOwv18KP69LlU/4vknhw4g3WxF4yTwVmXU8WKmux
+aOrJv2ED8pfA7k+zwv0rPyN+F2nOySxoChaFfeu6ntBCX7zK/nV0DsMQImOycfzO
+yN8WB9lRZTJVzU2r6PaGAI359uLHEmURy0069g+yZQKCAQAbECwJ99UFh0xKe1eu
+G/lm+2H/twSVMOmTJCOdHp8uLar4tYRdQa+XLcMfr75SIcN09lw6bgHqNLXW4Wcg
+LmXh97DMPsMyM0vkSEeQ4A7agldJkw6pHEDm5nRxM4alW44mrGPRWv5ZvWU2X7Gi
+6eeXMZGmHVKQJJzqrYc5pXZUpfqU9fET2HWB4JCeJvRUyUd0MvUE+CA5CePraMn4
+Hy4BcNQ+jP1p84+sMpfo00ZFduuS39pJ00LciCxMgtElBt4PmzDiOcpTQ5vBESJ6
+79o15eRA7lUKwNzIyGsJBXXaNPrskks2BU8ilNElV9RMWNfxcK+dGEBwWIXIGU4s
+x145AoIBAQCst9R8udNaaDLaTGNe126DuA8B/kwVdrLwSBqsZTXgeO+5J4dklEZl
+bU0d7hxTxoXRjySZEh+OtTSG9y/0oonxO0tYOXfU9jOrNxaueQKLk2EvgfFdoUEu
+r2/Y+xpsJQO3TBFfkDEn856Cuu0MMAG214/gxpY8XxowRI11NCRtN4S6gbTCbjp1
+TaCW8lXEMDW+Rfki0ugLyLVgD74CxWW1DuLEfbKKF3TnV0GtbXbbE1pU1dm+G5C8
+dL3FissYp5MPI5fRebcqzcBNjR1F15pGLpqVVy/IhmSmHVZmpISLJicxITScRiSo
+wgJY5R/XBAcVLgvmi9Dn/AY2jCfHa7flAoIBAQCbnZ6ivZg81g6/X9qdo9J61hX0
+Y7Fn7bLvcs1L0ARGTsfXMvegA806XyZThqjpY47nHpQtoz4z62kiTTsdpAZUeA3z
+9HUWr0b3YEpsvZpgyMNHgwq1vRDPjw4AWz0pBoDWMxx8Ck5nP1A//c1zyu9pgYEU
+R+OutDeCJ+0VAc6JSH9WMA08utGPGs3t02Zhtyt2sszE9vzz4hTi5340/AYG72p7
+YGlikUxvbyylYh9wR4YUYa/klikvKLHEML1P0BCr8Vex+wLSGS1h1F5tW1Xr2CZQ
+dVxFmfGmPDmwWbCQR6Rvt6FHRwNMpMrLr011h2RBcHBpdQl7XpUENDoopIh0
+-----END RSA PRIVATE KEY-----
diff --git a/conf/cert/openssl-test2.conf b/conf/cert/openssl-test2.conf
new file mode 100644
index 000000000000..1e5beec911df
--- /dev/null
+++ b/conf/cert/openssl-test2.conf
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+[req]
+distinguished_name = req_distinguished_name
+x509_extensions = v3_req
+prompt = no
+
+[req_distinguished_name]
+C = CN
+ST = GuangDong
+L = ZhuHai
+O = iresty
+CN = test2.com
+
+[v3_req]
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+basicConstraints = CA:TRUE
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.1 = test2.com
+DNS.2 = *.test2.com
+
+## openssl genrsa -out test2.key 3072
+## openssl req -new -x509 -key test2.key -sha256 -config openssl-test2.conf -out test2.crt -days 36500
diff --git a/conf/cert/test2.crt b/conf/cert/test2.crt
new file mode 100644
index 000000000000..922a8f8b6896
--- /dev/null
+++ b/conf/cert/test2.crt
@@ -0,0 +1,28 @@
+-----BEGIN CERTIFICATE-----
+MIIEsTCCAxmgAwIBAgIUMbgUUCYHkuKDaPy0bzZowlK0JG4wDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG
+Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxEjAQBgNVBAMMCXRlc3QyLmNvbTAgFw0y
+MDA0MDQyMjE3NTJaGA8yMTIwMDMxMTIyMTc1MlowVzELMAkGA1UEBhMCQ04xEjAQ
+BgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVz
+dHkxEjAQBgNVBAMMCXRlc3QyLmNvbTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC
+AYoCggGBAMQGBk35V3zaNVDWzEzVGd+EkZnUOrRpXQg5mmcnoKnrQ5rQQMsQCbMO
+gFvLt/9OEZQmbE2HuEKsPzL79Yjdu8rGjSoQdbJZ9ccO32uvln1gn68iK79o7Tvm
+TCi+BayyNA+lo9IxrBm1wGBkOU1ZPasGYzgBAbMLTSDps1EYxNR8t4l9PrTTRsh6
+NZyTYoDeVIsKZ9SckpjWVnxHOkF+AzZzIJJSe2pj572TDLYA/Xw9I4X3L+SHzwTl
+iGWNXb2tU367LHERHvensQzdle7mQN2kE5GpB7QPWB+t9V4mn30jc/LyDvOaei6L
++pbl5CriGBTjaR80oXhK765K720BQeKUezri15bQlMaUGQRnzr53ZsqA4PEh6WCX
+hUT2ibO32+uZFXzVQw8y/JUkPf76pZagi8DoLV+sfSbUtnpbQ8wyV2qqTM2eCuPi
+RgUwXQi2WssKKzrqcgKil3vksHZozLtOmyZiNE4qfNxv+UGoIybJtZmB+9spY0Rw
+5zBRuULycQIDAQABo3MwcTAdBgNVHQ4EFgQUCmZefzpizPrb3VbiIDhrA48ypB8w
+HwYDVR0jBBgwFoAUCmZefzpizPrb3VbiIDhrA48ypB8wDAYDVR0TBAUwAwEB/zAh
+BgNVHREEGjAYggl0ZXN0Mi5jb22CCyoudGVzdDIuY29tMA0GCSqGSIb3DQEBCwUA
+A4IBgQA0nRTv1zm1ACugJFfYZfxZ0mLJfRUCFMmFfhy+vGiIu6QtnOFVw/tEOyMa
+m78lBiqac15n3YWYiHiC5NFffTZ7XVlOjN2i4x2z2IJsHNa8tU80AX0Q/pizGK/d
++dzlcsGBb9MGT18h/B3/EYQFKLjUsr0zvDb1T0YDlRUsN3Bq6CvZmvfe9F7Yh4Z/
+XO5R+rX8w9c9A2jzM5isBw2qp/Ggn5RQodMwApEYkJdu80MuxaY6s3dssS4Ay8wP
+VNFEeLcdauJ00ES1OnbnuNiYSiSMOgWBsnR+c8AaSRB/OZLYQQKGGYbq0tspwRjM
+MGJRrI/jdKnvJQ8p02abdvA9ZuFChoD3Wg03qQ6bna68ZKPd9peBPpMrDDGDLkGI
+NzZ6bLJKILnQkV6b1OHVnPDsKXfXjUTTNK/QLJejTXu9RpMBakYZMzs/SOSDtFlS
+A+q25t6+46nvA8msUSBKyOGBX42mJcKvR4OgG44PfDjYfmjn2l+Dz/jNXDclpb+Q
+XAzBnfM=
+-----END CERTIFICATE-----
diff --git a/conf/cert/test2.key b/conf/cert/test2.key
new file mode 100644
index 000000000000..c25d4e5bde9e
--- /dev/null
+++ b/conf/cert/test2.key
@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG5QIBAAKCAYEAxAYGTflXfNo1UNbMTNUZ34SRmdQ6tGldCDmaZyegqetDmtBA
+yxAJsw6AW8u3/04RlCZsTYe4Qqw/Mvv1iN27ysaNKhB1sln1xw7fa6+WfWCfryIr
+v2jtO+ZMKL4FrLI0D6Wj0jGsGbXAYGQ5TVk9qwZjOAEBswtNIOmzURjE1Hy3iX0+
+tNNGyHo1nJNigN5Uiwpn1JySmNZWfEc6QX4DNnMgklJ7amPnvZMMtgD9fD0jhfcv
+5IfPBOWIZY1dva1TfrsscREe96exDN2V7uZA3aQTkakHtA9YH631XiaffSNz8vIO
+85p6Lov6luXkKuIYFONpHzSheErvrkrvbQFB4pR7OuLXltCUxpQZBGfOvndmyoDg
+8SHpYJeFRPaJs7fb65kVfNVDDzL8lSQ9/vqllqCLwOgtX6x9JtS2eltDzDJXaqpM
+zZ4K4+JGBTBdCLZayworOupyAqKXe+SwdmjMu06bJmI0Tip83G/5QagjJsm1mYH7
+2yljRHDnMFG5QvJxAgMBAAECggGBAIELlkruwvGmlULKpWRPReEn3NJwLNVoJ56q
+jUMri1FRWAgq4PzNahU+jrHfwxmHw3rMcK/5kQwTaOefh1y63E35uCThARqQroSE
+/gBeb6vKWFVrIXG5GbQ9QBXyQroV9r/2Q4q0uJ+UTzklwbNx9G8KnXbY8s1zuyrX
+rvzMWYepMwqIMSfJjuebzH9vZ4F+3BlMmF4XVUrYj8bw/SDwXB0UXXT2Z9j6PC1J
+CS0oKbgIZ8JhoF3KKjcHBGwWTIf5+byRxeG+z99PBEBafm1Puw1vLfOjD3DN/fso
+8xCEtD9pBPBJ+W97x/U+10oKetmP1VVEr2Ph8+s2VH1zsRF5jo5d0GtvJqOwIQJ7
+z3OHJ7lLODw0KAjB1NRXW4dTTUDm6EUuUMWFkGAV6YTyhNLAT0DyrUFJck9RiY48
+3QN8vSf3n/+3wwg1gzcJ9w3W4DUbvGqu86CaUQ4UegfYJlusY/3YGp5bGNQdxmws
+lgIoSRrHp6UJKsP8Yl08MIvT/oNLgQKBwQD75SuDeyE0ukhEp0t6v+22d18hfSef
+q3lLWMI1SQR9Kiem9Z1KdRkIVY8ZAHANm6D8wgjOODT4QZtiqJd2BJn3Xf+aLfCd
+CW0hPvmGTcp/E4sDZ2u0HbIrUStz7ZcgXpjD2JJAJGEKY2Z7J65gnTqbqoBDrw1q
+1+FqtikkHRte1UqxjwnWBpSdoRQFgNPHxPWffhML1xsD9Pk1B1b7JoakYcKsNoQM
+oXUKPLxSZEtd0hIydqmhGYTa9QWBPNDlA5UCgcEAxzfGbOrPBAOOYZd3jORXQI6p
+H7SddTHMQyG04i+OWUd0HZFkK7/k6r26GFmImNIsQMB26H+5XoKRFKn+sUl14xHY
+FwB140j0XSav2XzT38UpJ9CptbgK1eKGQVp41xwRYjHVScE5hJuA3a1TKM0l26rp
+hny/KaP+tXuqt9QbxcUN6efubNYyFP+m6nq2/XdX74bJuGpXLq8W0oFdiocO6tmF
+4/Hsc4dCVrcwULqXQa0lJ57zZpfIPARqWM2847xtAoHBANVUNbDpg6rTJMc34722
+dAy3NhL3mqooH9aG+hsEls+l9uT4WFipqSScyU8ERuHPbt0BO1Hi2kFx1rYMUBG8
+PeT4b7NUutVUGV8xpUNv+FH87Bta6CUnjTAQUzuf+QCJ/NjIPrwh0yloG2+roIvk
+PLF/CZfI1hUpdZfZZChYmkiLXPHZURw4gH6q33j1rOYf0WFc9aZua0vDmZame6zB
+6P+oZ6VPmi/UQXoFC/y/QfDYK18fjfOI2DJTlnDoX4XErQKBwGc3M5xMz/MRcJyJ
+oIwj5jzxbRibOJV2tpD1jsU9xG/nQHbtVEwCgTVKFXf2M3qSMhFeZn0xZ7ZayZY+
+OVJbcDO0lBPezjVzIAB/Qc7aCOBAQ4F4b+VRtHN6iPqlSESTK0KH9Szgas+UzeCM
+o7BZEctNMu7WBSkq6ZXXu+zAfZ8q6HmPDA3hsFMG3dFQwSxzv+C/IhZlKkRqvNVV
+50QVk5oEF4WxW0PECY/qG6NH+YQylDSB+zPlYf4Of5cBCWOoxQKBwQCeo37JpEAR
+kYtqSjXkC5GpPTz8KR9lCY4SDuC1XoSVCP0Tk23GX6GGyEf4JWE+fb/gPEFx4Riu
+7pvxRwq+F3LaAa/FFTNUpY1+8UuiMO7J0B1RkVXkyJjFUF/aQxAnOoZPmzrdZhWy
+bpe2Ka+JS/aXSd1WRN1nmo/DarpWFvdLWZFwUt6zMziH40o1gyPHEuXOqVtf2QCe
+Q6WC9xnEz4lbb/fR2TF9QRA4FtoRpDe/f3ZGIpWE0RdwyZZ6uA7T1+Q=
+-----END RSA PRIVATE KEY-----
diff --git a/conf/config.yaml b/conf/config.yaml
index ee1d69853bd8..f8673ada245b 100644
--- a/conf/config.yaml
+++ b/conf/config.yaml
@@ -16,7 +16,6 @@
#
apisix:
node_listen: 9080 # APISIX listening port
- enable_heartbeat: true
enable_admin: true
enable_admin_cors: true # Admin API support CORS response headers.
enable_debug: false
@@ -54,6 +53,12 @@ apisix:
- 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default.
# - "::/64"
# port_admin: 9180 # use a separate port
+ # https_admin: true # enable HTTPS when using a separate port for the Admin API.
+ # The Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as its certificate.
+ admin_api_mtls: # Depends on `port_admin` and `https_admin`.
+ admin_ssl_cert: "" # Path of your self-signed server side cert.
+ admin_ssl_cert_key: "" # Path of your self-signed server side key.
+ admin_ssl_ca_cert: "" # Path of your self-signed CA cert. The CA is used to sign all Admin API callers' certificates.
# Default token when use API to call for Admin API.
# *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API.
@@ -69,6 +74,8 @@ apisix:
name: "viewer"
key: 4054f7cf07e344346cd3f287985e76a2
role: viewer
+
+ delete_uri_tail_slash: false # whether to delete the trailing '/' of the URI
router:
http: 'radixtree_uri' # radixtree_uri: match route by uri(base on radixtree)
# radixtree_host_uri: match route by host + uri(base on radixtree)
@@ -89,13 +96,17 @@ apisix:
enable: true
enable_http2: true
listen_port: 9443
- ssl_protocols: "TLSv1 TLSv1.1 TLSv1.2 TLSv1.3"
- ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
-
+ ssl_protocols: "TLSv1.2 TLSv1.3"
+ ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384"
+ key_encrypt_salt: "edd1c9f0985e76a2" # If not set, the original ssl key will be saved into etcd as-is.
+ # If set, it must be a string of exactly 16 characters, and the ssl key will be encrypted with AES-128-CBC.
+ # !!! Do not change it after any ssl data has been saved: the ssl keys already stored can no longer be decrypted !!!
+# discovery: eureka # service discovery center
nginx_config: # config to render the template and generate nginx.conf
error_log: "logs/error.log"
error_log_level: "warn" # warn,error
worker_rlimit_nofile: 20480 # the number of files a worker process can open, should be larger than worker_connections
+ worker_shutdown_timeout: 240s # timeout for a graceful shutdown of worker processes
event:
worker_connections: 10620
http:
@@ -116,7 +127,19 @@ etcd:
host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster.
- "http://127.0.0.1:2379" # multiple etcd address
prefix: "/apisix" # apisix configurations prefix
- timeout: 3 # 3 seconds
+ timeout: 30 # 30 seconds
+ # user: root # root username for etcd
+ # password: 5tHkHhYkjr6cQY # root password for etcd
+#eureka:
+# host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster.
+# - "http://127.0.0.1:8761"
+# prefix: "/eureka/"
+# fetch_interval: 30 # default 30s
+# weight: 100 # default weight for node
+# timeout:
+# connect: 2000 # default 2000ms
+# send: 2000 # default 2000ms
+# read: 5000 # default 5000ms
plugins: # plugin list
- example-plugin
@@ -140,10 +163,20 @@ plugins: # plugin list
- fault-injection
- udp-logger
- wolf-rbac
- - proxy-cache
- tcp-logger
- - proxy-mirror
- kafka-logger
- cors
+ - consumer-restriction
+ - syslog
+ - batch-requests
+ - http-logger
+ - skywalking
+ - echo
+ - authz-keycloak
+ - uri-blocker
+ - request-validation
+ - proxy-cache
+ - proxy-mirror
+
stream_plugins:
- mqtt-proxy
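
Background on the `key_encrypt_salt` rule above: AES-128 uses a 128-bit (16-byte) key, which is why the salt must be exactly 16 characters. A minimal sketch with lua-resty-string, assuming the salt doubles as both key and IV (an assumption for illustration, not necessarily how APISIX wires it internally):

```lua
local aes = require("resty.aes")

local salt = "edd1c9f0985e76a2"  -- exactly 16 bytes, as the comment above requires
-- AES-128-CBC cipher; the salt serves as key and IV here (illustrative assumption).
local cipher = assert(aes:new(salt, nil, aes.cipher(128, "cbc"), {iv = salt}))

-- Encrypt an SSL private key before storing it; decrypt it after loading.
local encrypted = cipher:encrypt("-----BEGIN RSA PRIVATE KEY----- ...")
local decrypted = cipher:decrypt(encrypted)
assert(decrypted == "-----BEGIN RSA PRIVATE KEY----- ...")
```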
diff --git a/dashboard b/dashboard
index cfb3ee7b8721..329b092dcaa7 160000
--- a/dashboard
+++ b/dashboard
@@ -1 +1 @@
-Subproject commit cfb3ee7b8721076975c1deaff3e52da3ea4a312a
+Subproject commit 329b092dcaa7a505dcdec86c667b6803f5863d94
diff --git a/doc/README.md b/doc/README.md
index 561d3a023df5..c9a8f95b41f6 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -16,19 +16,19 @@
# limitations under the License.
#
-->
-[Chinese](README_CN.md)
+
+[Chinese](./zh-cn/README.md)
Reference Documentation
==================
-* [APISIX Readme](../README.md)
+* [APISIX Readme](./README.md)
* [Architecture Design](architecture-design.md)
* [Benchmark](benchmark.md)
* [Getting Started Guide](getting-started.md)
* [How to build Apache APISIX](how-to-build.md)
* [Health Check](health-check.md): Enable health check on the upstream node, and will automatically filter unhealthy nodes during load balancing to ensure system stability.
-* Router
- * [radixtree](router-radixtree.md)
+* [Router radixtree](router-radixtree.md)
* [Stand Alone Model](stand-alone.md): Supports to load route rules from local yaml file, it is more friendly such as under the kubernetes(k8s).
* [Stream Proxy](stream-proxy.md)
* [Admin API](admin-api.md)
@@ -51,7 +51,7 @@ Plugins
* [proxy-rewrite](plugins/proxy-rewrite.md): Rewrite upstream request information.
* [prometheus](plugins/prometheus.md): Expose metrics related to APISIX and proxied upstream services in Prometheus exposition format, which can be scraped by a Prometheus Server.
* [OpenTracing](plugins/zipkin.md): Supports Zikpin and Apache SkyWalking.
-* [grpc-transcode](plugins/grpc-transcoding.md): REST <--> gRPC transcoding.
+* [grpc-transcode](plugins/grpc-transcode.md): REST <--> gRPC transcoding.
* [serverless](plugins/serverless.md):Allows to dynamically run Lua code at *different* phase in APISIX.
* [ip-restriction](plugins/ip-restriction.md): IP whitelist/blacklist.
* [openid-connect](plugins/oauth.md)
@@ -64,11 +64,21 @@ Plugins
* [proxy-mirror](plugins/proxy-mirror.md): Provides the ability to mirror client requests.
* [kafka-logger](plugins/kafka-logger.md): Log requests to External Kafka servers.
* [cors](plugins/cors.md): Enable CORS(Cross-origin resource sharing) for your API.
+* [batch-requests](plugins/batch-requests.md): Allows you to send multiple HTTP API calls in a single request via **HTTP pipelining**.
+* [authz-keycloak](plugins/authz-keycloak.md): Authorization with Keycloak Identity Server.
+* [uri-blocker](plugins/uri-blocker.md): Block client request by URI.
+* [oauth](plugins/oauth.md): Provides OAuth 2 authentication and introspection.
-Deploy to the Cloud
+Deploy
=======
+
### AWS
The recommended approach is to deploy APISIX with [AWS CDK](https://aws.amazon.com/cdk/) on [AWS Fargate](https://aws.amazon.com/fargate/) which helps you decouple the APISIX layer and the upstream layer on top of a fully-managed and secure serverless container compute environment with autoscaling capabilities.
See [this guide](https://github.com/pahud/cdk-samples/blob/master/typescript/apisix/README.md) by [Pahud Hsieh](https://github.com/pahud) and learn how to provision the recommended architecture 100% in AWS CDK.
+
+### Kubernetes
+
+See [this guide](../kubernetes/README.md) and learn how to deploy APISIX in Kubernetes.
+
diff --git a/doc/README_CN.md b/doc/README_CN.md
deleted file mode 100644
index bf3141400251..000000000000
--- a/doc/README_CN.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-[English](README.md)
-
-Reference document
-==================
-
-* [APISIX 说明](../README_CN.md)
-* [架构设计](architecture-design-cn.md)
-* [压力测试](benchmark-cn.md)
-* [如何构建 Apache APISIX](how-to-build-cn.md)
-* [健康检查](health-check.md): 支持对上游节点的主动和被动健康检查,在负载均衡时自动过滤掉不健康的节点。
-* Router(路由)
- * [radixtree](router-radixtree.md)
- * [r3](router-r3.md)
-* [独立运行模型](stand-alone-cn.md): 支持从本地 yaml 格式的配置文件启动,更适合 Kubernetes(k8s) 体系。
-* [TCP/UDP 动态代理](stream-proxy-cn.md)
-* [管理 API](admin-api-cn.md)
-* [变更日志](../CHANGELOG_CN.md)
-* [代码风格](../CODE_STYLE.md)
-* [常见问答](../FAQ_CN.md)
-
-插件
-===
-
-* [插件热加载](plugins-cn.md):无需重启服务,完成插件热加载或卸载。
-* [HTTPS](https-cn.md):根据 TLS 扩展字段 SNI(Server Name Indication) 动态加载证书。
-* [动态负载均衡](architecture-design-cn.md#upstream):跨多个上游服务的动态负载均衡,目前已支持 round-robin 和一致性哈希算法。
-* [key-auth](plugins/key-auth-cn.md):基于 Key Authentication 的用户认证。
-* [JWT-auth](plugins/jwt-auth-cn.md):基于 [JWT](https://jwt.io/) (JSON Web Tokens) Authentication 的用户认证。
-* [basic-auth](plugins/basic-auth-cn.md):基于 basic auth 的用户认证。
-* [wolf-rbac](plugins/wolf-rbac-cn.md) 基于 *RBAC* 的用户认证及授权。
-* [limit-count](plugins/limit-count-cn.md):基于“固定窗口”的限速实现。
-* [limit-req](plugins/limit-req-cn.md):基于漏桶原理的请求限速实现。
-* [limit-conn](plugins/limit-conn-cn.md):限制并发请求(或并发连接)。
-* [proxy-rewrite](plugins/proxy-rewrite-cn.md): 支持自定义修改 proxy 到上游的信息。
-* [prometheus](plugins/prometheus-cn.md):以 Prometheus 格式导出 APISIX 自身的状态信息,方便被外部 Prometheus 服务抓取。
-* [OpenTracing](plugins/zipkin-cn.md):支持 Zikpin 和 Apache SkyWalking。
-* [grpc-transcode](plugins/grpc-transcoding-cn.md):REST <--> gRPC 转码。
-* [serverless](plugins/serverless-cn.md):允许在 APISIX 中的不同阶段动态运行 Lua 代码。
-* [ip-restriction](plugins/ip-restriction-cn.md): IP 黑白名单。
-* [openid-connect](plugins/oauth.md)
-* [redirect](plugins/redirect-cn.md): URI 重定向。
-* [response-rewrite](plugins/response-rewrite-cn.md): 支持自定义修改返回内容的 `status code`、`body`、`headers`。
-* [fault-injection](plugins/fault-injection-cn.md):故障注入,可以返回指定的响应体、响应码和响应时间,从而提供了不同的失败场景下处理的能力,例如服务失败、服务过载、服务高延时等。
-* [proxy-cache](plugins/proxy-cache-cn.md):代理缓存插件提供缓存后端响应数据的能力。
-* [proxy-mirror](plugins/proxy-mirror-cn.md):代理镜像插件提供镜像客户端请求的能力。
-* [udp-logger](plugins/udp-logger.md): 将请求记录到UDP服务器
-* [tcp-logger](plugins/tcp-logger.md): 将请求记录到TCP服务器
-* [kafka-logger](plugins/kafka-logger-cn.md): 将请求记录到外部Kafka服务器。
-* [cors](plugins/cors-cn.md): 为你的API启用CORS.
diff --git a/doc/_navbar.md b/doc/_navbar.md
new file mode 100644
index 000000000000..1612e7d8a96e
--- /dev/null
+++ b/doc/_navbar.md
@@ -0,0 +1,22 @@
+
+
+- Translations
+ - [:uk: English](/)
+ - [:cn: 中文](/zh-cn/)
diff --git a/doc/_sidebar.md b/doc/_sidebar.md
new file mode 100644
index 000000000000..6a9c1e551167
--- /dev/null
+++ b/doc/_sidebar.md
@@ -0,0 +1,103 @@
+
+
+- Getting started
+
+ - [Introduction](README.md)
+ - [Quick start](getting-started.md)
+
+- General
+
+ - [Architecture](architecture-design.md)
+
+ - [Benchmark](benchmark.md)
+
+ - Installation
+
+ - [How to build](how-to-build.md)
+ - [Install Dependencies](install-dependencies.md)
+
+ - [HTTPS](https.md)
+
+ - [Router](router-radixtree.md)
+
+ - Plugins
+
+ - [Develop Plugins](plugin-develop.md)
+ - [Hot Reload](plugins.md)
+
+ - Proxy Modes
+
+ - [GRPC Proxy](grpc-proxy.md)
+ - [Stream Proxy](stream-proxy.md)
+
+- Plugins
+
+ - Authentication
+
+ - [Key Auth](plugins/key-auth.md)
+ - [Basic Auth](plugins/basic-auth.md)
+ - [JWT Auth](plugins/jwt-auth.md)
+ - [OpenID Connect](plugins/oauth.md)
+
+ - General
+
+ - [Redirect](plugins/redirect.md)
+ - [Serverless](plugins/serverless.md)
+ - [Batch Request](plugins/batch-requests.md)
+ - [Fault Injection](plugins/fault-injection.md)
+ - [MQTT Proxy](plugins/mqtt-proxy.md)
+ - [Proxy Cache](plugins/proxy-cache.md)
+ - [Proxy Mirror](plugins/proxy-mirror.md)
+ - [Echo](plugins/echo.md)
+
+ - Transformations
+
+ - [Response Rewrite](plugins/response-rewrite.md)
+ - [Proxy Rewrite](plugins/proxy-rewrite.md)
+ - [GRPC Transcoding](plugins/grpc-transcode.md)
+
+ - Security
+
+ - [Consumer Restriction](plugins/consumer-restriction.md)
+ - [Limit Connection](plugins/limit-conn.md)
+ - [Limit Count](plugins/limit-count.md)
+ - [Limit Request](plugins/limit-req.md)
+ - [CORS](plugins/cors.md)
+ - [IP Restriction](plugins/ip-restriction.md)
+ - [Keycloak Authorization](plugins/authz-keycloak.md)
+ - [RBAC Wolf](plugins/wolf-rbac.md)
+
+ - Monitoring
+
+ - [Prometheus](plugins/prometheus.md)
+ - [SkyWalking](plugins/skywalking.md)
+ - [Zipkin](plugins/zipkin.md)
+
+ - Loggers
+
+ - [HTTP Logger](plugins/http-logger.md)
+ - [Kafka Logger](plugins/kafka-logger.md)
+ - [Syslog](plugins/syslog.md)
+ - [TCP Logger](plugins/tcp-logger.md)
+ - [UDP Logger](plugins/udp-logger.md)
+
+- Admin API
+
+ - [Admin API](admin-api.md)
diff --git a/doc/admin-api.md b/doc/admin-api.md
index 9c40948b35d8..9a3def916ded 100644
--- a/doc/admin-api.md
+++ b/doc/admin-api.md
@@ -19,8 +19,6 @@
# Table of Contents
-===
-
* [Route](#route)
* [Service](#service)
* [Consumer](#consumer)
@@ -41,7 +39,8 @@
|PUT |/apisix/admin/routes/{id}|{...}|Create resource by ID|
|POST |/apisix/admin/routes |{...}|Create resource, and ID is generated by server|
|DELETE |/apisix/admin/routes/{id}|NULL|Remove resource|
-|PATCH |/apisix/admin/routes/{id}/{path}|{...}|Update targeted content|
+|PATCH |/apisix/admin/routes/{id}|{...}|Standard PATCH. Updates some attributes of the existing Route; attributes not included remain unchanged. To delete an attribute, set its value to null. Note that when the value of an attribute is an array, the attribute is replaced in full|
+|PATCH |/apisix/admin/routes/{id}/{path}|{...}|SubPath PATCH. Specifies the attribute of the Route to update through {path} and replaces its value in full; other attributes remain unchanged. See the examples below for the difference between the two PATCH methods|
> URI Request Parameters:
@@ -53,7 +52,8 @@
|Parameter |Required |Type |Description |Example|
|---------|---------|----|-----------|----|
-|desc |False |Auxiliary |Identifies route names, usage scenarios, and more.|customer xxxx|
+|name |False |Auxiliary |Identifies the route name.|customer-xxxx|
+|desc |False |Auxiliary |Route description, usage scenarios, and more.|customer xxxx|
|uri |True |Match Rules|In addition to full matching such as `/foo/bar`、`/foo/gloo`, using different [Router](architecture-design.md#router) allows more advanced matching, see [Router](architecture-design.md#router) for more.|"/hello"|
|host |False |Match Rules|Currently requesting a domain name, such as `foo.com`; pan-domain names such as `*.foo.com` are also supported.|"foo.com"|
|hosts |False |Match Rules|The `host` in the form of a list means that multiple different hosts are allowed, and match any one of them.|{"foo.com", "*.bar.com"}|
@@ -61,9 +61,10 @@
|remote_addrs|False |Match Rules|The `remote_addr` in the form of a list indicates that multiple different IP addresses are allowed, and match any one of them.|{"127.0.0.1", "192.0.0.0/8", "::1"}|
|methods |False |Match Rules|If empty or without this option, there are no `method` restrictions, and it can be a combination of one or more: `GET`,`POST`,`PUT`,`DELETE`,`PATCH`, `HEAD`,`OPTIONS`,`CONNECT`,`TRACE`.|{"GET", "POST"}|
|priority |False |Match Rules|If different routes contain the same `uri`, determine which route is matched first based on the attribute` priority`. Larger value means higher priority. The default value is 0.|priority = 10|
-|vars |False |Match Rules |A list of one or more `{var, operator, val}` elements, like this: `{{var, operator, val}, {var, operator, val}, ...}`. For example: `{"arg_name", "==", "json"}` means that the current request parameter `name` is `json`. The `var` here is consistent with the internal variable name of Nginx, so you can also use `request_uri`, `host`, etc. For the operator part, the currently supported operators are `==`, `~=`,`>`, `<`, and `~~`. For the `>` and `<` operators, the result is first converted to `number` and then compared. See a list of [supported operators](#available-operators) |{{"arg_name", "==", "json"}, {"arg_age", ">", 18}}|
+|vars |False |Match Rules |A list of one or more `{var, operator, val}` elements, like this: `{{var, operator, val}, {var, operator, val}, ...}`. For example: `{"arg_name", "==", "json"}` means that the current request parameter `name` is `json`. The `var` here is consistent with the internal variable name of Nginx, so you can also use `request_uri`, `host`, etc. For the operator part, the currently supported operators are `==`, `~=`,`>`, `<`, and `~~`. For the `>` and `<` operators, the result is first converted to `number` and then compared. See a list of [supported operators](#available-operators) |{{"arg_name", "==", "json"}, {"arg_age", ">", 18}}|
|filter_func|False|Match Rules|User-defined filtering function. You can use it to achieve matching requirements for special scenarios. This function accepts an input parameter named `vars` by default, which you can use to get Nginx variables.|function(vars) return vars["arg_name"] == "json" end|
|plugins |False |Plugin|See [Plugin](architecture-design.md#plugin) for more ||
+|script |False |Script|See [Script](architecture-design.md#script) for more ||
|upstream |False |Upstream|Enabled Upstream configuration, see [Upstream](architecture-design.md#upstream) for more||
|upstream_id|False |Upstream|Enabled upstream id, see [Upstream](architecture-design.md#upstream) for more ||
|service_id|False |Service|Binded Service configuration, see [Service](architecture-design.md#service) for more ||
@@ -77,12 +78,13 @@ Config Example:
{
"id": "1", # id, unnecessary.
"uri": "/release/a", # uri
- "uris": ["/a","/b"], # A set of uri, uri and uris need only have a non-empty one.
+ "uris": ["/a","/b"], # A set of uri, URL and uris need only have a non-empty one.
"methods": ["GET","POST"], # Can fill multiple methods
"host": "aa.com", # host
"hosts": ["a.com","b.com"], # A set of host. Host and hosts only need to be non-empty one.
"plugins": {}, # Bound plugin
"priority": 0, # If different routes contain the same `uri`, determine which route is matched first based on the attribute` priority`, the default value is 0.
+ "name": "route-xxx",
"desc": "hello world",
"remote_addr": "127.0.0.1", # Client IP
"remote_addrs": ["127.0.0.1"], # A set of Client IP. Remote_addr and remo-te_addrs only need to be non-empty one.
@@ -131,6 +133,96 @@ HTTP/1.1 201 Created
Date: Sat, 31 Aug 2019 01:17:15 GMT
...
+
+# Add an upstream node to the Route
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.216:80": 1
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 1
+}
+
+
+# Update the weight of an upstream node to the Route
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.216:80": 10
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 10
+}
+
+
+# Delete an upstream node for the Route
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.215:80": null
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.216:80": 10
+}
+
+
+# Replace methods of the Route -- array
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{
+ "methods": ["GET", "POST"]
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, methods will not retain the original data, and the entire update is:
+["GET", "POST"]
+
+
+# Replace upstream nodes of the Route -- sub path
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "39.97.63.200:80": 1
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, nodes will not retain the original data, and the entire update is:
+{
+ "39.97.63.200:80": 1
+}
+
+
+# Replace methods of the Route -- sub path
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1/methods -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d'["POST", "DELETE", "PATCH"]'
+HTTP/1.1 200 OK
+...
+
+After successful execution, methods will not retain the original data, and the entire update is:
+["POST", "DELETE", "PATCH"]
+
```
> Response Parameters
@@ -183,7 +275,8 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
|PUT |/apisix/admin/services/{id}|{...}|Create resource by ID|
|POST |/apisix/admin/services |{...}|Create resource, and ID is generated by server|
|DELETE |/apisix/admin/services/{id}|NULL|Remove resource|
-|PATCH |/apisix/admin/routes/{id}/{path}|{...}|Update targeted content|
+|PATCH |/apisix/admin/services/{id}|{...}|Standard PATCH. Updates some attributes of the existing Service; attributes not included remain unchanged. To delete an attribute, set its value to null. Note that when the value of an attribute is an array, the attribute is replaced in full|
+|PATCH |/apisix/admin/services/{id}/{path}|{...}|SubPath PATCH. Specifies the attribute of the Service to update through {path} and replaces its value in full; other attributes remain unchanged. See the examples below for the difference between the two PATCH methods|
> Request Body Parameters:
@@ -192,7 +285,8 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
|plugins |False |Plugin|See [Plugin](architecture-design.md#plugin) for more ||
|upstream |False |Upstream|Enabled Upstream configuration, see [Upstream](architecture-design.md#upstream) for more||
|upstream_id|False |Upstream|Enabled upstream id, see [Upstream](architecture-design.md#upstream) for more ||
-|desc |False |Auxiliary |Identifies route names, usage scenarios, and more.|customer xxxx|
+|name |False |Auxiliary |Identifies the service name.|customer-xxxx|
+|desc |False |Auxiliary |Service description, usage scenarios, and more.|customer xxxx|
Config Example:
@@ -202,6 +296,7 @@ Config Example:
"plugins": {}, # Bound plugin
"upstream_id": "1", # upstream id, recommended
"upstream": {}, # upstream, not recommended
+ "name": "service-test",
"desc": "hello world",
}
```
@@ -209,7 +304,7 @@ Config Example:
Example:
```shell
-$ curl http://127.0.0.1:9080/apisix/admin/services/201 -X PUT -i -d '
+$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
{
"plugins": {
"limit-count": {
@@ -228,17 +323,78 @@ $ curl http://127.0.0.1:9080/apisix/admin/services/201 -X PUT -i -d '
}'
HTTP/1.1 201 Created
-Date: Thu, 26 Dec 2019 03:48:47 GMT
-Content-Type: text/plain
-Transfer-Encoding: chunked
-Connection: keep-alive
-Access-Control-Allow-Origin: *
-Access-Control-Allow-Credentials: true
-Access-Control-Expose-Headers: *
-Access-Control-Max-Age: 3600
-Server: APISIX web server
-
-{"node":{"value":{"upstream":{"nodes":{"39.97.63.215:80":1},"type":"roundrobin"},"plugins":{"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":60,"key":"\/apisix\/services\/201","modifiedIndex":60},"action":"set"}
+...
+
+
+# Add an upstream node to the Service
+$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.216:80": 1
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 1
+}
+
+
+# Update the weight of an upstream node to the Service
+$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.216:80": 10
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 10
+}
+
+
+# Delete an upstream node for the Service
+$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "upstream": {
+ "nodes": {
+ "39.97.63.215:80": null
+ }
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will be updated to:
+{
+ "39.97.63.216:80": 10
+}
+
+
+# Replace upstream nodes of the Service
+$ curl http://127.0.0.1:9080/apisix/admin/services/201/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "39.97.63.200:80": 1
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, upstream nodes will not retain the original data, and the entire update is:
+{
+ "39.97.63.200:80": 1
+}
+
```
> Response Parameters
@@ -286,7 +442,7 @@ The binding authentication and authorization plug-in is a bit special. When it n
Example:
```shell
-$ curl http://127.0.0.1:9080/apisix/admin/consumers/2 -X PUT -i -d '
+$ curl http://127.0.0.1:9080/apisix/admin/consumers/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
{
"username": "jack",
"plugins": {
@@ -328,7 +484,8 @@ Return response from etcd currently.
|PUT |/apisix/admin/upstreams/{id}|{...}|Create resource by ID|
|POST |/apisix/admin/upstreams |{...}|Create resource, and ID is generated by server|
|DELETE |/apisix/admin/upstreams/{id}|NULL|Remove resource|
-|PATCH |/apisix/admin/upstreams/{id}/{path}|{...}|Update targeted content|
+|PATCH |/apisix/admin/upstreams/{id}|{...}|Standard PATCH. Updates some attributes of the existing Upstream; attributes not included remain unchanged. To delete an attribute, set its value to null. Note that when the value of an attribute is an array, the attribute is replaced in full|
+|PATCH |/apisix/admin/upstreams/{id}/{path}|{...}|SubPath PATCH. Specifies the attribute of the Upstream to update through {path} and replaces its value in full; other attributes remain unchanged. See the examples below for the difference between the two PATCH methods|
> Request Body Parameters:
@@ -337,21 +494,23 @@ In addition to the basic complex equalization algorithm selection, APISIX's Upst
|Name |Optional|Description|
|------- |-----|------|
|type |required|`roundrobin` supports the weight of the load, `chash` consistency hash, pick one of them.|
-|nodes |required|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.|
+|nodes |required if `k8s_deployment_info` not configured|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.|
+|k8s_deployment_info|required if `nodes` not configured|fields: `namespace`, `deploy_name`, `service_name`, `port`, `backend_type`; `port` is a number, `backend_type` is either `pod` or `service`, and the other fields are strings.|
|hash_on |optional|This option is only valid if the `type` is `chash`. Supported types `vars`(Nginx variables), `header`(custom header), `cookie`, `consumer`, the default value is `vars`.|
|key |required|This option is only valid if the `type` is `chash`. Find the corresponding node `id` according to `hash_on` and `key`. When `hash_on` is set as `vars`, `key` is the required parameter, for now, it support nginx built-in variables like `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`, `arg_***` is arguments in the request line, [Nginx variables list](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is the required parameter, and `header name` is customized. When `hash_on` is set to `cookie`, `key` is the required parameter, and `cookie name` is customized. When `hash_on` is set to `consumer`, `key` does not need to be set. In this case, the `key` adopted by the hash algorithm is the `consumer_id` authenticated. If the specified `hash_on` and `key` can not fetch values, it will be fetch `remote_addr` by default.|
|checks |optional|Configure the parameters of the health check. For details, refer to [health-check](health-check.md).|
-|retries |optional|Pass the request to the next upstream using the underlying Nginx retry mechanism, the retry mechanism is enabled by default and set the number of retries according to the number of backend nodes. If `retries` option is explicitly set, it will override the default value.|
+|retries |optional|Pass the request to the next upstream using the underlying Nginx retry mechanism. The retry mechanism is enabled by default, and the number of retries is set according to the number of backend nodes. If the `retries` option is explicitly set, it overrides the default value. `0` disables the retry mechanism.|
|enable_websocket|optional| enable `websocket`(boolean), default `false`.|
|timeout|optional| Set the timeout for connection, sending and receiving messages. |
-|desc |optional|Identifies route names, usage scenarios, and more.|
+|name |optional|Identifies the upstream name.|
+|desc |optional|Upstream description, usage scenarios, and more.|
Config Example:
```shell
{
"id": "1", # id
- "retries": 0, # retry time
+ "retries": 1, # retry times
"timeout": { # Set the timeout for connection, sending and receiving messages.
"connect":15,
"send":15,
@@ -359,10 +518,18 @@ Config Example:
},
"enable_websocket": true,
"nodes": {"host:80": 100}, # Upstream machine address list, the format is `Address + Port`
+ "k8s_deployment_info": { # kubernetes deployment info
+ "namespace": "test-namespace",
+ "deploy_name": "test-deploy-name",
+ "service_name": "test-service-name",
+ "backend_type": "pod", # pod or service
+ "port": 8080
+ },
"type":"roundrobin", # chash or roundrobin
"checks": {}, # Health check parameters
"hash_on": "",
"key": "",
+ "name": "upstream-for-test",
"desc": "hello world",
}
```
@@ -370,21 +537,83 @@ Config Example:
Example:
```shell
-$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -i -X PUT -d '
-> {
-> "type": "roundrobin",
-> "nodes": {
-> "127.0.0.1:80": 1,
-> "127.0.0.2:80": 2,
-> "foo.com:80": 3
-> }
-> }'
+$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d '
+{
+ "type":"roundrobin",
+ "nodes":{
+ "127.0.0.1:80":1,
+ "127.0.0.2:80":2,
+ "foo.com:80":3
+ }
+}'
HTTP/1.1 201 Created
Date: Thu, 26 Dec 2019 04:19:34 GMT
Content-Type: text/plain
...
-{"node":{"value":{"nodes":{"127.0.0.1:80":1,"foo.com:80":3,"127.0.0.2:80":2},"type":"roundrobin"},"createdIndex":61,"key":"\/apisix\/upstreams\/100","modifiedIndex":61},"action":"set"}
+
+# Add a node to the Upstream
+$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "nodes": {
+ "39.97.63.216:80": 1
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 1
+}
+
+
+# Update the weight of a node to the Upstream
+$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "nodes": {
+ "39.97.63.216:80": 10
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, nodes will be updated to:
+{
+ "39.97.63.215:80": 1,
+ "39.97.63.216:80": 10
+}
+
+
+# Delete a node for the Upstream
+$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "nodes": {
+ "39.97.63.215:80": null
+ }
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, nodes will be updated to:
+{
+ "39.97.63.216:80": 10
+}
+
+
+# Replace the nodes of the Upstream
+$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '
+{
+ "39.97.63.200:80": 1
+}'
+HTTP/1.1 200 OK
+...
+
+After successful execution, nodes will not retain the original data, and the entire update is:
+{
+ "39.97.63.200:80": 1
+}
```
diff --git a/doc/architecture-design.md b/doc/architecture-design.md
index e580ab37a5fd..d52ba3036a85 100644
--- a/doc/architecture-design.md
+++ b/doc/architecture-design.md
@@ -17,7 +17,7 @@
#
-->
-[Chinese](architecture-design-cn.md)
+[Chinese](zh-cn/architecture-design.md)
## Table of Contents
@@ -26,6 +26,7 @@
- [**Route**](#route)
- [**Service**](#service)
- [**Plugin**](#plugin)
+- [**Script**](#script)
- [**Upstream**](#upstream)
- [**Router**](#router)
- [**Consumer**](#consumer)
@@ -80,7 +81,7 @@ We configure all the parameters directly in the Route, it's easy to set up, and
The shortcomings mentioned above are independently abstracted in APISIX by the two concepts [Service](#service) and [Upstream](#upstream).
-The route example created below is to proxy the request with uri `/index.html` to the Upstream service with the address `39.97.63.215:80`:
+The route example created below is to proxy the request with URL `/index.html` to the Upstream service with the address `39.97.63.215:80`:
```shell
$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
@@ -106,7 +107,7 @@ Server: APISIX web server
When we receive a successful response, it indicates that the route was successfully created.
-For specific options of Route, please refer to [Admin API](admin-api-cn.md#route).
+For specific options of Route, please refer to [Admin API](admin-api.md#route).
[Back to top](#Table-of-contents)
@@ -216,6 +217,25 @@ Not all plugins have specific configuration items. For example, there is no spec
[Back to top](#Table-of-contents)
+## Script
+
+`Script` represents a script that will be executed during the `HTTP` request/response life cycle.
+
+The `Script` configuration can be directly bound to the `Route`.
+
+`Script` and `Plugin` are mutually exclusive, and `Script` is executed first. This means that after configuring `Script`, the `Plugin` configured on `Route` will not be executed.
+
+In theory, you can write arbitrary Lua code in `Script`, or you can directly call existing plugins to reuse existing code.
+
+`Script` also has the concept of execution phases, supporting the `access`, `header_filter`, `body_filter` and `log` phases. The system automatically executes the code of the corresponding phase defined in the `Script`.
+
+```json
+{
+ ...
+ "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M"
+}
+```
+
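+Unescaped, the `script` string above is an ordinary Lua module exposing one function per phase, e.g. (the same code as in the JSON, plus a hypothetical `log` handler to show a second phase):
+
+```lua
+local _M = {}
+
+-- Runs in the access phase.
+function _M.access(api_ctx)
+    ngx.log(ngx.INFO, "hit access phase")
+end
+
+-- Runs in the log phase (illustrative addition, not in the JSON above).
+function _M.log(api_ctx)
+    ngx.log(ngx.INFO, "hit log phase")
+end
+
+return _M
+```
+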
## Upstream
Upstream is a virtual host abstraction that performs load balancing on a given set of service nodes according to configuration rules. Upstream address information can be directly configured to `Route` (or `Service`). When Upstream has duplicates, you need to use "reference" to avoid duplication.
@@ -233,7 +253,9 @@ In addition to the basic complex equalization algorithm selection, APISIX's Upst
|Name |Optional|Description|
|------- |-----|------|
|type |required|`roundrobin` supports the weight of the load, `chash` consistency hash, pick one of them.|
-|nodes |required|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.|
+|nodes |required if `service_name` and `k8s_deployment_info` not configured|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.|
+|service_name |required if `nodes` and `k8s_deployment_info` not configured |The name of the upstream service, used together with the registry; refer to [Integration service discovery registry](discovery.md).|
+|k8s_deployment_info |required if `nodes` and `service_name` not configured|fields: `namespace`, `deploy_name`, `service_name`, `port`, `backend_type`; `port` is a number, `backend_type` is either `pod` or `service`, and the other fields are strings.|
|hash_on |optional|This option is only valid if the `type` is `chash`. Supported types `vars`(Nginx variables), `header`(custom header), `cookie`, `consumer`, the default value is `vars`.|
|key |required|This option is only valid if the `type` is `chash`. Find the corresponding node `id` according to `hash_on` and `key`. When `hash_on` is set as `vars`, `key` is the required parameter, for now, it support nginx built-in variables like `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`, `arg_***` is arguments in the request line, [Nginx variables list](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is the required parameter, and `header name` is customized. When `hash_on` is set to `cookie`, `key` is the required parameter, and `cookie name` is customized. When `hash_on` is set to `consumer`, `key` does not need to be set. In this case, the `key` adopted by the hash algorithm is the `consumer_id` authenticated. If the specified `hash_on` and `key` can not fetch values, it will be fetch `remote_addr` by default.|
|checks |optional|Configure the parameters of the health check. For details, refer to [health-check](health-check.md).|
@@ -248,10 +270,12 @@ Create an upstream object use case:
curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"type": "roundrobin",
- "nodes": {
- "127.0.0.1:80": 1,
- "127.0.0.2:80": 2,
- "foo.com:80": 3
+ "k8s_deployment_info": {
+ "namespace": "test-namespace",
+ "deploy_name": "test-deploy-name",
+ "service_name": "test-service-name",
+ "backend_type": "pod",
+ "port": 8080
}
}'
@@ -347,7 +371,7 @@ Here are some examples of configurations using different `hash_on` types:
Create a consumer object:
```shell
-curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d `
+curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"username": "jack",
"plugins": {
@@ -355,7 +379,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1
"key": "auth-jack"
}
}
-}`
+}'
```
Create route object and enable `key-auth` plugin authentication:
@@ -449,8 +473,8 @@ Set the route that best suits your business needs in the local configuration `co
* `Absolute match `: Complete match for the given `uri` , such as `/foo/bar`,`/foo/glo`.
* `Prefix match`: Use `*` at the end to represent the given `uri` as a prefix match. For example, `/foo*` allows matching `/foo/`, `/foo/a` and `/foo/b`.
* `match priority`: first try absolute match, if you can't hit absolute match, try prefix match.
- * `Any filter attribute`: Allows you to specify any Ningx built-in variable as a filter, such as uri request parameters, request headers, cookies, and so on.
- * `radixtree_host_uri`: Use `host + uri` as the primary index (based on the `radixtree` engine), matching both host and uri for the current request.
+ * `Any filter attribute`: Allows you to specify any Nginx built-in variable as a filter, such as URL request parameters, request headers, cookies, and so on.
+ * `radixtree_host_uri`: Use `host + uri` as the primary index (based on the `radixtree` engine), matching both host and URL for the current request.
* `apisix.router.ssl`: SSL loads the matching route.
* `radixtree_sni`: (Default) Use `SNI` (Server Name Indication) as the primary index (based on the radixtree engine).
@@ -533,6 +557,35 @@ HTTP/1.1 503 Service Temporarily Unavailable
```
+Use the [consumer-restriction](plugins/consumer-restriction.md) plugin to restrict Jack's access to this API.
+
+```shell
+# Add Jack to the blacklist
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins": {
+ "key-auth": {},
+ "consumer-restriction": {
+ "blacklist": [
+ "jack"
+ ]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+}'
+
+# Repeated tests, all return 403; Jack is forbidden to access this API
+$ curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I
+HTTP/1.1 403
+...
+
+```
+
[Back to top](#Table-of-contents)
## Global Rule
@@ -544,6 +597,7 @@ We can register a global [Plugin](#Plugin) with `GlobalRule`:
curl -X PUT \
https://{apisix_listen_address}/apisix/admin/global_rules/1 \
-H 'Content-Type: application/json' \
+ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
-d '{
"plugins": {
"limit-count": {
diff --git a/doc/benchmark.md b/doc/benchmark.md
index eaf4e4cc6cda..9c3c016d0fcf 100644
--- a/doc/benchmark.md
+++ b/doc/benchmark.md
@@ -17,7 +17,7 @@
#
-->
-[Chinese](benchmark-cn.md)
+[Chinese](zh-cn/benchmark.md)
### Benchmark Environments
@@ -35,13 +35,13 @@ and the response size was 1KB.
The x-axis means the size of CPU core, and the y-axis is QPS.
-
+
#### Latency
Note the y-axis latency in **microsecond(μs)** not millisecond.
-
+
#### Flame Graph
@@ -80,18 +80,18 @@ and the response size was 1KB.
The x-axis means the size of CPU core, and the y-axis is QPS.
-
+
#### Latency
Note the y-axis latency in **microsecond(μs)** not millisecond.
-
+
#### Flame Graph
The result of Flame Graph:
-
+
And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port.
diff --git a/doc/discovery.md b/doc/discovery.md
new file mode 100644
index 000000000000..ad5fbfd10b5e
--- /dev/null
+++ b/doc/discovery.md
@@ -0,0 +1,244 @@
+
+[Chinese](zh-cn/discovery.md)
+
+# Integration service discovery registry
+
+* [**Summary**](#Summary)
+* [**How to extend the discovery client?**](#how-to-extend-the-discovery-client)
+ * [**Basic steps**](#basic-steps)
+ * [**The example of Eureka**](#the-example-of-eureka)
+ * [**Implementation of eureka.lua**](#implementation-of-eurekalua)
+ * [**How to convert Eureka's instance data to APISIX's nodes?**](#how-to-convert-eurekas-instance-data-to-apisixs-nodes)
+* [**Configuration for discovery client**](#configuration-for-discovery-client)
+ * [**Select discovery client**](#select-discovery-client)
+ * [**Configuration for Eureka**](#configuration-for-eureka)
+* [**Upstream setting**](#upstream-setting)
+
+## Summary
+
+When system traffic changes, the number of servers behind an upstream service grows or shrinks, and servers must be replaced when their hardware fails. If the gateway maintains upstream service information through static configuration, the maintenance cost under a microservices architecture is unpredictable. Moreover, stale information impacts the business, and the risk of human error cannot be ignored. It is therefore essential that the gateway automatically obtains the latest list of service instances from a service registry, as shown in the figure below:
+
+
+
+1. When a service starts, it reports information such as its service name, IP and port to the registry. The service communicates with the registry through a mechanism such as heartbeats; if the registry cannot reach the service for a long time, the instance is deregistered, and when the service goes offline the registry deletes its instance information.
+2. The gateway gets service instance information from the registry in near-real time.
+3. When a user requests the service through the gateway, the gateway selects one instance obtained from the registry and proxies the request to it.
+
+Common registries: Eureka, Etcd, Consul, Zookeeper, Nacos etc.
+
+## How to extend the discovery client?
+
+### Basic steps
+
+It is very easy for APISIX to extend the discovery client; the basic steps are as follows:
+
+1. Add the implementation of the registry client in the `apisix/discovery/` directory;
+
+2. Implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes;
+
+3. Convert the registry data into the data format used by APISIX;
+
+
+### The example of Eureka
+
+#### Implementation of eureka.lua
+
+First, add [`eureka.lua`](../apisix/discovery/eureka.lua) in the `apisix/discovery/` directory;
+
+Then implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes in `eureka.lua`:
+
+ ```lua
+ local _M = {
+ version = 1.0,
+ }
+
+
+ function _M.nodes(service_name)
+ ... ...
+ end
+
+
+ function _M.init_worker()
+ ... ...
+ end
+
+
+ return _M
+ ```
+
+#### How to convert Eureka's instance data to APISIX nodes?
+
+Here's an example of Eureka's data:
+
+```json
+{
+ "applications": {
+ "application": [
+ {
+ "name": "USER-SERVICE", # service name
+ "instance": [
+ {
+ "instanceId": "192.168.1.100:8761",
+ "hostName": "192.168.1.100",
+ "app": "USER-SERVICE", # service name
+ "ipAddr": "192.168.1.100", # IP address
+ "status": "UP",
+ "overriddenStatus": "UNKNOWN",
+ "port": {
+ "$": 8761,
+ "@enabled": "true"
+ },
+ "securePort": {
+ "$": 443,
+ "@enabled": "false"
+ },
+ "metadata": {
+ "management.port": "8761",
+ "weight": 100 # Setting by 'eureka.instance.metadata-map.weight' of the spring boot application
+ },
+ "homePageUrl": "http://192.168.1.100:8761/",
+ "statusPageUrl": "http://192.168.1.100:8761/actuator/info",
+ "healthCheckUrl": "http://192.168.1.100:8761/actuator/health",
+ ... ...
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+Converting Eureka's instance data into APISIX nodes takes the following steps (sketched in Lua below):
+
+1. Select the instances that are UP: an instance is usable when the value of `overriddenStatus` is "UP", or when `overriddenStatus` is "UNKNOWN" and the value of `status` is "UP".
+2. Host: `ipAddr` is the IP address of the instance; it must be IPv4 or IPv6.
+3. Port: if the value of `port["@enabled"]` is "true", use the value of `port["$"]`; if the value of `securePort["@enabled"]` is "true", use the value of `securePort["$"]`.
+4. Weight: `local weight = metadata.weight or local_conf.eureka.weight or 100`
+
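+A minimal Lua sketch of these rules is shown below. It is an illustration only: the helper name `instance_to_node` is hypothetical, and the actual logic lives in [`eureka.lua`](../apisix/discovery/eureka.lua):
+
+```lua
+-- Sketch only: convert one Eureka instance into an APISIX node.
+-- `local_conf` is assumed to be the parsed `conf/config.yaml`.
+local function instance_to_node(instance, local_conf)
+    -- 1. only instances that are effectively "UP" are usable
+    local status = instance.overriddenStatus
+    if status == "UNKNOWN" then
+        status = instance.status
+    end
+    if status ~= "UP" then
+        return nil
+    end
+
+    -- 3. pick the enabled port (plain port first, then the secure one)
+    local port
+    if instance.port["@enabled"] == "true" then
+        port = instance.port["$"]
+    elseif instance.securePort["@enabled"] == "true" then
+        port = instance.securePort["$"]
+    end
+
+    -- 2 & 4. host comes from `ipAddr`; weight falls back to the config default
+    local metadata = instance.metadata or {}
+    return {
+        host = instance.ipAddr,
+        port = port,
+        weight = metadata.weight or local_conf.eureka.weight or 100,
+        metadata = metadata,
+    }
+end
+```
+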
+The result of this example is as follows:
+
+```json
+[
+ {
+ "host" : "192.168.1.100",
+ "port" : 8761,
+ "weight" : 100,
+ "metadata" : {
+ "management.port": "8761",
+ }
+ }
+]
+```
+
+## Configuration for discovery client
+
+### Select discovery client
+
+Add the following configuration to `conf/config.yaml` and select the discovery client type you want:
+
+```yaml
+apisix:
+ discovery: eureka
+```
+
+This name should be consistent with the file name of the registry implementation in the `apisix/discovery/` directory.
+
+Currently supported discovery clients: Eureka.
+
+### Configuration for Eureka
+
+Add the following configuration to `conf/config.yaml`:
+
+```yaml
+eureka:
+  host:    # it's possible to define multiple Eureka host addresses of the same Eureka cluster.
+    - "http://${username}:${password}@${eureka_host1}:${eureka_port1}"
+    - "http://${username}:${password}@${eureka_host2}:${eureka_port2}"
+ prefix: "/eureka/"
+ fetch_interval: 30 # 30s
+ weight: 100 # default weight for node
+ timeout:
+ connect: 2000 # 2000ms
+ send: 2000 # 2000ms
+ read: 5000 # 5000ms
+```
+
+
+## Upstream setting
+
+Here is an example of routing a request with the URL "/user/*" to a service named "USER-SERVICE" in the registry:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/user/*",
+ "upstream": {
+ "service_name": "USER-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+
+HTTP/1.1 201 Created
+Date: Sat, 31 Aug 2019 01:17:15 GMT
+Content-Type: text/plain
+Transfer-Encoding: chunked
+Connection: keep-alive
+Server: APISIX web server
+
+{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"}
+```
+
+Because upstream interface URLs may conflict, the gateway usually distinguishes them by a path prefix:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/a/*",
+ "plugins": {
+ "proxy-rewrite" : {
+ regex_uri: ["^/a/(.*)", "/${1}"]
+ }
+ }
+ "upstream": {
+ "service_name": "A-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+
+$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/b/*",
+ "plugins": {
+ "proxy-rewrite" : {
+ regex_uri: ["^/b/(.*)", "/${1}"]
+ }
+ }
+ "upstream": {
+ "service_name": "B-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+```
+
+Suppose both A-SERVICE and B-SERVICE provide a `/test` API. The above configuration allows access to A-SERVICE's `/test` API through `/a/test` and B-SERVICE's `/test` API through `/b/test`.
+
+**Notice**: When configuring `upstream.service_name`, `upstream.nodes` will no longer take effect; it will be replaced by the nodes obtained from the registry.
+
+
diff --git a/doc/getting-started.md b/doc/getting-started.md
index a576329f155a..ae432d464342 100644
--- a/doc/getting-started.md
+++ b/doc/getting-started.md
@@ -17,7 +17,7 @@
#
-->
-[Chinese](getting-started-cn.md)
+[Chinese](zh-cn/getting-started.md)
# Quick Start Guide
diff --git a/doc/grpc-proxy.md b/doc/grpc-proxy.md
index 22e3297340f0..50404aef49d8 100644
--- a/doc/grpc-proxy.md
+++ b/doc/grpc-proxy.md
@@ -17,7 +17,7 @@
#
-->
-[中文](grpc-proxy-cn.md)
+[中文](zh-cn/grpc-proxy.md)
# grpc-proxy
diff --git a/doc/health-check.md b/doc/health-check.md
index b0b062deb175..3c34c163ffcc 100644
--- a/doc/health-check.md
+++ b/doc/health-check.md
@@ -16,7 +16,8 @@
# limitations under the License.
#
-->
-## Health Checks for Upstream
+
+# Health Checks for Upstream
Health Check of APISIX is based on [lua-resty-healthcheck](https://github.com/Kong/lua-resty-healthcheck),
you can use it for upstream.
@@ -44,6 +45,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
"retries": 2,
"checks": {
"active": {
+ "timeout": 5,
"http_path": "/status",
"host": "foo.com",
"healthy": {
@@ -77,26 +79,29 @@ contains: `active` or `passive`.
* `active`: To enable active health checks, you need to specify the configuration items under `checks.active` in the Upstream object configuration.
- * `active.http_path`: The HTTP GET request path used to detect if the upstream is healthy.
- * `active.host`: The HTTP request host used to detect if the upstream is healthy.
+    * `active.timeout`: Socket timeout for active checks (in seconds), supports decimal values. For example `1.01` means `1010` milliseconds, `2` means `2000` milliseconds.
+
+ * `active.http_path`: The HTTP GET request path used to detect if the upstream is healthy.
+ * `active.host`: The HTTP request host used to detect if the upstream is healthy.
+    * `active.port`: The customized health check port (optional); this will override the port in the `upstream` node.
- The threshold fields of `healthy` are:
- * `active.healthy.interval`: Interval between health checks for healthy targets (in seconds), the minimum is 1.
- * `active.healthy.successes`: The number of success times to determine the target is healthy, the minimum is 1.
+ The threshold fields of `healthy` are:
+ * `active.healthy.interval`: Interval between health checks for healthy targets (in seconds), the minimum is 1.
+    * `active.healthy.successes`: The number of successes to determine the target is healthy, the minimum is 1.
- The threshold fields of `unhealthy` are:
- * `active.unhealthy.interval`: Interval between health checks for unhealthy targets (in seconds), the minimum is 1.
- * `active.unhealthy.http_failures`: The number of http failures times to determine the target is unhealthy, the minimum is 1.
- * `active.req_headers`: Additional request headers. Array format, so you can fill in multiple headers.
+ The threshold fields of `unhealthy` are:
+ * `active.unhealthy.interval`: Interval between health checks for unhealthy targets (in seconds), the minimum is 1.
+    * `active.unhealthy.http_failures`: The number of HTTP failures to determine the target is unhealthy, the minimum is 1.
+ * `active.req_headers`: Additional request headers. Array format, so you can fill in multiple headers.
* `passive`: To enable passive health checks, you need to specify the configuration items under `checks.passive` in the Upstream object configuration.
- The threshold fields of `healthy` are:
- * `passive.healthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `healthy` state. Otherwise ignore this request.
- * `passive.healthy.successes`: Number of successes in proxied traffic (as defined by `passive.healthy.http_statuses`) to consider a target healthy, as observed by passive health checks.
+ The threshold fields of `healthy` are:
+ * `passive.healthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `healthy` state. Otherwise ignore this request.
+ * `passive.healthy.successes`: Number of successes in proxied traffic (as defined by `passive.healthy.http_statuses`) to consider a target healthy, as observed by passive health checks.
- The threshold fields of `unhealthy` are:
- * `passive.unhealthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `unhealthy` state. Otherwise ignore this request.
- * `passive.unhealthy.tcp_failures`: Number of TCP failures in proxied traffic to consider a target unhealthy, as observed by passive health checks.
- * `passive.unhealthy.timeouts`: Number of timeouts in proxied traffic to consider a target unhealthy, as observed by passive health checks.
- * `passive.unhealthy.http_failures`: Number of HTTP failures in proxied traffic (as defined by `passive.unhealthy.http_statuses`) to consider a target unhealthy, as observed by passive health checks.
+ The threshold fields of `unhealthy` are:
+ * `passive.unhealthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `unhealthy` state. Otherwise ignore this request.
+ * `passive.unhealthy.tcp_failures`: Number of TCP failures in proxied traffic to consider a target unhealthy, as observed by passive health checks.
+ * `passive.unhealthy.timeouts`: Number of timeouts in proxied traffic to consider a target unhealthy, as observed by passive health checks.
+ * `passive.unhealthy.http_failures`: Number of HTTP failures in proxied traffic (as defined by `passive.unhealthy.http_statuses`) to consider a target unhealthy, as observed by passive health checks.
diff --git a/doc/how-to-build.md b/doc/how-to-build.md
index e1b8d8b672ad..2d7f1fa58c85 100644
--- a/doc/how-to-build.md
+++ b/doc/how-to-build.md
@@ -34,21 +34,21 @@ You can install Apache APISIX in a variety of ways, including source code packag
You need to download the Apache source release first:
```shell
-wget http://www.apache.org/dist/incubator/apisix/1.2/apache-apisix-1.2-incubating-src.tar.gz
-tar zxvf apache-apisix-1.2-incubating-src.tar.gz
+wget http://www.apache.org/dist/apisix/1.4.1/apache-apisix-1.4.1-src.tar.gz
+tar zxvf apache-apisix-1.4.1-src.tar.gz
```
Install the Lua libraries that the runtime depends on:
```shell
-cd apache-apisix-1.2-incubating
+cd apache-apisix-1.4.1
make deps
```
### Installation via RPM package (CentOS 7)
```shell
-sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.2/apisix-1.2-0.el7.noarch.rpm
+sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.4.1/apisix-1.4.1-0.el7.noarch.rpm
```
### Installation via Luarocks (macOS not supported)
@@ -64,11 +64,11 @@ sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/apache/incubator-apis
> Install the specified version via Luarocks:
```shell
-# Install version 1.2
-sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.2
+# Install version 1.4.1
+sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.4.1
# old luarocks not support the `lua-dir` parameter, you can remove this option
-sudo luarocks install apisix 1.2
+sudo luarocks install apisix 1.4.1
```
## 3. Manage (start/stop) APISIX Server
@@ -90,19 +90,19 @@ $ make stop
$ make help
Makefile rules:
- help: Show Makefile rules.
- deps: Installation dependencies
- utils: Installation tools
- lint: Lint Lua source code
- init: Initialize the runtime environment
- run: Start the apisix server
- stop: Stop the apisix server
- verify: Verify the configuration of apisix server
- clean: Remove generated files
- reload: Reload the apisix server
- install: Install the apisix
- test: Run the test case
- license-check: Check lua souce code for Apache License
+ help: Show Makefile rules
+ deps: Installation dependencies
+ utils: Installation tools
+ lint: Lint Lua source code
+ init: Initialize the runtime environment
+ run: Start the apisix server
+ stop: Stop the apisix server
+ verify: Verify the configuration of apisix server
+ clean: Remove generated files
+ reload: Reload the apisix server
+ install: Install the apisix (only for luarocks)
+ test: Run the test case
+ license-check: Check Lua source code for Apache License
```
## 4. Test
@@ -115,12 +115,20 @@ Makefile rules:
* Run the test cases: `make test`
* To set the path of nginx to run the test cases: `TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t`
-### Troubleshoot
+### Troubleshoot Testing
-If you run in to an issue `Error unknown directive "lua_package_path" in /API_ASPIX/incubator-apisix/t/servroot/conf/nginx.conf`
+**Set Nginx Path**
+- If you run into an issue `Error unknown directive "lua_package_path" in /API_ASPIX/incubator-apisix/t/servroot/conf/nginx.conf`
make sure to set openresty as default nginx. And export the path as below.
-
* export PATH=/usr/local/openresty/nginx/sbin:$PATH
+ - Linux default installation path:
+ * export PATH=/usr/local/openresty/nginx/sbin:$PATH
+  - macOS default installation path via Homebrew:
+ * export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH
+
+**Run Individual Test Cases**
+- Use the following command to run test cases constrained to a single file:
+ - prove -Itest-nginx/lib -r t/plugin/openid-connect.t
## 5. Update Admin API token to protect Apache APISIX
diff --git a/doc/https.md b/doc/https.md
index 5e7aa0edba62..c2091a72db47 100644
--- a/doc/https.md
+++ b/doc/https.md
@@ -17,7 +17,7 @@
#
-->
-[Chinese](https-cn.md)
+[Chinese](zh-cn/https.md)
### HTTPS
diff --git a/doc/images/apache.png b/doc/images/apache.png
new file mode 100644
index 000000000000..d0075db9e369
Binary files /dev/null and b/doc/images/apache.png differ
diff --git a/doc/images/apisix.png b/doc/images/apisix.png
index 153025180aae..cbccdd9c4327 100644
Binary files a/doc/images/apisix.png and b/doc/images/apisix.png differ
diff --git a/doc/images/discovery-cn.png b/doc/images/discovery-cn.png
new file mode 100644
index 000000000000..7b448c2ca1e4
Binary files /dev/null and b/doc/images/discovery-cn.png differ
diff --git a/doc/images/discovery.png b/doc/images/discovery.png
new file mode 100644
index 000000000000..6b592e3027a5
Binary files /dev/null and b/doc/images/discovery.png differ
diff --git a/doc/images/plugin/authz-keycloak.png b/doc/images/plugin/authz-keycloak.png
new file mode 100644
index 000000000000..6b6ae84a89d9
Binary files /dev/null and b/doc/images/plugin/authz-keycloak.png differ
diff --git a/doc/images/plugin/skywalking-1.png b/doc/images/plugin/skywalking-1.png
new file mode 100644
index 000000000000..9560c19d9ea1
Binary files /dev/null and b/doc/images/plugin/skywalking-1.png differ
diff --git a/doc/images/plugin/skywalking-2.png b/doc/images/plugin/skywalking-2.png
new file mode 100644
index 000000000000..f7d9d4ca0f48
Binary files /dev/null and b/doc/images/plugin/skywalking-2.png differ
diff --git a/doc/images/plugin/skywalking-3.png b/doc/images/plugin/skywalking-3.png
new file mode 100644
index 000000000000..691b30611350
Binary files /dev/null and b/doc/images/plugin/skywalking-3.png differ
diff --git a/doc/images/plugin/skywalking-4.png b/doc/images/plugin/skywalking-4.png
new file mode 100644
index 000000000000..4a8fb15e9b48
Binary files /dev/null and b/doc/images/plugin/skywalking-4.png differ
diff --git a/doc/images/plugin/skywalking-5.png b/doc/images/plugin/skywalking-5.png
new file mode 100644
index 000000000000..f24235ec278b
Binary files /dev/null and b/doc/images/plugin/skywalking-5.png differ
diff --git a/doc/index.html b/doc/index.html
new file mode 100644
index 000000000000..61aaf2e0aad3
--- /dev/null
+++ b/doc/index.html
@@ -0,0 +1,52 @@
+
+
+
+
+
+
+
+
## APISIX Config
@@ -73,13 +74,13 @@ Route 字面意思就是路由,通过定义一些规则来匹配客户端的
Route 中主要包含三部分内容:匹配规则(比如 uri、host、remote_addr 等),插件配置(限流限速等)和上游信息。
请看下图示例,是一些 Route 规则的实例,当某些属性值相同时,图中用相同颜色标识。
-
+
我们直接在 Route 中完成所有参数的配置,优点是容易设置,每个 Route 都相对独立自由度比较高。但当我们的 Route 有比较多的重复配置(比如启用相同的插件配置或上游信息),一旦我们要更新这些相同属性时,就需要遍历所有 Route 并进行修改,给后期管理维护增加不少复杂度。
上面提及重复的缺点在 APISIX 中独立抽象了 [Service](#service) 和 [Upstream](#upstream) 两个概念来解决。
-下面创建的 Route 示例,是把 uri 为 "/index.html" 的请求代理到地址为 "39.97.63.215:80" 的 Upstream 服务:
+下面创建的 Route 示例,是把 URL 为 "/index.html" 的请求代理到地址为 "39.97.63.215:80" 的 Upstream 服务:
```shell
$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
@@ -105,7 +106,7 @@ Server: APISIX web server
当我们接收到成功应答,表示该 Route 已成功创建。
-有关 Route 的具体选项,可具体查阅 [Admin API 之 Route](admin-api-cn.md#route)。
+有关 Route 的具体选项,可具体查阅 [Admin API 之 Route](admin-api.md#route)。
[返回目录](#目录)
@@ -114,7 +115,7 @@ Server: APISIX web server
`Service` 是某类 API 的抽象(也可以理解为一组 Route 的抽象)。它通常与上游服务抽象是一一对应的,`Route`
与 `Service` 之间,通常是 N:1 的关系,参看下图。
-
+
不同 Route 规则同时绑定到一个 Service 上,这些 Route 将具有相同的上游和插件配置,减少冗余配置。
@@ -193,7 +194,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f
优先级更高。
一个插件在一次请求中只会执行一次,即使被同时绑定到多个不同对象中(比如 Route 或 Service)。
-插件运行先后顺序是根据插件自身的优先级来决定的,例如:[example-plugin](../apisix/plugins/example-plugin.lua#L37)。
+插件运行先后顺序是根据插件自身的优先级来决定的,例如:[example-plugin](../../apisix/plugins/example-plugin.lua#L37)。
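+
+下面的 Lua 片段示意插件如何通过模块表声明自身的优先级(仅为示意,各插件的实际取值以源码为准):
+
+```lua
+-- 仅为示意:插件通过模块表中的 priority 字段声明优先级
+local _M = {
+    version = 0.1,
+    priority = 0,   -- 数值越大的插件越先执行
+    name = "example-plugin",
+}
+
+return _M
+```
+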
插件配置作为 Route 或 Service 的一部分提交的,放到 `plugins` 下。它内部是使用插件
名字作为哈希的 key 来保存不同插件的配置项。
@@ -216,7 +217,28 @@ curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f
并不是所有插件都有具体配置项,比如 `prometheus` 下是没有任何具体配置项,这时候用一个空的对象
标识即可。
-[查看 APISIX 已支持插件列表](plugins-cn.md)
+[查看 APISIX 已支持插件列表](plugins.md)
+
+[返回目录](#目录)
+
+## Script
+
+`Script` 表示将在 `HTTP` 请求/响应生命周期期间执行的脚本。
+
+`Script` 配置可直接绑定在 `Route` 上。
+
+`Script` 与 `Plugin` 互斥,且优先执行 `Script`,这意味着配置 `Script` 后,`Route` 上配置的 `Plugin` 将不被执行。
+
+理论上,在 `Script` 中可以写任意 Lua 代码,也可以直接调用已有插件以重用已有的代码。
+
+`Script` 也有执行阶段概念,支持 `access`、`header_filter`、`body_filter` 和 `log` 阶段。系统会在相应阶段自动执行 `Script` 脚本中对应阶段的代码。
+
+```json
+{
+ ...
+ "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M"
+}
+```
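+
+上例 `script` 字段中的 Lua 代码展开后等价于下面的形式(仅为便于阅读的示意):
+
+```lua
+local _M = {}
+
+function _M.access(api_ctx)
+    -- 在 access 阶段记录一条日志
+    ngx.log(ngx.INFO, "hit access phase")
+end
+
+return _M
+```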
[返回目录](#目录)
@@ -224,7 +246,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f
Upstream 是虚拟主机抽象,对给定的多个服务节点按照配置规则进行负载均衡。Upstream 的地址信息可以直接配置到 `Route`(或 `Service`) 上,当 Upstream 有重复时,就需要用“引用”方式避免重复了。
-
+
如上图所示,通过创建 Upstream 对象,在 `Route` 用 ID 方式引用,就可以确保只维护一个对象的值了。
@@ -238,10 +260,12 @@ APISIX 的 Upstream 除了基本的复杂均衡算法选择外,还支持对上
|名字 |可选|说明|
|------- |-----|------|
|type |必填|`roundrobin` 支持权重的负载,`chash` 一致性哈希,两者是二选一的|
-|nodes |必填|哈希表,内部元素的 key 是上游机器地址列表,格式为`地址 + Port`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80` 等。value 则是节点的权重。当权重值为 `0` 代表该上游节点失效,不会被选中,可以用于暂时摘除节点的情况。|
+|nodes |与 `k8s_deployment_info`、 `service_name` 三选一|哈希表,内部元素的 key 是上游机器地址列表,格式为`地址 + Port`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80` 等。value 则是节点的权重。当权重值为 `0` 代表该上游节点失效,不会被选中,可以用于暂时摘除节点的情况。|
+|service_name |与 `nodes`、 `k8s_deployment_info` 三选一 |用于设置上游服务名,并配合注册中心使用,详细可参考[集成服务发现注册中心](discovery.md) |
+|k8s_deployment_info|与 `nodes`、`service_name` 三选一|哈希表,字段包括 `namespace`、`deploy_name`、`service_name`、`port`、`backend_type`,其中 `port` 字段为数值,`backend_type` 为 `pod` 或 `service`,其他为字符串 |
|key |可选|在 `type` 等于 `chash` 是必选项。 `key` 需要配合 `hash_on` 来使用,通过 `hash_on` 和 `key` 来查找对应的 node `id`|
|hash_on |可选|`hash_on` 支持的类型有 `vars`(Nginx内置变量),`header`(自定义header),`cookie`,`consumer`,默认值为 `vars`|
-|checks |可选|配置健康检查的参数,详细可参考[health-check](health-check.md)|
+|checks |可选|配置健康检查的参数,详细可参考[health-check](../health-check.md)|
|retries |可选|使用底层的 Nginx 重试机制将请求传递给下一个上游,默认 APISIX 会启用重试机制,根据配置的后端节点个数设置重试次数,如果此参数显式被设置将会覆盖系统默认设置的重试次数。|
|enable_websocket|可选| 是否启用 `websocket`(布尔值),默认不启用|
@@ -259,10 +283,12 @@ APISIX 的 Upstream 除了基本的复杂均衡算法选择外,还支持对上
curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"type": "roundrobin",
- "nodes": {
- "127.0.0.1:80": 1,
- "127.0.0.2:80": 2,
- "foo.com:80": 3
+ "k8s_deployment_info": {
+ "namespace": "test-namespace",
+ "deploy_name": "test-deploy-name",
+ "service_name": "test-service-name",
+ "backend_type": "pod",
+ "port": 8080
}
}'
@@ -349,7 +375,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
}'
```
-更多细节可以参考[健康检查的文档](health-check.md)。
+更多细节可以参考[健康检查的文档](../health-check.md)。
下面是几个使用不同`hash_on`类型的配置示例:
@@ -358,7 +384,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
创建一个consumer对象:
```shell
-curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d `
+curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"username": "jack",
"plugins": {
@@ -366,7 +392,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1
"key": "auth-jack"
}
}
-}`
+}'
```
新建路由,打开`key-auth`插件认证,`upstream`的`hash_on`类型为`consumer`:
@@ -456,11 +482,11 @@ APISIX 区别于其他 API 网关的一大特点是允许用户选择不同 Rout
在本地配置 `conf/config.yaml` 中设置最符合自身业务需求的路由。
* `apisix.router.http`: HTTP 请求路由。
- * `radixtree_uri`: (默认)只使用 `uri` 作为主索引。基于 `radixtree` 引擎,支持全量和深前缀匹配,更多见 [如何使用 router-radixtree](router-radixtree.md)。
+ * `radixtree_uri`: (默认)只使用 `uri` 作为主索引。基于 `radixtree` 引擎,支持全量和深前缀匹配,更多见 [如何使用 router-radixtree](../router-radixtree.md)。
* `绝对匹配`:完整匹配给定的 `uri` ,比如 `/foo/bar`,`/foo/glo`。
* `前缀匹配`:末尾使用 `*` 代表给定的 `uri` 是前缀匹配。比如 `/foo*`,则允许匹配 `/foo/`、`/foo/a`和`/foo/b`等。
* `匹配优先级`:优先尝试绝对匹配,若无法命中绝对匹配,再尝试前缀匹配。
- * `任意过滤属性`:允许指定任何 Ningx 内置变量作为过滤条件,比如 uri 请求参数、请求头、cookie 等。
+ * `任意过滤属性`:允许指定任何 Nginx 内置变量作为过滤条件,比如 URL 请求参数、请求头、cookie 等。
* `radixtree_host_uri`: 使用 `host + uri` 作为主索引(基于 `radixtree` 引擎),对当前请求会同时匹配 host 和 uri,支持的匹配条件与 `radixtree_uri` 基本一致。
* `apisix.router.ssl`: SSL 加载匹配路由。
@@ -473,7 +499,7 @@ APISIX 区别于其他 API 网关的一大特点是允许用户选择不同 Rout
对于 API 网关通常可以用请求域名、客户端 IP 地址等字段识别到某类请求方,
然后进行插件过滤并转发请求到指定上游,但有时候这个深度不够。
-
+
如上图所示,作为 API 网关,需要知道 API Consumer(消费方)具体是谁,这样就可以对不同 API Consumer 配置不同规则。
@@ -484,16 +510,16 @@ APISIX 区别于其他 API 网关的一大特点是允许用户选择不同 Rout
在 APISIX 中,识别 Consumer 的过程如下图:
-
+
-1. 授权认证:比如有 [key-auth](./plugins/key-auth.md)、[JWT](./plugins/jwt-auth-cn.md) 等。
+1. 授权认证:比如有 [key-auth](../plugins/key-auth.md)、[JWT](plugins/jwt-auth.md) 等。
2. 获取 consumer_id:通过授权认证,即可自然获取到对应的 Consumer `id`,它是 Consumer 对象的唯一识别标识。
3. 获取 Consumer 上绑定的 Plugin 或 Upstream 信息:完成对不同 Consumer 做不同配置的效果。
概括一下,Consumer 是某类服务的消费者,需与用户认证体系配合才能使用。
比如不同的 Consumer 请求同一个 API,网关服务根据当前请求用户信息,对应不同的 Plugin 或 Upstream 配置。
-此外,大家也可以参考 [key-auth](./plugins/key-auth.md) 认证授权插件的调用逻辑,辅助大家来进一步理解 Consumer 概念和使用。
+此外,大家也可以参考 [key-auth](../plugins/key-auth.md) 认证授权插件的调用逻辑,辅助大家来进一步理解 Consumer 概念和使用。
如何对某个 Consumer 开启指定插件,可以看下面例子:
@@ -544,6 +570,37 @@ HTTP/1.1 503 Service Temporarily Unavailable
```
+结合 [consumer-restriction](plugins/consumer-restriction.md) 插件,限制 jack 对该 route 的访问:
+
+```shell
+# 设置黑名单,禁止 jack 访问该 API
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins": {
+ "key-auth": {},
+ "consumer-restriction": {
+ "blacklist": [
+ "jack"
+ ]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+}'
+
+# 反复测试,均返回 403,jack 被禁止访问
+$ curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I
+HTTP/1.1 403
+...
+
+```
+
[返回目录](#目录)
## Global Rule
@@ -555,6 +612,7 @@ HTTP/1.1 503 Service Temporarily Unavailable
curl -X PUT \
https://{apisix_listen_address}/apisix/admin/global_rules/1 \
-H 'Content-Type: application/json' \
+ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
-d '{
"plugins": {
"limit-count": {
@@ -573,7 +631,7 @@ curl -X PUT \
我们可以通过以下接口查看所有的 `GlobalRule`:
```shell
-curl https://{apisix_listen_address}/apisix/admin/global_rules
+curl https://{apisix_listen_address}/apisix/admin/global_rules -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
```
[返回目录](#目录)
diff --git a/doc/zh-cn/batch-processor.md b/doc/zh-cn/batch-processor.md
new file mode 100644
index 000000000000..e9cfe2816bbb
--- /dev/null
+++ b/doc/zh-cn/batch-processor.md
@@ -0,0 +1,69 @@
+
+
+[English](../batch-processor.md)
+
+# 批处理器
+
+批处理器(batch processor)可用于聚合条目(日志或任何数据)并进行批量处理。
+当 `batch_max_size` 设置为零时,处理器将立即执行每个条目;将其设置为大于 1 的值时,处理器将开始聚合条目,直到达到最大批量大小或超时为止。
+
+## 配置
+
+创建批处理器的唯一必需参数是一个函数。当批处理达到最大大小,或者缓冲区时长超时后,将执行该函数。
+
+|名称 |必选项 |描述|
+|------- |----- |------|
+|id |可选 |标识批处理器的唯一标识符|
+|batch_max_size |可选 |每个批次的最大大小,默认为 1000|
+|inactive_timeout|可选 |若无新数据,刷新缓冲区的最大时间(以秒为单位),默认值为 5|
+|buffer_duration|可选 |批次中最旧条目在被处理前的最大存留时间(以秒为单位),默认为 5|
+|max_retry_count|可选 |从处理管道中移除之前的最大重试次数,默认为 0|
+|retry_delay |可选 |如果执行失败,推迟执行的秒数,默认为 1|
+
+以下代码是使用批处理器的示例。批处理器把要执行的函数作为第一个参数,把批处理配置作为第二个参数。
+
+```lua
+local bp = require("apisix.plugins.batch-processor")
+local func_to_execute = function(entries)
+    -- 序列化为 JSON 数组:core.json.encode(entries)
+    -- 处理/发送数据
+    return true
+end
+
+local config = {
+ max_retry_count = 2,
+ buffer_duration = 60,
+ inactive_timeout = 5,
+ batch_max_size = 1,
+ retry_delay = 0
+}
+
+
+local batch_processor, err = bp:new(func_to_execute, config)
+
+if batch_processor then
+ batch_processor:push({hello='world'})
+end
+```
+
+
+注意:请确保批处理的最大大小(条目数)在函数可处理的范围内。
+刷新批处理的计时器基于 `inactive_timeout` 配置运行。因此,为了获得最佳使用效果,
+应保持 `inactive_timeout` 小于 `buffer_duration`。
diff --git a/doc/benchmark-cn.md b/doc/zh-cn/benchmark.md
similarity index 81%
rename from doc/benchmark-cn.md
rename to doc/zh-cn/benchmark.md
index b42cf90f4763..14f9d72a5453 100644
--- a/doc/benchmark-cn.md
+++ b/doc/zh-cn/benchmark.md
@@ -17,7 +17,7 @@
#
-->
-[English](benchmark.md)
+[English](../benchmark.md)
### 测试环境
@@ -33,19 +33,19 @@
下图中 x 轴为 CPU 的使用个数,y 轴为每秒处理的请求数:
-
+
#### 延时
请注意 y 轴延时的单位是**微秒(μs)**,而不是毫秒:
-
+
#### 火焰图
火焰图的采样结果:
-
+
### 测试反向代理,开启 2 个插件
@@ -55,15 +55,15 @@
下图中 x 轴为 CPU 的使用个数,y 轴为每秒处理的请求数:
-
+
#### Latency
请注意 y 轴延时的单位是**微秒(μs)**,而不是毫秒:
-
+
#### 火焰图
火焰图的采样结果:
-
+
diff --git a/doc/zh-cn/discovery.md b/doc/zh-cn/discovery.md
new file mode 100644
index 000000000000..a6ba4420a6bb
--- /dev/null
+++ b/doc/zh-cn/discovery.md
@@ -0,0 +1,253 @@
+
+[English](../discovery.md)
+
+# 集成服务发现注册中心
+
+* [**摘要**](#摘要)
+* [**如何扩展注册中心**](#如何扩展注册中心)
+ * [**基本步骤**](#基本步骤)
+ * [**以 Eureka 举例**](#以-Eureka-举例)
+ * [**实现 eureka.lua**](#实现-eurekalua)
+ * [**Eureka 与 APISIX 之间数据转换逻辑**](#Eureka-与-APISIX-之间数据转换逻辑)
+* [**注册中心配置**](#注册中心配置)
+ * [**选择注册中心**](#选择注册中心)
+ * [**Eureka 的配置**](#Eureka-的配置)
+* [**upstream 配置**](#upstream-配置)
+
+## 摘要
+
+当业务量发生变化时,需要对上游服务进行扩缩容,或者因服务器硬件故障需要更换服务器。如果网关是通过配置来维护上游服务信息,在微服务架构模式下,其带来的维护成本可想而知。再者因不能及时更新这些信息,也会对业务带来一定的影响,还有人为误操作带来的影响也不可忽视,所以网关非常有必要通过服务注册中心动态获取最新的服务实例信息。架构图如下所示:
+
+
+
+1. 服务启动时将自身的一些信息,比如服务名、IP、端口等信息上报到注册中心;各个服务与注册中心使用一定机制(例如心跳)通信,如果注册中心与服务长时间无法通信,就会注销该实例;当服务下线时,会删除注册中心的实例信息;
+2. 网关会准实时地从注册中心获取服务实例信息;
+3. 当用户通过网关请求服务时,网关从注册中心获取的实例列表中选择一个进行代理;
+
+常见的注册中心:Eureka, Etcd, Consul, Nacos, Zookeeper等
+
+
+## 如何扩展注册中心?
+
+### 基本步骤
+
+APISIX 要扩展注册中心其实是件非常容易的事情,其基本步骤如下:
+
+1. 在 `apisix/discovery/` 目录中添加注册中心客户端的实现;
+2. 实现用于初始化的 `_M.init_worker()` 函数以及用于获取服务实例节点列表的 `_M.nodes(service_name)` 函数;
+3. 将注册中心数据转换为 APISIX 格式的数据;
+
+### 以 Eureka 举例
+
+#### 实现 eureka.lua
+
+首先在 `apisix/discovery/` 目录中添加 [`eureka.lua`](../../apisix/discovery/eureka.lua);
+
+然后在 `eureka.lua` 实现用于初始化的 `init_worker` 函数以及用于获取服务实例节点列表的 `nodes` 函数即可:
+
+ ```lua
+ local _M = {
+ version = 0.1,
+ }
+
+
+ function _M.nodes(service_name)
+ ... ...
+ end
+
+
+ function _M.init_worker()
+ ... ...
+ end
+
+
+ return _M
+ ```
+
+#### Eureka 与 APISIX 之间数据转换逻辑
+
+APISIX 是通过 `upstream.nodes` 来配置上游服务的,所以使用注册中心后,只需将从注册中心获取到的服务所有 node 赋值给 `upstream.nodes`,即可达到相同的效果。那么 APISIX 是怎么将 Eureka 的数据转成 node 的呢?假如从 Eureka 获取如下数据:
+
+```json
+{
+ "applications": {
+ "application": [
+ {
+ "name": "USER-SERVICE", # 服务名称
+ "instance": [
+ {
+ "instanceId": "192.168.1.100:8761",
+ "hostName": "192.168.1.100",
+ "app": "USER-SERVICE", # 服务名称
+ "ipAddr": "192.168.1.100", # 实例 IP 地址
+ "status": "UP", # 状态
+ "overriddenStatus": "UNKNOWN", # 覆盖状态
+ "port": {
+ "$": 8761, # 端口
+ "@enabled": "true" # 开始端口
+ },
+ "securePort": {
+ "$": 443,
+ "@enabled": "false"
+ },
+ "metadata": {
+ "management.port": "8761",
+ "weight": 100 # 权重,需要通过 spring boot 应用的 eureka.instance.metadata-map.weight 进行配置
+ },
+ "homePageUrl": "http://192.168.1.100:8761/",
+ "statusPageUrl": "http://192.168.1.100:8761/actuator/info",
+ "healthCheckUrl": "http://192.168.1.100:8761/actuator/health",
+ ... ...
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+解析 instance 数据步骤如下(转换逻辑的 Lua 示意代码见列表之后):
+
+1. 首先选择状态为 "UP" 的实例:当 overriddenStatus 值不为 "UNKNOWN" 时,以 overriddenStatus 为准,否则以 status 的值为准;
+2. IP 地址:以 ipAddr 的值为 IP,并且必须是 IPv4 或 IPv6 格式;
+3. 端口:如果 port["@enabled"] 等于 "true",那么使用 port["$"] 的值;如果 securePort["@enabled"] 等于 "true",那么使用 securePort["$"] 的值;
+4. 权重:权重取值顺序是,先判断 `metadata.weight` 是否有值,如果没有,则取配置中 `eureka.weight` 的值,如果还没有,则取默认值 `100`;
+
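+下面用一段 Lua 代码示意上述转换规则(仅为示意:其中 `instance_to_node` 为假设的函数名,实际逻辑见 [`eureka.lua`](../../apisix/discovery/eureka.lua)):
+
+```lua
+-- 仅为示意:将一个 Eureka instance 转换成 APISIX 的 node
+-- 假定 `local_conf` 为解析后的 `conf/config.yaml` 配置
+local function instance_to_node(instance, local_conf)
+    -- 1. 只有状态实际为 "UP" 的实例才可用
+    local status = instance.overriddenStatus
+    if status == "UNKNOWN" then
+        status = instance.status
+    end
+    if status ~= "UP" then
+        return nil
+    end
+
+    -- 3. 端口:优先使用已启用的 port,否则使用已启用的 securePort
+    local port
+    if instance.port["@enabled"] == "true" then
+        port = instance.port["$"]
+    elseif instance.securePort["@enabled"] == "true" then
+        port = instance.securePort["$"]
+    end
+
+    -- 2 & 4. host 取 ipAddr 的值,weight 依次回退到配置默认值
+    local metadata = instance.metadata or {}
+    return {
+        host = instance.ipAddr,
+        port = port,
+        weight = metadata.weight or local_conf.eureka.weight or 100,
+        metadata = metadata,
+    }
+end
+```
+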
+这个例子转成 APISIX nodes 的结果如下:
+
+```json
+[
+ {
+ "host" : "192.168.1.100",
+ "port" : 8761,
+ "weight" : 100,
+ "metadata" : {
+ "management.port": "8761",
+ }
+ }
+]
+```
+
+## 注册中心配置
+
+### 选择注册中心
+
+首先要在 `conf/config.yaml` 文件中增加如下配置,以选择注册中心的类型:
+
+```yaml
+apisix:
+ discovery: eureka
+```
+
+此名称要与 `apisix/discovery/` 目录中实现对应注册中心的文件名保持一致。
+
+现已支持的注册中心有:Eureka。
+
+### Eureka 的配置
+
+在 `conf/config.yaml` 增加如下格式的配置:
+
+```yaml
+eureka:
+ host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster.
+ - "http://${usename}:${passowrd}@${eureka_host1}:${eureka_port1}"
+ - "http://${usename}:${passowrd}@${eureka_host2}:${eureka_port2}"
+ prefix: "/eureka/"
+ fetch_interval: 30 # 从 eureka 中拉取数据的时间间隔,默认30秒
+ weight: 100 # default weight for node
+ timeout:
+ connect: 2000 # 连接 eureka 的超时时间,默认2000ms
+ send: 2000 # 向 eureka 发送数据的超时时间,默认2000ms
+ read: 5000 # 从 eureka 读数据的超时时间,默认5000ms
+```
+
+通过 `eureka.host` 配置 eureka 的服务器地址。
+
+如果 eureka 的地址是 `http://127.0.0.1:8761/` ,并且不需要用户名和密码验证的话,配置如下:
+
+```yaml
+eureka:
+ host:
+ - "http://127.0.0.1:8761"
+ prefix: "/eureka/"
+```
+
+## upstream 配置
+
+APISIX 是通过 `upstream.service_name` 与注册中心的服务名进行关联的。下面是将 URL 为 "/user/*" 的请求路由到注册中心中名为 "USER-SERVICE" 的服务上的例子:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/user/*",
+ "upstream": {
+ "service_name": "USER-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+
+HTTP/1.1 201 Created
+Date: Sat, 31 Aug 2019 01:17:15 GMT
+Content-Type: text/plain
+Transfer-Encoding: chunked
+Connection: keep-alive
+Server: APISIX web server
+
+{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"}
+```
+
+因为上游的接口 URL 可能会有冲突,通常会在网关通过前缀来进行区分:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/a/*",
+ "plugins": {
+ "proxy-rewrite" : {
+ regex_uri: ["^/a/(.*)", "/${1}"]
+ }
+ }
+ "upstream": {
+ "service_name": "A-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+
+$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "uri": "/b/*",
+ "plugins": {
+ "proxy-rewrite" : {
+ regex_uri: ["^/b/(.*)", "/${1}"]
+ }
+ }
+ "upstream": {
+ "service_name": "B-SERVICE",
+ "type": "roundrobin"
+ }
+}'
+```
+
+假如 A-SERVICE 和 B-SERVICE 都提供了一个 `/test` 的接口,通过上面的配置,可以通过 `/a/test` 访问 A-SERVICE 的 `/test` 接口,通过 `/b/test` 访问 B-SERVICE 的 `/test` 接口。
+
+
+**注意**:配置 `upstream.service_name` 后 `upstream.nodes` 将不再生效,而是使用从注册中心的数据来替换,即使注册中心的数据是空的。
+
+
diff --git a/doc/getting-started-cn.md b/doc/zh-cn/getting-started.md
similarity index 97%
rename from doc/getting-started-cn.md
rename to doc/zh-cn/getting-started.md
index 8825279b030b..556533d42c68 100644
--- a/doc/getting-started-cn.md
+++ b/doc/zh-cn/getting-started.md
@@ -16,7 +16,7 @@
# limitations under the License.
#
-->
-[English](getting-started.md)
+[English](../getting-started.md)
# 快速入门指南
@@ -38,12 +38,12 @@ $ curl --location --request GET "https://httpbin.org/get?foo1=bar1&foo2=bar2"
## 前提
-- 本指南使用 docker 和 docker compose 来安装 Apache APISIX。 但是, 如果您已经以其他方式安装了 Apache APISIX ,您只需跳到 [第二步](getting-started-cn.md#第二步:-在-APISIX-中设置路由) 。
+- 本指南使用 docker 和 docker compose 来安装 Apache APISIX。 但是, 如果您已经以其他方式安装了 Apache APISIX ,您只需跳到 [第二步](getting-started.md#第二步:-在-APISIX-中设置路由) 。
- Curl:指南使用 Curl 命令进行 API 测试,但是您也可以使用您选择的任何其他工具( 例如 Postman )。
## 第一步: 安装 APISIX
-Apache APISIX 可以多种操作环境中安装。[如何安装文档](how-to-build-cn.md#installation-via-source-release) 显示了多个平台中的安装步骤。
+Apache APISIX 可以在多种操作环境中安装。[如何安装文档](how-to-build.md#installation-via-source-release) 显示了多个平台中的安装步骤。
为了快速入门,让我们基于 docker 容器的安装方式进行安装。启动 Apache APISIX 服务,我们可以参照这个镜像文件[repository](https://github.com/apache/incubator-apisix-docker) 并切换到 example 文件夹下执行如下命令。
如下命令会启动 Apache APISIX 服务并默认在 9080 端口( https 请求是 9443 端口) 提供 admin API 接口服务
@@ -240,7 +240,7 @@ curl -i -X GET http://127.0.0.1:9080/samplePrefix/get?param1=foo&param2=bar -H '
到目前为止,已经通过使用 admin API 接口编排对 Apache APISIX 的 API 的调用。然而,Apache APISIX 还提供执行类似操作的一个 web 应用,就是web控制台。
可以在[repository](https://github.com/apache/incubator-apisix)中使用。控制台是直观的,您可以通过它编排同样的路由配置。
-
+
### 故障排查
diff --git a/doc/grpc-proxy-cn.md b/doc/zh-cn/grpc-proxy.md
similarity index 96%
rename from doc/grpc-proxy-cn.md
rename to doc/zh-cn/grpc-proxy.md
index 8c5a4e625a05..3f76fd8bec65 100644
--- a/doc/grpc-proxy-cn.md
+++ b/doc/zh-cn/grpc-proxy.md
@@ -17,7 +17,7 @@
#
-->
-[English](grpc-proxy.md)
+[English](../grpc-proxy.md)
# grpc-proxy
@@ -35,7 +35,7 @@
在指定 Route 中,代理 gRPC 服务接口:
* 注意: 这个 Route 的属性 `service_protocol` 必须设置为 `grpc`;
-* 注意: APISIX 使用 TLS 加密的 HTTP/2 暴露 gRPC 服务, 所以需要先 [配置 SSL 证书](https-cn.md);
+* 注意: APISIX 使用 TLS 加密的 HTTP/2 暴露 gRPC 服务, 所以需要先 [配置 SSL 证书](https.md);
* 下面例子所代理的 gRPC 服务可供参考:[grpc_server_example](https://github.com/iresty/grpc_server_example)。
```shell
diff --git a/doc/zh-cn/health-check.md b/doc/zh-cn/health-check.md
new file mode 100644
index 000000000000..a41ce86d113e
--- /dev/null
+++ b/doc/zh-cn/health-check.md
@@ -0,0 +1,107 @@
+
+
+# [English](../health-check.md)
+
+## Upstream 的健康检查
+
+APISIX 的健康检查使用 [lua-resty-healthcheck](https://github.com/Kong/lua-resty-healthcheck) 实现,你可以在 upstream 中使用它。
+
+下面是一个健康检查的例子:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1,
+ "127.0.0.1:1970": 1
+ },
+ "type": "roundrobin",
+ "retries": 2,
+ "checks": {
+ "active": {
+ "timeout": 5,
+ "http_path": "/status",
+ "host": "foo.com",
+ "healthy": {
+ "interval": 2,
+ "successes": 1
+ },
+ "unhealthy": {
+ "interval": 1,
+ "http_failures": 2
+ },
+ "req_headers": ["User-Agent: curl/7.29.0"]
+ },
+ "passive": {
+ "healthy": {
+ "http_statuses": [200, 201],
+ "successes": 3
+ },
+ "unhealthy": {
+ "http_statuses": [500],
+ "http_failures": 3,
+ "tcp_failures": 3
+ }
+ }
+ }
+ }
+}'
+```
+
+健康检查的配置内容在 `checks` 中,`checks` 包含两个类型:`active` 和 `passive`,详情如下:
+
+* `active`: 要启用主动健康检查,需要在 upstream 配置中的 `checks.active` 添加如下配置项。
+
+ * `active.timeout`: 主动健康检查 socket 超时时间(秒为单位),支持小数点。比如 `1.01` 代表 `1010` 毫秒,`2` 代表 `2000` 毫秒。
+
+ * `active.http_path`: 用于发现upstream节点健康可用的HTTP GET请求路径。
+ * `active.host`: 用于发现upstream节点健康可用的HTTP请求主机名。
+ * `active.port`: 用于发现upstream节点健康可用的自定义主机端口(可选),配置此项会覆盖 `upstream` 节点中的端口。
+
+   `healthy` 的阈值字段:
+ * `active.healthy.interval`: 健康的目标节点的健康检查间隔时间(以秒为单位),最小值为1。
+ * `active.healthy.successes`: 确定目标是否健康的成功次数,最小值为1。
+
+   `unhealthy` 的阈值字段:
+ * `active.unhealthy.interval`: 针对不健康目标节点的健康检查之间的间隔(以秒为单位),最小值为1。
+ * `active.unhealthy.http_failures`: 确定目标节点不健康的http请求失败次数,最小值为1。
+ * `active.req_headers`: 其他请求标头。数组格式,可以填写多个标题。
+
+* `passive`: 要启用被动健康检查,需要在upstream配置中的 `checks.passive` 添加如下配置项。
+
+   `healthy` 的阈值字段:
+ * `passive.healthy.http_statuses`: 如果当前HTTP响应状态码是其中任何一个,则将upstream节点设置为 `healthy` 状态。否则,请忽略此请求。
+ * `passive.healthy.successes`: 如果upstream节点被检测成功(由 `passive.healthy.http_statuses` 定义)的次数超过 `successes` 次,则将该节点设置为 `healthy` 状态。
+
+   `unhealthy` 的阈值字段:
+ * `passive.unhealthy.http_statuses`: 如果当前HTTP响应状态码是其中任何一个,则将upstream节点设置为 `unhealthy` 状态。否则,请忽略此请求。
+ * `passive.unhealthy.tcp_failures`: 如果TCP通讯失败次数超过 `tcp_failures` 次,则将upstream节点设置为 `unhealthy` 状态。
+ * `passive.unhealthy.timeouts`: 如果被动健康检查超时次数超过 `timeouts` 次,则将upstream节点设置为 `unhealthy` 状态。
+ * `passive.unhealthy.http_failures`: 如果被动健康检查的HTTP请求失败(由 `passive.unhealthy.http_statuses` 定义)的次数超过 `http_failures`次,则将upstream节点设置为 `unhealthy` 状态。
diff --git a/doc/how-to-build-cn.md b/doc/zh-cn/how-to-build.md
similarity index 75%
rename from doc/how-to-build-cn.md
rename to doc/zh-cn/how-to-build.md
index 3f54eb034620..9922ddc7f610 100644
--- a/doc/how-to-build-cn.md
+++ b/doc/zh-cn/how-to-build.md
@@ -34,20 +34,20 @@ Apache APISIX 的运行环境需要 Nginx 和 etcd,
你需要先下载 Apache Release 源码包:
```shell
-wget http://www.apache.org/dist/incubator/apisix/1.2/apache-apisix-1.2-incubating-src.tar.gz
-tar zxvf apache-apisix-1.2-incubating-src.tar.gz
+wget http://www.apache.org/dist/apisix/1.4.1/apache-apisix-1.4.1-src.tar.gz
+tar zxvf apache-apisix-1.4.1-src.tar.gz
```
安装运行时依赖的 Lua 库:
```
-cd apache-apisix-1.2-incubating
+cd apache-apisix-1.4.1
make deps
```
### 通过 RPM 包安装(CentOS 7)
```shell
-sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.2/apisix-1.2-0.el7.noarch.rpm
+sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.4.1/apisix-1.4.1-0.el7.noarch.rpm
```
### 通过 Luarocks 安装 (不支持 macOS)
@@ -63,11 +63,11 @@ sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/apache/incubator-apis
> 通过 Luarocks 安装指定的版本:
```shell
-# 安装 apisix 的 1.2 版本
-sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.2
+# 安装 apisix 的 1.4.1 版本
+sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.4.1
# 老版本 luarocks 可能不支持 `lua-dir` 参数,可以删除该选项
-sudo luarocks install apisix 1.2
+sudo luarocks install apisix 1.4.1
```
## 3. 管理(启动、关闭等)APISIX 服务
@@ -88,19 +88,19 @@ $ make stop
$ make help
Makefile rules:
- help: Show Makefile rules.
- deps: Installation dependencies
- utils: Installation tools
- lint: Lint Lua source code
- init: Initialize the runtime environment
- run: Start the apisix server
- stop: Stop the apisix server
- verify: Verify the configuration of apisix server
- clean: Remove generated files
- reload: Reload the apisix server
- install: Install the apisix
- test: Run the test case
- license-check: Check lua souce code for Apache License
+ help: Show Makefile rules
+ deps: Installation dependencies
+ utils: Installation tools
+ lint: Lint Lua source code
+ init: Initialize the runtime environment
+ run: Start the apisix server
+ stop: Stop the apisix server
+ verify: Verify the configuration of apisix server
+ clean: Remove generated files
+ reload: Reload the apisix server
+ install: Install the apisix (only for luarocks)
+ test: Run the test case
+ license-check: Check Lua source code for Apache License
```
## 4. 运行测试案例
@@ -113,12 +113,23 @@ Makefile rules:
* 直接运行:`make test`
* 指定 nginx 二进制路径:`TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t`
-### 疑难排解
+### 测试疑难排解
+
+**配置 Nginx 路径**
如果遇到问题 `Error unknown directive "lua_package_path" in /API_ASPIX/incubator-apisix/t/servroot/conf/nginx.conf`
确保将openresty设置为默认的nginx并按如下所示导出路径。
* export PATH=/usr/local/openresty/nginx/sbin:$PATH
+ * Linux 默认安装路径:
+ * export PATH=/usr/local/openresty/nginx/sbin:$PATH
+  * macOS 通过 Homebrew 安装的默认路径:
+ * export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH
+
+**运行单个测试用例**
+
+- 使用以下命令运行指定的测试用例:
+ - prove -Itest-nginx/lib -r t/plugin/openid-connect.t
## 5. 更新 Admin API 的 token ,保护 Apache APISIX
diff --git a/doc/https-cn.md b/doc/zh-cn/https.md
similarity index 99%
rename from doc/https-cn.md
rename to doc/zh-cn/https.md
index 4ea82d5f21fb..33ce14c22137 100644
--- a/doc/https-cn.md
+++ b/doc/zh-cn/https.md
@@ -17,7 +17,7 @@
#
-->
-[English](https.md)
+[English](../https.md)
### HTTPS
diff --git a/doc/zh-cn/install-dependencies.md b/doc/zh-cn/install-dependencies.md
new file mode 100644
index 000000000000..f3c12f9c4866
--- /dev/null
+++ b/doc/zh-cn/install-dependencies.md
@@ -0,0 +1,129 @@
+
+
+# 安装依赖
+- [注意](#注意)
+- [CentOS 7](#centos-7)
+- [Fedora 31 & 32](#fedora-31--32)
+- [Ubuntu 16.04 & 18.04](#ubuntu-1604--1804)
+- [Debian 9 & 10](#debian-9--10)
+- [Mac OSX](#mac-osx)
+
+注意
+====
+- Apache APISIX 目前只支持 `v2` 版本的 etcd,但是最新版的 etcd (从 3.4 起)已经默认关闭了 `v2` 版本的功能。所以你需要添加启动参数 `--enable-v2=true` 来开启 `v2` 的功能,目前对 `v3` etcd 的开发工作已经启动,不久后便可投入使用。
+
+```shell
+etcd --enable-v2=true &
+```
+
+- 如果你要想使用 Tengine 替代 OpenResty,请参考 [Install Tengine at Ubuntu](../../.travis/linux_tengine_runner.sh)。
+
+
+CentOS 7
+========
+
+```shell
+# 安装 epel, `luarocks` 需要它
+wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+sudo rpm -ivh epel-release-latest-7.noarch.rpm
+
+# 添加 OpenResty 源
+sudo yum install yum-utils
+sudo yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
+
+# 安装 OpenResty, etcd 和 编译工具
+sudo yum install -y etcd openresty curl git gcc luarocks lua-devel
+
+# 开启 etcd server
+sudo service etcd start
+```
+
+Fedora 31 & 32
+==============
+
+```shell
+# add OpenResty source
+sudo yum install yum-utils
+sudo yum-config-manager --add-repo https://openresty.org/package/fedora/openresty.repo
+
+# install OpenResty, etcd and some compilation tools
+sudo yum install -y etcd openresty curl git gcc luarocks lua-devel
+
+# start etcd server
+sudo etcd --enable-v2=true &
+```
+
+Ubuntu 16.04 & 18.04
+====================
+
+```shell
+# 添加 OpenResty 源
+wget -qO - https://openresty.org/package/pubkey.gpg | sudo apt-key add -
+sudo apt-get update
+sudo apt-get -y install software-properties-common
+sudo add-apt-repository -y "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main"
+sudo apt-get update
+
+# 安装 OpenResty, etcd 和 编译工具
+sudo apt-get install -y git etcd openresty curl luarocks
+
+# 开启 etcd server
+sudo service etcd start
+```
+
+Debian 9 & 10
+=============
+
+```shell
+# 可选
+sed -i 's|^deb http://deb.debian.org/debian|deb http://mirrors.huaweicloud.com/debian|g' /etc/apt/sources.list
+sed -i 's|^deb http://security.debian.org/debian-security|deb http://mirrors.huaweicloud.com/debian-security|g' /etc/apt/sources.list
+apt update
+apt install wget gnupg -y
+
+# 添加 OpenResty 源
+wget -qO - https://openresty.org/package/pubkey.gpg | sudo apt-key add -
+sudo apt-get -y install software-properties-common
+sudo add-apt-repository -y "deb http://openresty.org/package/debian $(lsb_release -sc) openresty"
+sudo apt-get update
+
+# 安装 etcd
+wget https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
+tar -xvf etcd-v3.3.13-linux-amd64.tar.gz && \
+ cd etcd-v3.3.13-linux-amd64 && \
+ sudo cp -a etcd etcdctl /usr/bin/
+
+# 安装 OpenResty, etcd 和 编译工具
+sudo apt-get install -y git openresty curl luarocks make
+
+# 开启 etcd server
+nohup etcd &
+```
+
+Mac OSX
+=======
+
+```shell
+# 安装 OpenResty, etcd 和 编译工具
+brew install openresty/brew/openresty etcd luarocks curl git
+
+# 开启 etcd server 并启用 v2 的功能
+etcd --enable-v2=true &
+```
diff --git a/doc/zh-cn/mtls.md b/doc/zh-cn/mtls.md
new file mode 100644
index 000000000000..8654d5994626
--- /dev/null
+++ b/doc/zh-cn/mtls.md
@@ -0,0 +1,60 @@
+
+
+[English](../mtls.md)
+
+## 双向认证
+
+### 为什么使用
+
+双向认证可以更好地防止未经授权访问 APISIX:客户端将向服务器提供其证书,服务器将检查证书是否由提供的 CA 签名,并据此决定是否响应请求。
+
+### 如何开启
+
+1. 生成自签证书对,包括 ca、server、client 证书对。
+
+2. 修改 `conf/config.yaml` 中的配置项:
+
+```yaml
+ port_admin: 9180
+ https_admin: true
+
+ mtls:
+ enable: true # Enable or disable mTLS. Enable depends on `port_admin` and `https_admin`.
+ ca_cert: "/data/certs/mtls_ca.crt" # Path of your self-signed CA cert.
+    server_key: "/data/certs/mtls_server.key" # Path of your self-signed server side key.
+    server_cert: "/data/certs/mtls_server.crt" # Path of your self-signed server side cert.
+```
+
+3. 执行命令:
+
+```shell
+apisix init
+apisix reload
+```
+
+### 客户端如何调用
+
+请将以下证书及域名替换为您的真实内容。
+
+**注意:需要和服务器使用相同的 CA 证书。**
+
+```shell
+curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert /data/certs/mtls_client.crt https://admin.apisix.dev:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
+```
diff --git a/doc/plugin-develop-cn.md b/doc/zh-cn/plugin-develop.md
similarity index 95%
rename from doc/plugin-develop-cn.md
rename to doc/zh-cn/plugin-develop.md
index c8a7663b8d30..85083ddf1e25 100644
--- a/doc/plugin-develop-cn.md
+++ b/doc/zh-cn/plugin-develop.md
@@ -16,7 +16,7 @@
# limitations under the License.
#
-->
-[English](plugin-develop.md)
+[English](../plugin-develop.md)
# 目录
@@ -95,6 +95,12 @@ plugins: # plugin list
注:先后顺序与执行顺序无关。
+特别需要注意的是,如果你的插件有新建自己的代码目录,那么就需要修改 Makefile 文件,新增创建文件夹的操作,比如:
+```
+$(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking
+$(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/
+```
+
## 配置描述与校验
定义插件的配置项,以及对应的 [Json Schema](https://json-schema.org) 描述,并完成对 json 的校验,这样方便对配置的数据规
diff --git a/doc/plugins-cn.md b/doc/zh-cn/plugins.md
similarity index 97%
rename from doc/plugins-cn.md
rename to doc/zh-cn/plugins.md
index 103cfb070810..dc861fb6029e 100644
--- a/doc/plugins-cn.md
+++ b/doc/zh-cn/plugins.md
@@ -17,7 +17,7 @@
#
-->
-[English](plugins.md)
+[English](../plugins.md)
## 热加载
diff --git a/doc/zh-cn/plugins/authz-keycloak.md b/doc/zh-cn/plugins/authz-keycloak.md
new file mode 100644
index 000000000000..fe433c8fe6ac
--- /dev/null
+++ b/doc/zh-cn/plugins/authz-keycloak.md
@@ -0,0 +1,124 @@
+
+
+[English](../../plugins/authz-keycloak.md)
+
+# 目录
+- [**名字**](#名字)
+- [**属性**](#属性)
+- [**如何启用**](#如何启用)
+- [**测试插件**](#测试插件)
+- [**禁用插件**](#禁用插件)
+- [**示例**](#示例)
+
+## 名字
+
+`authz-keycloak` 是和 Keycloak Identity Server 配合使用的鉴权插件。Keycloak 是一种兼容 OAuth/OIDC 和 UMA 协议的身份认证服务器。尽管本插件是和 Keycloak 服务器配合开发的,但也应该能够适配任意兼容 OAuth/OIDC 和 UMA 协议的身份认证服务器。
+
+有关 Keycloak 的更多信息,可参考 [Keycloak Authorization Docs](https://www.keycloak.org/docs/latest/authorization_services) 查看更多信息。
+
+## 属性
+
+|名称 |选项 |描述|
+|--------- |-------- |-----------|
+| token_endpoint|必填 |接受 OAuth2 兼容 token 的接口,需要支持 `urn:ietf:params:oauth:grant-type:uma-ticket` 授权类型|
+| grant_type |选填 |默认值为 `urn:ietf:params:oauth:grant-type:uma-ticket`|
+| audience |选填 |客户端应用访问相应的资源服务器时所需提供的身份信息。当 permissions 参数有值时这个参数是必填的。|
+| permissions |选填 |描述客户端应用所需访问的资源和权限范围的字符串。格式必须为:`RESOURCE_ID#SCOPE_ID`|
+| timeout |选填 |与身份认证服务器的 http 连接的超时时间。默认值为 3 秒。|
+| policy_enforcement_mode|必填 |只能是 ENFORCING 或 PERMISSIVE。|
+
+### 策略执行模式
+
+定义了在处理身份认证请求时如何应用策略
+
+**Enforcing**
+
+- (默认)如果资源没有绑定任何访问策略,请求默认会被拒绝。
+
+**Permissive**
+
+- 如果资源没有绑定任何访问策略,请求会被允许。
+
+## 如何启用
+
+创建一个 `route` 对象,并在该 `route` 对象上启用 `authz-keycloak` 插件:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/get",
+ "plugins": {
+ "authz-keycloak": {
+ "token_endpoint": "http://127.0.0.1:8090/auth/realms/{client_id}/protocol/openid-connect/token",
+ "permissions": ["resource name#scope name"],
+ "audience": "Client ID"
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:8080": 1
+ }
+ }
+}'
+```
+
+## 测试插件
+
+```shell
+curl http://127.0.0.1:9080/get -H 'Authorization: Bearer {JWT Token}'
+```
+
+## 禁用插件
+
+在插件设置页面中删除相应的 json 配置即可禁用 `authz-keycloak` 插件。APISIX 的插件是热加载的,因此无需重启 APISIX 服务。
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/get",
+ "plugins": {
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:8080": 1
+ }
+ }
+}'
+```
+
+## 示例
+
+请查看 authz-keycloak.t 中的单元测试来了解如何将身份认证策略与您的 API 工作流集成。运行以下 docker 镜像并访问 `http://localhost:8090` 来查看单元测试中绑定的访问策略:
+
+```bash
+docker run -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix
+```
+
+下面这张截图显示了如何在 Keycloak 服务器上配置访问策略:
+
+
+
+## 后续开发
+
+- 目前 `authz-plugin` 仅支持通过定义资源名和访问权限范畴来应用 `route` 的访问策略。但是 Keycloak 官方适配的其他语言的客户端 (Java, JS) 还可以通过动态查询 Keycloak 路径以及懒加载身份资源的路径来支持路径匹配。未来版本的 `authz-plugin` 将会支持这项功能。
+
+- 支持从 Keycloak JSON 文件中读取权限范畴和其他配置项。
diff --git a/doc/plugins/basic-auth-cn.md b/doc/zh-cn/plugins/basic-auth.md
similarity index 96%
rename from doc/plugins/basic-auth-cn.md
rename to doc/zh-cn/plugins/basic-auth.md
index e4e862d1ea83..f4442f0f658b 100644
--- a/doc/plugins/basic-auth-cn.md
+++ b/doc/zh-cn/plugins/basic-auth.md
@@ -17,7 +17,7 @@
#
-->
-# [English](basic-auth.md)
+# [English](../../plugins/basic-auth.md)
# 目录
@@ -58,10 +58,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1
```
你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 consumer:
-
+
然后在 consumer 页面中添加 basic-auth 插件:
-
+
### 2. 创建 Route 或 Service 对象,并开启 `basic-auth` 插件。
diff --git a/doc/zh-cn/plugins/batch-requests.md b/doc/zh-cn/plugins/batch-requests.md
new file mode 100644
index 000000000000..bea6bd5ffc7d
--- /dev/null
+++ b/doc/zh-cn/plugins/batch-requests.md
@@ -0,0 +1,139 @@
+
+
+# [English](../../plugins/batch-requests.md)
+
+# 目录
+
+- [**简介**](#简介)
+- [**属性**](#属性)
+- [**如何启用**](#如何启用)
+- [**批量接口请求/响应**](#批量接口请求/响应)
+- [**测试插件**](#测试插件)
+- [**禁用插件**](#禁用插件)
+
+## 简介
+
+`batch-requests` 插件可以一次接受多个请求,并以 [http pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式在网关发起多个 HTTP 请求,合并结果后再返回客户端。在客户端需要访问多个接口时,这可以显著提升请求性能。
+
+> **提示**
+>
+> 外层的 HTTP 请求头会自动设置到每一个独立请求中,如果独立请求中出现相同键值的请求头,那么只有独立请求的请求头会生效。
+
+## 属性
+
+无
+
+## 如何启用
+
+本插件默认启用。
+
+## 批量接口请求/响应
+插件会为 `apisix` 创建一个 `/apisix/batch-requests` 的接口来处理你的批量请求。
+
+### 接口请求参数:
+
+| 参数名 | 类型 | 可选 | 默认值 | 描述 |
+| --- | --- | --- | --- | --- |
+| query | Object | Yes | | 给所有请求都携带的 `QueryString` |
+| headers | Object | Yes | | 给所有请求都携带的 `Header` |
+| timeout | Number | Yes | 3000 | 聚合请求的超时时间,单位为 `ms` |
+| pipeline | [HttpRequest](#httprequest) | No | | HTTP 请求的详细信息 |
+
+#### HttpRequest
+| 参数名 | 类型 | 可选 | 默认值 | 描述 |
+| --- | --- | --- | --- | --- |
+| version | Enum | Yes | 1.1 | 请求用的 `http` 协议版本,可以使用 `1.0` 或 `1.1` |
+| method | Enum | Yes | GET | 请求使用的 `http` 方法,例如:`GET`. |
+| query | Object | Yes | | 独立请求所携带的 `QueryString`, 如果 `Key` 和全局的有冲突,以此设置为主。 |
+| headers | Object | Yes | | 独立请求所携带的 `Header`, 如果 `Key` 和全局的有冲突,以此设置为主。 |
+| path | String | No | | 请求路径 |
+| body | String | Yes | | 请求体 |
+
+### 接口响应参数:
+返回值为一个 [HttpResponse](#httpresponse) 的`数组`。
+
+#### HttpResponse
+| 参数名 | 类型 | 描述 |
+| --- | --- | --- |
+| status | Integer | HTTP 请求的状态码 |
+| reason | String | HTTP 请求的返回信息 |
+| body | String | HTTP 请求的响应体 |
+| headers | Object | HTTP 请求的响应头 |
+
+## 测试插件
+
+你可以将要访问的请求信息传到网关的批量请求接口( `/apisix/batch-requests` ),网关会以 [http pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式自动帮你完成请求。
+```shell
+curl --location --request POST 'http://127.0.0.1:9080/apisix/batch-requests' \
+--header 'Content-Type: application/json' \
+--data '{
+ "headers": {
+ "Content-Type": "application/json",
+ "admin-jwt":"xxxx"
+ },
+ "timeout": 500,
+ "pipeline": [
+ {
+ "method": "POST",
+ "path": "/community.GiftSrv/GetGifts",
+ "body": "test"
+ },
+ {
+ "method": "POST",
+ "path": "/community.GiftSrv/GetGifts",
+ "body": "test2"
+ }
+ ]
+}'
+```
+
+返回如下:
+```json
+[
+ {
+ "status": 200,
+ "reason": "OK",
+ "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}",
+ "headers": {
+ "Connection": "keep-alive",
+ "Date": "Sat, 11 Apr 2020 17:53:20 GMT",
+ "Content-Type": "application/json",
+ "Content-Length": "81",
+ "Server": "APISIX web server"
+ }
+ },
+ {
+ "status": 200,
+ "reason": "OK",
+ "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}",
+ "headers": {
+ "Connection": "keep-alive",
+ "Date": "Sat, 11 Apr 2020 17:53:20 GMT",
+ "Content-Type": "application/json",
+ "Content-Length": "81",
+ "Server": "APISIX web server"
+ }
+ }
+]
+```
+
+## 禁用插件
+
+正常来说你不需要禁用本插件,如果有特殊情况,请从 `/conf/config.yaml` 的 `plugins` 节点中移除即可。
diff --git a/doc/zh-cn/plugins/consumer-restriction.md b/doc/zh-cn/plugins/consumer-restriction.md
new file mode 100644
index 000000000000..c1f3d1e35966
--- /dev/null
+++ b/doc/zh-cn/plugins/consumer-restriction.md
@@ -0,0 +1,128 @@
+
+
+[English](../../plugins/consumer-restriction.md)
+
+# 目录
+- [**名字**](#名字)
+- [**属性**](#属性)
+- [**如何启用**](#如何启用)
+- [**测试插件**](#测试插件)
+- [**禁用插件**](#禁用插件)
+
+## 名字
+
+`consumer-restriction` 插件可以通过将 consumer 列入白名单或黑名单来限制对服务或路由的访问,支持单个或多个 consumer。
+
+## 属性
+
+* `whitelist`: 可选,加入白名单的 consumer
+* `blacklist`: 可选,加入黑名单的 consumer
+
+白名单和黑名单只能单独启用,两者不能一起使用。
+
+## 如何启用
+
+下面是一个示例,在指定的 route 上开启了 `consumer-restriction` 插件,限制 consumer 访问:
+
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/consumers/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "username": "jack1",
+ "plugins": {
+ "basic-auth": {
+ "username":"jack2019",
+ "password": "123456"
+ }
+ }
+}'
+
+curl http://127.0.0.1:9080/apisix/admin/consumers/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+ "username": "jack2",
+ "plugins": {
+ "basic-auth": {
+ "username":"jack2020",
+ "password": "123456"
+ }
+ }
+}'
+
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ },
+ "plugins": {
+ "basic-auth": {},
+ "consumer-restriction": {
+ "whitelist": [
+ "jack1"
+ ]
+ }
+ }
+}'
+```
+
+## 测试插件
+
+jack1 访问:
+
+```shell
+$ curl -u jack2019:123456 http://127.0.0.1:9080/index.html
+HTTP/1.1 200 OK
+...
+```
+
+jack2 访问:
+
+```shell
+$ curl -u jack2020:123456 http://127.0.0.1:9080/index.html -i
+HTTP/1.1 403 Forbidden
+...
+{"message":"You are not allowed"}
+```
+
+## 禁用插件
+
+当你想去掉 `consumer-restriction` 插件的时候,很简单,在插件的配置中把对应的 json 配置删除即可,无须重启服务,即刻生效:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ },
+ "plugins": {
+ "basic-auth": {}
+ }
+}'
+```
+
+现在就已移除 `consumer-restriction` 插件,其它插件的开启和移除也类似。
+
diff --git a/doc/plugins/cors-cn.md b/doc/zh-cn/plugins/cors.md
similarity index 99%
rename from doc/plugins/cors-cn.md
rename to doc/zh-cn/plugins/cors.md
index 413dc95acc85..bc26df7307cf 100644
--- a/doc/plugins/cors-cn.md
+++ b/doc/zh-cn/plugins/cors.md
@@ -17,7 +17,7 @@
#
-->
-# [English](cors.md)
+# [English](../../plugins/cors.md)
# 目录
diff --git a/doc/zh-cn/plugins/echo.md b/doc/zh-cn/plugins/echo.md
new file mode 100644
index 000000000000..71122fb4c309
--- /dev/null
+++ b/doc/zh-cn/plugins/echo.md
@@ -0,0 +1,92 @@
+
+
+# 目录
+- [**简介**](#简介)
+- [**属性**](#属性)
+- [**如何启用**](#如何启用)
+- [**测试插件**](#测试插件)
+- [**禁用插件**](#禁用插件)
+
+## 简介
+
+echo 是一个有用的插件,可帮助用户尽可能全面地了解如何开发 APISIX 插件。
+
+
+该插件展示了如何在常见的 phase 中实现相应的功能,常见的 phase 有:init、rewrite、access、balancer、header filter、body filter 以及 log。
+
+## 属性
+
+|属性名称 |必选项 |描述|
+|--------- |--------|-----------|
+| before_body |可选| 在 body 属性之前添加的内容,如果 body 属性没有指定将添加在 upstream response body 之前。 |
+| body |可选| 返回给客户端的响应内容,它将覆盖 upstream 返回的响应 body。 |
+| after_body |可选| 在 body 属性之后添加的内容,如果 body 属性没有指定将在 upstream 响应 body 之后添加。 |
+
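+下面用一段 Lua 代码示意 `body_filter` 阶段如何在响应体前后追加内容(仅为示意,并非 echo 插件的真实源码):
+
+```lua
+local _M = {}
+
+-- 仅为示意:在 body_filter 阶段向响应体前后追加内容
+function _M.body_filter(conf, ctx)
+    -- body_filter 会被多次调用,先缓存各个分片并暂不输出
+    ctx.resp_body = (ctx.resp_body or "") .. (ngx.arg[1] or "")
+    ngx.arg[1] = nil
+
+    -- ngx.arg[2] 为 true 表示响应体已接收完毕,一次性输出
+    if ngx.arg[2] then
+        ngx.arg[1] = (conf.before_body or "") .. ctx.resp_body .. (conf.after_body or "")
+    end
+end
+
+return _M
+```
+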
+## 如何启用
+
+1. 为特定路由启用 echo 插件。
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins": {
+ "echo": {
+ "before_body": "before the body modification "
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+}'
+```
+
+## 测试插件
+
+* 成功:
+
+```shell
+$ curl -i http://127.0.0.1:9080/hello
+HTTP/1.1 200 OK
+...
+before the body modification hello world
+```
+
+## Disable Plugin
+
+To disable the `echo` plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uri": "/hello",
+ "plugins": {},
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/doc/plugins/fault-injection-cn.md b/doc/zh-cn/plugins/fault-injection.md
similarity index 98%
rename from doc/plugins/fault-injection-cn.md
rename to doc/zh-cn/plugins/fault-injection.md
index 29453a8115b4..d6155efe1b66 100644
--- a/doc/plugins/fault-injection-cn.md
+++ b/doc/zh-cn/plugins/fault-injection.md
@@ -17,7 +17,7 @@
#
-->
-# [English](fault-injection.md)
+# [English](../../plugins/fault-injection.md)
# fault-injection
diff --git a/doc/plugins/grpc-transcoding-cn.md b/doc/zh-cn/plugins/grpc-transcode.md
similarity index 98%
rename from doc/plugins/grpc-transcoding-cn.md
rename to doc/zh-cn/plugins/grpc-transcode.md
index 7bb4faa5d120..b0c74601ffe9 100644
--- a/doc/plugins/grpc-transcoding-cn.md
+++ b/doc/zh-cn/plugins/grpc-transcode.md
@@ -17,9 +17,9 @@
#
-->
-# [English](grpc-transcoding.md)
+# [English](../../plugins/grpc-transcode.md)
-# grpc-transcoding
+# grpc-transcode
HTTP(s) -> APISIX -> gRPC server
diff --git a/doc/zh-cn/plugins/http-logger.md b/doc/zh-cn/plugins/http-logger.md
new file mode 100644
index 000000000000..0f5edb44dc66
--- /dev/null
+++ b/doc/zh-cn/plugins/http-logger.md
@@ -0,0 +1,98 @@
+
+
+# Summary
+
+- [**Name**](#name)
+- [**Attributes**](#attributes)
+- [**How To Enable**](#how-to-enable)
+- [**Test Plugin**](#test-plugin)
+- [**Disable Plugin**](#disable-plugin)
+
+## Name
+
+`http-logger` is a plugin that pushes log data requests to HTTP/HTTPS servers.
+
+This provides the ability to send log data requests, as JSON objects, to monitoring tools and other HTTP servers.
+
+## Attributes
+
+|Name |Requirement |Description|
+|--------- |--------|-----------|
+| uri |required| URI of the server |
+| authorization |optional| Authorization header |
+| keepalive |optional| Time to keep the connection alive after sending a request |
+| name |optional| A unique identifier to identify the logger |
+| batch_max_size |optional| Maximum size of each batch, default is 1000 |
+| inactive_timeout |optional| Maximum time in seconds before the buffer is flushed, default is 5 |
+| buffer_duration |optional| Maximum age in seconds of the oldest entry in a batch before the batch must be processed, default is 5 |
+| max_retry_count |optional| Maximum number of retries before an entry is removed from the processing pipeline, default is 0 |
+| retry_delay |optional| Number of seconds to delay processing should the execution fail, default is 1 |
+
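+As a sketch of how the batching attributes combine (the values below are illustrative, not recommendations), a route could be configured like this:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins": {
+        "http-logger": {
+            "uri": "127.0.0.1:80/postendpoint?param=1",
+            "batch_max_size": 10,
+            "inactive_timeout": 5,
+            "buffer_duration": 60,
+            "max_retry_count": 1,
+            "retry_delay": 2
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    },
+    "uri": "/hello"
+}'
+```
+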
+## How To Enable
+
+1. Here is an example of how to enable the http-logger plugin for a specific route.
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins": {
+ "http-logger": {
+ "uri": "127.0.0.1:80/postendpoint?param=1"
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ },
+ "uri": "/hello"
+}'
+```
+
+## Test Plugin
+
+> Success:
+
+```shell
+$ curl -i http://127.0.0.1:9080/hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+## Disable Plugin
+
+Remove the corresponding JSON configuration from the plugin configuration to disable http-logger. APISIX plugins are hot-reloaded, so there is no need to restart APISIX:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uri": "/hello",
+ "plugins": {},
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/doc/plugins/ip-restriction-cn.md b/doc/zh-cn/plugins/ip-restriction.md
similarity index 94%
rename from doc/plugins/ip-restriction-cn.md
rename to doc/zh-cn/plugins/ip-restriction.md
index c9f1fcfdb5cf..89ecf5363e55 100644
--- a/doc/plugins/ip-restriction-cn.md
+++ b/doc/zh-cn/plugins/ip-restriction.md
@@ -17,7 +17,7 @@
#
-->
-[English](ip-restriction.md)
+[English](../../plugins/ip-restriction.md)
# Summary
- [**Name**](#name)
@@ -86,7 +86,7 @@ HTTP/1.1 403 Forbidden
To remove the `ip-restriction` plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
```shell
-$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value='
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"uri": "/index.html",
"plugins": {},
diff --git a/doc/plugins/jwt-auth-cn.md b/doc/zh-cn/plugins/jwt-auth.md
similarity index 97%
rename from doc/plugins/jwt-auth-cn.md
rename to doc/zh-cn/plugins/jwt-auth.md
index 8dfc5cb6b04e..8a33e8eec6e5 100644
--- a/doc/plugins/jwt-auth-cn.md
+++ b/doc/zh-cn/plugins/jwt-auth.md
@@ -17,7 +17,7 @@
#
-->
-[English](jwt-auth.md)
+[English](../../plugins/jwt-auth.md)
# Summary
- [**Name**](#name)
@@ -59,10 +59,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1
}'
```
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a consumer:
-
+
Then add the jwt-auth plugin on the consumer page:
-
+
2. Create a Route or Service object and enable the `jwt-auth` plugin.
diff --git a/doc/zh-cn/plugins/kafka-logger.md b/doc/zh-cn/plugins/kafka-logger.md
new file mode 100644
index 000000000000..eecded5da01e
--- /dev/null
+++ b/doc/zh-cn/plugins/kafka-logger.md
@@ -0,0 +1,127 @@
+
+
+# Summary
+- [**Introduction**](#introduction)
+- [**Attributes**](#attributes)
+- [**How It Works**](#how-it-works)
+- [**How To Enable**](#how-to-enable)
+- [**Test Plugin**](#test-plugin)
+- [**Disable Plugin**](#disable-plugin)
+
+## Introduction
+
+`kafka-logger` is a plugin that can be used as a Kafka client driver for the ngx_lua nginx module.
+
+It pushes request logs to an external Kafka cluster as JSON. If you do not receive log data right away, don't worry: the logs are sent automatically once the timer in our batch processor expires.
+
+For more information about the batch processor in Apache APISIX, please refer to
+[Batch-Processor](../batch-processor.md).
+
+## Attributes
+
+|Name |Requirement |Description|
+|--------- |--------|-----------|
+| broker_list |required| List of Kafka brokers to push to.|
+| kafka_topic |required| Topic to push to.|
+| timeout |optional| Timeout for sending data.|
+| key |required| Key for the messages.|
+| name |required| Unique identifier of the batch processor.|
+| batch_max_size |optional| Maximum number of messages sent in one batch; once this threshold is reached, the messages are sent immediately.|
+| inactive_timeout |optional| Inactivity window: if no message is written to the buffer within this period, the buffered messages are sent to Kafka immediately. Default: 5 (s).|
+| buffer_duration |optional| Buffering period: the maximum time a message may stay in the buffer; once exceeded, the messages are sent to Kafka immediately. Default: 60 (s).|
+| max_retry_count |optional| Maximum number of retries. Default: 0.|
+| retry_delay |optional| Retry interval. Default: 1 (s).|
+
+## How It Works
+
+Messages are first written to a buffer.
+When the buffer exceeds `batch_max_size` it is sent to the Kafka server,
+and the buffer is also flushed every `buffer_duration`.
+
+If this succeeds, `true` is returned.
+In case of an error, `nil` is returned together with a string describing the error (`buffer overflow`).
+
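+As a sketch of tuning this behaviour (the batching values below are illustrative only), the attributes can be set when enabling the plugin:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins": {
+        "kafka-logger": {
+            "broker_list" : {
+                "127.0.0.1":9092
+            },
+            "kafka_topic" : "test2",
+            "key" : "key1",
+            "batch_max_size": 100,
+            "buffer_duration": 60,
+            "max_retry_count": 1
+        }
+    },
+    "upstream": {
+        "nodes": {
+            "127.0.0.1:1980": 1
+        },
+        "type": "roundrobin"
+    },
+    "uri": "/hello"
+}'
+```
+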
+##### Broker List
+
+The plugin supports pushing to several brokers at once, configured as follows:
+
+```json
+{
+    "127.0.0.1":9092,
+    "127.0.0.2":9093
+}
+```
+
+## How To Enable
+
+1. Enable the kafka-logger plugin for a specific route.
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins": {
+ "kafka-logger": {
+ "broker_list" :
+ {
+ "127.0.0.1":9092
+ },
+ "kafka_topic" : "test2",
+ "key" : "key1"
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+}'
+```
+
+## Test Plugin
+
+* Success:
+
+```shell
+$ curl -i http://127.0.0.1:9080/hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+## Disable Plugin
+
+To disable the `kafka-logger` plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uri": "/hello",
+ "plugins": {},
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/doc/plugins/key-auth-cn.md b/doc/zh-cn/plugins/key-auth.md
similarity index 96%
rename from doc/plugins/key-auth-cn.md
rename to doc/zh-cn/plugins/key-auth.md
index e7b392c69bea..0c2c75c7782e 100644
--- a/doc/plugins/key-auth-cn.md
+++ b/doc/zh-cn/plugins/key-auth.md
@@ -17,7 +17,7 @@
#
-->
-[English](key-auth.md)
+[English](../../plugins/key-auth.md)
# Summary
- [**Name**](#name)
@@ -54,10 +54,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1
```
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a consumer:
-
+
Then add the key-auth plugin on the consumer page:
-
+
2. Create a route or service object and enable the `key-auth` plugin.
diff --git a/doc/plugins/limit-conn-cn.md b/doc/zh-cn/plugins/limit-conn.md
similarity index 84%
rename from doc/plugins/limit-conn-cn.md
rename to doc/zh-cn/plugins/limit-conn.md
index fb0b4d70b4ed..9bb7c3c76e2f 100644
--- a/doc/plugins/limit-conn-cn.md
+++ b/doc/zh-cn/plugins/limit-conn.md
@@ -17,15 +17,15 @@
#
-->
-[English](limit-conn.md)
+[English](../../plugins/limit-conn.md)
# limit-conn
-Apisix 的限制并发请求(或并发连接)插件。
+A plugin to limit the number of concurrent requests (or concurrent connections).
### Attributes
-* `conn`: 允许的最大并发请求数。 超过这个比率的请求(低于“ conn” + “ burst”)将被延迟以符合这个阈值。
-* `burst`: 允许延迟的过多并发请求(或连接)的数量。
+* `conn`: the maximum number of concurrent requests allowed. Requests exceeding `conn` but staying below `conn` + `burst` will be delayed.
+* `burst`: the number of excess concurrent requests allowed to be delayed.
* `default_conn_delay`: the default processing latency of a typical connection (or request).
* `key`: the user-specified key for limiting the concurrency level; it can be the client IP or the server IP.
@@ -33,7 +33,8 @@ Apisix 的限制并发请求(或并发连接)插件。
Currently the following keys are accepted: "remote_addr" (the client's IP), "server_addr" (the server's IP), and "X-Forwarded-For"/"X-Real-IP" from the request headers.
-* `rejected_code`: 当请求超过阈值时返回的 HTTP状态码, 默认值是503。
+ **The key can be customized by the user; it only takes changing one line of the plugin's code. It is not exposed as configuration for security reasons.**
+* `rejected_code`: the HTTP status code returned when a request exceeds the `conn` + `burst` threshold; the default is 503.
#### How To Enable
@@ -64,10 +65,10 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
```
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
-
+
Then add the limit-conn plugin on the route page:
-
+
#### test plugin
diff --git a/doc/plugins/limit-count-cn.md b/doc/zh-cn/plugins/limit-count.md
similarity index 94%
rename from doc/plugins/limit-count-cn.md
rename to doc/zh-cn/plugins/limit-count.md
index 1768ebc638e6..718c6af49e73 100644
--- a/doc/plugins/limit-count-cn.md
+++ b/doc/zh-cn/plugins/limit-count.md
@@ -17,7 +17,7 @@
#
-->
-[English](limit-count.md)
+[English](../../plugins/limit-count.md)
# limit-count
@@ -31,13 +31,16 @@
|count |required |the threshold of the number of requests allowed within the given time window|
|time_window |required |the size of the time window in seconds; once this window has passed, the counter is reset|
|key |required |the basis for request counting; the currently accepted keys are: "remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for".|
-|rejected_code |可选 |T当请求超过阈值被拒绝时,返回的 HTTP 状态码,默认是 503|
+|rejected_code |optional |the HTTP status code returned when a request is rejected for exceeding the threshold; default 503.|
|policy |optional |the rate-limiting policy used to retrieve and increment the limit. Available values: `local` (the counter is kept in memory on the local node, the default) and `redis` (the counter is kept on a Redis server and shared across nodes, typically used for global rate limiting).|
|redis_host |optional |the address of the Redis server, when the `redis` policy is used.|
|redis_port |optional |the port of the Redis server, when the `redis` policy is used; default port 6379.|
|redis_password|optional |the password of the Redis server, when the `redis` policy is used.|
|redis_timeout |optional |the timeout in milliseconds for the Redis server, when the `redis` policy is used; default 1000 ms (1 second).|
+
+**The key can be customized by the user; it only takes changing one line of the plugin's code. It is not exposed as configuration for security reasons.**
+
### Example
#### Enable the plugin
@@ -66,10 +69,10 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335
```
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
-
+
Then add the limit-count plugin on the route page:
-
+
If you need cluster-level traffic control, a Redis server can be used. The rate-limiting counters are shared between the different APISIX nodes, achieving cluster-wide rate limiting.
diff --git a/doc/plugins/limit-req-cn.md b/doc/zh-cn/plugins/limit-req.md
similarity index 75%
rename from doc/plugins/limit-req-cn.md
rename to doc/zh-cn/plugins/limit-req.md
index b05148ca736a..420359e2a422 100644
--- a/doc/plugins/limit-req-cn.md
+++ b/doc/zh-cn/plugins/limit-req.md
@@ -17,7 +17,7 @@
#
-->
-# [English](limit-req.md)
+# [English](../../plugins/limit-req.md)
# limit-req
@@ -25,10 +25,14 @@
## Parameters
-* `rate`:指定的请求速率(以秒为单位),请求速率超过 `rate` 但没有超过 (`rate` + `brust`)的请求会被加上延时
-* `burst`:请求速率超过 (`rate` + `brust`)的请求会被直接拒绝
-* `rejected_code`:当请求超过阈值被拒绝时,返回的 HTTP 状态码
-* `key`:是用来做请求计数的依据,当前接受的 key 有:"remote_addr"(客户端IP地址), "server_addr"(服务端 IP 地址), 请求头中的"X-Forwarded-For" 或 "X-Real-IP"。
+|Name |Requirement |Description|
+|--------- |--------|-----------|
+|rate |required|the specified request rate per second; requests exceeding `rate` but not exceeding `rate` + `burst` will be delayed.|
+|burst |required|requests exceeding `rate` + `burst` will be rejected outright.|
+| key |required|the basis for request counting; the currently accepted keys are: "remote_addr" (client IP address), "server_addr" (server IP address), and "X-Forwarded-For" or "X-Real-IP" from the request headers.|
+|rejected_code |optional|the HTTP status code returned when a request is rejected for exceeding the threshold; default 503.|
+
+**The key can be customized by the user; it only takes changing one line of the plugin's code. It is not exposed as configuration for security reasons.**
## Example
@@ -60,11 +64,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
-
+
Then add the limit-req plugin on the route page:
-
+
### Test Plugin
diff --git a/doc/plugins/mqtt-proxy-cn.md b/doc/zh-cn/plugins/mqtt-proxy.md
similarity index 98%
rename from doc/plugins/mqtt-proxy-cn.md
rename to doc/zh-cn/plugins/mqtt-proxy.md
index 141e99897e26..d55a7d391d8e 100644
--- a/doc/plugins/mqtt-proxy-cn.md
+++ b/doc/zh-cn/plugins/mqtt-proxy.md
@@ -17,7 +17,7 @@
#
-->
-[English](mqtt-proxy.md)
+[English](../../plugins/mqtt-proxy.md)
# Summary
diff --git a/doc/zh-cn/plugins/oauth.md b/doc/zh-cn/plugins/oauth.md
new file mode 100644
index 000000000000..3c6d0ec23ff1
--- /dev/null
+++ b/doc/zh-cn/plugins/oauth.md
@@ -0,0 +1,129 @@
+
+
+# Summary
+
+- [**Name**](#name)
+- [**Attributes**](#attributes)
+- [**Token Introspection**](#token-introspection)
+
+## Name
+
+The OAuth 2 / OpenID Connect (OIDC) plugin provides authentication and introspection capability to APISIX.
+
+## Attributes
+
+|Name |Requirement |Description|
+|------- |----- |------|
+|client_id |required |OAuth client ID|
+|client_secret |required |OAuth client secret|
+|discovery |required |URL of the discovery endpoint of the identity server|
+|realm |optional |Realm used for authentication; default is `apisix`|
+|bearer_only |optional |Setting this to `true` will check for a bearer token in the authorization header of the request; default is `false`|
+|logout_path |optional |Default is `/logout`|
+|redirect_uri |optional |Default is `ngx.var.request_uri`|
+|timeout |optional |Default is 3 seconds|
+|ssl_verify |optional |Default is `false`|
+|introspection_endpoint |optional |URL of the token verification endpoint of the identity server|
+|introspection_endpoint_auth_method |optional |Name of the authentication method for token introspection|
+|public_key |optional |Public key to verify the token|
+|token_signing_alg_values_expected |optional |Algorithm used to sign the token|
+
+### Token Introspection
+
+Token introspection helps validate a request by verifying the token against an OAuth 2 authorization server.
+As a prerequisite, you should create a trusted client in the identity server and generate a valid token (JWT) for introspection.
+The following image shows a sample (successful) flow of token introspection via the gateway.
+
+
+
+The following curl command shows how to enable the plugin for an external service.
+This route protects `https://httpbin.org/get` (an echo service) by introspecting the token supplied in the request header.
+
+```bash
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri":"/get",
+ "plugins":{
+ "proxy-rewrite":{
+ "scheme":"https"
+ },
+ "openid-connect":{
+ "client_id":"api_six_client_id",
+ "client_secret":"client_secret_code",
+ "discovery":"full_URL_of_the_discovery_endpoint",
+ "introspection_endpoint":"full_URL_of_introspection_endpoint",
+ "bearer_only":true,
+ "realm":"master",
+ "introspection_endpoint_auth_method":"client_secret_basic"
+ }
+ },
+ "upstream":{
+ "type":"roundrobin",
+ "nodes":{
+ "httpbin.org:443":1
+ }
+ }
+}'
+```
+
+The following command can be used to access the new route.
+
+```bash
+curl -i -X GET http://127.0.0.1:9080/get -H "Host: httpbin.org" -H "Authorization: Bearer {replace_jwt_token}"
+```
+
+#### Introspection with public key
+
+You can also provide the public key of the JWT token for verification. If you have provided a public key together with a token introspection endpoint, the public-key workflow is executed instead of verification through the identity server. This method can be used if you want to avoid the extra network calls and speed up the process.
+
+The following configuration shows how to add public-key introspection to a route.
+
+```bash
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri":"/get",
+ "plugins":{
+ "proxy-rewrite":{
+ "scheme":"https"
+ },
+ "openid-connect":{
+ "client_id":"api_six_client_id",
+ "client_secret":"client_secret_code",
+ "discovery":"full_URL_of_the_discovery_endpoint",
+ "bearer_only":true,
+ "realm":"master",
+ "token_signing_alg_values_expected":"RS256",
+ "public_key":"-----BEGIN CERTIFICATE-----
+ {public_key}
+ -----END CERTIFICATE-----"
+ }
+ },
+ "upstream":{
+ "type":"roundrobin",
+ "nodes":{
+ "httpbin.org:443":1
+ }
+ }
+}'
+```
+
+## Troubleshooting
+
+If APISIX cannot resolve or connect to the identity provider, check or adjust the DNS settings in `conf/config.yaml`.
diff --git a/doc/plugins/prometheus-cn.md b/doc/zh-cn/plugins/prometheus.md
similarity index 93%
rename from doc/plugins/prometheus-cn.md
rename to doc/zh-cn/plugins/prometheus.md
index 1dd814285cf1..946d113d10a6 100644
--- a/doc/plugins/prometheus-cn.md
+++ b/doc/zh-cn/plugins/prometheus.md
@@ -17,7 +17,7 @@
#
-->
-[English](prometheus.md)
+[English](../../plugins/prometheus.md)
# prometheus
@@ -51,11 +51,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
-
+
Then add the prometheus plugin on the route page:
-
+
## How to fetch the metric data
@@ -78,9 +78,9 @@ scrape_configs:
We can also check the status in the Prometheus console:
-
+
-
+
### Grafana dashboard
@@ -89,11 +89,11 @@ scrape_configs:
You can download the `Grafana` dashboard metadata from [Grafana meta](https://grafana.com/grafana/dashboards/11719).
-
+
-
+
-
+
### Available metrics
diff --git a/doc/plugins/proxy-cache-cn.md b/doc/zh-cn/plugins/proxy-cache.md
similarity index 94%
rename from doc/plugins/proxy-cache-cn.md
rename to doc/zh-cn/plugins/proxy-cache.md
index 95f3bdaf7d0f..9381ea883ae9 100644
--- a/doc/plugins/proxy-cache-cn.md
+++ b/doc/zh-cn/plugins/proxy-cache.md
@@ -17,7 +17,7 @@
#
-->
-[English](proxy-cache.md)
+[English](../../plugins/proxy-cache.md)
# proxy-cache
@@ -48,7 +48,7 @@
Example 1: enable the `proxy-cache` plugin for a specific route:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"plugins": {
"proxy-cache": {
@@ -130,7 +130,7 @@ Server: APISIX web server
Removing the corresponding JSON configuration from the plugin configuration disables the plugin immediately, without restarting the service:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"uri": "/hello",
"plugins": {},
diff --git a/doc/plugins/proxy-mirror-cn.md b/doc/zh-cn/plugins/proxy-mirror.md
similarity index 89%
rename from doc/plugins/proxy-mirror-cn.md
rename to doc/zh-cn/plugins/proxy-mirror.md
index 4b3f0730cc21..e4c4d3d3b77b 100644
--- a/doc/plugins/proxy-mirror-cn.md
+++ b/doc/zh-cn/plugins/proxy-mirror.md
@@ -17,7 +17,7 @@
#
-->
-[English](proxy-mirror.md)
+[English](../../plugins/proxy-mirror.md)
# proxy-mirror
@@ -38,7 +38,7 @@
Example 1: enable the `proxy-mirror` plugin for a specific route:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"plugins": {
"proxy-mirror": {
@@ -77,7 +77,7 @@ hello world
Removing the corresponding JSON configuration from the plugin configuration disables the plugin immediately, without restarting the service:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"uri": "/hello",
"plugins": {},
diff --git a/doc/plugins/proxy-rewrite-cn.md b/doc/zh-cn/plugins/proxy-rewrite.md
similarity index 98%
rename from doc/plugins/proxy-rewrite-cn.md
rename to doc/zh-cn/plugins/proxy-rewrite.md
index f714ad5f4f5b..45a6c95040ea 100644
--- a/doc/plugins/proxy-rewrite-cn.md
+++ b/doc/zh-cn/plugins/proxy-rewrite.md
@@ -17,7 +17,7 @@
#
-->
-[English](proxy-rewrite.md)
+[English](../../plugins/proxy-rewrite.md)
# proxy-rewrite
A plugin that rewrites upstream proxy information.
diff --git a/doc/plugins/redirect-cn.md b/doc/zh-cn/plugins/redirect.md
similarity index 74%
rename from doc/plugins/redirect-cn.md
rename to doc/zh-cn/plugins/redirect.md
index ba7d94ce35eb..ead153a44421 100644
--- a/doc/plugins/redirect-cn.md
+++ b/doc/zh-cn/plugins/redirect.md
@@ -17,7 +17,7 @@
#
-->
-[English](redirect.md)
+[English](../../plugins/redirect.md)
# redirect
@@ -27,8 +27,9 @@ URI 重定向插件。
|Name |Requirement|Description|
|------- |-----|------|
-|uri |是| 可以包含 Nginx 变量的 URI,例如:`/test/index.html`, `$uri/index.html`。你可以通过类似于 `$ {xxx}` 的方式引用变量,以避免产生歧义,例如:`${uri}foo/index.html`。若你需要保留 `$` 字符,那么使用如下格式:`/\$foo/index.html`。|
-|ret_code|否|请求响应码,默认值为 `302`。|
+|uri |required, choose one of `uri` and `http_to_https`| New URI which can contain Nginx variables, e.g. `/test/index.html`, `$uri/index.html`. You can refer to variables in the form `${xxx}` to avoid ambiguity, e.g. `${uri}foo/index.html`. If you need to keep a literal `$`, use the form `/\$foo/index.html`.|
+|ret_code|optional, only used together with `uri`.|Response status code; default `302`.|
+|http_to_https|required, choose one of `uri` and `http_to_https`|Boolean, default `false`. When set to `true` and the request is HTTP, it is automatically redirected to HTTPS with a 301 status code, keeping the URI unchanged.|
### Example
@@ -94,6 +95,21 @@ Location: /test/default.html
We can check the response code and the `Location` header in the response to confirm that the plugin is enabled.
+```
+
+Below is an example that redirects HTTP to HTTPS:
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/hello",
+ "plugins": {
+ "redirect": {
+ "http_to_https": true
+ }
+ }
+}'
+```
+
#### Disable Plugin
Removing the corresponding JSON configuration from the plugin configuration disables the plugin immediately, without restarting the service:
diff --git a/doc/zh-cn/plugins/request-validation.md b/doc/zh-cn/plugins/request-validation.md
new file mode 100644
index 000000000000..9882495bc726
--- /dev/null
+++ b/doc/zh-cn/plugins/request-validation.md
@@ -0,0 +1,255 @@
+
+
+[English](../../plugins/request-validation.md)
+
+# Summary
+- [**Name**](#name)
+- [**Attributes**](#attributes)
+- [**How To Enable**](#how-to-enable)
+- [**Test Plugin**](#test-plugin)
+- [**Disable Plugin**](#disable-plugin)
+- [**Examples**](#examples)
+
+## Name
+
+The `request-validation` plugin validates requests before they are forwarded to the upstream service. It can validate the `body` and `header` data of a request.
+
+The plugin uses `JSON Schema` for data validation; for more information about `JSON Schema`, please refer to [JSON schema](https://github.com/api7/jsonschema).
+
+
+## Attributes
+
+|Name |Requirement |Description|
+|--------- |-------- |-----------|
+| header_schema |optional |the `schema` structure for the `header` data|
+| body_schema |optional |the `schema` structure for the `body` data|
+
+
+## How To Enable
+
+Create a route and enable the `request-validation` plugin on it:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/get",
+ "plugins": {
+ "request-validation": {
+ "body_schema": {
+ "type": "object",
+ "required": ["required_payload"],
+ "properties": {
+ "required_payload": {"type": "string"},
+ "boolean_payload": {"type": "boolean"}
+ }
+ }
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:8080": 1
+ }
+ }
+}'
+```
+
+
+## Test Plugin
+
+```shell
+curl --header "Content-Type: application/json" \
+ --request POST \
+  --data '{"boolean_payload":true,"required_payload":"hello"}' \
+ http://127.0.0.1:9080/get
+```
+
+If the `Schema` validation fails, a `400 Bad Request` error is returned.
+
+
+## Disable Plugin
+
+Remove the `request-validation` configuration from the `plugins` block of the route to disable the plugin.
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/get",
+ "plugins": {
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:8080": 1
+ }
+ }
+}'
+```
+
+
+## Examples
+
+**Enum validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["enum_payload"],
+        "properties": {
+            "enum_payload": {
+                "type": "string",
+                "enum": ["enum_string_1", "enum_string_2"],
+                "default": "enum_string_1"
+            }
+        }
+    }
+}
+```
+
+**Boolean validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["bool_payload"],
+        "properties": {
+            "bool_payload": {
+                "type": "boolean",
+                "default": true
+            }
+        }
+    }
+}
+```
+
+**Number or Integer range validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["integer_payload"],
+        "properties": {
+            "integer_payload": {
+                "type": "integer",
+                "minimum": 1,
+                "maximum": 65535
+            }
+        }
+    }
+}
+```
+
+**String length validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["string_payload"],
+        "properties": {
+            "string_payload": {
+                "type": "string",
+                "minLength": 1,
+                "maxLength": 32
+            }
+        }
+    }
+}
+```
+
+**Regular expression validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["regex_payload"],
+        "properties": {
+            "regex_payload": {
+                "type": "string",
+                "minLength": 1,
+                "maxLength": 32,
+                "pattern": "^[a-zA-Z0-9_]+$"
+            }
+        }
+    }
+}
+```
+
+
+**Array validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["array_payload"],
+        "properties": {
+            "array_payload": {
+                "type": "array",
+                "minItems": 1,
+                "items": {
+                    "type": "integer",
+                    "minimum": 200,
+                    "maximum": 599
+                },
+                "uniqueItems": true,
+                "default": [200, 302]
+            }
+        }
+    }
+}
+```
+
+**Combined multiple-field validation:**
+
+```json
+{
+    "body_schema": {
+        "type": "object",
+        "required": ["boolean_payload", "array_payload", "regex_payload"],
+        "properties": {
+            "boolean_payload": {
+                "type": "boolean"
+            },
+            "array_payload": {
+                "type": "array",
+                "minItems": 1,
+                "items": {
+                    "type": "integer",
+                    "minimum": 200,
+                    "maximum": 599
+                },
+                "uniqueItems": true,
+                "default": [200, 302]
+            },
+            "regex_payload": {
+                "type": "string",
+                "minLength": 1,
+                "maxLength": 32,
+                "pattern": "^[a-zA-Z0-9_]+$"
+            }
+        }
+    }
+}
+```
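+
+**Header validation:**
+
+A minimal sketch of `header_schema`, which the attributes table above allows alongside `body_schema` (the header name `required_header` is illustrative only, not part of the plugin):
+
+```json
+{
+    "header_schema": {
+        "type": "object",
+        "required": ["required_header"],
+        "properties": {
+            "required_header": {
+                "type": "string",
+                "minLength": 1
+            }
+        }
+    }
+}
+```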
diff --git a/doc/plugins/response-rewrite-cn.md b/doc/zh-cn/plugins/response-rewrite.md
similarity index 91%
rename from doc/plugins/response-rewrite-cn.md
rename to doc/zh-cn/plugins/response-rewrite.md
index aa79bc2c5b9a..83c5aac51c96 100644
--- a/doc/plugins/response-rewrite-cn.md
+++ b/doc/zh-cn/plugins/response-rewrite.md
@@ -17,27 +17,29 @@
#
-->
-[English](response-rewrite.md)
+[English](../../plugins/response-rewrite.md)
+
# response-rewrite
This plugin supports modifying the body and headers returned by the upstream service.
Use cases:
1. You can set headers such as `Access-Control-Allow-*` to implement CORS (cross-origin resource sharing).
-2、另外也可以通过配置 status_code 和 header 里面的 Location 来实现重定向,当然如果只是需要重定向功能,最好使用 [redirect](redirect-cn.md) 插件。
+2. You can also implement redirects by configuring `status_code` and the `Location` header; however, if you only need redirects, it is better to use the [redirect](redirect.md) plugin.
+
+## Configuration parameters
-#### 配置参数
|Name |Requirement|Description|
|------- |-----|------|
-|status_code |可选| 修改上游返回状态码|
+|status_code |optional| Modify the status code returned by the upstream; by default the original status code is kept.|
|body |optional| Modify the `body` returned by the upstream; if new content is set, the content-length header is also removed|
|body_base64 |optional| Boolean describing whether the `body` should be base64-decoded before being returned to the client, used in some image and Protobuffer scenarios|
|headers |optional| `headers` to return to the client; multiple can be set. A header is overwritten if it already exists and added otherwise. To remove a header, set its value to an empty string|
+## Example
-### 示例
+### Enable the plugin
-#### 开启插件
Below is an example that enables the `response rewrite` plugin on the specified route:
```shell
@@ -63,7 +65,8 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1
}'
```
-#### 测试插件
+### Test the plugin
+
Test against the configuration above:
```shell
@@ -71,7 +74,8 @@ curl -X GET -i http://127.0.0.1:9080/test/index.html
```
If you see that the returned headers and body have been modified, the `response rewrite` plugin is in effect.
-```
+
+```shell
HTTP/1.1 200 OK
Date: Sat, 16 Nov 2019 09:15:12 GMT
Transfer-Encoding: chunked
diff --git a/doc/plugins/serverless-cn.md b/doc/zh-cn/plugins/serverless.md
similarity index 95%
rename from doc/plugins/serverless-cn.md
rename to doc/zh-cn/plugins/serverless.md
index be8e7c84a4fb..7ef432662f00 100644
--- a/doc/plugins/serverless-cn.md
+++ b/doc/zh-cn/plugins/serverless.md
@@ -17,27 +17,32 @@
#
-->
-[English](serverless.md)
+[English](../../plugins/serverless.md)
# serverless
+
There are two serverless plugins: `serverless-pre-function` and `serverless-post-function`;
the former runs at the very beginning of the specified phase, while the latter runs at the end of the specified phase.
Both plugins accept the same parameters.
-### Parameters
+## Parameters
+
+* `phase`: the phase to run in; if not specified, the default is `access`. The allowed phases are: `rewrite`, `access`,
`header_filter`, `body_filter`, `log` and `balancer`.
+* `functions`: the list of functions to run. It is an array that can contain a single function or multiple functions, executed in order.
+
Note that only functions are accepted here, not other types of Lua code. For example, anonymous functions are legal:
-```
+
+```lua
return function()
ngx.log(ngx.ERR, 'one')
end
```
Closures are also legal:
-```
+
+```lua
local count = 1
return function()
count = count + 1
@@ -46,14 +51,16 @@ end
```
But code that is not a function is illegal:
-```
+
+```lua
local count = 1
ngx.say(count)
```
-### 示例
+## Example
+
+### Enable the plugin
-#### 启动插件
Below is an example that enables the serverless plugin on the specified route:
```shell
@@ -75,8 +82,10 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433
}'
```
-#### 测试插件
+### Test the plugin
+
Access it with curl:
+
```shell
curl -i http://127.0.0.1:9080/index.html
```
@@ -84,7 +93,8 @@ curl -i http://127.0.0.1:9080/index.html
Then you will find an error-level log entry `serverless pre function` in error.log,
indicating that the specified function has taken effect.
-#### 移除插件
+### Remove the plugin
+
To remove the plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
```shell
diff --git a/doc/zh-cn/plugins/skywalking.md b/doc/zh-cn/plugins/skywalking.md
new file mode 100644
index 000000000000..91ae3c1ebffb
--- /dev/null
+++ b/doc/zh-cn/plugins/skywalking.md
@@ -0,0 +1,192 @@
+
+
+[English](../../plugins/skywalking.md)
+
+# Summary
+- [Summary](#summary)
+  - [Name](#name)
+  - [Attributes](#attributes)
+  - [How To Enable](#how-to-enable)
+  - [Test Plugin](#test-plugin)
+    - [Run the Skywalking instance](#run-the-skywalking-instance)
+  - [Disable Plugin](#disable-plugin)
+  - [Upstream service example (Java SpringBoot)](#upstream-service-example-java-springboot)
+
+## Name
+
+[`Skywalking`](https://github.com/apache/skywalking) is an open-source service tracing system.
+
+The Skywalking server currently supports both the HTTP and gRPC protocols; at the moment APISIX only supports HTTP.
+
+## Attributes
+
+* `endpoint`: the HTTP endpoint of Skywalking, e.g. `http://127.0.0.1:12800`.
+* `sample_ratio`: the sampling ratio, from a minimum of 0.00001 to a maximum of 1.
+* `service_name`: optional, the name to mark the current service with; the default value is `APISIX`.
+
+## How To Enable
+
+Below is an example that enables the skywalking plugin on the specified route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uris": [
+ "/uid/*"
+ ],
+ "plugins": {
+ "skywalking": {
+ "endpoint": "http://10.110.149.175:12800",
+ "sample_ratio": 1,
+ "service_name": "APISIX_SERVER"
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "10.110.149.175:8089": 1
+ }
+ }
+}'
+```
+
+You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
+
+
+
+Then add the skywalking plugin on the route page:
+
+
+
+## Test Plugin
+
+### Run the Skywalking instance
+
+#### Example:
+1. Start the Skywalking server:
+    - By default H2 storage is used; simply start Skywalking directly
+ ```
+ sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always apache/skywalking-oap-server
+ ```
+
+    - If elasticsearch storage is used
+        1. elasticsearch needs to be installed first:
+ ```
+ sudo docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 --restart always -e "discovery.type=single-node" elasticsearch:6.7.2
+
+ ```
+        2. Install elasticsearch-hq, a management UI for ElasticSearch
+ ```
+ sudo docker run -d --name elastic-hq -p 5000:5000 --restart always elastichq/elasticsearch-hq
+ ```
+        3. Start skywalking:
+ ```
+ sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always --link elasticsearch:elasticsearch -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server
+ ```
+2. Skywalking web UI:
+    1. Start the web UI:
+ ```
+ sudo docker run --name skywalking-ui -d -p 8080:8080 --link skywalking:skywalking -e SW_OAP_ADDRESS=skywalking:12800 --restart always apache/skywalking-ui
+ ```
+    2. Open the management page
+    Enter http://10.110.149.175:8080 in a browser; if the following page appears, the installation succeeded
+ 
+
+3. Test example:
+    - Access the upstream service through apisix
+
+ ```bash
+ $ curl -v http://10.110.149.192:9080/uid/12
+ HTTP/1.1 200 OK
+ OK
+ ...
+ ```
+    - Open a browser and visit the Skywalking web UI:
+ ```
+ http://10.110.149.175:8080/
+ ```
+    You can see the service topology\
+    \
+    You can see the service trace\
+ 
+## Disable Plugin
+
+To remove the plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uris": [
+ "/uid/*"
+ ],
+ "plugins": {
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "10.110.149.175:8089": 1
+ }
+ }
+}'
+```
+
+The Skywalking plugin has now been removed. Enabling and removing other plugins works in the same way.
+
+
+## Upstream service example (Java SpringBoot)
+
+```java
+package com.lenovo.ai.controller;
+
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+import javax.servlet.http.HttpServletRequest;
+
+/**
+ * @author cyxinda
+ * @create 2020-05-29 14:02
+ * @desc central controller for the skywalking test
+ **/
+@RestController
+public class TestController {
+ @RequestMapping("/uid/{count}")
+ public String getUidList(@PathVariable("count") String countStr, HttpServletRequest request) {
+ System.out.println("counter:::::"+countStr);
+ return "OK";
+ }
+}
+```
+When starting the service, you need to configure the skywalking agent;
+modify the configuration in agent/config/agent.config
+```
+agent.service_name=yourservername
+collector.backend_service=10.110.149.175:11800
+```
+Script to start the service:
+```
+nohup java -javaagent:/root/skywalking/app/agent/skywalking-agent.jar \
+-jar /root/skywalking/app/app.jar \
+--server.port=8089 \
+2>&1 > /root/skywalking/app/logs/nohup.log &
+```
+
diff --git a/doc/zh-cn/plugins/syslog.md b/doc/zh-cn/plugins/syslog.md
new file mode 100644
index 000000000000..486d1606424c
--- /dev/null
+++ b/doc/zh-cn/plugins/syslog.md
@@ -0,0 +1,105 @@
+
+
+# Summary
+- [**Name**](#name)
+- [**Attributes**](#attributes)
+- [**How To Enable**](#how-to-enable)
+- [**Test Plugin**](#test-plugin)
+- [**Disable Plugin**](#disable-plugin)
+
+
+## Name
+
+`syslog` is a plugin that pushes log data requests to a Syslog server.
+
+This provides the ability to send log data requests as JSON objects.
+
+## Attributes
+
+|Name |Requirement |Description|
+|--------- |-------- |-----------|
+|host |required |IP address or hostname.|
+|port |required |Target upstream port.|
+|timeout |optional |Timeout for sending data to the upstream.|
+|tls |optional |Boolean controlling whether SSL verification is performed.|
+|flush_limit |optional |If the size of the buffered messages plus the current message reaches (>=) this limit in bytes, the buffered log messages are written to the log server. Default is 4096 (4 KB).|
+|drop_limit |optional |If the size of the buffered messages plus the current message is larger than this limit in bytes, the current log message is dropped because of the limited buffer size. Default drop_limit is 1048576 (1 MB).|
+|sock_type|optional |IP protocol type used for the transport layer. Can be "tcp" or "udp". Default is "tcp".|
+|max_retry_times|optional |Maximum number of retries after failing to connect to the log server or to send a log message to it.|
+|retry_interval|optional |Delay in milliseconds before retrying to connect to the log server or to resend a log message. Default is 100 (0.1 s).|
+|pool_size |optional |Keepalive pool size used by sock:keepalive. Default is 10.|
+
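+As a sketch of combining these attributes (the values here are illustrative only; port 514 is merely the conventional syslog port), UDP transport and retry behaviour could be configured like this:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins": {
+        "syslog": {
+            "host": "127.0.0.1",
+            "port": 514,
+            "sock_type": "udp",
+            "flush_limit": 4096,
+            "max_retry_times": 1,
+            "retry_interval": 100
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    },
+    "uri": "/hello"
+}'
+```
+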
+## How To Enable
+
+1. The example below shows how to enable the `syslog` plugin for a specific route.
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins": {
+        "syslog": {
+            "host" : "127.0.0.1",
+            "port" : 5044,
+            "flush_limit" : 1
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    },
+    "uri": "/hello"
+}'
+```
+
+## Test Plugin
+
+* Success:
+
+```shell
+$ curl -i http://127.0.0.1:9080/hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+## Disable Plugin
+
+
+Disabling the `syslog` plugin is very simple: remove the corresponding plugin configuration from the JSON configuration, and it takes effect immediately without restarting the service:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "methods": ["GET"],
+ "uri": "/hello",
+ "plugins": {},
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/doc/plugins/tcp-logger-cn.md b/doc/zh-cn/plugins/tcp-logger.md
similarity index 70%
rename from doc/plugins/tcp-logger-cn.md
rename to doc/zh-cn/plugins/tcp-logger.md
index 6ec103bffec6..652f2afadabb 100644
--- a/doc/plugins/tcp-logger-cn.md
+++ b/doc/zh-cn/plugins/tcp-logger.md
@@ -18,19 +18,24 @@
-->
# Summary
+
- [**Name**](#name)
- [**Attributes**](#attributes)
- [**How To Enable**](#how-to-enable)
- [**Test Plugin**](#test-plugin)
- [**Disable Plugin**](#disable-plugin)
-
## Name
`tcp-logger` is a plugin for sending log data to a TCP service,
providing the ability to send log data in JSON format to monitoring tools and other TCP services.
+The plugin pushes log data to the external TCP server in batches. If you do not receive log data right away, don't worry: the logs are sent automatically once the timer in our batch processor expires.
+
+For more information about the batch processor in Apache APISIX, please refer to
+[Batch-Processor](../batch-processor.md).
+
## Attributes
|Name |Requirement |Description|
@@ -41,31 +46,27 @@
| tls |optional|Boolean controlling whether SSL verification is performed.|
| tls_options |optional|TLS options|
-
## How To Enable
1. The example below shows how to enable the `tcp-logger` plugin for a specific route.
```shell
-curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
- "username": "foo",
- "plugins": {
- "plugins": {
- "tcp-logger": {
- "host": "127.0.0.1",
- "port": 5044,
- "tls": false
- }
- },
- "upstream": {
- "type": "roundrobin",
- "nodes": {
- "127.0.0.1:1980": 1
- }
- },
- "uri": "/hello"
- }
+ "plugins": {
+ "tcp-logger": {
+ "host": "127.0.0.1",
+ "port": 5044,
+ "tls": false
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ },
+ "uri": "/hello"
}'
```
@@ -82,11 +83,10 @@ hello, world
## Disable Plugin
-
Disabling the "tcp-logger" plugin is very simple: remove the corresponding plugin configuration from the JSON configuration, and it takes effect immediately without restarting the service:
```shell
-$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value='
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uri": "/hello",
diff --git a/doc/plugins/udp-logger-cn.md b/doc/zh-cn/plugins/udp-logger.md
similarity index 69%
rename from doc/plugins/udp-logger-cn.md
rename to doc/zh-cn/plugins/udp-logger.md
index d5425c34c998..f129dc2f8b64 100644
--- a/doc/plugins/udp-logger-cn.md
+++ b/doc/zh-cn/plugins/udp-logger.md
@@ -18,19 +18,24 @@
-->
# Summary
+
- [**Name**](#name)
- [**Attributes**](#attributes)
- [**How To Enable**](#how-to-enable)
- [**Test Plugin**](#test-plugin)
- [**Disable Plugin**](#disable-plugin)
-
## Name
`udp-logger` is a plugin for sending log data to a UDP service,
providing the ability to send log data in JSON format to monitoring tools and other UDP services.
+The plugin pushes log data to the external UDP server in batches. If you do not receive log data right away, don't worry: the logs are sent automatically once the timer in our batch processor expires.
+
+For more information about the batch processor in Apache APISIX, please refer to
+[Batch-Processor](../../batch-processor.md).
+
## Attributes
|Name |Requirement |Description|
@@ -39,31 +44,27 @@
| port |required| Target port.|
| timeout |optional|Timeout for sending data.|
-
## How To Enable
1. The example below shows how to enable the `udp-logger` plugin for a specific route.
```shell
-curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
- "username": "foo",
- "plugins": {
- "plugins": {
- "tcp-logger": {
- "host": "127.0.0.1",
- "port": 5044,
- "tls": false
- }
- },
- "upstream": {
- "type": "roundrobin",
- "nodes": {
- "127.0.0.1:1980": 1
- }
- },
- "uri": "/hello"
- }
+ "plugins": {
+          "udp-logger": {
+ "host": "127.0.0.1",
+ "port": 5044,
+ "tls": false
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ },
+ "uri": "/hello"
}'
```
@@ -80,11 +81,10 @@ hello, world
## Disable Plugin
-
Disabling the "udp-logger" plugin is very simple: remove the corresponding plugin configuration from the JSON configuration, and it takes effect immediately without restarting the service:
```shell
-$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value='
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uri": "/hello",
diff --git a/doc/zh-cn/plugins/uri-blocker.md b/doc/zh-cn/plugins/uri-blocker.md
new file mode 100644
index 000000000000..da4713b66904
--- /dev/null
+++ b/doc/zh-cn/plugins/uri-blocker.md
@@ -0,0 +1,94 @@
+
+
+[English](../../plugins/uri-blocker.md)
+
+# Summary
+
+- [**Name**](#name)
+- [**Attributes**](#attributes)
+- [**How To Enable**](#how-to-enable)
+- [**Test Plugin**](#test-plugin)
+- [**Disable Plugin**](#disable-plugin)
+
+## Name
+
+The plugin helps intercept user requests; it only requires specifying `block_rules`.
+
+## Attributes
+
+|Name |Requirement |Description|
+|--------- |--------|-----------|
+|block_rules |required|An array of regular-expression filters. If the current request URI matches any of them, the response code is set to rejected_code and the request is terminated. Example: `["root.exe", "root.m+"]`.|
+|rejected_code |optional|The HTTP status code returned when the request URI matches any of the `block_rules`; default `403`.|
+
+## How To Enable
+
+Here is an example that enables the `uri blocker` plugin on the specified route:
+
+```shell
+curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/*",
+ "plugins": {
+ "uri-blocker": {
+ "block_rules": ["root.exe", "root.m+"]
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
+
+## Test Plugin
+
+```shell
+$ curl -i http://127.0.0.1:9080/root.exe?a=a
+HTTP/1.1 403 Forbidden
+Date: Wed, 17 Jun 2020 13:55:41 GMT
+Content-Type: text/html; charset=utf-8
+Content-Length: 150
+Connection: keep-alive
+Server: APISIX web server
+
+... ...
+```
+
+## Disable Plugin
+
+To disable the `uri blocker` plugin, simply delete the corresponding JSON configuration from the plugin configuration; no service restart is needed and it takes effect immediately:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/*",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
+
+The `uri blocker` plugin has now been disabled; enabling and disabling other plugins works the same way.
diff --git a/doc/plugins/wolf-rbac-cn.md b/doc/zh-cn/plugins/wolf-rbac.md
similarity index 98%
rename from doc/plugins/wolf-rbac-cn.md
rename to doc/zh-cn/plugins/wolf-rbac.md
index 1b9996c91ecc..0ed3f5cd42fc 100644
--- a/doc/plugins/wolf-rbac-cn.md
+++ b/doc/zh-cn/plugins/wolf-rbac.md
@@ -17,7 +17,7 @@
#
-->
-[English](wolf-rbac.md)
+[English](../../plugins/wolf-rbac.md)
# Summary
@@ -70,10 +70,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f
```
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a consumer:
-
+
Then add the wolf-rbac plugin on the consumer page:
-
+
Note: the `appid` filled in above must already exist in the wolf console.
diff --git a/doc/plugins/zipkin-cn.md b/doc/zh-cn/plugins/zipkin.md
similarity index 93%
rename from doc/plugins/zipkin-cn.md
rename to doc/zh-cn/plugins/zipkin.md
index ad53dc2aa569..fb7fc096a34c 100644
--- a/doc/plugins/zipkin-cn.md
+++ b/doc/zh-cn/plugins/zipkin.md
@@ -17,7 +17,7 @@
#
-->
-[English](zipkin.md)
+[English](../../plugins/zipkin.md)
# Summary
- [**Name**](#name)
@@ -67,11 +67,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1
You can open the dashboard in a browser at `http://127.0.0.1:9080/apisix/dashboard/` and complete the operations above through the web UI. First, add a route:
-
+
Then add the zipkin plugin on the route page:
-
+
## Test Plugin
@@ -97,16 +97,16 @@ HTTP/1.1 200 OK
http://127.0.0.1:9411/zipkin
```
-
+
-
+
## 禁用插件
To remove the plugin, simply delete the corresponding JSON configuration from the plugin configuration. APISIX plugins are hot-reloaded, so there is no need to restart the service and the change takes effect immediately:
```shell
-$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value='
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uri": "/index.html",
diff --git a/doc/profile-cn.md b/doc/zh-cn/profile.md
similarity index 100%
rename from doc/profile-cn.md
rename to doc/zh-cn/profile.md
diff --git a/doc/stand-alone-cn.md b/doc/zh-cn/stand-alone.md
similarity index 99%
rename from doc/stand-alone-cn.md
rename to doc/zh-cn/stand-alone.md
index 0da4b0912002..34127ce0c2de 100644
--- a/doc/stand-alone-cn.md
+++ b/doc/zh-cn/stand-alone.md
@@ -17,7 +17,7 @@
#
-->
-[English](stand-alone.md)
+[English](../stand-alone.md)
## Stand-alone mode
diff --git a/doc/stream-proxy-cn.md b/doc/zh-cn/stream-proxy.md
similarity index 96%
rename from doc/stream-proxy-cn.md
rename to doc/zh-cn/stream-proxy.md
index 0a413d73b3bf..afb9fd298826 100644
--- a/doc/stream-proxy-cn.md
+++ b/doc/zh-cn/stream-proxy.md
@@ -17,7 +17,7 @@
#
-->
-[English](stream-proxy.md)
+[English](../stream-proxy.md)
# Stream proxy
@@ -59,7 +59,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03
```
In this example, APISIX proxies requests whose client IP is `127.0.0.1` to the upstream host `127.0.0.1:1995`.
-更多用例,请参照 [test case](../t/stream-node/sanity.t).
+For more use cases, please refer to the [test case](../../t/stream-node/sanity.t).
## More limit options
diff --git a/kubernetes/README.md b/kubernetes/README.md
index 3d914e7d3046..6014401b0819 100644
--- a/kubernetes/README.md
+++ b/kubernetes/README.md
@@ -16,6 +16,42 @@
# limitations under the License.
#
-->
+### Kubernetes
+
+There are some YAML files for deploying apisix in Kubernetes.
+
+### Prerequisites
+- An `etcd` service is required; if there is no `etcd` service, please install one and set the env `etcd_url` in `config.sh`.
+
+- Run `config.sh` to generate `apisix-gw-config-cm.yaml` from the latest `config.yaml`.
+
+```
+# if config.sh does not have execute permission, run `chmod +x config.sh` first
+# Generate apisix-gw-config-cm.yaml
+# sh config.sh
+```
+
+
+#### When using etcd-operator
+When using etcd-operator, you need to change `apisix-gw-config-cm.yaml`:
+
+* Add the CoreDNS IP into `dns_resolver`
+
+```
+dns_resolver:
+ - 10.233.0.3 # default coreDNS cluster ip
+
+```
+* Change the etcd host
+
+The `{your-namespace}` below should be changed to your namespace, for example `default`.
+> Note: you must use the fully qualified domain name; the short name `etcd-cluster-client` does not work.
+
+```
+etcd:
+ host:
+ - "http://etcd-cluster-client.{your-namespace}.svc.cluster.local:2379" # multiple etcd address
+```
### Usage
@@ -29,7 +65,7 @@ or
$ kubectl create configmap apisix-gw-config.yaml --from-file=../conf/config.yaml
```
-##### Note: you should modify etcd addr in config file `apisix-gw-config-cm.yaml` or `../conf/config.yaml` first
+##### Note: you should first check the etcd address in the config file `apisix-gw-config-cm.yaml` or `../conf/config.yaml` and make sure the etcd addresses are correct.
```
etcd:
@@ -49,12 +85,6 @@ $ kubectl apply -f deployment.yaml
$ kubectl apply -f service.yaml
```
-#### Create service for apache incubator-apisix (when using Aliyun SLB)
-
-```
-$ kubectl apply -f service-aliyun-slb.yaml
-```
-
#### Scale apache incubator-apisix
```
diff --git a/kubernetes/apisix-gw-config-cm.yaml b/kubernetes/apisix-gw-config-cm.yaml
index 67833f09a21e..e1d50222960d 100644
--- a/kubernetes/apisix-gw-config-cm.yaml
+++ b/kubernetes/apisix-gw-config-cm.yaml
@@ -18,136 +18,7 @@
apiVersion: v1
data:
config.yaml: |
- #
- # Licensed to the Apache Software Foundation (ASF) under one or more
- # contributor license agreements. See the NOTICE file distributed with
- # this work for additional information regarding copyright ownership.
- # The ASF licenses this file to You under the Apache License, Version 2.0
- # (the "License"); you may not use this file except in compliance with
- # the License. You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- apisix:
- node_listen: 9080 # APISIX listening port
- enable_heartbeat: true
- enable_admin: true
- enable_admin_cors: true # Admin API support CORS response headers.
- enable_debug: false
- enable_dev_mode: false # Sets nginx worker_processes to 1 if set to true
- enable_reuseport: true # Enable nginx SO_REUSEPORT switch if set to true.
- enable_ipv6: true
- config_center: etcd # etcd: use etcd to store the config value
- # yaml: fetch the config value from local yaml file `/your_path/conf/apisix.yaml`
-
- #proxy_protocol: # Proxy Protocol configuration
- # listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and port_admin.
- # This port can only receive http request with proxy protocol, but node_listen & port_admin
- # can only receive http request. If you enable proxy protocol, you must use this port to
- # receive http request with proxy protocol
- # listen_https_port: 9182 # The port with proxy protocol for https
- # enable_tcp_pp: true # Enable the proxy protocol for tcp proxy, it works for stream_proxy.tcp option
- # enable_tcp_pp_to_upstream: true # Enables the proxy protocol to the upstream server
-
- # allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow
- # - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default.
- # - "::/64"
- # port_admin: 9180 # use a separate port
-
- # Default token when use API to call for Admin API.
- # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API.
- # Disabling this configuration item means that the Admin API does not
- # require any authentication.
- admin_key:
- -
- name: "admin"
- key: edd1c9f034335f136f87ad84b625c8f1
- role: admin # admin: manage all configuration data
- # viewer: only can view configuration data
- -
- name: "viewer"
- key: 4054f7cf07e344346cd3f287985e76a2
- role: viewer
- router:
- http: 'radixtree_uri' # radixtree_uri: match route by uri(base on radixtree)
- # radixtree_host_uri: match route by host + uri(base on radixtree)
- ssl: 'radixtree_sni' # radixtree_sni: match route by SNI(base on radixtree)
- # stream_proxy: # TCP/UDP proxy
- # tcp: # TCP proxy port list
- # - 9100
- # - 9101
- # udp: # UDP proxy port list
- # - 9200
- # - 9211
- dns_resolver: # default DNS resolver, with disable IPv6 and enable local DNS
- - 114.114.114.114
- - 223.5.5.5
- - 1.1.1.1
- - 8.8.8.8
- dns_resolver_valid: 30 # valid time for dns result 30 seconds
-
- ssl:
- enable: true
- enable_http2: true
- listen_port: 9443
- ssl_protocols: "TLSv1 TLSv1.1 TLSv1.2 TLSv1.3"
- ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
-
- nginx_config: # config for render the template to genarate nginx.conf
- error_log: "logs/error.log"
- error_log_level: "warn" # warn,error
- worker_rlimit_nofile: 20480 # the number of files a worker process can open, should be larger than worker_connections
- event:
- worker_connections: 10620
- http:
- access_log: "logs/access.log"
- keepalive_timeout: 60s # timeout during which a keep-alive client connection will stay open on the server side.
- client_header_timeout: 60s # timeout for reading client request header, then 408 (Request Time-out) error is returned to the client
- client_body_timeout: 60s # timeout for reading client request body, then 408 (Request Time-out) error is returned to the client
- send_timeout: 10s # timeout for transmitting a response to the client.then the connection is closed
- underscores_in_headers: "on" # default enables the use of underscores in client request header fields
- real_ip_header: "X-Real-IP" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header
- real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from
- - 127.0.0.1
- - 'unix:'
-
- etcd:
- host: "http://127.0.0.1:2379" # etcd address
- prefix: "/apisix" # apisix configurations prefix
- timeout: 3 # 3 seconds
-
- plugins: # plugin list
- - example-plugin
- - limit-req
- - limit-count
- - limit-conn
- - key-auth
- - basic-auth
- - prometheus
- - node-status
- - jwt-auth
- - zipkin
- - ip-restriction
- - grpc-transcode
- - serverless-pre-function
- - serverless-post-function
- - openid-connect
- - proxy-rewrite
- - redirect
- - response-rewrite
- - fault-injection
- - udp-logger
- - wolf-rbac
-
- stream_plugins:
- - mqtt-proxy
-
+ #CONFIG_YAML#
kind: ConfigMap
metadata:
name: apisix-gw-config.yaml
diff --git a/kubernetes/config.sh b/kubernetes/config.sh
new file mode 100755
index 000000000000..689ce38f523c
--- /dev/null
+++ b/kubernetes/config.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# etcd address written into config.yaml; set ETCD_IP_ADDRESS before running this script
+export etcd_url="http://$ETCD_IP_ADDRESS:2379"
+
+# fetch the latest config.yaml
+wget https://raw.githubusercontent.com/apache/incubator-apisix/master/conf/config.yaml
+
+# strip the allow_admin block so the Admin API is reachable from inside the cluster
+sed -i -e ':a' -e 'N' -e '$!ba' -e "s/allow_admin[a-z: #\/._]*\n\( *- [0-9a-zA-Z: #\/._',]*\n*\)*//g" config.yaml
+
+# point the etcd host at $etcd_url
+sed -i -e "s%http://[0-9.]*:2379%`echo $etcd_url`%g" config.yaml
+
+# splice config.yaml into the ConfigMap at the #CONFIG_YAML# placeholder
+sed -i -e '/#CONFIG_YAML#/{r config.yaml' -e 'd}' apisix-gw-config-cm.yaml
+
diff --git a/kubernetes/deployment.yaml b/kubernetes/deployment.yaml
index 60d54b231e37..4b03cb73b7f5 100644
--- a/kubernetes/deployment.yaml
+++ b/kubernetes/deployment.yaml
@@ -32,13 +32,6 @@ spec:
labels:
app: apisix-gw
spec:
- # tolerations:
- # - key: "group"
- # operator: "Equal"
- # value: "prod"
- # effect: "NoSchedule"
- # nodeSelector:
- # env: prod
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -95,16 +88,6 @@ spec:
- containerPort: 9443
name: https
protocol: TCP
- # livenessProbe:
- # failureThreshold: 3
- # httpGet:
- # path: /healthz
- # port: 10254
- # scheme: HTTP
- # initialDelaySeconds: 10
- # periodSeconds: 10
- # successThreshold: 1
- # timeoutSeconds: 1
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 10
@@ -113,28 +96,6 @@ spec:
tcpSocket:
port: 9080
timeoutSeconds: 1
- lifecycle:
- # For alpine based image
- # https://k8s.imroc.io/troubleshooting/cases/dns-lookup-5s-delay
- # postStart:
- # exec:
- # command:
- # - /bin/sh
- # - -c
- # - "/bin/echo 'options single-request-reopen' >> /etc/resolv.conf"
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - "sleep 30"
- # cpu core(s), 1 == 1000m
- resources:
- limits:
- cpu: '2'
- requests:
- cpu: '50m'
-
volumeMounts:
- mountPath: /usr/local/apisix/conf/config.yaml
name: apisix-config-yaml-configmap
@@ -142,13 +103,6 @@ spec:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- # - mountPath: /usr/local/apisix/conf/nginx.conf
- # name: apisix-nginx-conf-configmap
- # subPath: nginx.conf
- # - mountPath: /usr/local/openresty/openssl/ssl/openssl.cnf
- # name: apisix-openssl-cnf-configmap
- # subPath: openssl.cnf
-
volumes:
- configMap:
name: apisix-gw-config.yaml
@@ -157,9 +111,3 @@ spec:
path: /etc/localtime
type: File
name: localtime
- # - configMap:
- # name: apisix-gw-nginx.conf
- # name: apisix-nginx-conf-configmap
- # - configMap:
- # name: apisix-gw-openssl.cnf.conf
- # name: apisix-openssl-cnf-configmap
diff --git a/kubernetes/service-aliyun-slb.yaml b/kubernetes/service-aliyun-slb.yaml
deleted file mode 100644
index a28f150c3f00..000000000000
--- a/kubernetes/service-aliyun-slb.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# https://help.aliyun.com/document_detail/94925.html?spm=5176.2020520152.0.0.44ca16ddon5iJF
-apiVersion: v1
-kind: Service
-metadata:
- name: apisix-gw-lb
- # namespace: default
- annotations:
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-additional-resource-tags: ""
- #
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-AddressType: "intranet"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-network-type: "vpc"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-force-override-listeners: "true"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-persistence-timeout: "1800"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-id: "lb-xx"
- #
- # http
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-cert-id: ''
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-protocol-port: 'https:443'
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: "slb.s1.small"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-forward-port
- # http sticky-session
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-sticky-session: "on"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-sticky-session-type: "insert"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-cookie-timeout: "1800"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-protocol-port: "http:80"
- #
- # health-check
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-health-check-type: "tcp"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-health-check-connect-timeout: "4"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-healthy-threshold: "4"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-unhealthy-threshold: "4"
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-health-check-interval: "6"
- #
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-scheduler: "wlc"
- # ACL
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-acl-status: "on"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-acl-id: "acl-xx"
- # service.beta.kubernetes.io/alibaba-cloud-loadbalancer-acl-type: "white"
- #
- service.beta.kubernetes.io/alibaba-cloud-loadbalancer-remove-unscheduled-backend: "on"
- labels:
- app: apisix-gw
-spec:
- selector:
- app: apisix-gw
- ports:
- - protocol: TCP
- port: 80
- name: http
- targetPort: 9080
- - protocol: TCP
- port: 443
- name: https
- targetPort: 9443
- # - protocol: TCP
- # port: 9180
- # name: admin-port
- # targetPort: 9180
- type: LoadBalancer
- externalTrafficPolicy: Local
- # sessionAffinity: ClientIP
diff --git a/kubernetes/service-monitor-for-prometheus.yaml b/kubernetes/service-monitor-for-prometheus.yaml
new file mode 100644
index 000000000000..41a62f8acde0
--- /dev/null
+++ b/kubernetes/service-monitor-for-prometheus.yaml
@@ -0,0 +1,39 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# When using prometheus-operator, you can apply this to k8s.
+# Note: the ServiceMonitor should be in the same namespace as prometheus-operator.
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: apisix-gw
+ labels:
+ app: apisix-gw
+spec:
+ endpoints:
+ - interval: 10s
+ honorLabels: true
+ port: http
+ path: /apisix/prometheus/metrics
+ scheme: http
+ selector:
+ matchLabels:
+ app: apisix-gw
+ namespaceSelector:
+ any: true
+
diff --git a/kubernetes/service.yaml b/kubernetes/service.yaml
index c207660fcb98..1de6e7d53816 100644
--- a/kubernetes/service.yaml
+++ b/kubernetes/service.yaml
@@ -20,6 +20,8 @@ kind: Service
metadata:
name: apisix-gw-lb
# namespace: default
+ labels:
+ app: apisix-gw # useful for service discovery, e.g. by prometheus-operator.
spec:
ports:
- name: http
diff --git a/rockspec/apisix-0.9-0.rockspec b/rockspec/apisix-0.9-0.rockspec
index 28952bb01a3a..94c7962a2d25 100644
--- a/rockspec/apisix-0.9-0.rockspec
+++ b/rockspec/apisix-0.9-0.rockspec
@@ -24,7 +24,7 @@ source = {
}
description = {
- summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/incubator-apisix",
license = "Apache License 2.0",
}
diff --git a/rockspec/apisix-1.0-0.rockspec b/rockspec/apisix-1.0-0.rockspec
index 5e4f401857f4..0a971989f488 100644
--- a/rockspec/apisix-1.0-0.rockspec
+++ b/rockspec/apisix-1.0-0.rockspec
@@ -24,7 +24,7 @@ source = {
}
description = {
- summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/incubator-apisix",
license = "Apache License 2.0",
}
diff --git a/rockspec/apisix-1.1-0.rockspec b/rockspec/apisix-1.1-0.rockspec
index f1940267f5dd..1add5624a29f 100644
--- a/rockspec/apisix-1.1-0.rockspec
+++ b/rockspec/apisix-1.1-0.rockspec
@@ -24,7 +24,7 @@ source = {
}
description = {
- summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/incubator-apisix",
license = "Apache License 2.0",
}
diff --git a/rockspec/apisix-1.2-0.rockspec b/rockspec/apisix-1.2-0.rockspec
index 6be16b9aa5c3..e57a61d65e7f 100644
--- a/rockspec/apisix-1.2-0.rockspec
+++ b/rockspec/apisix-1.2-0.rockspec
@@ -20,11 +20,11 @@ supported_platforms = {"linux", "macosx"}
source = {
url = "git://github.com/apache/incubator-apisix",
- branch = "v1.2",
+ tag = "1.2",
}
description = {
- summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/incubator-apisix",
license = "Apache License 2.0",
}
diff --git a/rockspec/apisix-1.3-0.rockspec b/rockspec/apisix-1.3-0.rockspec
new file mode 100644
index 000000000000..580bf4874357
--- /dev/null
+++ b/rockspec/apisix-1.3-0.rockspec
@@ -0,0 +1,72 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+package = "apisix"
+version = "1.3-0"
+supported_platforms = {"linux", "macosx"}
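+-- Note: the dependencies below can be installed locally with, for example,
+-- `luarocks install rockspec/apisix-1.3-0.rockspec --tree=deps --only-deps --local`.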
+
+source = {
+ url = "git://github.com/apache/incubator-apisix",
+ tag = "1.3",
+}
+
+description = {
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ homepage = "https://github.com/apache/incubator-apisix",
+ license = "Apache License 2.0",
+}
+
+dependencies = {
+ "lua-resty-template = 1.9",
+ "lua-resty-etcd = 0.9",
+ "lua-resty-balancer = 0.02rc5",
+ "lua-resty-ngxvar = 0.5",
+ "lua-resty-jit-uuid = 0.0.7",
+ "lua-resty-healthcheck-api7 = 2.2.0",
+ "lua-resty-jwt = 0.2.0",
+ "lua-resty-cookie = 0.1.0",
+ "lua-resty-session = 2.24",
+ "opentracing-openresty = 0.1",
+ "lua-resty-radixtree = 1.8",
+ "lua-protobuf = 0.3.1",
+ "lua-resty-openidc = 1.7.2-1",
+ "luafilesystem = 1.7.0-2",
+ "lua-tinyyaml = 0.1",
+ "lua-resty-prometheus = 1.0",
+ "jsonschema = 0.8",
+ "lua-resty-ipmatcher = 0.6",
+ "lua-resty-kafka = 0.07",
+ "lua-resty-logger-socket = 2.0-0",
+}
+
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS)",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ LUA="$(LUA)",
+ },
+ install_variables = {
+ INST_PREFIX="$(PREFIX)",
+ INST_BINDIR="$(BINDIR)",
+ INST_LIBDIR="$(LIBDIR)",
+ INST_LUADIR="$(LUADIR)",
+ INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/rockspec/apisix-1.4-0.rockspec b/rockspec/apisix-1.4-0.rockspec
new file mode 100644
index 000000000000..f0f82421367e
--- /dev/null
+++ b/rockspec/apisix-1.4-0.rockspec
@@ -0,0 +1,74 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+package = "apisix"
+version = "1.4-0"
+supported_platforms = {"linux", "macosx"}
+
+source = {
+ url = "git://github.com/apache/incubator-apisix",
+ branch = "1.4",
+}
+
+description = {
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ homepage = "https://github.com/apache/incubator-apisix",
+ license = "Apache License 2.0",
+}
+
+dependencies = {
+ "lua-resty-template = 1.9",
+ "lua-resty-etcd = 1.0",
+ "lua-resty-balancer = 0.02rc5",
+ "lua-resty-ngxvar = 0.5",
+ "lua-resty-jit-uuid = 0.0.7",
+ "lua-resty-healthcheck-api7 = 2.2.0",
+ "lua-resty-jwt = 0.2.0",
+ "lua-resty-cookie = 0.1.0",
+ "lua-resty-session = 2.24",
+ "opentracing-openresty = 0.1",
+ "lua-resty-radixtree = 1.9",
+ "lua-protobuf = 0.3.1",
+ "lua-resty-openidc = 1.7.2-1",
+ "luafilesystem = 1.7.0-2",
+ "lua-tinyyaml = 0.1",
+ "lua-resty-prometheus = 1.1",
+ "jsonschema = 0.8",
+ "lua-resty-ipmatcher = 0.6",
+ "lua-resty-kafka = 0.07",
+ "lua-resty-logger-socket = 2.0-0",
+ "skywalking-nginx-lua-plugin = 1.0-0",
+}
+
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS)",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ LUA="$(LUA)",
+ },
+ install_variables = {
+ INST_PREFIX="$(PREFIX)",
+ INST_BINDIR="$(BINDIR)",
+ INST_LIBDIR="$(LIBDIR)",
+ INST_LUADIR="$(LUADIR)",
+ INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/rockspec/apisix-1.4.1-0.rockspec b/rockspec/apisix-1.4.1-0.rockspec
new file mode 100644
index 000000000000..3fc6ea42f085
--- /dev/null
+++ b/rockspec/apisix-1.4.1-0.rockspec
@@ -0,0 +1,74 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+package = "apisix"
+version = "1.4.1-0"
+supported_platforms = {"linux", "macosx"}
+
+source = {
+ url = "git://github.com/apache/incubator-apisix",
+ branch = "1.4.1",
+}
+
+description = {
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ homepage = "https://github.com/apache/incubator-apisix",
+ license = "Apache License 2.0",
+}
+
+dependencies = {
+ "lua-resty-template = 1.9",
+ "lua-resty-etcd = 1.0",
+ "lua-resty-balancer = 0.02rc5",
+ "lua-resty-ngxvar = 0.5",
+ "lua-resty-jit-uuid = 0.0.7",
+ "lua-resty-healthcheck-api7 = 2.2.0",
+ "lua-resty-jwt = 0.2.0",
+ "lua-resty-cookie = 0.1.0",
+ "lua-resty-session = 2.24",
+ "opentracing-openresty = 0.1",
+ "lua-resty-radixtree = 1.9",
+ "lua-protobuf = 0.3.1",
+ "lua-resty-openidc = 1.7.2-1",
+ "luafilesystem = 1.7.0-2",
+ "lua-tinyyaml = 0.1",
+ "lua-resty-prometheus = 1.1",
+ "jsonschema = 0.8",
+ "lua-resty-ipmatcher = 0.6",
+ "lua-resty-kafka = 0.07",
+ "lua-resty-logger-socket = 2.0-0",
+ "skywalking-nginx-lua-plugin = 1.0-0",
+}
+
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS)",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ LUA="$(LUA)",
+ },
+ install_variables = {
+ INST_PREFIX="$(PREFIX)",
+ INST_BINDIR="$(BINDIR)",
+ INST_LIBDIR="$(LIBDIR)",
+ INST_LUADIR="$(LUADIR)",
+ INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec
index 3c4f344e0273..d01d8f6d278b 100644
--- a/rockspec/apisix-master-0.rockspec
+++ b/rockspec/apisix-master-0.rockspec
@@ -25,14 +25,14 @@ source = {
}
description = {
- summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/incubator-apisix",
license = "Apache License 2.0",
}
dependencies = {
"lua-resty-template = 1.9",
- "lua-resty-etcd = 0.9",
+ "lua-resty-etcd = 1.0",
"lua-resty-balancer = 0.02rc5",
"lua-resty-ngxvar = 0.5",
"lua-resty-jit-uuid = 0.0.7",
@@ -41,15 +41,17 @@ dependencies = {
"lua-resty-cookie = 0.1.0",
"lua-resty-session = 2.24",
"opentracing-openresty = 0.1",
- "lua-resty-radixtree = 1.8",
+ "lua-resty-radixtree = 2.0",
"lua-protobuf = 0.3.1",
"lua-resty-openidc = 1.7.2-1",
"luafilesystem = 1.7.0-2",
- "lua-tinyyaml = 0.1",
- "lua-resty-prometheus = 1.0",
+ "lua-tinyyaml = 1.0",
+ "lua-resty-prometheus = 1.1",
"jsonschema = 0.8",
"lua-resty-ipmatcher = 0.6",
"lua-resty-kafka = 0.07",
+ "lua-resty-logger-socket = 2.0-0",
+ "skywalking-nginx-lua-plugin = 1.0-0",
}
build = {
diff --git a/t/APISIX.pm b/t/APISIX.pm
index b8aa664bf941..d0e7e3bface9 100644
--- a/t/APISIX.pm
+++ b/t/APISIX.pm
@@ -72,11 +72,20 @@ if ($enable_local_dns) {
my $yaml_config = read_file("conf/config.yaml");
my $ssl_crt = read_file("conf/cert/apisix.crt");
my $ssl_key = read_file("conf/cert/apisix.key");
+my $test2_crt = read_file("conf/cert/test2.crt");
+my $test2_key = read_file("conf/cert/test2.key");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/ # stream_proxy:/ stream_proxy:\n tcp:\n - 9100/;
$yaml_config =~ s/admin_key:/disable_admin_key:/;
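+# when ETCD_ENABLE_AUTH=true, enable the commented-out etcd user/password
+# entries in config.yaml so the tests run against an authenticated etcd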
+my $etcd_enable_auth = $ENV{"ETCD_ENABLE_AUTH"} || "false";
+
+if ($etcd_enable_auth eq "true") {
+ $yaml_config =~ s/ # user:/ user:/;
+ $yaml_config =~ s/ # password:/ password:/;
+}
+
+
my $profile = $ENV{"APISIX_PROFILE"};
@@ -100,7 +109,7 @@ add_block_preprocessor(sub {
my $main_config = $block->main_config // <<_EOC_;
worker_rlimit_core 500M;
-working_directory $apisix_home;
+env ETCD_ENABLE_AUTH;
env APISIX_PROFILE;
_EOC_
@@ -199,6 +208,7 @@ _EOC_
lua_shared_dict upstream-healthcheck 32m;
lua_shared_dict worker-events 10m;
lua_shared_dict lrucache-lock 10m;
+ lua_shared_dict skywalking-tracing-buffer 100m;
resolver $dns_addrs_str;
resolver_timeout 5;
@@ -417,6 +427,10 @@ $user_yaml_config
$ssl_crt
>>> ../conf/cert/apisix.key
$ssl_key
+>>> ../conf/cert/test2.crt
+$test2_crt
+>>> ../conf/cert/test2.key
+$test2_key
$user_apisix_yaml
_EOC_
diff --git a/t/admin/balancer.t b/t/admin/balancer.t
index 0be70dbd47bf..1afcedafa214 100644
--- a/t/admin/balancer.t
+++ b/t/admin/balancer.t
@@ -26,17 +26,20 @@ add_block_preprocessor(sub {
my $init_by_lua_block = <<_EOC_;
require "resty.core"
apisix = require("apisix")
+ core = require("apisix.core")
apisix.http_init()
function test(route, ctx, count)
local balancer = require("apisix.balancer")
local res = {}
for i = 1, count or 12 do
- local host, port, err = balancer.pick_server(route, ctx)
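+ -- pick_server now returns a single server table ({host = ..., port = ...})
+ -- instead of separate host and port return values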
+ local server, err = balancer.pick_server(route, ctx)
if err then
ngx.say("failed: ", err)
end
- res[host] = (res[host] or 0) + 1
+
+ core.log.warn("host: ", server.host, " port: ", server.port)
+ res[server.host] = (res[server.host] or 0) + 1
end
local keys = {}
@@ -61,20 +64,18 @@ __DATA__
--- config
location /t {
content_by_lua_block {
- local route = {
- value = {
- upstream = {
- nodes = {
- ["39.97.63.215:80"] = 1,
- ["39.97.63.216:81"] = 1,
- ["39.97.63.217:82"] = 1,
- },
- type = "roundrobin",
- },
- id = 1
- }
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = "39.97.63.215", port = 80, weight = 1},
+ {host = "39.97.63.216", port = 81, weight = 1},
+ {host = "39.97.63.217", port = 82, weight = 1},
}
+ }
local ctx = {conf_version = 1}
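+ -- the balancer now reads the upstream from dedicated ctx fields
+ -- instead of the route's upstream definition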
+ ctx.upstream_conf = up_conf
+ ctx.upstream_version = "ver"
+ ctx.upstream_key = up_conf.type .. "#route_" .. "id"
test(route, ctx)
}
@@ -94,23 +95,18 @@ host: 39.97.63.217 count: 4
--- config
location /t {
content_by_lua_block {
- local core = require("apisix.core")
- local balancer = require("apisix.balancer")
-
- local route = {
- value = {
- upstream = {
- nodes = {
- ["39.97.63.215:80"] = 1,
- ["39.97.63.216:81"] = 2,
- ["39.97.63.217:82"] = 3,
- },
- type = "roundrobin",
- },
- id = 1
- }
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = "39.97.63.215", port = 80, weight = 1},
+ {host = "39.97.63.216", port = 81, weight = 2},
+ {host = "39.97.63.217", port = 82, weight = 3},
}
+ }
local ctx = {conf_version = 1}
+ ctx.upstream_conf = up_conf
+ ctx.upstream_version = "ver"
+ ctx.upstream_key = up_conf.type .. "#route_" .. "id"
test(route, ctx)
}
@@ -130,33 +126,30 @@ host: 39.97.63.217 count: 6
--- config
location /t {
content_by_lua_block {
- local balancer = require("apisix.balancer")
-
- local route = {
- value = {
- upstream = {
- nodes = {
- ["39.97.63.215:80"] = 1,
- ["39.97.63.216:81"] = 1,
- ["39.97.63.217:82"] = 1,
- },
- type = "roundrobin",
- },
- id = 1
- }
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = "39.97.63.215", port = 80, weight = 1},
+ {host = "39.97.63.216", port = 81, weight = 1},
+ {host = "39.97.63.217", port = 82, weight = 1},
}
- local ctx = {conf_version = 1}
+ }
+ local ctx = {}
+ ctx.upstream_conf = up_conf
+ ctx.upstream_version = 1
+ ctx.upstream_key = up_conf.type .. "#route_" .. "id"
test(route, ctx)
-- cached by version
- route.value.upstream.nodes = {
- ["39.97.63.218:83"] = 1,
+ up_conf.nodes = {
+ {host = "39.97.63.218", port = 80, weight = 1},
+ {host = "39.97.63.219", port = 80, weight = 0},
}
test(route, ctx)
-- update, version changed
- ctx = {conf_version = 2}
+ ctx.upstream_version = 2
test(route, ctx)
}
}
@@ -179,37 +172,33 @@ host: 39.97.63.218 count: 12
--- config
location /t {
content_by_lua_block {
- local route = {
- value = {
- upstream = {
- nodes = {
- ["39.97.63.215:80"] = 1,
- ["39.97.63.216:81"] = 1,
- ["39.97.63.217:82"] = 1,
- },
- type = "chash",
- key = "remote_addr",
- },
- id = 1
- }
+ local up_conf = {
+ type = "chash",
+ key = "remote_addr",
+ nodes = {
+ {host = "39.97.63.215", port = 80, weight = 1},
+ {host = "39.97.63.216", port = 81, weight = 1},
+ {host = "39.97.63.217", port = 82, weight = 1},
}
+ }
local ctx = {
- conf_version = 1,
- var = {
- remote_addr = "127.0.0.1"
- }
+ var = {remote_addr = "127.0.0.1"},
}
+ ctx.upstream_conf = up_conf
+ ctx.upstream_version = 1
+ ctx.upstream_key = up_conf.type .. "#route_" .. "id"
test(route, ctx)
-- cached by version
- route.value.upstream.nodes = {
- ["39.97.63.218:83"] = 1,
+ up_conf.nodes = {
+ {host = "39.97.63.218", port = 80, weight = 1},
+ {host = "39.97.63.219", port = 80, weight = 0},
}
test(route, ctx)
-- update, version changed
- ctx.conf_version = 2
+ ctx.upstream_version = 2
test(route, ctx)
}
}
@@ -221,3 +210,41 @@ host: 39.97.63.215 count: 12
host: 39.97.63.218 count: 12
--- no_error_log
[error]
+
+
+
+=== TEST 5: return the item directly if there is only one item in `nodes`
+--- config
+ location /t {
+ content_by_lua_block {
+ local up_conf = {
+ type = "roundrobin",
+ nodes = {
+ {host = "39.97.63.215", port = 80, weight = 1},
+ {host = "39.97.63.216", port = 81, weight = 1},
+ {host = "39.97.63.217", port = 82, weight = 1},
+ }
+ }
+ local ctx = {}
+ ctx.upstream_conf = up_conf
+ ctx.upstream_version = 1
+ ctx.upstream_key = up_conf.type .. "#route_" .. "id"
+
+ test(route, ctx)
+
+ -- one item in nodes, return it directly
+ up_conf.nodes = {
+ {host = "39.97.63.218", port = 80, weight = 1},
+ }
+ test(route, ctx)
+ }
+ }
+--- request
+GET /t
+--- response_body
+host: 39.97.63.215 count: 4
+host: 39.97.63.216 count: 4
+host: 39.97.63.217 count: 4
+host: 39.97.63.218 count: 12
+--- no_error_log
+[error]
diff --git a/t/admin/global-rules.t b/t/admin/global-rules.t
index aa7a0c600ea1..0babd93f8201 100644
--- a/t/admin/global-rules.t
+++ b/t/admin/global-rules.t
@@ -160,6 +160,53 @@ passed
=== TEST 4: PATCH global rules
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/global_rules/1',
+ ngx.HTTP_PATCH,
+ [[{
+ "plugins": {
+ "limit-count": {
+ "count": 3,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }}]],
+ [[{
+ "node": {
+ "value": {
+ "plugins": {
+ "limit-count": {
+ "count": 3,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }
+ },
+ "key": "/apisix/global_rules/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 5: PATCH global rules (sub path)
--- config
location /t {
content_by_lua_block {
@@ -205,7 +252,7 @@ passed
-=== TEST 5: delete global rules
+=== TEST 6: delete global rules
--- config
location /t {
content_by_lua_block {
@@ -229,7 +276,7 @@ GET /t
-=== TEST 6: delete global rules(not_found)
+=== TEST 7: delete global rules(not_found)
--- config
location /t {
content_by_lua_block {
@@ -253,7 +300,7 @@ GET /t
-=== TEST 7: set global rules(invalid host option)
+=== TEST 8: set global rules(invalid host option)
--- config
location /t {
content_by_lua_block {
@@ -287,7 +334,7 @@ GET /t
-=== TEST 8: set global rules(missing plugins)
+=== TEST 9: set global rules(missing plugins)
--- config
location /t {
content_by_lua_block {
@@ -308,3 +355,59 @@ GET /t
{"error_msg":"invalid configuration: property \"plugins\" is required"}
--- no_error_log
[error]
+
+
+
+=== TEST 10: string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: string id(DELETE)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123',
+ ngx.HTTP_DELETE
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
diff --git a/t/admin/health-check.t b/t/admin/health-check.t
index 8680768e75cd..698d69d11b92 100644
--- a/t/admin/health-check.t
+++ b/t/admin/health-check.t
@@ -476,3 +476,45 @@ GET /t
{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the requireds: [\"active\"] or [\"active\",\"passive\"]"}
--- no_error_log
[error]
+
+
+
+=== TEST 13: number type timeout
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+
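+ -- "timeout" is a number-typed field, so a fractional value like 1.01 is valid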
+ req_data.upstream.checks = json.decode([[{
+ "active": {
+ "http_path": "/status",
+ "host": "foo.com",
+ "timeout": 1.01,
+ "healthy": {
+ "interval": 2,
+ "successes": 1
+ },
+ "unhealthy": {
+ "interval": 1,
+ "http_failures": 2
+ }
+ }
+ }]])
+ exp_data.node.value.upstream.checks = req_data.upstream.checks
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ req_data,
+ exp_data
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
diff --git a/t/admin/plugins.t b/t/admin/plugins.t
index 20ee9ed88f7c..9d96556cf3cd 100644
--- a/t/admin/plugins.t
+++ b/t/admin/plugins.t
@@ -30,7 +30,7 @@ __DATA__
--- request
GET /apisix/admin/plugins/list
--- response_body_like eval
-qr/\["limit-req","limit-count","limit-conn","key-auth","basic-auth","prometheus","node-status","jwt-auth","zipkin","ip-restriction","grpc-transcode","serverless-pre-function","serverless-post-function","openid-connect","proxy-rewrite","redirect","response-rewrite","fault-injection","udp-logger","wolf-rbac","proxy-cache","tcp-logger","proxy-mirror","kafka-logger","cors"\]/
+qr/\["fault-injection","serverless-pre-function","batch-requests","cors","ip-restriction","uri-blocker","request-validation","openid-connect","wolf-rbac","basic-auth","jwt-auth","key-auth","consumer-restriction","authz-keycloak","proxy-mirror","proxy-cache","proxy-rewrite","limit-conn","limit-count","limit-req","node-status","redirect","response-rewrite","grpc-transcode","prometheus","echo","http-logger","tcp-logger","kafka-logger","syslog","udp-logger","zipkin","skywalking","serverless-post-function"\]/
--- no_error_log
[error]
@@ -51,7 +51,7 @@ GET /apisix/admin/plugins
--- request
GET /apisix/admin/plugins/limit-req
--- response_body
-{"properties":{"rate":{"minimum":0,"type":"number"},"burst":{"minimum":0,"type":"number"},"key":{"enum":["remote_addr","server_addr","http_x_real_ip","http_x_forwarded_for"],"type":"string"},"rejected_code":{"minimum":200,"type":"integer"}},"required":["rate","burst","key","rejected_code"],"type":"object"}
+{"properties":{"rate":{"minimum":0,"type":"number"},"burst":{"minimum":0,"type":"number"},"key":{"enum":["remote_addr","server_addr","http_x_real_ip","http_x_forwarded_for"],"type":"string"},"rejected_code":{"type":"integer","default":503,"minimum":200}},"required":["rate","burst","key"],"type":"object"}
--- no_error_log
[error]
@@ -63,14 +63,4 @@ GET /apisix/admin/plugins/node-status
--- response_body
{"additionalProperties":false,"type":"object"}
--- no_error_log
-[error]
-
-
-
-=== TEST 5: get plugin heartbeat schema
---- request
-GET /apisix/admin/plugins/heartbeat
---- response_body
-{"additionalProperties":false,"type":"object"}
---- no_error_log
-[error]
+[error]
diff --git a/t/admin/routes-array-nodes.t b/t/admin/routes-array-nodes.t
new file mode 100644
index 000000000000..c9b141883a28
--- /dev/null
+++ b/t/admin/routes-array-nodes.t
@@ -0,0 +1,125 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+log_level("info");
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: set route(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "methods": ["GET"],
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ },
+ "desc": "new route",
+ "uri": "/index.html"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "methods": [
+ "GET"
+ ],
+ "uri": "/index.html",
+ "desc": "new route",
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ }
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 2: get route(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_GET,
+ nil,
+ [[{
+ "node": {
+ "value": {
+ "methods": [
+ "GET"
+ ],
+ "uri": "/index.html",
+ "desc": "new route",
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ }
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "get"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
diff --git a/t/admin/routes.t b/t/admin/routes.t
index 4c54c4ffa133..afa846a42e44 100644
--- a/t/admin/routes.t
+++ b/t/admin/routes.t
@@ -434,7 +434,7 @@ GET /t
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
- "service_id": "invalid_id",
+ "service_id": "invalid_id$",
"uri": "/index.html"
}]]
)
@@ -569,7 +569,7 @@ GET /t
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
- "upstream_id": "invalid",
+ "upstream_id": "invalid$",
"uri": "/index.html"
}]]
)
@@ -988,19 +988,172 @@ passed
-=== TEST 28: patch route(new methods)
+=== TEST 28: patch route(new uri)
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/routes/1/methods',
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PATCH,
+ [[{
+ "uri": "/patch_test"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "uri": "/patch_test"
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 29: patch route(multi)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
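+ -- setting a node's weight to null in a PATCH removes that node from
+ -- the upstream, as the expected response below shows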
+ local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PATCH,
- '["GET"]',
+ [[{
+ "methods": ["GET"],
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": null,
+ "127.0.0.2:8080": 1
+ }
+ },
+ "desc": "new route"
+ }]],
[[{
"node": {
"value": {
"methods": [
"GET"
+ ],
+ "uri": "/patch_test",
+ "desc": "new route",
+ "upstream": {
+ "nodes": {
+ "127.0.0.2:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 30: patch route(new methods)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PATCH,
+ [[{
+ "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"]
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"]
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 31: patch route(minus methods)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PATCH,
+ [[{
+ "methods": ["GET", "POST"]
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "methods": ["GET", "POST"]
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 32: patch route(new methods - sub path way)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1/methods',
+ ngx.HTTP_PATCH,
+ '["POST"]',
+ [[{
+ "node": {
+ "value": {
+ "methods": [
+ "POST"
]
},
"key": "/apisix/routes/1"
@@ -1022,18 +1175,18 @@ passed
-=== TEST 29: patch route(new uri)
+=== TEST 33: patch route(new uri - sub path way)
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1/uri',
ngx.HTTP_PATCH,
- '"/patch_test"',
+ '"/patch_uri_test"',
[[{
"node": {
"value": {
- "uri": "/patch_test"
+ "uri": "/patch_uri_test"
},
"key": "/apisix/routes/1"
},
@@ -1054,7 +1207,7 @@ passed
-=== TEST 30: patch route(whole)
+=== TEST 34: patch route(whole)
--- config
location /t {
content_by_lua_block {
@@ -1106,7 +1259,7 @@ passed
-=== TEST 31: multiple hosts
+=== TEST 35: multiple hosts
--- config
location /t {
content_by_lua_block {
@@ -1146,7 +1299,7 @@ passed
-=== TEST 32: enable hosts and host together
+=== TEST 36: enable hosts and host together
--- config
location /t {
content_by_lua_block {
@@ -1181,7 +1334,7 @@ GET /t
-=== TEST 33: multiple remote_addrs
+=== TEST 37: multiple remote_addrs
--- config
location /t {
content_by_lua_block {
@@ -1221,7 +1374,7 @@ passed
-=== TEST 34: multiple vars
+=== TEST 38: multiple vars
--- config
location /t {
content_by_lua_block {
@@ -1261,7 +1414,7 @@ passed
-=== TEST 35: filter function
+=== TEST 39: filter function
--- config
location /t {
content_by_lua_block {
@@ -1300,7 +1453,7 @@ passed
-=== TEST 36: filter function (invalid)
+=== TEST 40: filter function (invalid)
--- config
location /t {
content_by_lua_block {
@@ -1333,7 +1486,7 @@ GET /t
-=== TEST 37: Support for multiple URIs
+=== TEST 41: Support for multiple URIs
--- config
location /t {
content_by_lua_block {
@@ -1364,7 +1517,7 @@ passed
-=== TEST 38: set route with ttl
+=== TEST 42: set route with ttl
--- config
location /t {
content_by_lua_block {
@@ -1428,7 +1581,7 @@ message: Key not found
-=== TEST 39: post route with ttl
+=== TEST 43: post route with ttl
--- config
location /t {
content_by_lua_block {
@@ -1478,7 +1631,7 @@ message: Key not found
-=== TEST 40: invalid argument: ttl
+=== TEST 44: invalid argument: ttl
--- config
location /t {
content_by_lua_block {
@@ -1515,7 +1668,7 @@ GET /t
-=== TEST 41: set route(id: 1, check priority)
+=== TEST 45: set route(id: 1, check priority)
--- config
location /t {
content_by_lua_block {
@@ -1557,7 +1710,7 @@ passed
-=== TEST 42: set route(id: 1 + priority: 0)
+=== TEST 46: set route(id: 1 + priority: 0)
--- config
location /t {
content_by_lua_block {
@@ -1600,7 +1753,7 @@ passed
-=== TEST 43: set route(id: 1) and upstream(type:chash, default hash_on: vars, missing key)
+=== TEST 47: set route(id: 1) and upstream(type:chash, default hash_on: vars, missing key)
--- config
location /t {
content_by_lua_block {
@@ -1632,7 +1785,7 @@ GET /t
-=== TEST 44: set route(id: 1) and upstream(type:chash, hash_on: header, missing key)
+=== TEST 48: set route(id: 1) and upstream(type:chash, hash_on: header, missing key)
--- config
location /t {
content_by_lua_block {
@@ -1665,7 +1818,7 @@ GET /t
-=== TEST 45: set route(id: 1) and upstream(type:chash, hash_on: cookie, missing key)
+=== TEST 49: set route(id: 1) and upstream(type:chash, hash_on: cookie, missing key)
--- config
location /t {
content_by_lua_block {
@@ -1698,7 +1851,7 @@ GET /t
-=== TEST 46: set route(id: 1) and upstream(type:chash, hash_on: consumer, missing key is ok)
+=== TEST 50: set route(id: 1) and upstream(type:chash, hash_on: consumer, missing key is ok)
--- config
location /t {
content_by_lua_block {
@@ -1727,3 +1880,270 @@ GET /t
passed
--- no_error_log
[error]
+
+
+
+=== TEST 51: set route(id: 1 + name: test name)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "methods": ["GET"],
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "name": "test name",
+ "uri": "/index.html"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "name": "test name"
+ },
+ "key": "/apisix/routes/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 52: string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/index.html"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 53: string id(delete)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123',
+ ngx.HTTP_DELETE
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 54: invalid string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/*invalid',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/index.html"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- no_error_log
+[error]
+
+
+
+=== TEST 55: Verify Response Content-Type=application/json
+--- config
+ location /t {
+ content_by_lua_block {
+ local http = require("resty.http")
+ local httpc = http.new()
+ httpc:set_timeout(500)
+ httpc:connect(ngx.var.server_addr, ngx.var.server_port)
+ local res, err = httpc:request(
+ {
+ path = '/apisix/admin/routes/1?ttl=1',
+ method = "GET",
+ }
+ )
+
+ ngx.header["Content-Type"] = res.headers["Content-Type"]
+ ngx.status = 200
+ ngx.say("passed")
+ }
+ }
+--- request
+GET /t
+--- response_headers
+Content-Type: application/json
+
+
+
+=== TEST 56: set route with size 36k (temporary file to store request body)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+
+ local core = require("apisix.core")
+ local s = string.rep("a", 1024 * 35)
+ local req_body = [[{
+ "upstream": {
+ "nodes": {
+ "]] .. s .. [[": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/index.html"
+ }]]
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT, req_body)
+
+ if code >= 300 then
+ ngx.status = code
+ end
+
+ ngx.say("req size: ", #req_body)
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+req size: 36066
+passed
+--- error_log
+a client request body is buffered to a temporary file
+
+
+
+=== TEST 57: route size more than 1.5 MiB
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
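+ -- a 1.6 MiB body exceeds the admin API's 1572864-byte (1.5 MiB) limit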
+ local s = string.rep("a", 1024 * 1024 * 1.6)
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "]] .. s .. [[",
+ "uri": "/index.html"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid request body: request size 1678025 is greater than the maximum size 1572864 allowed"}
+--- error_log
+failed to read request body: request size 1678025 is greater than the maximum size 1572864 allowed
+
+
+
+=== TEST 58: uri + plugins + script failed
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ },
+ "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M",
+ "uri": "/index.html"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.say(message)
+ return
+ end
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body_like
+{"error_msg":"invalid configuration: value wasn't supposed to match schema"}
+--- no_error_log
+[error]
diff --git a/t/admin/schema.t b/t/admin/schema.t
index 54ef58ee7ee7..d98b491d79a5 100644
--- a/t/admin/schema.t
+++ b/t/admin/schema.t
@@ -93,9 +93,23 @@ location /t {
sni = {
type = "string",
pattern = [[^\*?[0-9a-zA-Z-.]+$]],
- }
+ },
+ snis = {
+ type = "array",
+ items = {
+ type = "string",
+ pattern = [[^\*?[0-9a-zA-Z-.]+$]],
+ }
+ },
+ exptime = {
+ type = "integer",
+ minimum = 1588262400, -- 2020/5/1 0:0:0
+ },
+ },
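+ -- a valid SSL object needs key/cert plus either a single "sni"
+ -- or an array of "snis"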
+ oneOf = {
+ {required = {"sni", "key", "cert"}},
+ {required = {"snis", "key", "cert"}}
},
- required = {"sni", "key", "cert"},
additionalProperties = false,
}
)
@@ -117,7 +131,7 @@ passed
--- request
GET /apisix/admin/schema/plugins/limit-count
--- response_body eval
-qr/"required":\["count","time_window","key","rejected_code"]/
+qr/"required":\["count","time_window","key"\]/
--- no_error_log
[error]
diff --git a/t/admin/services-array-nodes.t b/t/admin/services-array-nodes.t
new file mode 100644
index 000000000000..7ca2c6cb5f8a
--- /dev/null
+++ b/t/admin/services-array-nodes.t
@@ -0,0 +1,115 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+log_level("info");
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: set service(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ },
+ "key": "/apisix/services/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 2: get service(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_GET,
+ nil,
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ },
+ "key": "/apisix/services/1"
+ },
+ "action": "get"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
diff --git a/t/admin/services-string-id.t b/t/admin/services-string-id.t
new file mode 100644
index 000000000000..63ffce13c482
--- /dev/null
+++ b/t/admin/services-string-id.t
@@ -0,0 +1,884 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+log_level("info");
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: set service(id: 5eeb3dc90f747328b2930b0b)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ },
+ "key": "/apisix/services/5eeb3dc90f747328b2930b0b"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 2: get service(id: 5eeb3dc90f747328b2930b0b)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_GET,
+ nil,
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new service"
+ },
+ "key": "/apisix/services/5eeb3dc90f747328b2930b0b"
+ },
+ "action": "get"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 3: delete service(id: 5eeb3dc90f747328b2930b0b)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+ ngx.say("[delete] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[delete] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 4: delete service(id: not_found)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code = t('/apisix/admin/services/not_found',
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+
+ ngx.say("[delete] code: ", code)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[delete] code: 404
+--- no_error_log
+[error]
+
+
+
+=== TEST 5: post service + delete
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/services',
+ ngx.HTTP_POST,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }
+ },
+ "action": "create"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.say(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+
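+ -- extract the auto-generated service id from the etcd key in the response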
+ local id = string.sub(res.node.key, #"/apisix/services/" + 1)
+ code, message = t('/apisix/admin/services/' .. id,
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+ ngx.say("[delete] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[push] code: 200 message: passed
+[delete] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 6: uri + upstream
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }
+ },
+ "action": "set"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.say(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[push] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 7: uri + plugins
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }
+ }
+ },
+ "action": "set"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.say(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[push] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 8: invalid empty plugins (todo)
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {}
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.print(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- SKIP
+
+
+
+=== TEST 9: invalid service id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/*invalid_id$',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "limit-count": {
+ "count": 2,
+ "time_window": 60,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ }
+ }]]
+ )
+
+ ngx.exit(code)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- no_error_log
+[error]
+
+
+
+=== TEST 10: invalid id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "3",
+ "plugins": {}
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"wrong service id"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: id in the rule
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "5eeb3dc90f747328b2930b0b",
+ "plugins": {}
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "plugins": {}
+ },
+ "key": "/apisix/services/5eeb3dc90f747328b2930b0b"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 12: integer id less than 1
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": -100,
+ "plugins": {}
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 13: invalid service id: contains symbols value
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "*invalid_id$",
+ "plugins": {}
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 14: no additional properties is valid
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "5eeb3dc90f747328b2930b0b",
+ "invalid_property": "/index.html"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: additional properties forbidden, found invalid_property"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 15: invalid upstream_id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "5eeb3dc90f747328b2930b0b",
+ "upstream_id": "invalid$"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"upstream_id\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 16: not exist upstream_id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_PUT,
+ [[{
+ "id": "5eeb3dc90f747328b2930b0b",
+ "upstream_id": "9999999999"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"failed to fetch upstream info by upstream id [9999999999], response code: 404"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 17: wrong service id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_POST,
+ [[{
+ "plugins": {}
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"wrong service id, do not need it"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 18: wrong service id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services',
+ ngx.HTTP_POST,
+ [[{
+ "id": "5eeb3dc90f747328b2930b0b",
+ "plugins": {}
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"wrong service id, do not need it"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 19: patch service(whole)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PATCH,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 20 service"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 20 service"
+ },
+ "key": "/apisix/services/5eeb3dc90f747328b2930b0b"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 20: patch service(new desc)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PATCH,
+ [[{
+ "desc": "new 19 service"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 19 service"
+ },
+ "key": "/apisix/services/5eeb3dc90f747328b2930b0b"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 21: patch service(new nodes)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PATCH,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 4
+ },
+ "type": "roundrobin"
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1,
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 4
+ },
+ "type": "roundrobin"
+ }
+ }
+ }
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 22: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, default hash_on: vars, missing key)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "chash"
+ },
+ "desc": "new service"
+ }]])
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"missing key"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 23: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: header, missing key)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "chash",
+ "hash_on": "header"
+ },
+ "desc": "new service"
+ }]])
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"missing key"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 24: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: cookie, missing key)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "chash",
+ "hash_on": "cookie"
+ },
+ "desc": "new service"
+ }]])
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"missing key"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 25: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: consumer, missing key is ok)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "chash",
+ "hash_on": "consumer"
+ },
+ "desc": "new service"
+ }]])
+
+ ngx.status = code
+ ngx.say(code .. " " .. body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+200 passed
+--- no_error_log
+[error]
diff --git a/t/admin/services.t b/t/admin/services.t
index 97d0aeac6f19..17c1b28caf55 100644
--- a/t/admin/services.t
+++ b/t/admin/services.t
@@ -353,7 +353,7 @@ GET /t
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/services/invalid_id',
+ local code, body = t('/apisix/admin/services/invalid_id$',
ngx.HTTP_PUT,
[[{
"plugins": {
@@ -475,7 +475,7 @@ GET /t
local code, body = t('/apisix/admin/services',
ngx.HTTP_PUT,
[[{
- "id": "invalid_id",
+ "id": "invalid_id$",
"plugins": {}
}]]
)
@@ -530,7 +530,7 @@ GET /t
ngx.HTTP_PUT,
[[{
"id": 1,
- "upstream_id": "invalid"
+ "upstream_id": "invalid$"
}]]
)
@@ -633,7 +633,7 @@ GET /t
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/services/1/',
+ local code, body = t('/apisix/admin/services/1',
ngx.HTTP_PATCH,
[[{
"upstream": {
@@ -679,9 +679,11 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/services/1/desc',
+ local code, body = t('/apisix/admin/services/1',
ngx.HTTP_PATCH,
- '"new 19 service"',
+ [[{
+ "desc": "new 19 service"
+ }]],
[[{
"node": {
"value": {
@@ -713,6 +715,135 @@ passed
=== TEST 21: patch service(new nodes)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_PATCH,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 4
+ },
+ "type": "roundrobin"
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1,
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 4
+ },
+ "type": "roundrobin"
+ }
+ }
+ }
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 22: patch service(whole - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1/',
+ ngx.HTTP_PATCH,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 22 service"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 22 service"
+ },
+ "key": "/apisix/services/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 23: patch service(new desc - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
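+            -- with a sub path, PATCH replaces just that attribute and the body is the bare new value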
+ local code, body = t('/apisix/admin/services/1/desc',
+ ngx.HTTP_PATCH,
+ '"new 23 service"',
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "desc": "new 23 service"
+ },
+ "key": "/apisix/services/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 24: patch service(new nodes - sub path)
--- config
location /t {
content_by_lua_block {
@@ -721,8 +852,8 @@ passed
ngx.HTTP_PATCH,
[[{
"nodes": {
- "127.0.0.1:8081": 3,
- "127.0.0.1:8082": 4
+ "127.0.0.2:8081": 3,
+ "127.0.0.3:8082": 4
},
"type": "roundrobin"
}]],
@@ -731,8 +862,8 @@ passed
"value": {
"upstream": {
"nodes": {
- "127.0.0.1:8081": 3,
- "127.0.0.1:8082": 4
+ "127.0.0.2:8081": 3,
+ "127.0.0.3:8082": 4
},
"type": "roundrobin"
}
@@ -754,7 +885,7 @@ passed
-=== TEST 22: set service(id: 1) and upstream(type:chash, default hash_on: vars, missing key)
+=== TEST 25: set service(id: 1) and upstream(type:chash, default hash_on: vars, missing key)
--- config
location /t {
content_by_lua_block {
@@ -785,7 +916,7 @@ GET /t
-=== TEST 23: set service(id: 1) and upstream(type:chash, hash_on: header, missing key)
+=== TEST 26: set service(id: 1) and upstream(type:chash, hash_on: header, missing key)
--- config
location /t {
content_by_lua_block {
@@ -817,7 +948,7 @@ GET /t
-=== TEST 24: set service(id: 1) and upstream(type:chash, hash_on: cookie, missing key)
+=== TEST 27: set service(id: 1) and upstream(type:chash, hash_on: cookie, missing key)
--- config
location /t {
content_by_lua_block {
@@ -849,7 +980,7 @@ GET /t
-=== TEST 25: set service(id: 1) and upstream(type:chash, hash_on: consumer, missing key is ok)
+=== TEST 28: set service(id: 1) and upstream(type:chash, hash_on: consumer, missing key is ok)
--- config
location /t {
content_by_lua_block {
@@ -877,3 +1008,80 @@ GET /t
200 passed
--- no_error_log
[error]
+
+
+
+=== TEST 29: set service(id: 1 + name: test service name)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "name": "test service name"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ },
+ "name": "test service name"
+ },
+ "key": "/apisix/services/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 30: invalid string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/*invalid',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- no_error_log
+[error]
diff --git a/t/admin/ssl.t b/t/admin/ssl.t
index 15bfb0ae4301..e93ca1971174 100644
--- a/t/admin/ssl.t
+++ b/t/admin/ssl.t
@@ -228,7 +228,7 @@ GET /t
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: property \"cert\" is required"}
+{"error_msg":"invalid configuration: value should match only one schema, but matches none"}
--- no_error_log
[error]
@@ -269,3 +269,175 @@ GET /t
passed
--- no_error_log
[error]
+
+
+
+=== TEST 8: store sni in `snis`
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("conf/cert/apisix.crt")
+ local ssl_key = t.read_file("conf/cert/apisix.key")
+ local data = {
+ cert = ssl_cert, key = ssl_key,
+ snis = {"*.foo.com", "bar.com"},
+ }
+
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "node": {
+ "value": {
+ "snis": ["*.foo.com", "bar.com"]
+ },
+ "key": "/apisix/ssl/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 9: store exptime
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("conf/cert/apisix.crt")
+ local ssl_key = t.read_file("conf/cert/apisix.key")
+ local data = {
+ cert = ssl_cert, key = ssl_key,
+ sni = "bar.com",
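+                -- 1588262400 is 2020-05-01 00:00 (UTC+8); one year later is 1619798400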
+ exptime = 1588262400 + 60 * 60 * 24 * 365,
+ }
+
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "node": {
+ "value": {
+ "sni": "bar.com",
+ "exptime": 1619798400
+ },
+ "key": "/apisix/ssl/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 10: string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("conf/cert/apisix.crt")
+ local ssl_key = t.read_file("conf/cert/apisix.key")
+ local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"}
+
+ local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ core.json.encode(data)
+ )
+            if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: string id(delete)
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("conf/cert/apisix.crt")
+ local ssl_key = t.read_file("conf/cert/apisix.key")
+ local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"}
+
+ local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123',
+ ngx.HTTP_DELETE
+ )
+            if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 12: invalid id
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("conf/cert/apisix.crt")
+ local ssl_key = t.read_file("conf/cert/apisix.key")
+ local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"}
+
+ local code, body = t.test('/apisix/admin/ssl/*invalid',
+ ngx.HTTP_PUT,
+ core.json.encode(data)
+ )
+            if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- no_error_log
+[error]
diff --git a/t/admin/stream-routes-disable.t b/t/admin/stream-routes-disable.t
index 8f927670dcc2..c363722336bb 100644
--- a/t/admin/stream-routes-disable.t
+++ b/t/admin/stream-routes-disable.t
@@ -36,7 +36,6 @@ sub read_file($) {
my $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/admin_key:/disable_admin_key:/;
add_block_preprocessor(sub {
diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t
index 24b5e5ef0331..5bd36ff33613 100644
--- a/t/admin/stream-routes.t
+++ b/t/admin/stream-routes.t
@@ -297,3 +297,89 @@ GET /t
[delete] code: 200 message: passed
--- no_error_log
[error]
+
+
+
+=== TEST 8: string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "remote_addr": "127.0.0.1",
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 9: string id(delete)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123',
+ ngx.HTTP_DELETE
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 10: invalid string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/stream_routes/*invalid',
+ ngx.HTTP_PUT,
+ [[{
+ "remote_addr": "127.0.0.1",
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- no_error_log
+[error]
diff --git a/t/admin/token.t b/t/admin/token.t
index 80cc4417fee0..e3b8769f63a5 100644
--- a/t/admin/token.t
+++ b/t/admin/token.t
@@ -36,7 +36,6 @@ sub read_file($) {
my $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
add_block_preprocessor(sub {
my ($block) = @_;
diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t
new file mode 100644
index 000000000000..9f0c5b8d9978
--- /dev/null
+++ b/t/admin/upstream-array-nodes.t
@@ -0,0 +1,409 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+log_level("info");
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: set upstream(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin",
+ "desc": "new upstream"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin",
+ "desc": "new upstream"
+ },
+ "key": "/apisix/upstreams/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 2: get upstream(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_GET,
+ nil,
+ [[{
+ "node": {
+ "value": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin",
+ "desc": "new upstream"
+ },
+ "key": "/apisix/upstreams/1"
+ },
+ "action": "get"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 3: delete upstream(id: 1)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, message = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+ ngx.say("[delete] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[delete] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 4: delete upstream(id: not_found)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code = t('/apisix/admin/upstreams/not_found',
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+
+ ngx.say("[delete] code: ", code)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[delete] code: 404
+--- no_error_log
+[error]
+
+
+
+=== TEST 5: push upstream + delete
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/upstreams',
+ ngx.HTTP_POST,
+ [[{
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin"
+ }
+ },
+ "action": "create"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.say(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+
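+            -- POST generates the id; recover it by stripping "/apisix/upstreams/" from the returned key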
+ local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1)
+ code, message = t('/apisix/admin/upstreams/' .. id,
+ ngx.HTTP_DELETE,
+ nil,
+ [[{
+ "action": "delete"
+ }]]
+ )
+ ngx.say("[delete] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- response_body
+[push] code: 200 message: passed
+[delete] code: 200 message: passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 6: empty nodes
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+ local code, message, res = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": [],
+ "type": "roundrobin"
+ }]]
+ )
+
+ if code ~= 200 then
+ ngx.status = code
+ ngx.print(message)
+ return
+ end
+
+ ngx.say("[push] code: ", code, " message: ", message)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
+
+
+
+=== TEST 7: additional properties are forbidden
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams',
+ ngx.HTTP_PUT,
+ [[{
+ "id": 1,
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "roundrobin",
+ "invalid_property": "/index.html"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: additional properties forbidden, found invalid_property"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 8: invalid weight of node (wrong type)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams',
+ ngx.HTTP_PUT,
+ [[{
+ "id": 1,
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": "1"
+ }],
+ "type": "chash"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 9: invalid weight of node (negative)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams',
+ ngx.HTTP_PUT,
+ [[{
+ "id": 1,
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 8080,
+ "weight": -100
+ }],
+ "type": "chash"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 10: invalid port of node
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams',
+ ngx.HTTP_PUT,
+ [[{
+ "id": 1,
+ "nodes": [{
+ "host": "127.0.0.1",
+ "port": 0,
+ "weight": 1
+ }],
+ "type": "chash"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: invalid host of node
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams',
+ ngx.HTTP_PUT,
+ [[{
+ "id": 1,
+ "nodes": [{
+ "host": "127.#.%.1",
+ "port": 8080,
+ "weight": 1
+ }],
+ "type": "chash"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
diff --git a/t/admin/upstream.t b/t/admin/upstream.t
index 67359147bb16..02cd5e07af6d 100644
--- a/t/admin/upstream.t
+++ b/t/admin/upstream.t
@@ -235,7 +235,7 @@ GET /t
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: property \"nodes\" validation failed: expect object to have at least 1 properties"}
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
@@ -244,7 +244,7 @@ GET /t
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/upstreams/invalid_id',
+ local code, body = t('/apisix/admin/upstreams/invalid_id$',
ngx.HTTP_PUT,
[[{
"nodes": {
@@ -374,7 +374,7 @@ GET /t
local code, body = t('/apisix/admin/upstreams',
ngx.HTTP_PUT,
[[{
- "id": "invalid_id",
+ "id": "invalid_id$",
"nodes": {
"127.0.0.1:8080": 1
},
@@ -523,7 +523,7 @@ GET /t
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate 127.0.0.1:8080 (matching \".*\"): wrong type: expected integer, got string"}
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
--- no_error_log
[error]
@@ -553,7 +553,7 @@ GET /t
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate 127.0.0.1:8080 (matching \".*\"): expected -100 to be greater than 0"}
+{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"}
--- no_error_log
[error]
@@ -652,7 +652,7 @@ GET /t
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/upstreams/1/',
+ local code, body = t('/apisix/admin/upstreams/1',
ngx.HTTP_PATCH,
[[{
"nodes": {
@@ -694,9 +694,11 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/upstreams/1/desc',
+ local code, body = t('/apisix/admin/upstreams/1',
ngx.HTTP_PATCH,
- '"new 21 upstream"',
+ [[{
+ "desc": "new 21 upstream"
+ }]],
[[{
"node": {
"value": {
@@ -730,16 +732,19 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/upstreams/1/nodes',
+ local code, body = t('/apisix/admin/upstreams/1',
ngx.HTTP_PATCH,
[[{
- "127.0.0.1:8081": 3,
- "127.0.0.1:8082": 4
+ "nodes": {
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 4
+ }
}]],
[[{
"node": {
"value": {
"nodes": {
+ "127.0.0.1:8080": 1,
"127.0.0.1:8081": 3,
"127.0.0.1:8082": 4
},
@@ -768,18 +773,20 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/upstreams/1/nodes',
+ local code, body = t('/apisix/admin/upstreams/1',
ngx.HTTP_PATCH,
[[{
- "127.0.0.1:8081": 0,
- "127.0.0.1:8082": 4
+ "nodes": {
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 0
+ }
}]],
[[{
"node": {
"value": {
"nodes": {
- "127.0.0.1:8081": 0,
- "127.0.0.1:8082": 4
+ "127.0.0.1:8081": 3,
+ "127.0.0.1:8082": 0
},
"type": "roundrobin",
"desc": "new 21 upstream"
@@ -801,7 +808,161 @@ passed
-=== TEST 24: set upstream(type: chash)
+=== TEST 24: patch upstream(whole - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1/',
+ ngx.HTTP_PATCH,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin",
+ "desc": "new upstream 24"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin",
+ "desc": "new upstream 24"
+ },
+ "key": "/apisix/upstreams/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 25: patch upstream(new desc - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1/desc',
+ ngx.HTTP_PATCH,
+ '"new 25 upstream"',
+ [[{
+ "node": {
+ "value": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin",
+ "desc": "new 25 upstream"
+ },
+ "key": "/apisix/upstreams/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 26: patch upstream(new nodes - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1/nodes',
+ ngx.HTTP_PATCH,
+ [[{
+ "127.0.0.6:8081": 3,
+ "127.0.0.7:8082": 4
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": {
+ "127.0.0.6:8081": 3,
+ "127.0.0.7:8082": 4
+ },
+ "type": "roundrobin",
+ "desc": "new 25 upstream"
+ }
+ }
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 27: patch upstream(weight is 0 - sub path)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1/nodes',
+ ngx.HTTP_PATCH,
+ [[{
+ "127.0.0.7:8081": 0,
+ "127.0.0.8:8082": 4
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": {
+ "127.0.0.7:8081": 0,
+ "127.0.0.8:8082": 4
+ },
+ "type": "roundrobin",
+ "desc": "new 25 upstream"
+ }
+ }
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 28: set upstream(type: chash)
--- config
location /t {
content_by_lua_block {
@@ -830,7 +991,7 @@ passed
-=== TEST 25: wrong upstream key, hash_on default vars
+=== TEST 29: wrong upstream key, hash_on default vars
--- config
location /t {
content_by_lua_block {
@@ -862,7 +1023,7 @@ GET /t
-=== TEST 26: set upstream with args(type: chash)
+=== TEST 30: set upstream with args(type: chash)
--- config
location /t {
content_by_lua_block {
@@ -892,7 +1053,7 @@ passed
-=== TEST 27: set upstream(type: chash)
+=== TEST 31: set upstream(type: chash)
--- config
location /t {
content_by_lua_block {
@@ -921,7 +1082,7 @@ passed
-=== TEST 28: wrong upstream key, hash_on default vars
+=== TEST 32: wrong upstream key, hash_on default vars
--- config
location /t {
content_by_lua_block {
@@ -953,7 +1114,7 @@ GET /t
-=== TEST 29: set upstream with args(type: chash)
+=== TEST 33: set upstream with args(type: chash)
--- config
location /t {
content_by_lua_block {
@@ -983,7 +1144,7 @@ passed
-=== TEST 30: type chash, hash_on: vars
+=== TEST 34: type chash, hash_on: vars
--- config
location /t {
content_by_lua_block {
@@ -1014,7 +1175,7 @@ passed
-=== TEST 31: type chash, hash_on: header, header name with '_', underscores_in_headers on
+=== TEST 35: type chash, hash_on: header, header name with '_', underscores_in_headers on
--- config
location /t {
content_by_lua_block {
@@ -1045,7 +1206,7 @@ passed
-=== TEST 32: type chash, hash_on: header, header name with invalid character
+=== TEST 36: type chash, hash_on: header, header name with invalid character
--- config
location /t {
content_by_lua_block {
@@ -1077,7 +1238,7 @@ GET /t
-=== TEST 33: type chash, hash_on: cookie
+=== TEST 37: type chash, hash_on: cookie
--- config
location /t {
content_by_lua_block {
@@ -1108,7 +1269,7 @@ passed
-=== TEST 34: type chash, hash_on: cookie, cookie name with invalid character
+=== TEST 38: type chash, hash_on: cookie, cookie name with invalid character
--- config
location /t {
content_by_lua_block {
@@ -1140,7 +1301,7 @@ GET /t
-=== TEST 35: type chash, hash_on: consumer, don't need upstream key
+=== TEST 39: type chash, hash_on: consumer, do not need upstream key
--- config
location /t {
content_by_lua_block {
@@ -1170,7 +1331,7 @@ passed
-=== TEST 36: type chash, hash_on: consumer, set key but invalid
+=== TEST 40: type chash, hash_on: consumer, set key but invalid
--- config
location /t {
content_by_lua_block {
@@ -1201,7 +1362,7 @@ passed
-=== TEST 37: type chash, invalid hash_on type
+=== TEST 41: type chash, invalid hash_on type
--- config
location /t {
content_by_lua_block {
@@ -1230,3 +1391,190 @@ GET /t
{"error_msg":"invalid configuration: property \"hash_on\" validation failed: matches non of the enum values"}
--- no_error_log
[error]
+
+
+
+=== TEST 42: set upstream(id: 1 + name: test upstream name)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin",
+ "name": "test upstream name"
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin",
+ "name": "test upstream name"
+ },
+ "key": "/apisix/upstreams/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 43: string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 44: string id(delete)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123',
+ ngx.HTTP_DELETE
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 45: invalid string id
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/*invalid',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 46: retries is 0
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1,
+ "127.0.0.1:8090": 1
+ },
+ "retries": 0,
+ "type": "roundrobin"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 47: retries is -1 (INVALID)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123',
+ ngx.HTTP_PUT,
+ [[{
+ "nodes": {
+ "127.0.0.1:8080": 1,
+ "127.0.0.1:8090": 1
+ },
+ "retries": -1,
+ "type": "roundrobin"
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"retries\" validation failed: expected -1 to be greater than 0"}
+--- no_error_log
+[error]
diff --git a/t/apisix.luacov b/t/apisix.luacov
index f9792d895309..0694c2bffa1a 100644
--- a/t/apisix.luacov
+++ b/t/apisix.luacov
@@ -28,6 +28,7 @@ return {
["apisix/plugins/prometheus/*"] = "plugins/prometheus",
["apisix/plugins/zipkin/*"] = "plugins/zipkin",
["apisix/utils/*"] = "utils",
+ ["apisix/discovery/*"] = "discovery",
-- can not enable both at http and stream, will fix it later.
-- ["apisix/stream/*"] = "stream",
diff --git a/t/certs/mtls_ca.crt b/t/certs/mtls_ca.crt
new file mode 100644
index 000000000000..b57e390849ca
--- /dev/null
+++ b/t/certs/mtls_ca.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjICCQDmBdlKmGaJITANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJj
+bjESMBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoM
+BGFwaTcxDDAKBgNVBAsMA29wczEWMBQGA1UEAwwNY2EuYXBpc2l4LmRldjAeFw0y
+MDA2MjAxMzEzNDFaFw0zMDA2MTgxMzEzNDFaMGcxCzAJBgNVBAYTAmNuMRIwEAYD
+VQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTENMAsGA1UECgwEYXBpNzEM
+MAoGA1UECwwDb3BzMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAun+Gq/bp7CcZ9i5ZdjuCvyZVXsiAaBELVi/Q
+QQtC90z5aQyWudTPB1Lcpk5HosbT73eHh03hFCRMFv6Miase1T59KJ4zGSFKoFEr
+j2cbNmWFJEhTGce1pn52zMzZrXERYhKBA0n4bwHK/IND0XeEZ2RQPtGnGBqj3vKL
+3px+mOzIeMy4VMSkIkL2jlgo5jN0IjQIsvHRSrhIWzFhr6qtIJhuh0oI6gs+/yvA
+vspGeVFtIg/1PY3bOgFfhJg08/Aw7vgMjmADypEbBabLaWOZ8RZ3Ci2is6cL/1wX
+Sr8OIIBXTmTGmXEuSsMsBgC7BFwEY4XEsGx8QQJsrh1dSf2t0QIDAQABMA0GCSqG
+SIb3DQEBBQUAA4IBAQCKC98wWieC66NHAYb9ICOwr+XTmoFABpFNaM4bPXMD4IUq
+BaMGfBh92e4ANz2bm1D3J0ZNH3TVC7OhF2ymi6wSMde/Ygkh5xu2HgTEX2QTDQVd
+J27jwEIe45VLdvuu33jvE/iNNQHI6J6zP45gs/FS+CwMoYRnNcC+428YUf9XMcgM
+UkeMOnnkhw1OUzmoACY705hAEAPFbb7KkQ109lgbh6cucMy7Nw/N1t6Pyuxlqteg
+d8Wy6VFYPRRK43dYoA9B0yvsZCERvxgR1IrDjo0B2wIDzM4eM6ldLfnr8pPnBFfS
+g/Pdo6VZsXeSv3o00lBEY/25Vqxn3sPBK4E7a+mX
+-----END CERTIFICATE-----
diff --git a/t/certs/mtls_client.crt b/t/certs/mtls_client.crt
new file mode 100644
index 000000000000..847a544ed391
--- /dev/null
+++ b/t/certs/mtls_client.crt
@@ -0,0 +1,69 @@
+Certificate:
+ Data:
+ Version: 1 (0x0)
+ Serial Number: 64207 (0xfacf)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=cn, ST=GuangDong, L=ZhuHai, O=api7, OU=ops, CN=ca.apisix.dev
+ Validity
+ Not Before: Jun 20 13:15:00 2020 GMT
+ Not After : Jul 8 13:15:00 2030 GMT
+ Subject: C=cn, ST=GuangDong, O=api7, L=ZhuHai, CN=client.apisix.dev
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:9f:28:8f:2e:88:41:ff:89:f6:62:91:29:d1:6b:
+ 7f:c4:d8:1e:28:85:55:91:c2:3a:3f:23:1c:83:11:
+ 6a:26:81:1e:2d:2e:4d:69:48:98:4f:ff:84:82:2d:
+ 6b:8c:41:31:56:4d:b4:aa:b7:52:05:63:2e:19:6d:
+ 54:87:1f:21:a8:34:f9:89:1a:b1:d1:24:21:84:fa:
+ c8:29:7f:39:f4:1a:35:78:95:74:0f:24:3d:24:e8:
+ 64:75:09:7d:8c:a3:54:d6:74:5a:92:27:f1:dc:e4:
+ 04:30:71:01:67:3d:fa:0b:03:0b:01:cb:8c:aa:ae:
+ 59:9f:f7:a6:40:53:2b:65:ff:b6:64:8d:fe:0f:ee:
+ 62:64:24:7b:4c:fd:68:12:47:4a:46:86:36:53:00:
+ 64:5f:e4:32:56:a0:ee:75:92:2d:e2:dc:92:3e:d7:
+ 99:8e:86:69:e7:0a:99:e4:b2:71:95:3d:f9:7d:da:
+ af:76:1f:3f:f8:bf:78:aa:13:e5:13:84:f6:11:a5:
+ c1:9b:9d:d7:73:32:f3:da:09:78:9a:be:0f:01:fe:
+ ed:8b:55:b9:f8:97:46:9d:6a:6a:90:19:ea:4e:02:
+ 30:ff:d7:1a:da:39:53:f6:5b:6d:96:d0:fc:ed:0d:
+ 72:78:ac:b7:be:71:aa:4d:4b:8a:06:b9:25:1f:90:
+ 81:0d
+ Exponent: 65537 (0x10001)
+ Signature Algorithm: sha256WithRSAEncryption
+ 72:a7:1f:15:21:ba:4f:e7:2f:64:a0:e5:40:7c:e0:ea:09:7b:
+ 95:cf:80:d0:6f:54:c2:8d:d1:cf:cd:00:f2:95:20:f9:e2:9e:
+ f5:1c:1b:f9:87:78:a7:b1:3f:31:34:b0:c8:1a:44:da:2c:ef:
+ 93:76:d7:df:44:5f:27:6a:51:cb:09:f2:32:f4:70:db:50:da:
+ 4e:49:41:75:e0:d2:7b:4d:0b:8b:6e:0a:02:0a:00:e9:ce:f3:
+ bf:72:e6:14:86:df:a7:b9:ef:09:80:a1:52:a7:69:b8:23:7a:
+ 3d:3d:cc:6d:64:91:7b:c0:9a:98:2a:a3:17:95:0a:ee:e1:ed:
+ f2:be:02:ea:cb:6e:c1:82:4d:a1:e8:03:9a:46:d6:d7:07:0f:
+ 12:50:7e:95:5c:6c:17:f0:40:34:81:5b:74:90:8e:24:6a:5f:
+ 8e:77:ff:4d:67:c3:a9:1b:39:e2:ca:62:b6:89:ca:c6:86:f1:
+ 95:36:2b:cf:96:a5:6e:89:0e:e6:dc:88:78:f0:7d:09:e9:53:
+ 65:35:e9:72:a2:be:1c:5e:b8:a6:2b:57:f2:0d:2f:4b:31:8f:
+ f7:d9:ad:a3:58:12:bb:c9:5b:38:79:96:5b:c8:74:d2:e6:79:
+ 23:e6:bd:be:74:25:42:2c:fa:50:ea:9f:53:28:6d:35:f3:0e:
+ 9b:82:15:70
+-----BEGIN CERTIFICATE-----
+MIIDOjCCAiICAwD6zzANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQGEwJjbjESMBAG
+A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoMBGFwaTcx
+DDAKBgNVBAsMA29wczEWMBQGA1UEAwwNY2EuYXBpc2l4LmRldjAeFw0yMDA2MjAx
+MzE1MDBaFw0zMDA3MDgxMzE1MDBaMF0xCzAJBgNVBAYTAmNuMRIwEAYDVQQIDAlH
+dWFuZ0RvbmcxDTALBgNVBAoMBGFwaTcxDzANBgNVBAcMBlpodUhhaTEaMBgGA1UE
+AwwRY2xpZW50LmFwaXNpeC5kZXYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCfKI8uiEH/ifZikSnRa3/E2B4ohVWRwjo/IxyDEWomgR4tLk1pSJhP/4SC
+LWuMQTFWTbSqt1IFYy4ZbVSHHyGoNPmJGrHRJCGE+sgpfzn0GjV4lXQPJD0k6GR1
+CX2Mo1TWdFqSJ/Hc5AQwcQFnPfoLAwsBy4yqrlmf96ZAUytl/7Zkjf4P7mJkJHtM
+/WgSR0pGhjZTAGRf5DJWoO51ki3i3JI+15mOhmnnCpnksnGVPfl92q92Hz/4v3iq
+E+UThPYRpcGbnddzMvPaCXiavg8B/u2LVbn4l0adamqQGepOAjD/1xraOVP2W22W
+0PztDXJ4rLe+capNS4oGuSUfkIENAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAHKn
+HxUhuk/nL2Sg5UB84OoJe5XPgNBvVMKN0c/NAPKVIPninvUcG/mHeKexPzE0sMga
+RNos75N2199EXydqUcsJ8jL0cNtQ2k5JQXXg0ntNC4tuCgIKAOnO879y5hSG36e5
+7wmAoVKnabgjej09zG1kkXvAmpgqoxeVCu7h7fK+AurLbsGCTaHoA5pG1tcHDxJQ
+fpVcbBfwQDSBW3SQjiRqX453/01nw6kbOeLKYraJysaG8ZU2K8+WpW6JDubciHjw
+fQnpU2U16XKivhxeuKYrV/INL0sxj/fZraNYErvJWzh5llvIdNLmeSPmvb50JUIs
++lDqn1MobTXzDpuCFXA=
+-----END CERTIFICATE-----
diff --git a/t/certs/mtls_client.key b/t/certs/mtls_client.key
new file mode 100644
index 000000000000..d939c62b159b
--- /dev/null
+++ b/t/certs/mtls_client.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAnyiPLohB/4n2YpEp0Wt/xNgeKIVVkcI6PyMcgxFqJoEeLS5N
+aUiYT/+Egi1rjEExVk20qrdSBWMuGW1Uhx8hqDT5iRqx0SQhhPrIKX859Bo1eJV0
+DyQ9JOhkdQl9jKNU1nRakifx3OQEMHEBZz36CwMLAcuMqq5Zn/emQFMrZf+2ZI3+
+D+5iZCR7TP1oEkdKRoY2UwBkX+QyVqDudZIt4tySPteZjoZp5wqZ5LJxlT35fdqv
+dh8/+L94qhPlE4T2EaXBm53XczLz2gl4mr4PAf7ti1W5+JdGnWpqkBnqTgIw/9ca
+2jlT9lttltD87Q1yeKy3vnGqTUuKBrklH5CBDQIDAQABAoIBAHDe5bPdQ9jCcW3z
+fpGax/DER5b6//UvpfkSoGy/E+Wcmdb2yEVLC2FoVwOuzF+Z+DA5SU/sVAmoDZBQ
+vapZxJeygejeeo5ULkVNSFhNdr8LOzJ54uW+EHK1MFDj2xq61jaEK5sNIvRA7Eui
+SJl8FXBrxwmN3gNJRBwzF770fImHUfZt0YU3rWKw5Qin7QnlUzW2KPUltnSEq/xB
+kIzyWpuj7iAm9wTjH9Vy06sWCmxj1lzTTXlanjPb1jOTaOhbQMpyaAzRgQN8PZiE
+YKCarzVj7BJr7/vZYpnQtQDY12UL5n33BEqMP0VNHVqv+ZO3bktfvlwBru5ZJ7Cf
+URLsSc0CgYEAyz7FzV7cZYgjfUFD67MIS1HtVk7SX0UiYCsrGy8zA19tkhe3XVpc
+CZSwkjzjdEk0zEwiNAtawrDlR1m2kverbhhCHqXUOHwEpujMBjeJCNUVEh3OABr8
+vf2WJ6D1IRh8FA5CYLZP7aZ41fcxAnvIPAEThemLQL3C4H5H5NG2WFsCgYEAyHhP
+onpS/Eo/OXKYFLR/mvjizRVSomz1lVVL+GWMUYQsmgsPyBJgyAOX3Pqt9catgxhM
+DbEr7EWTxth3YeVzamiJPNVK0HvCax9gQ0KkOmtbrfN54zBHOJ+ieYhsieZLMgjx
+iu7Ieo6LDGV39HkvekzutZpypiCpKlMaFlCFiLcCgYEAmAgRsEj4Nh665VPvuZzH
+ZIgZMAlwBgHR7/v6l7AbybcVYEXLTNJtrGEEH6/aOL8V9ogwwZuIvb/TEidCkfcf
+zg/pTcGf2My0MiJLk47xO6EgzNdso9mMG5ZYPraBBsuo7NupvWxCp7NyCiOJDqGH
+K5NmhjInjzsjTghIQRq5+qcCgYEAxnm/NjjvslL8F69p/I3cDJ2/RpaG0sMXvbrO
+VWaMryQyWGz9OfNgGIbeMu2Jj90dar6ChcfUmb8lGOi2AZl/VGmc/jqaMKFnElHl
+J5JyMFicUzPMiG8DBH+gB71W4Iy+BBKwugHBQP2hkytewQ++PtKuP+RjADEz6vCN
+0mv0WS8CgYBnbMRP8wIOLJPRMw/iL9BdMf606X4xbmNn9HWVp2mH9D3D51kDFvls
+7y2vEaYkFv3XoYgVN9ZHDUbM/YTUozKjcAcvz0syLQb8wRwKeo+XSmo09+360r18
+zRugoE7bPl39WdGWaW3td0qf1r9z3sE2iWUTJPRQ3DYpsLOYIgyKmw==
+-----END RSA PRIVATE KEY-----
diff --git a/t/certs/mtls_server.crt b/t/certs/mtls_server.crt
new file mode 100644
index 000000000000..14f48ef7ecaf
--- /dev/null
+++ b/t/certs/mtls_server.crt
@@ -0,0 +1,69 @@
+Certificate:
+ Data:
+ Version: 1 (0x0)
+ Serial Number: 64206 (0xface)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=cn, ST=GuangDong, L=ZhuHai, O=api7, OU=ops, CN=ca.apisix.dev
+ Validity
+ Not Before: Jun 20 13:14:34 2020 GMT
+ Not After : Jun 18 13:14:34 2030 GMT
+ Subject: C=cn, ST=GuangDong, O=api7, L=ZhuHai, CN=admin.apisix.dev
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:9b:45:2a:e1:c9:6e:a7:af:af:bd:46:5c:5e:5f:
+ 72:66:02:78:69:16:fd:f9:69:8e:47:68:0f:8d:35:
+ 92:c4:14:40:5c:cf:57:3d:41:ea:13:7b:f4:de:c8:
+ ab:e8:62:56:1e:60:61:f6:38:65:5f:30:b5:91:25:
+ 79:07:12:45:ce:24:31:86:1f:2c:a6:cb:1d:8b:4b:
+ 9e:5f:1f:c7:b6:f3:e8:98:ee:b3:70:c7:9e:5d:10:
+ ce:29:e4:22:68:69:9e:df:ae:f6:bb:11:e8:b8:f1:
+ 07:bf:2d:d5:57:f2:e4:07:8a:da:d2:7b:8a:53:d1:
+ b4:f4:42:19:9a:14:98:01:3e:23:27:3a:0f:ad:d0:
+ 1d:c5:31:9a:ee:ae:df:7f:fb:2e:34:0b:51:ca:b4:
+ 8c:59:ae:86:5f:95:69:2b:4a:c6:2d:a5:ae:04:46:
+ 7a:93:09:15:72:0a:78:ef:98:7d:00:b5:b4:b2:f2:
+ e2:a9:2e:04:fb:de:84:ad:da:8e:a3:31:53:3a:d5:
+ 91:cd:77:f5:b8:ea:eb:14:aa:d9:62:d1:12:79:87:
+ 08:27:6d:c1:b9:e3:7d:f1:07:52:3c:a3:34:6a:c1:
+ 96:cf:a2:84:cc:14:50:49:40:0b:38:3c:3b:1e:df:
+ 57:6f:f2:05:35:92:9b:4f:b1:21:0b:f7:62:3a:2d:
+ 83:c7
+ Exponent: 65537 (0x10001)
+ Signature Algorithm: sha256WithRSAEncryption
+ 7a:1c:a3:d8:d4:97:5d:91:d2:c8:31:c4:40:ef:f1:38:ac:5c:
+ b9:74:66:81:94:4f:71:02:38:49:5a:0d:7b:10:17:73:a5:96:
+ 3e:de:0e:a4:75:8c:1b:c7:51:f9:f6:eb:9d:f4:bd:4c:1c:92:
+ 41:d0:16:c6:73:c1:f9:7c:b6:71:7d:16:53:13:fa:70:90:c0:
+ 95:e3:a3:51:30:96:02:f2:32:32:fe:a9:d1:ef:c5:7e:04:58:
+ ca:20:ef:d0:43:8c:52:8d:52:3a:71:ed:0f:87:4e:8b:c6:28:
+ 51:56:13:fd:71:81:10:cc:2f:2c:aa:8d:6a:93:d7:52:34:08:
+ 23:7b:2b:a7:a4:3e:6b:8f:c3:af:59:b9:1c:b8:d8:6c:a3:88:
+ c7:bd:b5:e1:eb:6b:6a:f2:7d:a3:89:c6:b0:21:f8:1b:9a:dc:
+ bf:ef:d6:21:91:7f:65:99:4d:f4:49:24:ab:46:09:a0:c9:a1:
+ 64:14:f4:56:73:ce:1b:22:dd:b7:1f:58:0f:29:ae:6a:6e:41:
+ 6e:b4:5c:90:97:4e:59:4e:cf:e3:a1:89:d1:5a:65:a3:68:2f:
+ b9:97:82:6f:4c:21:cb:f6:9b:7d:fd:d8:07:70:14:cd:10:fb:
+ bf:03:70:fa:51:7c:56:4c:1b:a5:87:d3:1b:18:5c:22:87:6f:
+ 04:08:59:53
+-----BEGIN CERTIFICATE-----
+MIIDOTCCAiECAwD6zjANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQGEwJjbjESMBAG
+A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoMBGFwaTcx
+DDAKBgNVBAsMA29wczEWMBQGA1UEAwwNY2EuYXBpc2l4LmRldjAeFw0yMDA2MjAx
+MzE0MzRaFw0zMDA2MTgxMzE0MzRaMFwxCzAJBgNVBAYTAmNuMRIwEAYDVQQIDAlH
+dWFuZ0RvbmcxDTALBgNVBAoMBGFwaTcxDzANBgNVBAcMBlpodUhhaTEZMBcGA1UE
+AwwQYWRtaW4uYXBpc2l4LmRldjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAJtFKuHJbqevr71GXF5fcmYCeGkW/flpjkdoD401ksQUQFzPVz1B6hN79N7I
+q+hiVh5gYfY4ZV8wtZEleQcSRc4kMYYfLKbLHYtLnl8fx7bz6Jjus3DHnl0Qzink
+Imhpnt+u9rsR6LjxB78t1Vfy5AeK2tJ7ilPRtPRCGZoUmAE+Iyc6D63QHcUxmu6u
+33/7LjQLUcq0jFmuhl+VaStKxi2lrgRGepMJFXIKeO+YfQC1tLLy4qkuBPvehK3a
+jqMxUzrVkc139bjq6xSq2WLREnmHCCdtwbnjffEHUjyjNGrBls+ihMwUUElACzg8
+Ox7fV2/yBTWSm0+xIQv3Yjotg8cCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAehyj
+2NSXXZHSyDHEQO/xOKxcuXRmgZRPcQI4SVoNexAXc6WWPt4OpHWMG8dR+fbrnfS9
+TBySQdAWxnPB+Xy2cX0WUxP6cJDAleOjUTCWAvIyMv6p0e/FfgRYyiDv0EOMUo1S
+OnHtD4dOi8YoUVYT/XGBEMwvLKqNapPXUjQII3srp6Q+a4/Dr1m5HLjYbKOIx721
+4etravJ9o4nGsCH4G5rcv+/WIZF/ZZlN9Ekkq0YJoMmhZBT0VnPOGyLdtx9YDymu
+am5BbrRckJdOWU7P46GJ0Vplo2gvuZeCb0why/abff3YB3AUzRD7vwNw+lF8Vkwb
+pYfTGxhcIodvBAhZUw==
+-----END CERTIFICATE-----
diff --git a/t/certs/mtls_server.key b/t/certs/mtls_server.key
new file mode 100644
index 000000000000..5f2c75b98873
--- /dev/null
+++ b/t/certs/mtls_server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAm0Uq4clup6+vvUZcXl9yZgJ4aRb9+WmOR2gPjTWSxBRAXM9X
+PUHqE3v03sir6GJWHmBh9jhlXzC1kSV5BxJFziQxhh8spssdi0ueXx/HtvPomO6z
+cMeeXRDOKeQiaGme3672uxHouPEHvy3VV/LkB4ra0nuKU9G09EIZmhSYAT4jJzoP
+rdAdxTGa7q7ff/suNAtRyrSMWa6GX5VpK0rGLaWuBEZ6kwkVcgp475h9ALW0svLi
+qS4E+96ErdqOozFTOtWRzXf1uOrrFKrZYtESeYcIJ23BueN98QdSPKM0asGWz6KE
+zBRQSUALODw7Ht9Xb/IFNZKbT7EhC/diOi2DxwIDAQABAoIBAC3NJW0dAissw+ZN
+Twn3lcNJj0NQqPJdlL6zj4LT/ssgPiwibVWAkA/XTNA62ZrfBxBG1h7PW/fMYoLC
+TwUq+rRoMMOjhoRc/gYM9FaTBVKOeFpEb2IhQDGrt2TcCtpJ7beF4PolukRztRlL
+59bdqy4eY5YbIx6+iWZT6UFuObiDqi7i4SLWEgK+/P4Uk8/SmhVqIWcj1m3SPK6I
+YbzsgXiT64fNd7/O06ISKia1UzvUCtH7tbxWxCvsqw+PqQT+YuEmNY1pOQGYp0dU
+4ndzvrP0Ajuu3xH7aYP/Kilkz69PPMLygwNey4HRIAuUqw/HBfTR0/ccRSuhrYxb
+9QaOP0ECgYEAyuqLo/tjWrFiJnDbhK3z2qcydktFS58da2QitSRYlQ6AQXjZ3+v7
+buL1QV59aXzIGTZz3gjO+omdpfIagBI47YnWIUtj+NylNROWv+aZXQwgC7ayQWTg
+eBu8L2YXBvAR9TgHhqj3Fl4YcuipVE3XFVjjvLjrbE1nssMmaJqi95kCgYEAw+O7
+Zdj/NedbI2GtnZv31YlLmrMdtmeAmU2x8eC5v30Kx3GCU9DdZzImsaYxxjfSL+6c
+eP/DF8JHWIGo9GQPcMSijHsaNMIwgv6+5rx+Lp/zsjwRApJsVQeoff2ZdWjnFsi3
+rRHE8QZfWMqcnOsr4io7xfVd3t4tV22BBrnt8l8CgYEAncU3xcxUN9PryI+/Xq4S
+CFQvvCJSQrX4neNByS31Yg/gUQex/5Tv7guxPZ5GTJqkylW4SU73/3y4gqp3SFTi
+xm6Be2mu1XRZT6dnctXNMLeYwwLOHmJc1YZbD0+FX/ORQuTJlT4Sv+VxhQa5gb70
+GLkAeWAeTBrzId7yIir5wyECgYAw2iJqC+vZrZD1Ce8aV0M/ZbYNJo5KZxWTJeUy
+xTCNqMl/Y7d036nXipJLy36uSE2K1p7/LgmhazoPwIY6LJoNLXy8PBcVATjH8m/5
+axis2AcWdBRp58pMilRi11PmC/tVm0jzSHMtCMHOivjzyVJwXMf7Xm3CnvX/z7dV
+zhihUQKBgHWtWfNk/svgLp6w8T6aMgyAb9ud5pX/CbNZhGNRqhPhJkss1tFr6/Mv
+bJiZoEP3C0sDdA1JRuMkXm5EE60xyhzCNmv5H0cQ3C2Y9Q9ly89ggwIXNiNfKWpP
+VrdvXQ3NkP/RaDy83B9dN2Jb6lUpcNQnB5Q5yAlsYaYgsGBedcvc
+-----END RSA PRIVATE KEY-----
diff --git a/t/config-center-yaml/route-service.t b/t/config-center-yaml/route-service.t
index 2e85fc6cc96a..7f11fcedc6d8 100644
--- a/t/config-center-yaml/route-service.t
+++ b/t/config-center-yaml/route-service.t
@@ -32,7 +32,6 @@ sub read_file($) {
our $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/config_center: etcd/config_center: yaml/;
$yaml_config =~ s/enable_admin: true/enable_admin: false/;
diff --git a/t/config-center-yaml/route-upstream.t b/t/config-center-yaml/route-upstream.t
index d8d44a7c6794..4d3868b45292 100644
--- a/t/config-center-yaml/route-upstream.t
+++ b/t/config-center-yaml/route-upstream.t
@@ -32,7 +32,6 @@ sub read_file($) {
our $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/config_center: etcd/config_center: yaml/;
$yaml_config =~ s/enable_admin: true/enable_admin: false/;
@@ -139,3 +138,25 @@ GET /hello
hello world
--- no_error_log
[error]
+
+
+
+=== TEST 5: upstream domain
+--- yaml_config eval: $::yaml_config
+--- apisix_yaml
+routes:
+ -
+ uri: /get
+ upstream_id: 1
+upstreams:
+ -
+ id: 1
+ nodes:
+ "httpbin.org:80": 1
+ type: roundrobin
+#END
+--- request
+GET /get
+--- error_code: 200
+--- no_error_log
+[error]
diff --git a/t/config-center-yaml/route.t b/t/config-center-yaml/route.t
index 5af375367d49..4e207fc6e5eb 100644
--- a/t/config-center-yaml/route.t
+++ b/t/config-center-yaml/route.t
@@ -32,7 +32,6 @@ sub read_file($) {
our $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/config_center: etcd/config_center: yaml/;
$yaml_config =~ s/enable_admin: true/enable_admin: false/;
diff --git a/t/core/etcd-auth-fail.t b/t/core/etcd-auth-fail.t
new file mode 100644
index 000000000000..dfeaffee178f
--- /dev/null
+++ b/t/core/etcd-auth-fail.t
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BEGIN {
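+    # keep the credentials out of the APISIX config, so its etcd requests go out unauthenticated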
+ $ENV{"ETCD_ENABLE_AUTH"} = "false"
+}
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+log_level("info");
+
+# Authentication is enabled at etcd and credentials are set
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY user add root:5tHkHhYkjr6cQY');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth enable');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role revoke --path "/*" -rw guest');
+
+run_tests;
+
+# Authentication is disabled at etcd and guest access is granted
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth disable');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role grant --path "/*" -rw guest');
+
+
+__DATA__
+
+=== TEST 1: Set a value fails without credentials
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local key = "/test_key"
+ local val = "test_value"
+ local res, err = core.etcd.set(key, val)
+ ngx.say(err)
+ }
+ }
+--- request
+GET /t
+--- response_body
+insufficient credentials code: 401
diff --git a/t/core/etcd-auth.t b/t/core/etcd-auth.t
new file mode 100644
index 000000000000..3051a68ffbde
--- /dev/null
+++ b/t/core/etcd-auth.t
@@ -0,0 +1,59 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BEGIN {
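+    # put the credentials into the APISIX config, so its etcd requests are authenticated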
+ $ENV{"ETCD_ENABLE_AUTH"} = "true"
+}
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+log_level("info");
+
+# Authentication is enabled at etcd and credentials are set
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY user add root:5tHkHhYkjr6cQY');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth enable');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role revoke --path "/*" -rw guest');
+
+run_tests;
+
+# Authentication is disabled at etcd and guest access is granted
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth disable');
+system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role grant --path "/*" -rw guest');
+
+__DATA__
+
+=== TEST 1: Set and get a value with authentication
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local key = "/test_key"
+ local val = "test_value"
+ core.etcd.set(key, val)
+ local res, err = core.etcd.get(key)
+ ngx.say(res.body.node.value)
+            core.etcd.delete(key)
+ }
+ }
+--- request
+GET /t
+--- response_body
+test_value
+--- no_error_log
+[error]
diff --git a/t/core/etcd-sync.t b/t/core/etcd-sync.t
new file mode 100644
index 000000000000..ef0ea57cdf5b
--- /dev/null
+++ b/t/core/etcd-sync.t
@@ -0,0 +1,96 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+no_root_location();
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: auto update prev_index when other keys are updated
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+
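+            -- watch /consumers with a short sync timeout so prev_index refreshes quickly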
+ local consumers, _ = core.config.new("/consumers", {
+ automatic = true,
+ item_schema = core.schema.consumer,
+ timeout = 0.2
+ })
+
+ ngx.sleep(0.6)
+
+ local idx = consumers.prev_index
+ local key = "/test_key"
+ local val = "test_value"
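+            -- etcd's modified index is cluster-wide, so writing an unrelated key advances it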
+ core.etcd.set(key, val)
+
+ ngx.sleep(2)
+
+ local new_idx = consumers.prev_index
+
+ if new_idx > idx then
+ ngx.say("prev_index updated")
+ end
+ }
+ }
+--- request
+GET /t
+--- response_body
+prev_index updated
+--- no_error_log
+[error]
+
+
+
+=== TEST 2: using default timeout
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+
+ local consumers, _ = core.config.new("/consumers", {
+ automatic = true,
+ item_schema = core.schema.consumer
+ })
+
+ ngx.sleep(0.6)
+ local idx = consumers.prev_index
+
+ local key = "/test_key"
+ local val = "test_value"
+ core.etcd.set(key, val)
+
+ ngx.sleep(2)
+
+ local new_idx = consumers.prev_index
+
+ if new_idx > idx then
+ ngx.say("prev_index updated")
+ else
+                ngx.say("prev_index not updated")
+ end
+ }
+ }
+--- request
+GET /t
+--- response_body
+prev_index not updated
+--- no_error_log
+[error]
diff --git a/t/core/lrucache.t b/t/core/lrucache.t
index 5a9aefe08d7c..83b930869672 100644
--- a/t/core/lrucache.t
+++ b/t/core/lrucache.t
@@ -258,7 +258,7 @@ obj: {"idx":2,"_cache_ver":"ver"}
end
local lru_get = core.lrucache.new({
- ttl = 0.1, count = 256, invalid_stale = true,
+ ttl = 1, count = 256, invalid_stale = true,
})
local function f()
diff --git a/t/core/request.t b/t/core/request.t
index 2fe9cd53c394..5f6fd7ee698e 100644
--- a/t/core/request.t
+++ b/t/core/request.t
@@ -206,3 +206,151 @@ X-Forwarded-For: 10.0.0.1
10.0.0.1
--- no_error_log
[error]
+
+
+
+=== TEST 6: get_host
+--- config
+ location = /hello {
+ real_ip_header X-Real-IP;
+
+ set_real_ip_from 0.0.0.0/0;
+ set_real_ip_from ::/0;
+ set_real_ip_from unix:;
+
+ access_by_lua_block {
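+            -- mimic APISIX's access phase: build api_ctx so core.request helpers can read vars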
+ local core = require("apisix.core")
+ local ngx_ctx = ngx.ctx
+ local api_ctx = ngx_ctx.api_ctx
+ if api_ctx == nil then
+ api_ctx = core.tablepool.fetch("api_ctx", 0, 32)
+ ngx_ctx.api_ctx = api_ctx
+ end
+
+ core.ctx.set_vars_meta(api_ctx)
+ }
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local host = core.request.get_host(ngx.ctx.api_ctx)
+ ngx.say(host)
+ }
+ }
+--- request
+GET /hello
+--- more_headers
+X-Real-IP: 10.0.0.1
+--- response_body
+localhost
+--- no_error_log
+[error]
+
+
+
+=== TEST 7: get_scheme
+--- config
+ location = /hello {
+ real_ip_header X-Real-IP;
+
+ set_real_ip_from 0.0.0.0/0;
+ set_real_ip_from ::/0;
+ set_real_ip_from unix:;
+
+ access_by_lua_block {
+ local core = require("apisix.core")
+ local ngx_ctx = ngx.ctx
+ local api_ctx = ngx_ctx.api_ctx
+ if api_ctx == nil then
+ api_ctx = core.tablepool.fetch("api_ctx", 0, 32)
+ ngx_ctx.api_ctx = api_ctx
+ end
+
+ core.ctx.set_vars_meta(api_ctx)
+ }
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local scheme = core.request.get_scheme(ngx.ctx.api_ctx)
+ ngx.say(scheme)
+ }
+ }
+--- request
+GET /hello
+--- more_headers
+X-Real-IP: 10.0.0.1
+--- response_body
+http
+--- no_error_log
+[error]
+
+
+
+=== TEST 8: get_port
+--- config
+ location = /hello {
+ real_ip_header X-Real-IP;
+
+ set_real_ip_from 0.0.0.0/0;
+ set_real_ip_from ::/0;
+ set_real_ip_from unix:;
+
+ access_by_lua_block {
+ local core = require("apisix.core")
+ local ngx_ctx = ngx.ctx
+ local api_ctx = ngx_ctx.api_ctx
+ if api_ctx == nil then
+ api_ctx = core.tablepool.fetch("api_ctx", 0, 32)
+ ngx_ctx.api_ctx = api_ctx
+ end
+
+ core.ctx.set_vars_meta(api_ctx)
+ }
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local port = core.request.get_port(ngx.ctx.api_ctx)
+ ngx.say(port)
+ }
+ }
+--- request
+GET /hello
+--- more_headers
+X-Real-IP: 10.0.0.1
+--- response_body
+1984
+--- no_error_log
+[error]
+
+
+
+=== TEST 9: get_http_version
+--- config
+ location = /hello {
+ real_ip_header X-Real-IP;
+
+ set_real_ip_from 0.0.0.0/0;
+ set_real_ip_from ::/0;
+ set_real_ip_from unix:;
+
+ access_by_lua_block {
+ local core = require("apisix.core")
+ local ngx_ctx = ngx.ctx
+ local api_ctx = ngx_ctx.api_ctx
+ if api_ctx == nil then
+ api_ctx = core.tablepool.fetch("api_ctx", 0, 32)
+ ngx_ctx.api_ctx = api_ctx
+ end
+
+ core.ctx.set_vars_meta(api_ctx)
+ }
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local http_version = core.request.get_http_version()
+ ngx.say(http_version)
+ }
+ }
+--- request
+GET /hello
+--- more_headers
+X-Real-IP: 10.0.0.1
+--- response_body
+1.1
+--- no_error_log
+[error]
diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t
index 0acdb8244347..0b81bada4d35 100644
--- a/t/debug/debug-mode.t
+++ b/t/debug/debug-mode.t
@@ -31,10 +31,8 @@ sub read_file($) {
our $yaml_config = read_file("conf/config.yaml");
$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
-$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/;
$yaml_config =~ s/enable_debug: false/enable_debug: true/;
-
run_tests;
__DATA__
@@ -57,13 +55,18 @@ qr/loaded plugin and sort by priority: [-\d]+ name: [\w-]+/
--- grep_error_log_out
loaded plugin and sort by priority: 11000 name: fault-injection
loaded plugin and sort by priority: 10000 name: serverless-pre-function
+loaded plugin and sort by priority: 4010 name: batch-requests
loaded plugin and sort by priority: 4000 name: cors
loaded plugin and sort by priority: 3000 name: ip-restriction
+loaded plugin and sort by priority: 2900 name: uri-blocker
+loaded plugin and sort by priority: 2800 name: request-validation
loaded plugin and sort by priority: 2599 name: openid-connect
loaded plugin and sort by priority: 2555 name: wolf-rbac
loaded plugin and sort by priority: 2520 name: basic-auth
loaded plugin and sort by priority: 2510 name: jwt-auth
loaded plugin and sort by priority: 2500 name: key-auth
+loaded plugin and sort by priority: 2400 name: consumer-restriction
+loaded plugin and sort by priority: 2000 name: authz-keycloak
loaded plugin and sort by priority: 1010 name: proxy-mirror
loaded plugin and sort by priority: 1009 name: proxy-cache
loaded plugin and sort by priority: 1008 name: proxy-rewrite
@@ -75,16 +78,19 @@ loaded plugin and sort by priority: 900 name: redirect
loaded plugin and sort by priority: 899 name: response-rewrite
loaded plugin and sort by priority: 506 name: grpc-transcode
loaded plugin and sort by priority: 500 name: prometheus
+loaded plugin and sort by priority: 412 name: echo
+loaded plugin and sort by priority: 410 name: http-logger
loaded plugin and sort by priority: 405 name: tcp-logger
loaded plugin and sort by priority: 403 name: kafka-logger
+loaded plugin and sort by priority: 401 name: syslog
loaded plugin and sort by priority: 400 name: udp-logger
loaded plugin and sort by priority: 0 name: example-plugin
loaded plugin and sort by priority: -1000 name: zipkin
+loaded plugin and sort by priority: -1100 name: skywalking
loaded plugin and sort by priority: -2000 name: serverless-post-function
-
=== TEST 2: set route(no plugin)
--- config
location /t {
diff --git a/t/discovery/eureka.t b/t/discovery/eureka.t
new file mode 100644
index 000000000000..d39178ca2d2a
--- /dev/null
+++ b/t/discovery/eureka.t
@@ -0,0 +1,130 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+log_level('info');
+no_root_location();
+no_shuffle();
+
+sub read_file($) {
+ my $infile = shift;
+ open my $in, $infile
+ or die "cannot open $infile for reading: $!";
+ my $cert = do { local $/; <$in> };
+ close $in;
+ $cert;
+}
+
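+# Rewrite conf/config.yaml for this test run: listen on the scaffold port,
+# load routes from the in-test yaml instead of etcd, disable the admin API,
+# enable the eureka discovery module (whether or not the `discovery:` key
+# is commented out in the shipped config), and log at info level.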
+our $yaml_config = read_file("conf/config.yaml");
+$yaml_config =~ s/node_listen: 9080/node_listen: 1984/;
+$yaml_config =~ s/config_center: etcd/config_center: yaml/;
+$yaml_config =~ s/enable_admin: true/enable_admin: false/;
+$yaml_config =~ s/ discovery:/ discovery: eureka #/;
+$yaml_config =~ s/# discovery:/ discovery: eureka #/;
+$yaml_config =~ s/error_log_level: "warn"/error_log_level: "info"/;
+
+
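+# Append the eureka client settings: registry address, path prefix, fetch
+# interval, default instance weight and connect/send/read timeouts.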
+$yaml_config .= <<_EOC_;
+eureka:
+ host:
+ - "http://127.0.0.1:8761"
+ prefix: "/eureka/"
+ fetch_interval: 10
+ weight: 80
+ timeout:
+ connect: 1500
+ send: 1500
+ read: 1500
+_EOC_
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: get APISIX-EUREKA info from EUREKA
+--- yaml_config eval: $::yaml_config
+--- apisix_yaml
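+# No static nodes here: service_name makes the upstream resolve its nodes
+# through the eureka discovery module.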
+routes:
+ -
+ uri: /eureka/*
+ upstream:
+ service_name: APISIX-EUREKA
+ type: roundrobin
+
+#END
+--- request
+GET /eureka/apps/APISIX-EUREKA
+--- response_body_like
+.*